id
int64
0
3.29k
file_name
stringlengths
4
37
programming_language
stringclasses
2 values
method_name
stringlengths
3
112
code_before
stringlengths
701
809k
code_after
stringlengths
701
809k
func_before
stringlengths
40
60.4k
func_after
stringlengths
43
61.2k
diff
stringlengths
67
133k
num_lines_added
int64
1
1.49k
num_lines_deleted
int64
1
1.13k
num_lines_in_file
float64
23
18.6k
num_tokens_in_file
float64
129
172k
num_lines_in_method
int64
1
259
num_tokens_in_method
int64
10
1.29k
method_complexity
int64
1
110
repo
stringclasses
267 values
cve_id
stringlengths
13
16
cwe_id
stringclasses
8 values
2,533
tif_pixarlog.c
C
horizontalDifference8
/* $Id$ */ /* * Copyright (c) 1996-1997 Sam Leffler * Copyright (c) 1996 Pixar * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Pixar, Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Pixar, Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL PIXAR, SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include "tiffiop.h" #ifdef PIXARLOG_SUPPORT /* * TIFF Library. * PixarLog Compression Support * * Contributed by Dan McCoy. * * PixarLog film support uses the TIFF library to store companded * 11 bit values into a tiff file, which are compressed using the * zip compressor. * * The codec can take as input and produce as output 32-bit IEEE float values * as well as 16-bit or 8-bit unsigned integer values. * * On writing any of the above are converted into the internal * 11-bit log format. In the case of 8 and 16 bit values, the * input is assumed to be unsigned linear color values that represent * the range 0-1. 
In the case of IEEE values, the 0-1 range is assumed to * be the normal linear color range, in addition over 1 values are * accepted up to a value of about 25.0 to encode "hot" highlights and such. * The encoding is lossless for 8-bit values, slightly lossy for the * other bit depths. The actual color precision should be better * than the human eye can perceive with extra room to allow for * error introduced by further image computation. As with any quantized * color format, it is possible to perform image calculations which * expose the quantization error. This format should certainly be less * susceptible to such errors than standard 8-bit encodings, but more * susceptible than straight 16-bit or 32-bit encodings. * * On reading the internal format is converted to the desired output format. * The program can request which format it desires by setting the internal * pseudo tag TIFFTAG_PIXARLOGDATAFMT to one of these possible values: * PIXARLOGDATAFMT_FLOAT = provide IEEE float values. * PIXARLOGDATAFMT_16BIT = provide unsigned 16-bit integer values * PIXARLOGDATAFMT_8BIT = provide unsigned 8-bit integer values * * alternately PIXARLOGDATAFMT_8BITABGR provides unsigned 8-bit integer * values with the difference that if there are exactly three or four channels * (rgb or rgba) it swaps the channel order (bgr or abgr). * * PIXARLOGDATAFMT_11BITLOG provides the internal encoding directly * packed in 16-bit values. However no tools are supplied for interpreting * these values. * * "hot" (over 1.0) areas written in floating point get clamped to * 1.0 in the integer data types. * * When the file is closed after writing, the bit depth and sample format * are set always to appear as if 8-bit data has been written into it. * That way a naive program unaware of the particulars of the encoding * gets the format it is most likely able to handle. * * The codec does it's own horizontal differencing step on the coded * values so the libraries predictor stuff should be turned off. 
* The codec also handle byte swapping the encoded values as necessary * since the library does not have the information necessary * to know the bit depth of the raw unencoded buffer. * * NOTE: This decoder does not appear to update tif_rawcp, and tif_rawcc. * This can cause problems with the implementation of CHUNKY_STRIP_READ_SUPPORT * as noted in http://trac.osgeo.org/gdal/ticket/3894. FrankW - Jan'11 */ #include "tif_predict.h" #include "zlib.h" #include <stdio.h> #include <stdlib.h> #include <math.h> /* Tables for converting to/from 11 bit coded values */ #define TSIZE 2048 /* decode table size (11-bit tokens) */ #define TSIZEP1 2049 /* Plus one for slop */ #define ONE 1250 /* token value of 1.0 exactly */ #define RATIO 1.004 /* nominal ratio for log part */ #define CODE_MASK 0x7ff /* 11 bits. */ static float Fltsize; static float LogK1, LogK2; #define REPEAT(n, op) { int i; i=n; do { i--; op; } while (i>0); } static void horizontalAccumulateF(uint16 *wp, int n, int stride, float *op, float *ToLinearF) { register unsigned int cr, cg, cb, ca, mask; register float t0, t1, t2, t3; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { t0 = ToLinearF[cr = (wp[0] & mask)]; t1 = ToLinearF[cg = (wp[1] & mask)]; t2 = ToLinearF[cb = (wp[2] & mask)]; op[0] = t0; op[1] = t1; op[2] = t2; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; t0 = ToLinearF[(cr += wp[0]) & mask]; t1 = ToLinearF[(cg += wp[1]) & mask]; t2 = ToLinearF[(cb += wp[2]) & mask]; op[0] = t0; op[1] = t1; op[2] = t2; } } else if (stride == 4) { t0 = ToLinearF[cr = (wp[0] & mask)]; t1 = ToLinearF[cg = (wp[1] & mask)]; t2 = ToLinearF[cb = (wp[2] & mask)]; t3 = ToLinearF[ca = (wp[3] & mask)]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; t0 = ToLinearF[(cr += wp[0]) & mask]; t1 = ToLinearF[(cg += wp[1]) & mask]; t2 = ToLinearF[(cb += wp[2]) & mask]; t3 = ToLinearF[(ca += wp[3]) & mask]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; } } else { 
REPEAT(stride, *op = ToLinearF[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinearF[*wp&mask]; wp++; op++) n -= stride; } } } } static void horizontalAccumulate12(uint16 *wp, int n, int stride, int16 *op, float *ToLinearF) { register unsigned int cr, cg, cb, ca, mask; register float t0, t1, t2, t3; #define SCALE12 2048.0F #define CLAMP12(t) (((t) < 3071) ? (uint16) (t) : 3071) if (n >= stride) { mask = CODE_MASK; if (stride == 3) { t0 = ToLinearF[cr = (wp[0] & mask)] * SCALE12; t1 = ToLinearF[cg = (wp[1] & mask)] * SCALE12; t2 = ToLinearF[cb = (wp[2] & mask)] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; t0 = ToLinearF[(cr += wp[0]) & mask] * SCALE12; t1 = ToLinearF[(cg += wp[1]) & mask] * SCALE12; t2 = ToLinearF[(cb += wp[2]) & mask] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); } } else if (stride == 4) { t0 = ToLinearF[cr = (wp[0] & mask)] * SCALE12; t1 = ToLinearF[cg = (wp[1] & mask)] * SCALE12; t2 = ToLinearF[cb = (wp[2] & mask)] * SCALE12; t3 = ToLinearF[ca = (wp[3] & mask)] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); op[3] = CLAMP12(t3); n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; t0 = ToLinearF[(cr += wp[0]) & mask] * SCALE12; t1 = ToLinearF[(cg += wp[1]) & mask] * SCALE12; t2 = ToLinearF[(cb += wp[2]) & mask] * SCALE12; t3 = ToLinearF[(ca += wp[3]) & mask] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); op[3] = CLAMP12(t3); } } else { REPEAT(stride, t0 = ToLinearF[*wp&mask] * SCALE12; *op = CLAMP12(t0); wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; t0 = ToLinearF[wp[stride]&mask]*SCALE12; *op = CLAMP12(t0); wp++; op++) n -= stride; } } } } static void horizontalAccumulate16(uint16 *wp, int n, int stride, uint16 *op, uint16 *ToLinear16) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = 
CODE_MASK; if (stride == 3) { op[0] = ToLinear16[cr = (wp[0] & mask)]; op[1] = ToLinear16[cg = (wp[1] & mask)]; op[2] = ToLinear16[cb = (wp[2] & mask)]; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; op[0] = ToLinear16[(cr += wp[0]) & mask]; op[1] = ToLinear16[(cg += wp[1]) & mask]; op[2] = ToLinear16[(cb += wp[2]) & mask]; } } else if (stride == 4) { op[0] = ToLinear16[cr = (wp[0] & mask)]; op[1] = ToLinear16[cg = (wp[1] & mask)]; op[2] = ToLinear16[cb = (wp[2] & mask)]; op[3] = ToLinear16[ca = (wp[3] & mask)]; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; op[0] = ToLinear16[(cr += wp[0]) & mask]; op[1] = ToLinear16[(cg += wp[1]) & mask]; op[2] = ToLinear16[(cb += wp[2]) & mask]; op[3] = ToLinear16[(ca += wp[3]) & mask]; } } else { REPEAT(stride, *op = ToLinear16[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinear16[*wp&mask]; wp++; op++) n -= stride; } } } } /* * Returns the log encoded 11-bit values with the horizontal * differencing undone. 
*/ static void horizontalAccumulate11(uint16 *wp, int n, int stride, uint16 *op) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = wp[0]; op[1] = wp[1]; op[2] = wp[2]; cr = wp[0]; cg = wp[1]; cb = wp[2]; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; op[0] = (uint16)((cr += wp[0]) & mask); op[1] = (uint16)((cg += wp[1]) & mask); op[2] = (uint16)((cb += wp[2]) & mask); } } else if (stride == 4) { op[0] = wp[0]; op[1] = wp[1]; op[2] = wp[2]; op[3] = wp[3]; cr = wp[0]; cg = wp[1]; cb = wp[2]; ca = wp[3]; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; op[0] = (uint16)((cr += wp[0]) & mask); op[1] = (uint16)((cg += wp[1]) & mask); op[2] = (uint16)((cb += wp[2]) & mask); op[3] = (uint16)((ca += wp[3]) & mask); } } else { REPEAT(stride, *op = *wp&mask; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = *wp&mask; wp++; op++) n -= stride; } } } } static void horizontalAccumulate8(uint16 *wp, int n, int stride, unsigned char *op, unsigned char *ToLinear8) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = ToLinear8[cr = (wp[0] & mask)]; op[1] = ToLinear8[cg = (wp[1] & mask)]; op[2] = ToLinear8[cb = (wp[2] & mask)]; n -= 3; while (n > 0) { n -= 3; wp += 3; op += 3; op[0] = ToLinear8[(cr += wp[0]) & mask]; op[1] = ToLinear8[(cg += wp[1]) & mask]; op[2] = ToLinear8[(cb += wp[2]) & mask]; } } else if (stride == 4) { op[0] = ToLinear8[cr = (wp[0] & mask)]; op[1] = ToLinear8[cg = (wp[1] & mask)]; op[2] = ToLinear8[cb = (wp[2] & mask)]; op[3] = ToLinear8[ca = (wp[3] & mask)]; n -= 4; while (n > 0) { n -= 4; wp += 4; op += 4; op[0] = ToLinear8[(cr += wp[0]) & mask]; op[1] = ToLinear8[(cg += wp[1]) & mask]; op[2] = ToLinear8[(cb += wp[2]) & mask]; op[3] = ToLinear8[(ca += wp[3]) & mask]; } } else { REPEAT(stride, *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = 
ToLinear8[*wp&mask]; wp++; op++) n -= stride; } } } } static void horizontalAccumulate8abgr(uint16 *wp, int n, int stride, unsigned char *op, unsigned char *ToLinear8) { register unsigned int cr, cg, cb, ca, mask; register unsigned char t0, t1, t2, t3; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = 0; t1 = ToLinear8[cb = (wp[2] & mask)]; t2 = ToLinear8[cg = (wp[1] & mask)]; t3 = ToLinear8[cr = (wp[0] & mask)]; op[1] = t1; op[2] = t2; op[3] = t3; n -= 3; while (n > 0) { n -= 3; wp += 3; op += 4; op[0] = 0; t1 = ToLinear8[(cb += wp[2]) & mask]; t2 = ToLinear8[(cg += wp[1]) & mask]; t3 = ToLinear8[(cr += wp[0]) & mask]; op[1] = t1; op[2] = t2; op[3] = t3; } } else if (stride == 4) { t0 = ToLinear8[ca = (wp[3] & mask)]; t1 = ToLinear8[cb = (wp[2] & mask)]; t2 = ToLinear8[cg = (wp[1] & mask)]; t3 = ToLinear8[cr = (wp[0] & mask)]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; n -= 4; while (n > 0) { n -= 4; wp += 4; op += 4; t0 = ToLinear8[(ca += wp[3]) & mask]; t1 = ToLinear8[(cb += wp[2]) & mask]; t2 = ToLinear8[(cg += wp[1]) & mask]; t3 = ToLinear8[(cr += wp[0]) & mask]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; } } else { REPEAT(stride, *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; } } } } /* * State block for each open TIFF * file using PixarLog compression/decompression. 
*/ typedef struct { TIFFPredictorState predict; z_stream stream; tmsize_t tbuf_size; /* only set/used on reading for now */ uint16 *tbuf; uint16 stride; int state; int user_datafmt; int quality; #define PLSTATE_INIT 1 TIFFVSetMethod vgetparent; /* super-class method */ TIFFVSetMethod vsetparent; /* super-class method */ float *ToLinearF; uint16 *ToLinear16; unsigned char *ToLinear8; uint16 *FromLT2; uint16 *From14; /* Really for 16-bit data, but we shift down 2 */ uint16 *From8; } PixarLogState; static int PixarLogMakeTables(PixarLogState *sp) { /* * We make several tables here to convert between various external * representations (float, 16-bit, and 8-bit) and the internal * 11-bit companded representation. The 11-bit representation has two * distinct regions. A linear bottom end up through .018316 in steps * of about .000073, and a region of constant ratio up to about 25. * These floating point numbers are stored in the main table ToLinearF. * All other tables are derived from this one. The tables (and the * ratios) are continuous at the internal seam. 
*/ int nlin, lt2size; int i, j; double b, c, linstep, v; float *ToLinearF; uint16 *ToLinear16; unsigned char *ToLinear8; uint16 *FromLT2; uint16 *From14; /* Really for 16-bit data, but we shift down 2 */ uint16 *From8; c = log(RATIO); nlin = (int)(1./c); /* nlin must be an integer */ c = 1./nlin; b = exp(-c*ONE); /* multiplicative scale factor [b*exp(c*ONE) = 1] */ linstep = b*c*exp(1.); LogK1 = (float)(1./c); /* if (v >= 2) token = k1*log(v*k2) */ LogK2 = (float)(1./b); lt2size = (int)(2./linstep) + 1; FromLT2 = (uint16 *)_TIFFmalloc(lt2size*sizeof(uint16)); From14 = (uint16 *)_TIFFmalloc(16384*sizeof(uint16)); From8 = (uint16 *)_TIFFmalloc(256*sizeof(uint16)); ToLinearF = (float *)_TIFFmalloc(TSIZEP1 * sizeof(float)); ToLinear16 = (uint16 *)_TIFFmalloc(TSIZEP1 * sizeof(uint16)); ToLinear8 = (unsigned char *)_TIFFmalloc(TSIZEP1 * sizeof(unsigned char)); if (FromLT2 == NULL || From14 == NULL || From8 == NULL || ToLinearF == NULL || ToLinear16 == NULL || ToLinear8 == NULL) { if (FromLT2) _TIFFfree(FromLT2); if (From14) _TIFFfree(From14); if (From8) _TIFFfree(From8); if (ToLinearF) _TIFFfree(ToLinearF); if (ToLinear16) _TIFFfree(ToLinear16); if (ToLinear8) _TIFFfree(ToLinear8); sp->FromLT2 = NULL; sp->From14 = NULL; sp->From8 = NULL; sp->ToLinearF = NULL; sp->ToLinear16 = NULL; sp->ToLinear8 = NULL; return 0; } j = 0; for (i = 0; i < nlin; i++) { v = i * linstep; ToLinearF[j++] = (float)v; } for (i = nlin; i < TSIZE; i++) ToLinearF[j++] = (float)(b*exp(c*i)); ToLinearF[2048] = ToLinearF[2047]; for (i = 0; i < TSIZEP1; i++) { v = ToLinearF[i]*65535.0 + 0.5; ToLinear16[i] = (v > 65535.0) ? 65535 : (uint16)v; v = ToLinearF[i]*255.0 + 0.5; ToLinear8[i] = (v > 255.0) ? 255 : (unsigned char)v; } j = 0; for (i = 0; i < lt2size; i++) { if ((i*linstep)*(i*linstep) > ToLinearF[j]*ToLinearF[j+1]) j++; FromLT2[i] = (uint16)j; } /* * Since we lose info anyway on 16-bit data, we set up a 14-bit * table and shift 16-bit values down two bits on input. * saves a little table space. 
*/ j = 0; for (i = 0; i < 16384; i++) { while ((i/16383.)*(i/16383.) > ToLinearF[j]*ToLinearF[j+1]) j++; From14[i] = (uint16)j; } j = 0; for (i = 0; i < 256; i++) { while ((i/255.)*(i/255.) > ToLinearF[j]*ToLinearF[j+1]) j++; From8[i] = (uint16)j; } Fltsize = (float)(lt2size/2); sp->ToLinearF = ToLinearF; sp->ToLinear16 = ToLinear16; sp->ToLinear8 = ToLinear8; sp->FromLT2 = FromLT2; sp->From14 = From14; sp->From8 = From8; return 1; } #define DecoderState(tif) ((PixarLogState*) (tif)->tif_data) #define EncoderState(tif) ((PixarLogState*) (tif)->tif_data) static int PixarLogEncode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s); static int PixarLogDecode(TIFF* tif, uint8* op, tmsize_t occ, uint16 s); #define PIXARLOGDATAFMT_UNKNOWN -1 static int PixarLogGuessDataFmt(TIFFDirectory *td) { int guess = PIXARLOGDATAFMT_UNKNOWN; int format = td->td_sampleformat; /* If the user didn't tell us his datafmt, * take our best guess from the bitspersample. */ switch (td->td_bitspersample) { case 32: if (format == SAMPLEFORMAT_IEEEFP) guess = PIXARLOGDATAFMT_FLOAT; break; case 16: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_16BIT; break; case 12: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_INT) guess = PIXARLOGDATAFMT_12BITPICIO; break; case 11: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_11BITLOG; break; case 8: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_8BIT; break; } return guess; } static tmsize_t multiply_ms(tmsize_t m1, tmsize_t m2) { tmsize_t bytes = m1 * m2; if (m1 && bytes / m1 != m2) bytes = 0; return bytes; } static tmsize_t add_ms(tmsize_t m1, tmsize_t m2) { tmsize_t bytes = m1 + m2; /* if either input is zero, assume overflow already occurred */ if (m1 == 0 || m2 == 0) bytes = 0; else if (bytes <= m1 || bytes <= m2) bytes = 0; return bytes; } static int PixarLogFixupTags(TIFF* tif) { (void) tif; return (1); } static int 
PixarLogSetupDecode(TIFF* tif) { static const char module[] = "PixarLogSetupDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* Make sure no byte swapping happens on the data * after decompression. */ tif->tif_postdecode = _TIFFNoPostDecode; /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); /* add one more stride in case input ends mid-stride */ tbuf_size = add_ms(tbuf_size, sizeof(uint16) * sp->stride); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); sp->tbuf_size = tbuf_size; if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle bits depth/data format combination (depth: %d)", td->td_bitspersample); return (0); } if (inflateInit(&sp->stream) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } } /* * Setup state for decoding a strip. 
*/ static int PixarLogPreDecode(TIFF* tif, uint16 s) { static const char module[] = "PixarLogPreDecode"; PixarLogState* sp = DecoderState(tif); (void) s; assert(sp != NULL); sp->stream.next_in = tif->tif_rawdata; assert(sizeof(sp->stream.avail_in)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_in = (uInt) tif->tif_rawcc; if ((tmsize_t)sp->stream.avail_in != tif->tif_rawcc) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } return (inflateReset(&sp->stream) == Z_OK); } static int PixarLogDecode(TIFF* tif, uint8* op, tmsize_t occ, uint16 s) { static const char module[] = "PixarLogDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t i; tmsize_t nsamples; int llen; uint16 *up; switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: nsamples = occ / sizeof(float); /* XXX float == 32 bits */ break; case PIXARLOGDATAFMT_16BIT: case PIXARLOGDATAFMT_12BITPICIO: case PIXARLOGDATAFMT_11BITLOG: nsamples = occ / sizeof(uint16); /* XXX uint16 == 16 bits */ break; case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: nsamples = occ; break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } llen = sp->stride * td->td_imagewidth; (void) s; assert(sp != NULL); sp->stream.next_out = (unsigned char *) sp->tbuf; assert(sizeof(sp->stream.avail_out)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_out = (uInt) (nsamples * sizeof(uint16)); if (sp->stream.avail_out != nsamples * sizeof(uint16)) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with 
buffers this size"); return (0); } /* Check that we will not fill more than what was allocated */ if ((tmsize_t)sp->stream.avail_out > sp->tbuf_size) { TIFFErrorExt(tif->tif_clientdata, module, "sp->stream.avail_out > sp->tbuf_size"); return (0); } do { int state = inflate(&sp->stream, Z_PARTIAL_FLUSH); if (state == Z_STREAM_END) { break; /* XXX */ } if (state == Z_DATA_ERROR) { TIFFErrorExt(tif->tif_clientdata, module, "Decoding error at scanline %lu, %s", (unsigned long) tif->tif_row, sp->stream.msg ? sp->stream.msg : "(null)"); if (inflateSync(&sp->stream) != Z_OK) return (0); continue; } if (state != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } while (sp->stream.avail_out > 0); /* hopefully, we got all the bytes we needed */ if (sp->stream.avail_out != 0) { TIFFErrorExt(tif->tif_clientdata, module, "Not enough data at scanline %lu (short " TIFF_UINT64_FORMAT " bytes)", (unsigned long) tif->tif_row, (TIFF_UINT64_T) sp->stream.avail_out); return (0); } up = sp->tbuf; /* Swap bytes in the data if from a different endian machine. */ if (tif->tif_flags & TIFF_SWAB) TIFFSwabArrayOfShort(up, nsamples); /* * if llen is not an exact multiple of nsamples, the decode operation * may overflow the output buffer, so truncate it enough to prevent * that but still salvage as much data as possible. 
*/ if (nsamples % llen) { TIFFWarningExt(tif->tif_clientdata, module, "stride %lu is not a multiple of sample count, " "%lu, data truncated.", (unsigned long) llen, (unsigned long) nsamples); nsamples -= nsamples % llen; } for (i = 0; i < nsamples; i += llen, up += llen) { switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: horizontalAccumulateF(up, llen, sp->stride, (float *)op, sp->ToLinearF); op += llen * sizeof(float); break; case PIXARLOGDATAFMT_16BIT: horizontalAccumulate16(up, llen, sp->stride, (uint16 *)op, sp->ToLinear16); op += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_12BITPICIO: horizontalAccumulate12(up, llen, sp->stride, (int16 *)op, sp->ToLinearF); op += llen * sizeof(int16); break; case PIXARLOGDATAFMT_11BITLOG: horizontalAccumulate11(up, llen, sp->stride, (uint16 *)op); op += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_8BIT: horizontalAccumulate8(up, llen, sp->stride, (unsigned char *)op, sp->ToLinear8); op += llen * sizeof(unsigned char); break; case PIXARLOGDATAFMT_8BITABGR: horizontalAccumulate8abgr(up, llen, sp->stride, (unsigned char *)op, sp->ToLinear8); op += llen * sizeof(unsigned char); break; default: TIFFErrorExt(tif->tif_clientdata, module, "Unsupported bits/sample: %d", td->td_bitspersample); return (0); } } return (1); } static int PixarLogSetupEncode(TIFF* tif) { static const char module[] = "PixarLogSetupEncode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = EncoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? 
td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle %d bit linear encodings", td->td_bitspersample); return (0); } if (deflateInit(&sp->stream, sp->quality) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } } /* * Reset encoding state at the start of a strip. */ static int PixarLogPreEncode(TIFF* tif, uint16 s) { static const char module[] = "PixarLogPreEncode"; PixarLogState *sp = EncoderState(tif); (void) s; assert(sp != NULL); sp->stream.next_out = tif->tif_rawdata; assert(sizeof(sp->stream.avail_out)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_out = (uInt)tif->tif_rawdatasize; if ((tmsize_t)sp->stream.avail_out != tif->tif_rawdatasize) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } return (deflateReset(&sp->stream) == Z_OK); } static void horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2) { int32 r1, g1, b1, a1, r2, g2, b2, a2, mask; float fltsize = Fltsize; #define CLAMP(v) ( (v<(float)0.) ? 0 \ : (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \ : (v>(float)24.2) ? 
2047 \ : LogK1*log(v*LogK2) + 0.5 ) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); a2 = wp[3] = (uint16) CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { ip += n - 1; /* point to last one */ wp += n - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--) } } } static void horizontalDifference16(unsigned short *ip, int n, int stride, unsigned short *wp, uint16 *From14) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; /* assumption is unsigned pixel values */ #undef CLAMP #define CLAMP(v) From14[(v) >> 2] mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] 
= CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { ip += n - 1; /* point to last one */ wp += n - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } } static void horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { wp += n + stride - 1; /* point to last one */ ip += n + stride - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } } /* * Encode a chunk of pixels. 
*/ static int PixarLogEncode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { static const char module[] = "PixarLogEncode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState *sp = EncoderState(tif); tmsize_t i; tmsize_t n; int llen; unsigned short * up; (void) s; switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: n = cc / sizeof(float); /* XXX float == 32 bits */ break; case PIXARLOGDATAFMT_16BIT: case PIXARLOGDATAFMT_12BITPICIO: case PIXARLOGDATAFMT_11BITLOG: n = cc / sizeof(uint16); /* XXX uint16 == 16 bits */ break; case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: n = cc; break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } llen = sp->stride * td->td_imagewidth; /* Check against the number of elements (of size uint16) of sp->tbuf */ if( n > (tmsize_t)(td->td_rowsperstrip * llen) ) { TIFFErrorExt(tif->tif_clientdata, module, "Too many input bytes provided"); return 0; } for (i = 0, up = sp->tbuf; i < n; i += llen, up += llen) { switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: horizontalDifferenceF((float *)bp, llen, sp->stride, up, sp->FromLT2); bp += llen * sizeof(float); break; case PIXARLOGDATAFMT_16BIT: horizontalDifference16((uint16 *)bp, llen, sp->stride, up, sp->From14); bp += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_8BIT: horizontalDifference8((unsigned char *)bp, llen, sp->stride, up, sp->From8); bp += llen * sizeof(unsigned char); break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } } sp->stream.next_in = (unsigned char *) sp->tbuf; assert(sizeof(sp->stream.avail_in)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_in = (uInt) (n * sizeof(uint16)); if ((sp->stream.avail_in / 
sizeof(uint16)) != (uInt) n) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } do { if (deflate(&sp->stream, Z_NO_FLUSH) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "Encoder error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } if (sp->stream.avail_out == 0) { tif->tif_rawcc = tif->tif_rawdatasize; TIFFFlushData1(tif); sp->stream.next_out = tif->tif_rawdata; sp->stream.avail_out = (uInt) tif->tif_rawdatasize; /* this is a safe typecast, as check is made already in PixarLogPreEncode */ } } while (sp->stream.avail_in > 0); return (1); } /* * Finish off an encoded strip by flushing the last * string and tacking on an End Of Information code. */ static int PixarLogPostEncode(TIFF* tif) { static const char module[] = "PixarLogPostEncode"; PixarLogState *sp = EncoderState(tif); int state; sp->stream.avail_in = 0; do { state = deflate(&sp->stream, Z_FINISH); switch (state) { case Z_STREAM_END: case Z_OK: if ((tmsize_t)sp->stream.avail_out != tif->tif_rawdatasize) { tif->tif_rawcc = tif->tif_rawdatasize - sp->stream.avail_out; TIFFFlushData1(tif); sp->stream.next_out = tif->tif_rawdata; sp->stream.avail_out = (uInt) tif->tif_rawdatasize; /* this is a safe typecast, as check is made already in PixarLogPreEncode */ } break; default: TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } while (state != Z_STREAM_END); return (1); } static void PixarLogClose(TIFF* tif) { TIFFDirectory *td = &tif->tif_dir; /* In a really sneaky (and really incorrect, and untruthful, and * troublesome, and error-prone) maneuver that completely goes against * the spirit of TIFF, and breaks TIFF, on close, we covertly * modify both bitspersample and sampleformat in the directory to * indicate 8-bit linear. This way, the decode "just works" even for * readers that don't know about PixarLog, or how to set * the PIXARLOGDATFMT pseudo-tag. 
*/ td->td_bitspersample = 8; td->td_sampleformat = SAMPLEFORMAT_UINT; } static void PixarLogCleanup(TIFF* tif) { PixarLogState* sp = (PixarLogState*) tif->tif_data; assert(sp != 0); (void)TIFFPredictorCleanup(tif); tif->tif_tagmethods.vgetfield = sp->vgetparent; tif->tif_tagmethods.vsetfield = sp->vsetparent; if (sp->FromLT2) _TIFFfree(sp->FromLT2); if (sp->From14) _TIFFfree(sp->From14); if (sp->From8) _TIFFfree(sp->From8); if (sp->ToLinearF) _TIFFfree(sp->ToLinearF); if (sp->ToLinear16) _TIFFfree(sp->ToLinear16); if (sp->ToLinear8) _TIFFfree(sp->ToLinear8); if (sp->state&PLSTATE_INIT) { if (tif->tif_mode == O_RDONLY) inflateEnd(&sp->stream); else deflateEnd(&sp->stream); } if (sp->tbuf) _TIFFfree(sp->tbuf); _TIFFfree(sp); tif->tif_data = NULL; _TIFFSetDefaultCompressionState(tif); } static int PixarLogVSetField(TIFF* tif, uint32 tag, va_list ap) { static const char module[] = "PixarLogVSetField"; PixarLogState *sp = (PixarLogState *)tif->tif_data; int result; switch (tag) { case TIFFTAG_PIXARLOGQUALITY: sp->quality = (int) va_arg(ap, int); if (tif->tif_mode != O_RDONLY && (sp->state&PLSTATE_INIT)) { if (deflateParams(&sp->stream, sp->quality, Z_DEFAULT_STRATEGY) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } return (1); case TIFFTAG_PIXARLOGDATAFMT: sp->user_datafmt = (int) va_arg(ap, int); /* Tweak the TIFF header so that the rest of libtiff knows what * size of data will be passed between app and library, and * assume that the app knows what it is doing and is not * confused by these header manipulations... 
*/ switch (sp->user_datafmt) { case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_11BITLOG: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_12BITPICIO: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_INT); break; case PIXARLOGDATAFMT_16BIT: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_FLOAT: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 32); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_IEEEFP); break; } /* * Must recalculate sizes should bits/sample change. */ tif->tif_tilesize = isTiled(tif) ? TIFFTileSize(tif) : (tmsize_t)(-1); tif->tif_scanlinesize = TIFFScanlineSize(tif); result = 1; /* NB: pseudo tag */ break; default: result = (*sp->vsetparent)(tif, tag, ap); } return (result); } static int PixarLogVGetField(TIFF* tif, uint32 tag, va_list ap) { PixarLogState *sp = (PixarLogState *)tif->tif_data; switch (tag) { case TIFFTAG_PIXARLOGQUALITY: *va_arg(ap, int*) = sp->quality; break; case TIFFTAG_PIXARLOGDATAFMT: *va_arg(ap, int*) = sp->user_datafmt; break; default: return (*sp->vgetparent)(tif, tag, ap); } return (1); } static const TIFFField pixarlogFields[] = { {TIFFTAG_PIXARLOGDATAFMT, 0, 0, TIFF_ANY, 0, TIFF_SETGET_INT, TIFF_SETGET_UNDEFINED, FIELD_PSEUDO, FALSE, FALSE, "", NULL}, {TIFFTAG_PIXARLOGQUALITY, 0, 0, TIFF_ANY, 0, TIFF_SETGET_INT, TIFF_SETGET_UNDEFINED, FIELD_PSEUDO, FALSE, FALSE, "", NULL} }; int TIFFInitPixarLog(TIFF* tif, int scheme) { static const char module[] = "TIFFInitPixarLog"; PixarLogState* sp; assert(scheme == COMPRESSION_PIXARLOG); /* * Merge codec-specific tag information. 
*/ if (!_TIFFMergeFields(tif, pixarlogFields, TIFFArrayCount(pixarlogFields))) { TIFFErrorExt(tif->tif_clientdata, module, "Merging PixarLog codec-specific tags failed"); return 0; } /* * Allocate state block so tag methods have storage to record values. */ tif->tif_data = (uint8*) _TIFFmalloc(sizeof (PixarLogState)); if (tif->tif_data == NULL) goto bad; sp = (PixarLogState*) tif->tif_data; _TIFFmemset(sp, 0, sizeof (*sp)); sp->stream.data_type = Z_BINARY; sp->user_datafmt = PIXARLOGDATAFMT_UNKNOWN; /* * Install codec methods. */ tif->tif_fixuptags = PixarLogFixupTags; tif->tif_setupdecode = PixarLogSetupDecode; tif->tif_predecode = PixarLogPreDecode; tif->tif_decoderow = PixarLogDecode; tif->tif_decodestrip = PixarLogDecode; tif->tif_decodetile = PixarLogDecode; tif->tif_setupencode = PixarLogSetupEncode; tif->tif_preencode = PixarLogPreEncode; tif->tif_postencode = PixarLogPostEncode; tif->tif_encoderow = PixarLogEncode; tif->tif_encodestrip = PixarLogEncode; tif->tif_encodetile = PixarLogEncode; tif->tif_close = PixarLogClose; tif->tif_cleanup = PixarLogCleanup; /* Override SetField so we can handle our private pseudo-tag */ sp->vgetparent = tif->tif_tagmethods.vgetfield; tif->tif_tagmethods.vgetfield = PixarLogVGetField; /* hook for codec tags */ sp->vsetparent = tif->tif_tagmethods.vsetfield; tif->tif_tagmethods.vsetfield = PixarLogVSetField; /* hook for codec tags */ /* Default values for codec-specific fields */ sp->quality = Z_DEFAULT_COMPRESSION; /* default comp. level */ sp->state = 0; /* we don't wish to use the predictor, * the default is none, which predictor value 1 */ (void) TIFFPredictorInit(tif); /* * build the companding tables */ PixarLogMakeTables(sp); return (1); bad: TIFFErrorExt(tif->tif_clientdata, module, "No space for PixarLog state block"); return (0); } #endif /* PIXARLOG_SUPPORT */ /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
/* $Id$ */ /* * Copyright (c) 1996-1997 Sam Leffler * Copyright (c) 1996 Pixar * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Pixar, Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Pixar, Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL PIXAR, SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include "tiffiop.h" #ifdef PIXARLOG_SUPPORT /* * TIFF Library. * PixarLog Compression Support * * Contributed by Dan McCoy. * * PixarLog film support uses the TIFF library to store companded * 11 bit values into a tiff file, which are compressed using the * zip compressor. * * The codec can take as input and produce as output 32-bit IEEE float values * as well as 16-bit or 8-bit unsigned integer values. * * On writing any of the above are converted into the internal * 11-bit log format. In the case of 8 and 16 bit values, the * input is assumed to be unsigned linear color values that represent * the range 0-1. 
In the case of IEEE values, the 0-1 range is assumed to * be the normal linear color range, in addition over 1 values are * accepted up to a value of about 25.0 to encode "hot" highlights and such. * The encoding is lossless for 8-bit values, slightly lossy for the * other bit depths. The actual color precision should be better * than the human eye can perceive with extra room to allow for * error introduced by further image computation. As with any quantized * color format, it is possible to perform image calculations which * expose the quantization error. This format should certainly be less * susceptible to such errors than standard 8-bit encodings, but more * susceptible than straight 16-bit or 32-bit encodings. * * On reading the internal format is converted to the desired output format. * The program can request which format it desires by setting the internal * pseudo tag TIFFTAG_PIXARLOGDATAFMT to one of these possible values: * PIXARLOGDATAFMT_FLOAT = provide IEEE float values. * PIXARLOGDATAFMT_16BIT = provide unsigned 16-bit integer values * PIXARLOGDATAFMT_8BIT = provide unsigned 8-bit integer values * * alternately PIXARLOGDATAFMT_8BITABGR provides unsigned 8-bit integer * values with the difference that if there are exactly three or four channels * (rgb or rgba) it swaps the channel order (bgr or abgr). * * PIXARLOGDATAFMT_11BITLOG provides the internal encoding directly * packed in 16-bit values. However no tools are supplied for interpreting * these values. * * "hot" (over 1.0) areas written in floating point get clamped to * 1.0 in the integer data types. * * When the file is closed after writing, the bit depth and sample format * are set always to appear as if 8-bit data has been written into it. * That way a naive program unaware of the particulars of the encoding * gets the format it is most likely able to handle. * * The codec does it's own horizontal differencing step on the coded * values so the libraries predictor stuff should be turned off. 
* The codec also handle byte swapping the encoded values as necessary * since the library does not have the information necessary * to know the bit depth of the raw unencoded buffer. * * NOTE: This decoder does not appear to update tif_rawcp, and tif_rawcc. * This can cause problems with the implementation of CHUNKY_STRIP_READ_SUPPORT * as noted in http://trac.osgeo.org/gdal/ticket/3894. FrankW - Jan'11 */ #include "tif_predict.h" #include "zlib.h" #include <stdio.h> #include <stdlib.h> #include <math.h> /* Tables for converting to/from 11 bit coded values */ #define TSIZE 2048 /* decode table size (11-bit tokens) */ #define TSIZEP1 2049 /* Plus one for slop */ #define ONE 1250 /* token value of 1.0 exactly */ #define RATIO 1.004 /* nominal ratio for log part */ #define CODE_MASK 0x7ff /* 11 bits. */ static float Fltsize; static float LogK1, LogK2; #define REPEAT(n, op) { int i; i=n; do { i--; op; } while (i>0); } static void horizontalAccumulateF(uint16 *wp, int n, int stride, float *op, float *ToLinearF) { register unsigned int cr, cg, cb, ca, mask; register float t0, t1, t2, t3; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { t0 = ToLinearF[cr = (wp[0] & mask)]; t1 = ToLinearF[cg = (wp[1] & mask)]; t2 = ToLinearF[cb = (wp[2] & mask)]; op[0] = t0; op[1] = t1; op[2] = t2; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; t0 = ToLinearF[(cr += wp[0]) & mask]; t1 = ToLinearF[(cg += wp[1]) & mask]; t2 = ToLinearF[(cb += wp[2]) & mask]; op[0] = t0; op[1] = t1; op[2] = t2; } } else if (stride == 4) { t0 = ToLinearF[cr = (wp[0] & mask)]; t1 = ToLinearF[cg = (wp[1] & mask)]; t2 = ToLinearF[cb = (wp[2] & mask)]; t3 = ToLinearF[ca = (wp[3] & mask)]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; t0 = ToLinearF[(cr += wp[0]) & mask]; t1 = ToLinearF[(cg += wp[1]) & mask]; t2 = ToLinearF[(cb += wp[2]) & mask]; t3 = ToLinearF[(ca += wp[3]) & mask]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; } } else { 
REPEAT(stride, *op = ToLinearF[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinearF[*wp&mask]; wp++; op++) n -= stride; } } } } static void horizontalAccumulate12(uint16 *wp, int n, int stride, int16 *op, float *ToLinearF) { register unsigned int cr, cg, cb, ca, mask; register float t0, t1, t2, t3; #define SCALE12 2048.0F #define CLAMP12(t) (((t) < 3071) ? (uint16) (t) : 3071) if (n >= stride) { mask = CODE_MASK; if (stride == 3) { t0 = ToLinearF[cr = (wp[0] & mask)] * SCALE12; t1 = ToLinearF[cg = (wp[1] & mask)] * SCALE12; t2 = ToLinearF[cb = (wp[2] & mask)] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; t0 = ToLinearF[(cr += wp[0]) & mask] * SCALE12; t1 = ToLinearF[(cg += wp[1]) & mask] * SCALE12; t2 = ToLinearF[(cb += wp[2]) & mask] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); } } else if (stride == 4) { t0 = ToLinearF[cr = (wp[0] & mask)] * SCALE12; t1 = ToLinearF[cg = (wp[1] & mask)] * SCALE12; t2 = ToLinearF[cb = (wp[2] & mask)] * SCALE12; t3 = ToLinearF[ca = (wp[3] & mask)] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); op[3] = CLAMP12(t3); n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; t0 = ToLinearF[(cr += wp[0]) & mask] * SCALE12; t1 = ToLinearF[(cg += wp[1]) & mask] * SCALE12; t2 = ToLinearF[(cb += wp[2]) & mask] * SCALE12; t3 = ToLinearF[(ca += wp[3]) & mask] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); op[3] = CLAMP12(t3); } } else { REPEAT(stride, t0 = ToLinearF[*wp&mask] * SCALE12; *op = CLAMP12(t0); wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; t0 = ToLinearF[wp[stride]&mask]*SCALE12; *op = CLAMP12(t0); wp++; op++) n -= stride; } } } } static void horizontalAccumulate16(uint16 *wp, int n, int stride, uint16 *op, uint16 *ToLinear16) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = 
CODE_MASK; if (stride == 3) { op[0] = ToLinear16[cr = (wp[0] & mask)]; op[1] = ToLinear16[cg = (wp[1] & mask)]; op[2] = ToLinear16[cb = (wp[2] & mask)]; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; op[0] = ToLinear16[(cr += wp[0]) & mask]; op[1] = ToLinear16[(cg += wp[1]) & mask]; op[2] = ToLinear16[(cb += wp[2]) & mask]; } } else if (stride == 4) { op[0] = ToLinear16[cr = (wp[0] & mask)]; op[1] = ToLinear16[cg = (wp[1] & mask)]; op[2] = ToLinear16[cb = (wp[2] & mask)]; op[3] = ToLinear16[ca = (wp[3] & mask)]; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; op[0] = ToLinear16[(cr += wp[0]) & mask]; op[1] = ToLinear16[(cg += wp[1]) & mask]; op[2] = ToLinear16[(cb += wp[2]) & mask]; op[3] = ToLinear16[(ca += wp[3]) & mask]; } } else { REPEAT(stride, *op = ToLinear16[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinear16[*wp&mask]; wp++; op++) n -= stride; } } } } /* * Returns the log encoded 11-bit values with the horizontal * differencing undone. 
*/ static void horizontalAccumulate11(uint16 *wp, int n, int stride, uint16 *op) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = wp[0]; op[1] = wp[1]; op[2] = wp[2]; cr = wp[0]; cg = wp[1]; cb = wp[2]; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; op[0] = (uint16)((cr += wp[0]) & mask); op[1] = (uint16)((cg += wp[1]) & mask); op[2] = (uint16)((cb += wp[2]) & mask); } } else if (stride == 4) { op[0] = wp[0]; op[1] = wp[1]; op[2] = wp[2]; op[3] = wp[3]; cr = wp[0]; cg = wp[1]; cb = wp[2]; ca = wp[3]; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; op[0] = (uint16)((cr += wp[0]) & mask); op[1] = (uint16)((cg += wp[1]) & mask); op[2] = (uint16)((cb += wp[2]) & mask); op[3] = (uint16)((ca += wp[3]) & mask); } } else { REPEAT(stride, *op = *wp&mask; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = *wp&mask; wp++; op++) n -= stride; } } } } static void horizontalAccumulate8(uint16 *wp, int n, int stride, unsigned char *op, unsigned char *ToLinear8) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = ToLinear8[cr = (wp[0] & mask)]; op[1] = ToLinear8[cg = (wp[1] & mask)]; op[2] = ToLinear8[cb = (wp[2] & mask)]; n -= 3; while (n > 0) { n -= 3; wp += 3; op += 3; op[0] = ToLinear8[(cr += wp[0]) & mask]; op[1] = ToLinear8[(cg += wp[1]) & mask]; op[2] = ToLinear8[(cb += wp[2]) & mask]; } } else if (stride == 4) { op[0] = ToLinear8[cr = (wp[0] & mask)]; op[1] = ToLinear8[cg = (wp[1] & mask)]; op[2] = ToLinear8[cb = (wp[2] & mask)]; op[3] = ToLinear8[ca = (wp[3] & mask)]; n -= 4; while (n > 0) { n -= 4; wp += 4; op += 4; op[0] = ToLinear8[(cr += wp[0]) & mask]; op[1] = ToLinear8[(cg += wp[1]) & mask]; op[2] = ToLinear8[(cb += wp[2]) & mask]; op[3] = ToLinear8[(ca += wp[3]) & mask]; } } else { REPEAT(stride, *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = 
ToLinear8[*wp&mask]; wp++; op++) n -= stride; } } } } static void horizontalAccumulate8abgr(uint16 *wp, int n, int stride, unsigned char *op, unsigned char *ToLinear8) { register unsigned int cr, cg, cb, ca, mask; register unsigned char t0, t1, t2, t3; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = 0; t1 = ToLinear8[cb = (wp[2] & mask)]; t2 = ToLinear8[cg = (wp[1] & mask)]; t3 = ToLinear8[cr = (wp[0] & mask)]; op[1] = t1; op[2] = t2; op[3] = t3; n -= 3; while (n > 0) { n -= 3; wp += 3; op += 4; op[0] = 0; t1 = ToLinear8[(cb += wp[2]) & mask]; t2 = ToLinear8[(cg += wp[1]) & mask]; t3 = ToLinear8[(cr += wp[0]) & mask]; op[1] = t1; op[2] = t2; op[3] = t3; } } else if (stride == 4) { t0 = ToLinear8[ca = (wp[3] & mask)]; t1 = ToLinear8[cb = (wp[2] & mask)]; t2 = ToLinear8[cg = (wp[1] & mask)]; t3 = ToLinear8[cr = (wp[0] & mask)]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; n -= 4; while (n > 0) { n -= 4; wp += 4; op += 4; t0 = ToLinear8[(ca += wp[3]) & mask]; t1 = ToLinear8[(cb += wp[2]) & mask]; t2 = ToLinear8[(cg += wp[1]) & mask]; t3 = ToLinear8[(cr += wp[0]) & mask]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; } } else { REPEAT(stride, *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; } } } } /* * State block for each open TIFF * file using PixarLog compression/decompression. 
*/ typedef struct { TIFFPredictorState predict; z_stream stream; tmsize_t tbuf_size; /* only set/used on reading for now */ uint16 *tbuf; uint16 stride; int state; int user_datafmt; int quality; #define PLSTATE_INIT 1 TIFFVSetMethod vgetparent; /* super-class method */ TIFFVSetMethod vsetparent; /* super-class method */ float *ToLinearF; uint16 *ToLinear16; unsigned char *ToLinear8; uint16 *FromLT2; uint16 *From14; /* Really for 16-bit data, but we shift down 2 */ uint16 *From8; } PixarLogState; static int PixarLogMakeTables(PixarLogState *sp) { /* * We make several tables here to convert between various external * representations (float, 16-bit, and 8-bit) and the internal * 11-bit companded representation. The 11-bit representation has two * distinct regions. A linear bottom end up through .018316 in steps * of about .000073, and a region of constant ratio up to about 25. * These floating point numbers are stored in the main table ToLinearF. * All other tables are derived from this one. The tables (and the * ratios) are continuous at the internal seam. 
*/ int nlin, lt2size; int i, j; double b, c, linstep, v; float *ToLinearF; uint16 *ToLinear16; unsigned char *ToLinear8; uint16 *FromLT2; uint16 *From14; /* Really for 16-bit data, but we shift down 2 */ uint16 *From8; c = log(RATIO); nlin = (int)(1./c); /* nlin must be an integer */ c = 1./nlin; b = exp(-c*ONE); /* multiplicative scale factor [b*exp(c*ONE) = 1] */ linstep = b*c*exp(1.); LogK1 = (float)(1./c); /* if (v >= 2) token = k1*log(v*k2) */ LogK2 = (float)(1./b); lt2size = (int)(2./linstep) + 1; FromLT2 = (uint16 *)_TIFFmalloc(lt2size*sizeof(uint16)); From14 = (uint16 *)_TIFFmalloc(16384*sizeof(uint16)); From8 = (uint16 *)_TIFFmalloc(256*sizeof(uint16)); ToLinearF = (float *)_TIFFmalloc(TSIZEP1 * sizeof(float)); ToLinear16 = (uint16 *)_TIFFmalloc(TSIZEP1 * sizeof(uint16)); ToLinear8 = (unsigned char *)_TIFFmalloc(TSIZEP1 * sizeof(unsigned char)); if (FromLT2 == NULL || From14 == NULL || From8 == NULL || ToLinearF == NULL || ToLinear16 == NULL || ToLinear8 == NULL) { if (FromLT2) _TIFFfree(FromLT2); if (From14) _TIFFfree(From14); if (From8) _TIFFfree(From8); if (ToLinearF) _TIFFfree(ToLinearF); if (ToLinear16) _TIFFfree(ToLinear16); if (ToLinear8) _TIFFfree(ToLinear8); sp->FromLT2 = NULL; sp->From14 = NULL; sp->From8 = NULL; sp->ToLinearF = NULL; sp->ToLinear16 = NULL; sp->ToLinear8 = NULL; return 0; } j = 0; for (i = 0; i < nlin; i++) { v = i * linstep; ToLinearF[j++] = (float)v; } for (i = nlin; i < TSIZE; i++) ToLinearF[j++] = (float)(b*exp(c*i)); ToLinearF[2048] = ToLinearF[2047]; for (i = 0; i < TSIZEP1; i++) { v = ToLinearF[i]*65535.0 + 0.5; ToLinear16[i] = (v > 65535.0) ? 65535 : (uint16)v; v = ToLinearF[i]*255.0 + 0.5; ToLinear8[i] = (v > 255.0) ? 255 : (unsigned char)v; } j = 0; for (i = 0; i < lt2size; i++) { if ((i*linstep)*(i*linstep) > ToLinearF[j]*ToLinearF[j+1]) j++; FromLT2[i] = (uint16)j; } /* * Since we lose info anyway on 16-bit data, we set up a 14-bit * table and shift 16-bit values down two bits on input. * saves a little table space. 
*/ j = 0; for (i = 0; i < 16384; i++) { while ((i/16383.)*(i/16383.) > ToLinearF[j]*ToLinearF[j+1]) j++; From14[i] = (uint16)j; } j = 0; for (i = 0; i < 256; i++) { while ((i/255.)*(i/255.) > ToLinearF[j]*ToLinearF[j+1]) j++; From8[i] = (uint16)j; } Fltsize = (float)(lt2size/2); sp->ToLinearF = ToLinearF; sp->ToLinear16 = ToLinear16; sp->ToLinear8 = ToLinear8; sp->FromLT2 = FromLT2; sp->From14 = From14; sp->From8 = From8; return 1; } #define DecoderState(tif) ((PixarLogState*) (tif)->tif_data) #define EncoderState(tif) ((PixarLogState*) (tif)->tif_data) static int PixarLogEncode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s); static int PixarLogDecode(TIFF* tif, uint8* op, tmsize_t occ, uint16 s); #define PIXARLOGDATAFMT_UNKNOWN -1 static int PixarLogGuessDataFmt(TIFFDirectory *td) { int guess = PIXARLOGDATAFMT_UNKNOWN; int format = td->td_sampleformat; /* If the user didn't tell us his datafmt, * take our best guess from the bitspersample. */ switch (td->td_bitspersample) { case 32: if (format == SAMPLEFORMAT_IEEEFP) guess = PIXARLOGDATAFMT_FLOAT; break; case 16: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_16BIT; break; case 12: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_INT) guess = PIXARLOGDATAFMT_12BITPICIO; break; case 11: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_11BITLOG; break; case 8: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_8BIT; break; } return guess; } static tmsize_t multiply_ms(tmsize_t m1, tmsize_t m2) { tmsize_t bytes = m1 * m2; if (m1 && bytes / m1 != m2) bytes = 0; return bytes; } static tmsize_t add_ms(tmsize_t m1, tmsize_t m2) { tmsize_t bytes = m1 + m2; /* if either input is zero, assume overflow already occurred */ if (m1 == 0 || m2 == 0) bytes = 0; else if (bytes <= m1 || bytes <= m2) bytes = 0; return bytes; } static int PixarLogFixupTags(TIFF* tif) { (void) tif; return (1); } static int 
PixarLogSetupDecode(TIFF* tif) { static const char module[] = "PixarLogSetupDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* Make sure no byte swapping happens on the data * after decompression. */ tif->tif_postdecode = _TIFFNoPostDecode; /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); /* add one more stride in case input ends mid-stride */ tbuf_size = add_ms(tbuf_size, sizeof(uint16) * sp->stride); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); sp->tbuf_size = tbuf_size; if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle bits depth/data format combination (depth: %d)", td->td_bitspersample); return (0); } if (inflateInit(&sp->stream) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } } /* * Setup state for decoding a strip. 
*/ static int PixarLogPreDecode(TIFF* tif, uint16 s) { static const char module[] = "PixarLogPreDecode"; PixarLogState* sp = DecoderState(tif); (void) s; assert(sp != NULL); sp->stream.next_in = tif->tif_rawdata; assert(sizeof(sp->stream.avail_in)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_in = (uInt) tif->tif_rawcc; if ((tmsize_t)sp->stream.avail_in != tif->tif_rawcc) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } return (inflateReset(&sp->stream) == Z_OK); } static int PixarLogDecode(TIFF* tif, uint8* op, tmsize_t occ, uint16 s) { static const char module[] = "PixarLogDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t i; tmsize_t nsamples; int llen; uint16 *up; switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: nsamples = occ / sizeof(float); /* XXX float == 32 bits */ break; case PIXARLOGDATAFMT_16BIT: case PIXARLOGDATAFMT_12BITPICIO: case PIXARLOGDATAFMT_11BITLOG: nsamples = occ / sizeof(uint16); /* XXX uint16 == 16 bits */ break; case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: nsamples = occ; break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } llen = sp->stride * td->td_imagewidth; (void) s; assert(sp != NULL); sp->stream.next_out = (unsigned char *) sp->tbuf; assert(sizeof(sp->stream.avail_out)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_out = (uInt) (nsamples * sizeof(uint16)); if (sp->stream.avail_out != nsamples * sizeof(uint16)) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with 
buffers this size"); return (0); } /* Check that we will not fill more than what was allocated */ if ((tmsize_t)sp->stream.avail_out > sp->tbuf_size) { TIFFErrorExt(tif->tif_clientdata, module, "sp->stream.avail_out > sp->tbuf_size"); return (0); } do { int state = inflate(&sp->stream, Z_PARTIAL_FLUSH); if (state == Z_STREAM_END) { break; /* XXX */ } if (state == Z_DATA_ERROR) { TIFFErrorExt(tif->tif_clientdata, module, "Decoding error at scanline %lu, %s", (unsigned long) tif->tif_row, sp->stream.msg ? sp->stream.msg : "(null)"); if (inflateSync(&sp->stream) != Z_OK) return (0); continue; } if (state != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } while (sp->stream.avail_out > 0); /* hopefully, we got all the bytes we needed */ if (sp->stream.avail_out != 0) { TIFFErrorExt(tif->tif_clientdata, module, "Not enough data at scanline %lu (short " TIFF_UINT64_FORMAT " bytes)", (unsigned long) tif->tif_row, (TIFF_UINT64_T) sp->stream.avail_out); return (0); } up = sp->tbuf; /* Swap bytes in the data if from a different endian machine. */ if (tif->tif_flags & TIFF_SWAB) TIFFSwabArrayOfShort(up, nsamples); /* * if llen is not an exact multiple of nsamples, the decode operation * may overflow the output buffer, so truncate it enough to prevent * that but still salvage as much data as possible. 
*/ if (nsamples % llen) { TIFFWarningExt(tif->tif_clientdata, module, "stride %lu is not a multiple of sample count, " "%lu, data truncated.", (unsigned long) llen, (unsigned long) nsamples); nsamples -= nsamples % llen; } for (i = 0; i < nsamples; i += llen, up += llen) { switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: horizontalAccumulateF(up, llen, sp->stride, (float *)op, sp->ToLinearF); op += llen * sizeof(float); break; case PIXARLOGDATAFMT_16BIT: horizontalAccumulate16(up, llen, sp->stride, (uint16 *)op, sp->ToLinear16); op += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_12BITPICIO: horizontalAccumulate12(up, llen, sp->stride, (int16 *)op, sp->ToLinearF); op += llen * sizeof(int16); break; case PIXARLOGDATAFMT_11BITLOG: horizontalAccumulate11(up, llen, sp->stride, (uint16 *)op); op += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_8BIT: horizontalAccumulate8(up, llen, sp->stride, (unsigned char *)op, sp->ToLinear8); op += llen * sizeof(unsigned char); break; case PIXARLOGDATAFMT_8BITABGR: horizontalAccumulate8abgr(up, llen, sp->stride, (unsigned char *)op, sp->ToLinear8); op += llen * sizeof(unsigned char); break; default: TIFFErrorExt(tif->tif_clientdata, module, "Unsupported bits/sample: %d", td->td_bitspersample); return (0); } } return (1); } static int PixarLogSetupEncode(TIFF* tif) { static const char module[] = "PixarLogSetupEncode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = EncoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? 
td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle %d bit linear encodings", td->td_bitspersample); return (0); } if (deflateInit(&sp->stream, sp->quality) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } } /* * Reset encoding state at the start of a strip. */ static int PixarLogPreEncode(TIFF* tif, uint16 s) { static const char module[] = "PixarLogPreEncode"; PixarLogState *sp = EncoderState(tif); (void) s; assert(sp != NULL); sp->stream.next_out = tif->tif_rawdata; assert(sizeof(sp->stream.avail_out)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_out = (uInt)tif->tif_rawdatasize; if ((tmsize_t)sp->stream.avail_out != tif->tif_rawdatasize) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } return (deflateReset(&sp->stream) == Z_OK); } static void horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2) { int32 r1, g1, b1, a1, r2, g2, b2, a2, mask; float fltsize = Fltsize; #define CLAMP(v) ( (v<(float)0.) ? 0 \ : (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \ : (v>(float)24.2) ? 
2047 \ : LogK1*log(v*LogK2) + 0.5 ) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); a2 = wp[3] = (uint16) CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp++; ip++) n -= stride; while (n > 0) { REPEAT(stride, wp[0] = (uint16)(((int32)CLAMP(ip[0])-(int32)CLAMP(ip[-stride])) & mask); wp++; ip++) n -= stride; } } } } static void horizontalDifference16(unsigned short *ip, int n, int stride, unsigned short *wp, uint16 *From14) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; /* assumption is unsigned pixel values */ #undef CLAMP #define CLAMP(v) From14[(v) >> 2] mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = 
CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp++; ip++) n -= stride; while (n > 0) { REPEAT(stride, wp[0] = (uint16)((CLAMP(ip[0])-CLAMP(ip[-stride])) & mask); wp++; ip++) n -= stride; } } } } static void horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp++; ip++) n -= stride; while (n > 0) { REPEAT(stride, wp[0] = (uint16)((CLAMP(ip[0])-CLAMP(ip[-stride])) & mask); wp++; ip++) n -= stride; } } } } /* * Encode a chunk of pixels. 
*/ static int PixarLogEncode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { static const char module[] = "PixarLogEncode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState *sp = EncoderState(tif); tmsize_t i; tmsize_t n; int llen; unsigned short * up; (void) s; switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: n = cc / sizeof(float); /* XXX float == 32 bits */ break; case PIXARLOGDATAFMT_16BIT: case PIXARLOGDATAFMT_12BITPICIO: case PIXARLOGDATAFMT_11BITLOG: n = cc / sizeof(uint16); /* XXX uint16 == 16 bits */ break; case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: n = cc; break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } llen = sp->stride * td->td_imagewidth; /* Check against the number of elements (of size uint16) of sp->tbuf */ if( n > (tmsize_t)(td->td_rowsperstrip * llen) ) { TIFFErrorExt(tif->tif_clientdata, module, "Too many input bytes provided"); return 0; } for (i = 0, up = sp->tbuf; i < n; i += llen, up += llen) { switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: horizontalDifferenceF((float *)bp, llen, sp->stride, up, sp->FromLT2); bp += llen * sizeof(float); break; case PIXARLOGDATAFMT_16BIT: horizontalDifference16((uint16 *)bp, llen, sp->stride, up, sp->From14); bp += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_8BIT: horizontalDifference8((unsigned char *)bp, llen, sp->stride, up, sp->From8); bp += llen * sizeof(unsigned char); break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } } sp->stream.next_in = (unsigned char *) sp->tbuf; assert(sizeof(sp->stream.avail_in)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_in = (uInt) (n * sizeof(uint16)); if ((sp->stream.avail_in / 
sizeof(uint16)) != (uInt) n) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } do { if (deflate(&sp->stream, Z_NO_FLUSH) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "Encoder error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } if (sp->stream.avail_out == 0) { tif->tif_rawcc = tif->tif_rawdatasize; TIFFFlushData1(tif); sp->stream.next_out = tif->tif_rawdata; sp->stream.avail_out = (uInt) tif->tif_rawdatasize; /* this is a safe typecast, as check is made already in PixarLogPreEncode */ } } while (sp->stream.avail_in > 0); return (1); } /* * Finish off an encoded strip by flushing the last * string and tacking on an End Of Information code. */ static int PixarLogPostEncode(TIFF* tif) { static const char module[] = "PixarLogPostEncode"; PixarLogState *sp = EncoderState(tif); int state; sp->stream.avail_in = 0; do { state = deflate(&sp->stream, Z_FINISH); switch (state) { case Z_STREAM_END: case Z_OK: if ((tmsize_t)sp->stream.avail_out != tif->tif_rawdatasize) { tif->tif_rawcc = tif->tif_rawdatasize - sp->stream.avail_out; TIFFFlushData1(tif); sp->stream.next_out = tif->tif_rawdata; sp->stream.avail_out = (uInt) tif->tif_rawdatasize; /* this is a safe typecast, as check is made already in PixarLogPreEncode */ } break; default: TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } while (state != Z_STREAM_END); return (1); } static void PixarLogClose(TIFF* tif) { TIFFDirectory *td = &tif->tif_dir; /* In a really sneaky (and really incorrect, and untruthful, and * troublesome, and error-prone) maneuver that completely goes against * the spirit of TIFF, and breaks TIFF, on close, we covertly * modify both bitspersample and sampleformat in the directory to * indicate 8-bit linear. This way, the decode "just works" even for * readers that don't know about PixarLog, or how to set * the PIXARLOGDATFMT pseudo-tag. 
*/ td->td_bitspersample = 8; td->td_sampleformat = SAMPLEFORMAT_UINT; } static void PixarLogCleanup(TIFF* tif) { PixarLogState* sp = (PixarLogState*) tif->tif_data; assert(sp != 0); (void)TIFFPredictorCleanup(tif); tif->tif_tagmethods.vgetfield = sp->vgetparent; tif->tif_tagmethods.vsetfield = sp->vsetparent; if (sp->FromLT2) _TIFFfree(sp->FromLT2); if (sp->From14) _TIFFfree(sp->From14); if (sp->From8) _TIFFfree(sp->From8); if (sp->ToLinearF) _TIFFfree(sp->ToLinearF); if (sp->ToLinear16) _TIFFfree(sp->ToLinear16); if (sp->ToLinear8) _TIFFfree(sp->ToLinear8); if (sp->state&PLSTATE_INIT) { if (tif->tif_mode == O_RDONLY) inflateEnd(&sp->stream); else deflateEnd(&sp->stream); } if (sp->tbuf) _TIFFfree(sp->tbuf); _TIFFfree(sp); tif->tif_data = NULL; _TIFFSetDefaultCompressionState(tif); } static int PixarLogVSetField(TIFF* tif, uint32 tag, va_list ap) { static const char module[] = "PixarLogVSetField"; PixarLogState *sp = (PixarLogState *)tif->tif_data; int result; switch (tag) { case TIFFTAG_PIXARLOGQUALITY: sp->quality = (int) va_arg(ap, int); if (tif->tif_mode != O_RDONLY && (sp->state&PLSTATE_INIT)) { if (deflateParams(&sp->stream, sp->quality, Z_DEFAULT_STRATEGY) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } return (1); case TIFFTAG_PIXARLOGDATAFMT: sp->user_datafmt = (int) va_arg(ap, int); /* Tweak the TIFF header so that the rest of libtiff knows what * size of data will be passed between app and library, and * assume that the app knows what it is doing and is not * confused by these header manipulations... 
*/ switch (sp->user_datafmt) { case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_11BITLOG: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_12BITPICIO: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_INT); break; case PIXARLOGDATAFMT_16BIT: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_FLOAT: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 32); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_IEEEFP); break; } /* * Must recalculate sizes should bits/sample change. */ tif->tif_tilesize = isTiled(tif) ? TIFFTileSize(tif) : (tmsize_t)(-1); tif->tif_scanlinesize = TIFFScanlineSize(tif); result = 1; /* NB: pseudo tag */ break; default: result = (*sp->vsetparent)(tif, tag, ap); } return (result); } static int PixarLogVGetField(TIFF* tif, uint32 tag, va_list ap) { PixarLogState *sp = (PixarLogState *)tif->tif_data; switch (tag) { case TIFFTAG_PIXARLOGQUALITY: *va_arg(ap, int*) = sp->quality; break; case TIFFTAG_PIXARLOGDATAFMT: *va_arg(ap, int*) = sp->user_datafmt; break; default: return (*sp->vgetparent)(tif, tag, ap); } return (1); } static const TIFFField pixarlogFields[] = { {TIFFTAG_PIXARLOGDATAFMT, 0, 0, TIFF_ANY, 0, TIFF_SETGET_INT, TIFF_SETGET_UNDEFINED, FIELD_PSEUDO, FALSE, FALSE, "", NULL}, {TIFFTAG_PIXARLOGQUALITY, 0, 0, TIFF_ANY, 0, TIFF_SETGET_INT, TIFF_SETGET_UNDEFINED, FIELD_PSEUDO, FALSE, FALSE, "", NULL} }; int TIFFInitPixarLog(TIFF* tif, int scheme) { static const char module[] = "TIFFInitPixarLog"; PixarLogState* sp; assert(scheme == COMPRESSION_PIXARLOG); /* * Merge codec-specific tag information. 
*/ if (!_TIFFMergeFields(tif, pixarlogFields, TIFFArrayCount(pixarlogFields))) { TIFFErrorExt(tif->tif_clientdata, module, "Merging PixarLog codec-specific tags failed"); return 0; } /* * Allocate state block so tag methods have storage to record values. */ tif->tif_data = (uint8*) _TIFFmalloc(sizeof (PixarLogState)); if (tif->tif_data == NULL) goto bad; sp = (PixarLogState*) tif->tif_data; _TIFFmemset(sp, 0, sizeof (*sp)); sp->stream.data_type = Z_BINARY; sp->user_datafmt = PIXARLOGDATAFMT_UNKNOWN; /* * Install codec methods. */ tif->tif_fixuptags = PixarLogFixupTags; tif->tif_setupdecode = PixarLogSetupDecode; tif->tif_predecode = PixarLogPreDecode; tif->tif_decoderow = PixarLogDecode; tif->tif_decodestrip = PixarLogDecode; tif->tif_decodetile = PixarLogDecode; tif->tif_setupencode = PixarLogSetupEncode; tif->tif_preencode = PixarLogPreEncode; tif->tif_postencode = PixarLogPostEncode; tif->tif_encoderow = PixarLogEncode; tif->tif_encodestrip = PixarLogEncode; tif->tif_encodetile = PixarLogEncode; tif->tif_close = PixarLogClose; tif->tif_cleanup = PixarLogCleanup; /* Override SetField so we can handle our private pseudo-tag */ sp->vgetparent = tif->tif_tagmethods.vgetfield; tif->tif_tagmethods.vgetfield = PixarLogVGetField; /* hook for codec tags */ sp->vsetparent = tif->tif_tagmethods.vsetfield; tif->tif_tagmethods.vsetfield = PixarLogVSetField; /* hook for codec tags */ /* Default values for codec-specific fields */ sp->quality = Z_DEFAULT_COMPRESSION; /* default comp. level */ sp->state = 0; /* we don't wish to use the predictor, * the default is none, which predictor value 1 */ (void) TIFFPredictorInit(tif); /* * build the companding tables */ PixarLogMakeTables(sp); return (1); bad: TIFFErrorExt(tif->tif_clientdata, module, "No space for PixarLog state block"); return (0); } #endif /* PIXARLOG_SUPPORT */ /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { wp += n + stride - 1; /* point to last one */ ip += n + stride - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } }
horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = (uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp++; ip++) n -= stride; while (n > 0) { REPEAT(stride, wp[0] = (uint16)((CLAMP(ip[0])-CLAMP(ip[-stride])) & mask); wp++; ip++) n -= stride; } } } }
{'added': [(986, ' REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp++; ip++)'), (987, ' n -= stride;'), (988, ' while (n > 0) {'), (989, ' REPEAT(stride,'), (990, ' wp[0] = (uint16)(((int32)CLAMP(ip[0])-(int32)CLAMP(ip[-stride])) & mask);'), (991, ' wp++; ip++)'), (992, ' n -= stride;'), (993, ' }'), (1036, ' REPEAT(stride, wp[0] = CLAMP(ip[0]); wp++; ip++)'), (1039, ' REPEAT(stride,'), (1040, ' wp[0] = (uint16)((CLAMP(ip[0])-CLAMP(ip[-stride])) & mask);'), (1041, ' wp++; ip++)'), (1042, ' n -= stride;'), (1043, ' }'), (1086, ' REPEAT(stride, wp[0] = CLAMP(ip[0]); wp++; ip++)'), (1087, ' n -= stride;'), (1088, ' while (n > 0) {'), (1089, ' REPEAT(stride,'), (1090, ' wp[0] = (uint16)((CLAMP(ip[0])-CLAMP(ip[-stride])) & mask);'), (1091, ' wp++; ip++)'), (1092, ' n -= stride;'), (1093, ' }'), (1094, ' }')], 'deleted': [(986, '\t ip += n - 1;\t/* point to last one */'), (987, '\t wp += n - 1;\t/* point to last one */'), (988, '\t n -= stride;'), (989, '\t while (n > 0) {'), (990, '\t\tREPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]);'), (991, '\t\t\t\twp[stride] -= wp[0];'), (992, '\t\t\t\twp[stride] &= mask;'), (993, '\t\t\t\twp--; ip--)'), (994, '\t\tn -= stride;'), (995, '\t }'), (996, '\t REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--)'), (1039, '\t ip += n - 1;\t/* point to last one */'), (1040, '\t wp += n - 1;\t/* point to last one */'), (1043, '\t\tREPEAT(stride, wp[0] = CLAMP(ip[0]);'), (1044, '\t\t\t\twp[stride] -= wp[0];'), (1045, '\t\t\t\twp[stride] &= mask;'), (1046, '\t\t\t\twp--; ip--)'), (1047, '\t\tn -= stride;'), (1048, '\t }'), (1049, '\t REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--)'), (1092, '\t wp += n + stride - 1;\t/* point to last one */'), (1093, '\t ip += n + stride - 1;\t/* point to last one */'), (1094, '\t n -= stride;'), (1095, '\t while (n > 0) {'), (1096, '\t\tREPEAT(stride, wp[0] = CLAMP(ip[0]);'), (1097, '\t\t\t\twp[stride] -= wp[0];'), (1098, '\t\t\t\twp[stride] &= mask;'), (1099, '\t\t\t\twp--; ip--)'), (1100, '\t\tn -= 
stride;'), (1101, '\t }'), (1102, '\t REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--)'), (1103, '\t}')]}
23
32
1,107
9,011
46
548
7
https://github.com/vadz/libtiff
CVE-2016-9533
CWE-119
1,118
reshape.cc
C++
tflite::ops::builtin::reshape::ShapeIsVector
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <string.h> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace reshape { constexpr int kInputTensor = 0; constexpr int kShapeTensor = 1; constexpr int kOutputTensor = 0; TfLiteIntArray* GetOutputShape(TfLiteContext*, TfLiteNode*); TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. 
int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); } inline TfLiteIntArray* GetOutputShapeFromTensor(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape->dims->data[0]); for (int i = 0; i < output_shape->size; ++i) { output_shape->data[i] = shape->data.i32[i]; } return output_shape; } inline TfLiteIntArray* GetOutputShapeFromParam(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data); // The function is returned above this line if the shape tensor is usable. // Now fallback to the shape parameter in `TfLiteReshapeParams`. int num_dimensions = params->num_dimensions; if (num_dimensions == 1 && params->shape[0] == 0) { // Legacy tflite models use a shape parameter of [0] to indicate scalars, // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during // toco conversion. num_dimensions = 0; } TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions); for (int i = 0; i < num_dimensions; ++i) { output_shape->data[i] = params->shape[i]; } return output_shape; } // Check if the shape tensor is valid. Shapes should be int32 vectors. 
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape->dims->size == 1 && shape->type == kTfLiteInt32); } TfLiteIntArray* GetOutputShape(TfLiteContext* context, TfLiteNode* node) { if (NumInputs(node) == 2 && ShapeIsVector(context, node)) { return GetOutputShapeFromTensor(context, node); } else { return GetOutputShapeFromParam(context, node); } } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); // Always postpone sizing string tensors, even if we could in principle // calculate their shapes now. String tensors don't benefit from having their // shapes precalculated because the actual memory can only be allocated after // we know all the content. TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type != kTfLiteString) { if (NumInputs(node) == 1 || IsConstantTensor(GetInput(context, node, kShapeTensor))) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } else { SetTensorToDynamic(output); } } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // There are two ways in which the 'output' can be made dynamic: it could be // a string tensor, or its shape cannot be calculated during Prepare(). In // either case, we now have all the information to calculate its shape. if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } // Note that string tensors are always "dynamic" in the sense that their size // is not known until we have all the content. This applies even when their // shape is known ahead of time. As a result, a string tensor is never given // any memory by ResizeOutput(), and we need to do it manually here. 
Since // reshape doesn't change the data, the output tensor needs exactly as many // bytes as the input tensor. if (output->type == kTfLiteString) { auto bytes_required = input->bytes; TfLiteTensorRealloc(bytes_required, output); output->bytes = bytes_required; } memcpy(output->data.raw, input->data.raw, input->bytes); return kTfLiteOk; } } // namespace reshape TfLiteRegistration* Register_RESHAPE() { static TfLiteRegistration r = {nullptr, nullptr, reshape::Prepare, reshape::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <string.h> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace reshape { constexpr int kInputTensor = 0; constexpr int kShapeTensor = 1; constexpr int kOutputTensor = 0; TfLiteIntArray* GetOutputShape(TfLiteContext*, TfLiteNode*); TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. 
int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); } inline TfLiteIntArray* GetOutputShapeFromTensor(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); if (shape == nullptr) return nullptr; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape->dims->data[0]); for (int i = 0; i < output_shape->size; ++i) { output_shape->data[i] = shape->data.i32[i]; } return output_shape; } inline TfLiteIntArray* GetOutputShapeFromParam(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data); // The function is returned above this line if the shape tensor is usable. // Now fallback to the shape parameter in `TfLiteReshapeParams`. int num_dimensions = params->num_dimensions; if (num_dimensions == 1 && params->shape[0] == 0) { // Legacy tflite models use a shape parameter of [0] to indicate scalars, // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during // toco conversion. num_dimensions = 0; } TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions); for (int i = 0; i < num_dimensions; ++i) { output_shape->data[i] = params->shape[i]; } return output_shape; } // Check if the shape tensor is valid. Shapes should be int32 vectors. 
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape != nullptr && shape->dims->size == 1 && shape->type == kTfLiteInt32); } TfLiteIntArray* GetOutputShape(TfLiteContext* context, TfLiteNode* node) { if (NumInputs(node) == 2 && ShapeIsVector(context, node)) { return GetOutputShapeFromTensor(context, node); } else { return GetOutputShapeFromParam(context, node); } } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); // Always postpone sizing string tensors, even if we could in principle // calculate their shapes now. String tensors don't benefit from having their // shapes precalculated because the actual memory can only be allocated after // we know all the content. TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (output->type != kTfLiteString) { if (NumInputs(node) == 1 || IsConstantTensor(GetInput(context, node, kShapeTensor))) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } else { SetTensorToDynamic(output); } } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // There are two ways in which the 'output' can be made dynamic: it could be // a string tensor, or its shape cannot be calculated during Prepare(). In // either case, we now have all the information to calculate its shape. if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } // Note that string tensors are always "dynamic" in the sense that their size // is not known until we have all the content. 
This applies even when their // shape is known ahead of time. As a result, a string tensor is never given // any memory by ResizeOutput(), and we need to do it manually here. Since // reshape doesn't change the data, the output tensor needs exactly as many // bytes as the input tensor. if (output->type == kTfLiteString) { auto bytes_required = input->bytes; TfLiteTensorRealloc(bytes_required, output); output->bytes = bytes_required; } memcpy(output->data.raw, input->data.raw, input->bytes); return kTfLiteOk; } } // namespace reshape TfLiteRegistration* Register_RESHAPE() { static TfLiteRegistration r = {nullptr, nullptr, reshape::Prepare, reshape::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape->dims->size == 1 && shape->type == kTfLiteInt32); }
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape != nullptr && shape->dims->size == 1 && shape->type == kTfLiteInt32); }
{'added': [(41, ' const TfLiteTensor* input;'), (42, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (43, ' TfLiteTensor* output;'), (44, ' TF_LITE_ENSURE_OK(context,'), (45, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (76, ' if (shape == nullptr) return nullptr;'), (110, ' return (shape != nullptr && shape->dims->size == 1 &&'), (111, ' shape->type == kTfLiteInt32);'), (130, ' TfLiteTensor* output;'), (131, ' TF_LITE_ENSURE_OK(context,'), (132, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (145, ' const TfLiteTensor* input;'), (146, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (147, ' TfLiteTensor* output;'), (148, ' TF_LITE_ENSURE_OK(context,'), (149, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(41, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (42, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (106, ' return (shape->dims->size == 1 && shape->type == kTfLiteInt32);'), (125, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (138, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (139, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
16
6
120
851
4
43
2
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,115
reshape.cc
C++
tflite::ops::builtin::reshape::Eval
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <string.h> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace reshape { constexpr int kInputTensor = 0; constexpr int kShapeTensor = 1; constexpr int kOutputTensor = 0; TfLiteIntArray* GetOutputShape(TfLiteContext*, TfLiteNode*); TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. 
int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); } inline TfLiteIntArray* GetOutputShapeFromTensor(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape->dims->data[0]); for (int i = 0; i < output_shape->size; ++i) { output_shape->data[i] = shape->data.i32[i]; } return output_shape; } inline TfLiteIntArray* GetOutputShapeFromParam(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data); // The function is returned above this line if the shape tensor is usable. // Now fallback to the shape parameter in `TfLiteReshapeParams`. int num_dimensions = params->num_dimensions; if (num_dimensions == 1 && params->shape[0] == 0) { // Legacy tflite models use a shape parameter of [0] to indicate scalars, // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during // toco conversion. num_dimensions = 0; } TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions); for (int i = 0; i < num_dimensions; ++i) { output_shape->data[i] = params->shape[i]; } return output_shape; } // Check if the shape tensor is valid. Shapes should be int32 vectors. 
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape->dims->size == 1 && shape->type == kTfLiteInt32); } TfLiteIntArray* GetOutputShape(TfLiteContext* context, TfLiteNode* node) { if (NumInputs(node) == 2 && ShapeIsVector(context, node)) { return GetOutputShapeFromTensor(context, node); } else { return GetOutputShapeFromParam(context, node); } } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); // Always postpone sizing string tensors, even if we could in principle // calculate their shapes now. String tensors don't benefit from having their // shapes precalculated because the actual memory can only be allocated after // we know all the content. TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type != kTfLiteString) { if (NumInputs(node) == 1 || IsConstantTensor(GetInput(context, node, kShapeTensor))) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } else { SetTensorToDynamic(output); } } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // There are two ways in which the 'output' can be made dynamic: it could be // a string tensor, or its shape cannot be calculated during Prepare(). In // either case, we now have all the information to calculate its shape. if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } // Note that string tensors are always "dynamic" in the sense that their size // is not known until we have all the content. This applies even when their // shape is known ahead of time. As a result, a string tensor is never given // any memory by ResizeOutput(), and we need to do it manually here. 
Since // reshape doesn't change the data, the output tensor needs exactly as many // bytes as the input tensor. if (output->type == kTfLiteString) { auto bytes_required = input->bytes; TfLiteTensorRealloc(bytes_required, output); output->bytes = bytes_required; } memcpy(output->data.raw, input->data.raw, input->bytes); return kTfLiteOk; } } // namespace reshape TfLiteRegistration* Register_RESHAPE() { static TfLiteRegistration r = {nullptr, nullptr, reshape::Prepare, reshape::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <string.h> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace reshape { constexpr int kInputTensor = 0; constexpr int kShapeTensor = 1; constexpr int kOutputTensor = 0; TfLiteIntArray* GetOutputShape(TfLiteContext*, TfLiteNode*); TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. 
int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); } inline TfLiteIntArray* GetOutputShapeFromTensor(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); if (shape == nullptr) return nullptr; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape->dims->data[0]); for (int i = 0; i < output_shape->size; ++i) { output_shape->data[i] = shape->data.i32[i]; } return output_shape; } inline TfLiteIntArray* GetOutputShapeFromParam(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data); // The function is returned above this line if the shape tensor is usable. // Now fallback to the shape parameter in `TfLiteReshapeParams`. int num_dimensions = params->num_dimensions; if (num_dimensions == 1 && params->shape[0] == 0) { // Legacy tflite models use a shape parameter of [0] to indicate scalars, // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during // toco conversion. num_dimensions = 0; } TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions); for (int i = 0; i < num_dimensions; ++i) { output_shape->data[i] = params->shape[i]; } return output_shape; } // Check if the shape tensor is valid. Shapes should be int32 vectors. 
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape != nullptr && shape->dims->size == 1 && shape->type == kTfLiteInt32); } TfLiteIntArray* GetOutputShape(TfLiteContext* context, TfLiteNode* node) { if (NumInputs(node) == 2 && ShapeIsVector(context, node)) { return GetOutputShapeFromTensor(context, node); } else { return GetOutputShapeFromParam(context, node); } } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); // Always postpone sizing string tensors, even if we could in principle // calculate their shapes now. String tensors don't benefit from having their // shapes precalculated because the actual memory can only be allocated after // we know all the content. TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (output->type != kTfLiteString) { if (NumInputs(node) == 1 || IsConstantTensor(GetInput(context, node, kShapeTensor))) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } else { SetTensorToDynamic(output); } } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // There are two ways in which the 'output' can be made dynamic: it could be // a string tensor, or its shape cannot be calculated during Prepare(). In // either case, we now have all the information to calculate its shape. if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } // Note that string tensors are always "dynamic" in the sense that their size // is not known until we have all the content. 
This applies even when their // shape is known ahead of time. As a result, a string tensor is never given // any memory by ResizeOutput(), and we need to do it manually here. Since // reshape doesn't change the data, the output tensor needs exactly as many // bytes as the input tensor. if (output->type == kTfLiteString) { auto bytes_required = input->bytes; TfLiteTensorRealloc(bytes_required, output); output->bytes = bytes_required; } memcpy(output->data.raw, input->data.raw, input->bytes); return kTfLiteOk; } } // namespace reshape TfLiteRegistration* Register_RESHAPE() { static TfLiteRegistration r = {nullptr, nullptr, reshape::Prepare, reshape::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // There are two ways in which the 'output' can be made dynamic: it could be // a string tensor, or its shape cannot be calculated during Prepare(). In // either case, we now have all the information to calculate its shape. if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } // Note that string tensors are always "dynamic" in the sense that their size // is not known until we have all the content. This applies even when their // shape is known ahead of time. As a result, a string tensor is never given // any memory by ResizeOutput(), and we need to do it manually here. Since // reshape doesn't change the data, the output tensor needs exactly as many // bytes as the input tensor. if (output->type == kTfLiteString) { auto bytes_required = input->bytes; TfLiteTensorRealloc(bytes_required, output); output->bytes = bytes_required; } memcpy(output->data.raw, input->data.raw, input->bytes); return kTfLiteOk; }
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // There are two ways in which the 'output' can be made dynamic: it could be // a string tensor, or its shape cannot be calculated during Prepare(). In // either case, we now have all the information to calculate its shape. if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } // Note that string tensors are always "dynamic" in the sense that their size // is not known until we have all the content. This applies even when their // shape is known ahead of time. As a result, a string tensor is never given // any memory by ResizeOutput(), and we need to do it manually here. Since // reshape doesn't change the data, the output tensor needs exactly as many // bytes as the input tensor. if (output->type == kTfLiteString) { auto bytes_required = input->bytes; TfLiteTensorRealloc(bytes_required, output); output->bytes = bytes_required; } memcpy(output->data.raw, input->data.raw, input->bytes); return kTfLiteOk; }
{'added': [(41, ' const TfLiteTensor* input;'), (42, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (43, ' TfLiteTensor* output;'), (44, ' TF_LITE_ENSURE_OK(context,'), (45, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (76, ' if (shape == nullptr) return nullptr;'), (110, ' return (shape != nullptr && shape->dims->size == 1 &&'), (111, ' shape->type == kTfLiteInt32);'), (130, ' TfLiteTensor* output;'), (131, ' TF_LITE_ENSURE_OK(context,'), (132, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (145, ' const TfLiteTensor* input;'), (146, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (147, ' TfLiteTensor* output;'), (148, ' TF_LITE_ENSURE_OK(context,'), (149, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(41, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (42, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (106, ' return (shape->dims->size == 1 && shape->type == kTfLiteInt32);'), (125, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (138, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (139, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
16
6
120
851
14
112
3
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,986
ne.c
C
r_bin_ne_get_segments
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? 
se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; 
switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof 
(NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; if (off > bin->ne_header->EntryTableLength) { free (entry); break; } ut16 segoff = r_read_le16 (bin->entry_table + off); if (segnum > 0 && segnum < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (off + 2 >= bin->ne_header->EntryTableLength) { free (entry); break; } ut16 delta = r_read_le16 (bin->entry_table + off); if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + delta; } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int 
index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name = NULL; if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else if (rel.index > 0) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; char *fname = __func_name_from_ord (name, rel.func_ord); imp->name = r_str_newf ("%s.%s", name, fname); free (fname); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #define NE_BUG 0 #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if 
(bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free (bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t *r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin || !bin->segment_entries) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? 
se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; 
switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof 
(NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; if (off > bin->ne_header->EntryTableLength) { free (entry); break; } ut16 segoff = r_read_le16 (bin->entry_table + off); if (segnum > 0 && segnum < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (off + 2 >= bin->ne_header->EntryTableLength) { free (entry); break; } ut16 delta = r_read_le16 (bin->entry_table + off); if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + delta; } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int 
index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name = NULL; if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else if (rel.index > 0) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; char *fname = __func_name_from_ord (name, rel.func_ord); imp->name = r_str_newf ("%s.%s", name, fname); free (fname); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #define NE_BUG 0 #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if 
(bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free (bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t *r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; }
RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin || !bin->segment_entries) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; }
{'added': [(80, '\tif (!bin || !bin->segment_entries) {')], 'deleted': [(80, '\tif (!bin) {')]}
1
1
628
3,777
25
190
6
https://github.com/radareorg/radare2
CVE-2022-1382
CWE-476
2,323
sndfile.c
C
sf_open_virtual
/* ** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com> ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "sfconfig.h" #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" #define SNDFILE_MAGICK 0x1234C0DE #ifdef __APPLE__ /* ** Detect if a compile for a universal binary is being attempted and barf if it is. ** See the URL below for the rationale. */ #ifdef __BIG_ENDIAN__ #if (CPU_IS_LITTLE_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #ifdef __LITTLE_ENDIAN__ #if (CPU_IS_BIG_ENDIAN == 1) #error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018" #endif #endif #endif typedef struct { int error ; const char *str ; } ErrorStruct ; static ErrorStruct SndfileErrors [] = { /* Public error values and their associated strings. */ { SF_ERR_NO_ERROR , "No Error." }, { SF_ERR_UNRECOGNISED_FORMAT , "Format not recognised." }, { SF_ERR_SYSTEM , "System error." /* Often replaced. */ }, { SF_ERR_MALFORMED_FILE , "Supported file format but file is malformed." }, { SF_ERR_UNSUPPORTED_ENCODING , "Supported file format but unsupported encoding." 
}, /* Private error values and their associated strings. */ { SFE_ZERO_MAJOR_FORMAT , "Error : major format is 0." }, { SFE_ZERO_MINOR_FORMAT , "Error : minor format is 0." }, { SFE_BAD_FILE , "File does not exist or is not a regular file (possibly a pipe?)." }, { SFE_BAD_FILE_READ , "File exists but no data could be read." }, { SFE_OPEN_FAILED , "Could not open file." }, { SFE_BAD_SNDFILE_PTR , "Not a valid SNDFILE* pointer." }, { SFE_BAD_SF_INFO_PTR , "NULL SF_INFO pointer passed to libsndfile." }, { SFE_BAD_SF_INCOMPLETE , "SF_PRIVATE struct incomplete and end of header parsing." }, { SFE_BAD_FILE_PTR , "Bad FILE pointer." }, { SFE_BAD_INT_PTR , "Internal error, Bad pointer." }, { SFE_BAD_STAT_SIZE , "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." }, { SFE_NO_TEMP_DIR , "Error : Could not file temp dir." }, { SFE_MALLOC_FAILED , "Internal malloc () failed." }, { SFE_UNIMPLEMENTED , "File contains data in an unimplemented format." }, { SFE_BAD_READ_ALIGN , "Attempt to read a non-integer number of channels." }, { SFE_BAD_WRITE_ALIGN , "Attempt to write a non-integer number of channels." }, { SFE_UNKNOWN_FORMAT , "File contains data in an unknown format." }, { SFE_NOT_READMODE , "Read attempted on file currently open for write." }, { SFE_NOT_WRITEMODE , "Write attempted on file currently open for read." }, { SFE_BAD_MODE_RW , "Error : This file format does not support read/write mode." }, { SFE_BAD_SF_INFO , "Internal error : SF_INFO struct incomplete." }, { SFE_BAD_OFFSET , "Error : supplied offset beyond end of file." }, { SFE_NO_EMBED_SUPPORT , "Error : embedding not supported for this file format." }, { SFE_NO_EMBEDDED_RDWR , "Error : cannot open embedded file read/write." }, { SFE_NO_PIPE_WRITE , "Error : this file format does not support pipe write." }, { SFE_BAD_VIRTUAL_IO , "Error : bad pointer on SF_VIRTUAL_IO struct." }, { SFE_BAD_BROADCAST_INFO_SIZE , "Error : bad coding_history_size in SF_BROADCAST_INFO struct." 
}, { SFE_BAD_BROADCAST_INFO_TOO_BIG , "Error : SF_BROADCAST_INFO struct too large." }, { SFE_BAD_CART_INFO_SIZE , "Error: SF_CART_INFO struct too large." }, { SFE_BAD_CART_INFO_TOO_BIG , "Error: bag tag_text_size in SF_CART_INFO struct." }, { SFE_INTERLEAVE_MODE , "Attempt to write to file with non-interleaved data." }, { SFE_INTERLEAVE_SEEK , "Bad karma in seek during interleave read operation." }, { SFE_INTERLEAVE_READ , "Bad karma in read during interleave read operation." }, { SFE_INTERNAL , "Unspecified internal error." }, { SFE_BAD_COMMAND_PARAM , "Bad parameter passed to function sf_command." }, { SFE_BAD_ENDIAN , "Bad endian-ness. Try default endian-ness" }, { SFE_CHANNEL_COUNT_ZERO , "Channel count is zero." }, { SFE_CHANNEL_COUNT , "Too many channels specified." }, { SFE_CHANNEL_COUNT_BAD , "Bad channel count." }, { SFE_BAD_SEEK , "Internal psf_fseek() failed." }, { SFE_NOT_SEEKABLE , "Seek attempted on unseekable file type." }, { SFE_AMBIGUOUS_SEEK , "Error : combination of file open mode and seek command is ambiguous." }, { SFE_WRONG_SEEK , "Error : invalid seek parameters." }, { SFE_SEEK_FAILED , "Error : parameters OK, but psf_seek() failed." }, { SFE_BAD_OPEN_MODE , "Error : bad mode parameter for file open." }, { SFE_OPEN_PIPE_RDWR , "Error : attempt to open a pipe in read/write mode." }, { SFE_RDWR_POSITION , "Error on RDWR position (cryptic)." }, { SFE_RDWR_BAD_HEADER , "Error : Cannot open file in read/write mode due to string data in header." }, { SFE_CMD_HAS_DATA , "Error : Command fails because file already has audio data." }, { SFE_STR_NO_SUPPORT , "Error : File type does not support string data." }, { SFE_STR_NOT_WRITE , "Error : Trying to set a string when file is not in write mode." }, { SFE_STR_MAX_DATA , "Error : Maximum string data storage reached." }, { SFE_STR_MAX_COUNT , "Error : Maximum string data count reached." }, { SFE_STR_BAD_TYPE , "Error : Bad string data type." 
}, { SFE_STR_NO_ADD_END , "Error : file type does not support strings added at end of file." }, { SFE_STR_BAD_STRING , "Error : bad string." }, { SFE_STR_WEIRD , "Error : Weird string error." }, { SFE_WAV_NO_RIFF , "Error in WAV file. No 'RIFF' chunk marker." }, { SFE_WAV_NO_WAVE , "Error in WAV file. No 'WAVE' chunk marker." }, { SFE_WAV_NO_FMT , "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." }, { SFE_WAV_BAD_FMT , "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." }, { SFE_WAV_FMT_SHORT , "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." }, { SFE_WAV_BAD_FACT , "Error in WAV file. 'fact' chunk out of place." }, { SFE_WAV_BAD_PEAK , "Error in WAV file. Bad 'PEAK' chunk." }, { SFE_WAV_PEAK_B4_FMT , "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." }, { SFE_WAV_BAD_FORMAT , "Error in WAV file. Errors in 'fmt ' chunk." }, { SFE_WAV_BAD_BLOCKALIGN , "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." }, { SFE_WAV_NO_DATA , "Error in WAV file. No 'data' chunk marker." }, { SFE_WAV_BAD_LIST , "Error in WAV file. Malformed LIST chunk." }, { SFE_WAV_UNKNOWN_CHUNK , "Error in WAV file. File contains an unknown chunk marker." }, { SFE_WAV_WVPK_DATA , "Error in WAV file. Data is in WAVPACK format." }, { SFE_WAV_ADPCM_NOT4BIT , "Error in ADPCM WAV file. Invalid bit width." }, { SFE_WAV_ADPCM_CHANNELS , "Error in ADPCM WAV file. Invalid number of channels." }, { SFE_WAV_ADPCM_SAMPLES , "Error in ADPCM WAV file. Invalid number of samples per block." }, { SFE_WAV_GSM610_FORMAT , "Error in GSM610 WAV file. Invalid format chunk." }, { SFE_AIFF_NO_FORM , "Error in AIFF file, bad 'FORM' marker." }, { SFE_AIFF_AIFF_NO_FORM , "Error in AIFF file, 'AIFF' marker without 'FORM'." }, { SFE_AIFF_COMM_NO_FORM , "Error in AIFF file, 'COMM' marker without 'FORM'." }, { SFE_AIFF_SSND_NO_COMM , "Error in AIFF file, 'SSND' marker without 'COMM'." }, { SFE_AIFF_UNKNOWN_CHUNK , "Error in AIFF file, unknown chunk." 
}, { SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." }, { SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." }, { SFE_AIFF_PEAK_B4_COMM , "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." }, { SFE_AIFF_BAD_PEAK , "Error in AIFF file. Bad 'PEAK' chunk." }, { SFE_AIFF_NO_SSND , "Error in AIFF file, bad 'SSND' chunk." }, { SFE_AIFF_NO_DATA , "Error in AIFF file, no sound data." }, { SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." }, { SFE_AU_UNKNOWN_FORMAT , "Error in AU file, unknown format." }, { SFE_AU_NO_DOTSND , "Error in AU file, missing '.snd' or 'dns.' marker." }, { SFE_AU_EMBED_BAD_LEN , "Embedded AU file with unknown length." }, { SFE_RAW_READ_BAD_SPEC , "Error while opening RAW file for read. Must specify format and channels.\n" "Possibly trying to open unsupported format." }, { SFE_RAW_BAD_BITWIDTH , "Error. RAW file bitwidth must be a multiple of 8." }, { SFE_RAW_BAD_FORMAT , "Error. Bad format field in SF_INFO struct when opening a RAW file for read." }, { SFE_PAF_NO_MARKER , "Error in PAF file, no marker." }, { SFE_PAF_VERSION , "Error in PAF file, bad version." }, { SFE_PAF_UNKNOWN_FORMAT , "Error in PAF file, unknown format." }, { SFE_PAF_SHORT_HEADER , "Error in PAF file. File shorter than minimal header." }, { SFE_PAF_BAD_CHANNELS , "Error in PAF file. Bad channel count." }, { SFE_SVX_NO_FORM , "Error in 8SVX / 16SV file, no 'FORM' marker." }, { SFE_SVX_NO_BODY , "Error in 8SVX / 16SV file, no 'BODY' marker." }, { SFE_SVX_NO_DATA , "Error in 8SVX / 16SV file, no sound data." }, { SFE_SVX_BAD_COMP , "Error in 8SVX / 16SV file, unsupported compression format." }, { SFE_SVX_BAD_NAME_LENGTH , "Error in 8SVX / 16SV file, NAME chunk too long." }, { SFE_NIST_BAD_HEADER , "Error in NIST file, bad header." }, { SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process." 
}, { SFE_NIST_BAD_ENCODING , "Error in NIST file, unsupported compression format." }, { SFE_VOC_NO_CREATIVE , "Error in VOC file, no 'Creative Voice File' marker." }, { SFE_VOC_BAD_FORMAT , "Error in VOC file, bad format." }, { SFE_VOC_BAD_VERSION , "Error in VOC file, bad version number." }, { SFE_VOC_BAD_MARKER , "Error in VOC file, bad marker in file." }, { SFE_VOC_BAD_SECTIONS , "Error in VOC file, incompatible VOC sections." }, { SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." }, { SFE_VOC_MULTI_SECTION , "Unimplemented VOC file feature, file contains multiple sound sections." }, { SFE_VOC_MULTI_PARAM , "Error in VOC file, file contains multiple bit or channel widths." }, { SFE_VOC_SECTION_COUNT , "Error in VOC file, too many sections." }, { SFE_VOC_NO_PIPE , "Error : not able to operate on VOC files over a pipe." }, { SFE_IRCAM_NO_MARKER , "Error in IRCAM file, bad IRCAM marker." }, { SFE_IRCAM_BAD_CHANNELS , "Error in IRCAM file, bad channel count." }, { SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." }, { SFE_W64_64_BIT , "Error in W64 file, file contains 64 bit offset." }, { SFE_W64_NO_RIFF , "Error in W64 file. No 'riff' chunk marker." }, { SFE_W64_NO_WAVE , "Error in W64 file. No 'wave' chunk marker." }, { SFE_W64_NO_DATA , "Error in W64 file. No 'data' chunk marker." }, { SFE_W64_ADPCM_NOT4BIT , "Error in ADPCM W64 file. Invalid bit width." }, { SFE_W64_ADPCM_CHANNELS , "Error in ADPCM W64 file. Invalid number of channels." }, { SFE_W64_GSM610_FORMAT , "Error in GSM610 W64 file. Invalid format chunk." }, { SFE_MAT4_BAD_NAME , "Error in MAT4 file. No variable name." }, { SFE_MAT4_NO_SAMPLERATE , "Error in MAT4 file. No sample rate." }, { SFE_MAT5_BAD_ENDIAN , "Error in MAT5 file. Not able to determine endian-ness." }, { SFE_MAT5_NO_BLOCK , "Error in MAT5 file. Bad block structure." }, { SFE_MAT5_SAMPLE_RATE , "Error in MAT5 file. Not able to determine sample rate." 
}, { SFE_PVF_NO_PVF1 , "Error in PVF file. No PVF1 marker." }, { SFE_PVF_BAD_HEADER , "Error in PVF file. Bad header." }, { SFE_PVF_BAD_BITWIDTH , "Error in PVF file. Bad bit width." }, { SFE_XI_BAD_HEADER , "Error in XI file. Bad header." }, { SFE_XI_EXCESS_SAMPLES , "Error in XI file. Excess samples in file." }, { SFE_XI_NO_PIPE , "Error : not able to operate on XI files over a pipe." }, { SFE_HTK_NO_PIPE , "Error : not able to operate on HTK files over a pipe." }, { SFE_SDS_NOT_SDS , "Error : not an SDS file." }, { SFE_SDS_BAD_BIT_WIDTH , "Error : bad bit width for SDS file." }, { SFE_SD2_FD_DISALLOWED , "Error : cannot open SD2 file without a file name." }, { SFE_SD2_BAD_DATA_OFFSET , "Error : bad data offset." }, { SFE_SD2_BAD_MAP_OFFSET , "Error : bad map offset." }, { SFE_SD2_BAD_DATA_LENGTH , "Error : bad data length." }, { SFE_SD2_BAD_MAP_LENGTH , "Error : bad map length." }, { SFE_SD2_BAD_RSRC , "Error : bad resource fork." }, { SFE_SD2_BAD_SAMPLE_SIZE , "Error : bad sample size." }, { SFE_FLAC_BAD_HEADER , "Error : bad flac header." }, { SFE_FLAC_NEW_DECODER , "Error : problem while creating flac decoder." }, { SFE_FLAC_INIT_DECODER , "Error : problem while initialization of the flac decoder." }, { SFE_FLAC_LOST_SYNC , "Error : flac decoder lost sync." }, { SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." }, { SFE_FLAC_UNKOWN_ERROR , "Error : unknown error in flac decoder." }, { SFE_WVE_NOT_WVE , "Error : not a WVE file." }, { SFE_WVE_NO_PIPE , "Error : not able to operate on WVE files over a pipe." }, { SFE_DWVW_BAD_BITWIDTH , "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." }, { SFE_G72X_NOT_MONO , "Error : G72x encoding does not support more than 1 channel." }, { SFE_VORBIS_ENCODER_BUG , "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." }, { SFE_RF64_NOT_RF64 , "Error : Not an RF64 file." }, { SFE_RF64_PEAK_B4_FMT , "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." 
}, { SFE_RF64_NO_DATA , "Error in RF64 file. No 'data' chunk marker." }, { SFE_ALAC_FAIL_TMPFILE , "Error : Failed to open tmp file for ALAC encoding." }, { SFE_BAD_CHUNK_PTR , "Error : Bad SF_CHUNK_INFO pointer." }, { SFE_UNKNOWN_CHUNK , "Error : Unknown chunk marker." }, { SFE_BAD_CHUNK_FORMAT , "Error : Reading/writing chunks from this file format is not supported." }, { SFE_BAD_CHUNK_MARKER , "Error : Bad chunk marker." }, { SFE_BAD_CHUNK_DATA_PTR , "Error : Bad data pointer in SF_CHUNK_INFO struct." }, { SFE_FILENAME_TOO_LONG , "Error : Supplied filename too long." }, { SFE_MAX_ERROR , "Maximum error number." }, { SFE_MAX_ERROR + 1 , NULL } } ; /*------------------------------------------------------------------------------ */ static int format_from_extension (SF_PRIVATE *psf) ; static int guess_file_type (SF_PRIVATE *psf) ; static int validate_sfinfo (SF_INFO *sfinfo) ; static int validate_psf (SF_PRIVATE *psf) ; static void save_header_info (SF_PRIVATE *psf) ; static int copy_filename (SF_PRIVATE *psf, const char *path) ; static int psf_close (SF_PRIVATE *psf) ; static int try_resource_fork (SF_PRIVATE * psf) ; /*------------------------------------------------------------------------------ ** Private (static) variables. */ int sf_errno = 0 ; static char sf_parselog [SF_BUFFER_LEN] = { 0 } ; static char sf_syserr [SF_SYSERR_LEN] = { 0 } ; /*------------------------------------------------------------------------------ */ #define VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c) \ { if ((a) == NULL) \ { sf_errno = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ (b) = (SF_PRIVATE*) (a) ; \ if ((b)->virtual_io == SF_FALSE && \ psf_file_valid (b) == 0) \ { (b)->error = SFE_BAD_FILE_PTR ; \ return 0 ; \ } ; \ if ((b)->Magick != SNDFILE_MAGICK) \ { (b)->error = SFE_BAD_SNDFILE_PTR ; \ return 0 ; \ } ; \ if (c) (b)->error = 0 ; \ } /*------------------------------------------------------------------------------ ** Public functions. 
*/ SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) { SF_PRIVATE *psf ; /* Ultimate sanity check. */ assert (sizeof (sf_count_t) == 8) ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf_log_printf (psf, "File : %s\n", path) ; if (copy_filename (psf, path) != 0) { sf_errno = psf->error ; return NULL ; } ; psf->file.mode = mode ; if (strcmp (path, "-") == 0) psf->error = psf_set_stdio (psf) ; else psf->error = psf_fopen (psf) ; return psf_open_file (psf, sfinfo) ; } /* sf_open */ SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. 
*/ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */ int sf_close (SNDFILE *sndfile) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_close (psf) ; } /* sf_close */ void sf_write_sync (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE *) sndfile) == NULL) return ; psf_fsync (psf) ; return ; } /* sf_write_sync */ /*============================================================================== */ const char* sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. 
*/ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */ const char* sf_strerror (SNDFILE *sndfile) { SF_PRIVATE *psf = NULL ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; if (errnum == SFE_SYSTEM && sf_syserr [0]) return sf_syserr ; } else { psf = (SF_PRIVATE *) sndfile ; if (psf->Magick != SNDFILE_MAGICK) return "sf_strerror : Bad magic number." ; errnum = psf->error ; if (errnum == SFE_SYSTEM && psf->syserr [0]) return psf->syserr ; } ; return sf_error_number (errnum) ; } /* sf_strerror */ /*------------------------------------------------------------------------------ */ int sf_error (SNDFILE *sndfile) { SF_PRIVATE *psf ; if (sndfile == NULL) return sf_errno ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; if (psf->error) return psf->error ; return 0 ; } /* sf_error */ /*------------------------------------------------------------------------------ */ int sf_perror (SNDFILE *sndfile) { SF_PRIVATE *psf ; int errnum ; if (sndfile == NULL) { errnum = sf_errno ; } else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; fprintf (stderr, "%s\n", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_perror */ /*------------------------------------------------------------------------------ */ int sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen) { SF_PRIVATE *psf ; int errnum ; if (str == NULL) return SFE_INTERNAL ; if (sndfile == NULL) errnum = sf_errno ; else { VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ; errnum = psf->error ; } ; snprintf (str, maxlen, "%s", sf_error_number (errnum)) ; return SFE_NO_ERROR ; } /* sf_error_str */ /*============================================================================== */ int sf_format_check (const SF_INFO *info) { int subformat, endian ; subformat = SF_CODEC (info->format) ; endian = 
SF_ENDIAN (info->format) ;

	/* This is the place where each file format can check if the supplied
	** SF_INFO struct is valid.
	** Return 0 on failure, 1 on success.
	*/

	if (info->channels < 1 || info->channels > SF_MAX_CHANNELS)
		return 0 ;

	if (info->samplerate < 0)
		return 0 ;

	switch (SF_CONTAINER (info->format))
	{	case SF_FORMAT_WAV :
				/* WAV now allows both endian, RIFF or RIFX (little or big respectively) */
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_G721_32 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_WAVEX :
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_AIFF :
				/* AIFF does allow both endian-nesses for PCM data.*/
				if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				/* For other encodings reject any endian-ness setting. */
				if (endian != 0)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2))
					return 1 ;
				break ;

		case SF_FORMAT_AU :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				if (subformat == SF_FORMAT_G721_32 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_G723_24 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_G723_40 && info->channels == 1)
					return 1 ;
				break ;

		case SF_FORMAT_CAF :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20)
					return 1 ;
				if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_RAW :
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW)
					return 1 ;
				if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 || subformat == SF_FORMAT_DWVW_24) && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1)
					return 1 ;
				break ;

		case SF_FORMAT_PAF :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24)
					return 1 ;
				break ;

		case SF_FORMAT_SVX :
				/* SVX only supports writing mono SVX files. */
				if (info->channels > 1)
					return 0 ;
				/* Always big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;

				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_NIST :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				break ;

		case SF_FORMAT_IRCAM :
				if (info->channels > 256)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT)
					return 1 ;
				break ;

		case SF_FORMAT_VOC :
				if (info->channels > 2)
					return 0 ;
				/* VOC is strictly little endian. */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				break ;

		case SF_FORMAT_W64 :
				/* W64 is strictly little endian. */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_MAT4 :
				if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_MAT5 :
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_PVF :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				break ;

		case SF_FORMAT_XI :
				if (info->channels != 1)
					return 0 ;
				if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_HTK :
				if (info->channels != 1)
					return 0 ;
				/* HTK is strictly big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_SDS :
				if (info->channels != 1)
					return 0 ;
				/* SDS is strictly big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24)
					return 1 ;
				break ;

		case SF_FORMAT_AVR :
				if (info->channels > 2)
					return 0 ;
				/* AVR is strictly big endian.  (Original comment said "SDS";
				** copy-paste slip — the checks below are for AVR.) */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_FLAC :
				/* FLAC can't do more than 8 channels. */
				if (info->channels > 8)
					return 0 ;
				if (endian != SF_ENDIAN_FILE)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24)
					return 1 ;
				break ;

		case SF_FORMAT_SD2 :
				/* SD2 is strictly big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				break ;

		case SF_FORMAT_WVE :
				if (info->channels > 1)
					return 0 ;
				/* WVE is strictly big endian.
				** NOTE(review): the check below rejects SF_ENDIAN_BIG, which
				** contradicts this comment — confirm intended endianness. */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_ALAW)
					return 1 ;
				break ;

		case SF_FORMAT_OGG :
				if (endian != SF_ENDIAN_FILE)
					return 0 ;
				if (subformat == SF_FORMAT_VORBIS)
					return 1 ;
				break ;

		case SF_FORMAT_MPC2K :
				if (info->channels > 2)
					return 0 ;
				/* MPC2000 is strictly little endian. */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_RF64 :
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		default : break ;
		} ;

	return 0 ;
} /* sf_format_check */

/*------------------------------------------------------------------------------
*/

/* Return the compile-time library name / version string. */
const char *
sf_version_string (void)
{
#if	ENABLE_EXPERIMENTAL_CODE
	return PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ;
#else
	return PACKAGE_NAME "-" PACKAGE_VERSION ;
#endif
}

/*------------------------------------------------------------------------------
*/

/* Generic command interface.  Some commands work without an open handle,
** the rest validate sndfile and dispatch on the command code. */
int
sf_command	(SNDFILE *sndfile, int command, void *data, int datasize)
{	SF_PRIVATE 	*psf = (SF_PRIVATE *) sndfile ;
	double		quality ;
	int			old_value ;

	/* This set of commands do not need the sndfile parameter.
	*/
	switch (command)
	{	case SFC_GET_LIB_VERSION :
			if (data == NULL)
			{	if (psf)
					psf->error = SFE_BAD_COMMAND_PARAM ;
				return SFE_BAD_COMMAND_PARAM ;
				} ;
			snprintf (data, datasize, "%s", sf_version_string ()) ;
			return strlen (data) ;

		case SFC_GET_SIMPLE_FORMAT_COUNT :
			if (data == NULL || datasize != SIGNED_SIZEOF (int))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			*((int*) data) = psf_get_format_simple_count () ;
			return 0 ;

		case SFC_GET_SIMPLE_FORMAT :
			if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			return psf_get_format_simple (data) ;

		case SFC_GET_FORMAT_MAJOR_COUNT :
			if (data == NULL || datasize != SIGNED_SIZEOF (int))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			*((int*) data) = psf_get_format_major_count () ;
			return 0 ;

		case SFC_GET_FORMAT_MAJOR :
			if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			return psf_get_format_major (data) ;

		case SFC_GET_FORMAT_SUBTYPE_COUNT :
			if (data == NULL || datasize != SIGNED_SIZEOF (int))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			*((int*) data) = psf_get_format_subtype_count () ;
			return 0 ;

		case SFC_GET_FORMAT_SUBTYPE :
			if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			return psf_get_format_subtype (data) ;

		case SFC_GET_FORMAT_INFO :
			if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			return psf_get_format_info (data) ;
		} ;

	/* The global parse log can be fetched even when the open failed. */
	if (sndfile == NULL && command == SFC_GET_LOG_INFO)
	{	if (data == NULL)
			return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
		snprintf (data, datasize, "%s", sf_parselog) ;
		return strlen (data) ;
		} ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	switch (command)
	{	case SFC_SET_NORM_FLOAT :
			old_value = psf->norm_float ;
			psf->norm_float = (datasize) ? SF_TRUE : SF_FALSE ;
			return old_value ;

		case SFC_GET_CURRENT_SF_INFO :
			if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO))
				return (sf_errno = SFE_BAD_COMMAND_PARAM) ;
			memcpy (data, &psf->sf, sizeof (SF_INFO)) ;
			break ;

		case SFC_SET_NORM_DOUBLE :
			old_value = psf->norm_double ;
			psf->norm_double = (datasize) ? SF_TRUE : SF_FALSE ;
			return old_value ;

		case SFC_GET_NORM_FLOAT :
			return psf->norm_float ;

		case SFC_GET_NORM_DOUBLE :
			return psf->norm_double ;

		case SFC_SET_SCALE_FLOAT_INT_READ :
			old_value = psf->float_int_mult ;

			psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ;
			if (psf->float_int_mult && psf->float_max < 0.0)
				/* Scale to prevent wrap-around distortion. */
				psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ;
			return old_value ;

		case SFC_SET_SCALE_INT_FLOAT_WRITE :
			old_value = psf->scale_int_float ;
			psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ;
			return old_value ;

		case SFC_SET_ADD_PEAK_CHUNK :
			{	int format = SF_CONTAINER (psf->sf.format) ;

				/* Only WAV and AIFF support the PEAK chunk. */
				switch (format)
				{	case SF_FORMAT_AIFF :
					case SF_FORMAT_CAF :
					case SF_FORMAT_WAV :
					case SF_FORMAT_WAVEX :
					case SF_FORMAT_RF64 :
						break ;

					default :
						return SF_FALSE ;
					} ;

				format = SF_CODEC (psf->sf.format) ;

				/* Only files containing the following data types support the PEAK chunk. */
				if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE)
					return SF_FALSE ;

				} ;
			/* Can only do this in SFM_WRITE mode. */
			if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR)
				return SF_FALSE ;
			/* If data has already been written this must fail. */
			if (psf->have_written)
			{	psf->error = SFE_CMD_HAS_DATA ;
				return SF_FALSE ;
				} ;
			/* Everything seems OK, so set psf->has_peak and re-write header. */
			if (datasize == SF_FALSE && psf->peak_info != NULL)
			{	free (psf->peak_info) ;
				psf->peak_info = NULL ;
				}
			else if (psf->peak_info == NULL)
			{	psf->peak_info = peak_info_calloc (psf->sf.channels) ;
				if (psf->peak_info != NULL)
					psf->peak_info->peak_loc = SF_PEAK_START ;
				} ;

			if (psf->write_header)
				psf->write_header (psf, SF_TRUE) ;
			return datasize ;

		case SFC_SET_ADD_HEADER_PAD_CHUNK :
			return SF_FALSE ;

		case SFC_GET_LOG_INFO :
			if (data == NULL)
				return SFE_BAD_COMMAND_PARAM ;
			snprintf (data, datasize, "%s", psf->parselog.buf) ;
			break ;

		case SFC_CALC_SIGNAL_MAX :
			if (data == NULL || datasize != sizeof (double))
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;
			*((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ;
			break ;

		case SFC_CALC_NORM_SIGNAL_MAX :
			if (data == NULL || datasize != sizeof (double))
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;
			*((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ;
			break ;

		case SFC_CALC_MAX_ALL_CHANNELS :
			if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels)
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;
			return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ;

		case SFC_CALC_NORM_MAX_ALL_CHANNELS :
			if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels)
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;
			return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ;

		case SFC_GET_SIGNAL_MAX :
			if (data == NULL || datasize != sizeof (double))
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			return psf_get_signal_max (psf, (double *) data) ;

		case SFC_GET_MAX_ALL_CHANNELS :
			if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			return psf_get_max_all_channels (psf, (double*) data) ;

		case SFC_UPDATE_HEADER_NOW :
			if (psf->write_header)
				psf->write_header (psf, SF_TRUE) ;
			break ;

		case SFC_SET_UPDATE_HEADER_AUTO :
			psf->auto_header = datasize ? SF_TRUE : SF_FALSE ;
			return psf->auto_header ;
			break ;	/* NOTE(review): unreachable after the return above. */

		case SFC_SET_ADD_DITHER_ON_WRITE :
		case SFC_SET_ADD_DITHER_ON_READ :
			/*
			** FIXME !
			** These are obsolete. Just return.
			** Remove some time after version 1.0.8.
			*/
			break ;

		case SFC_SET_DITHER_ON_WRITE :
			if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO))
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;
			memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ;
			if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR)
				dither_init (psf, SFM_WRITE) ;
			break ;

		case SFC_SET_DITHER_ON_READ :
			if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO))
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;
			memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ;
			if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR)
				dither_init (psf, SFM_READ) ;
			break ;

		case SFC_FILE_TRUNCATE :
			if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR)
				return SF_TRUE ;
			if (datasize != sizeof (sf_count_t))
				return SF_TRUE ;
			if (data == NULL || datasize != sizeof (sf_count_t))
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				}
			else
			{	sf_count_t position ;

				position = *((sf_count_t*) data) ;

				/* Seek to the frame position, then truncate the underlying
				** file at the corresponding byte offset. */
				if (sf_seek (sndfile, position, SEEK_SET) != position)
					return SF_TRUE ;

				psf->sf.frames = position ;

				position = psf_fseek (psf, 0, SEEK_CUR) ;

				return psf_ftruncate (psf, position) ;
				} ;
			break ;

		case SFC_SET_RAW_START_OFFSET :
			if (data == NULL || datasize != sizeof (sf_count_t))
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;

			if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW)
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;

			psf->dataoffset = *((sf_count_t*) data) ;
			sf_seek (sndfile, 0, SEEK_CUR) ;
			break ;

		case SFC_GET_EMBED_FILE_INFO :
			if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO))
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;

			((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ;
			((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ;
			break ;

		/* Lite remove start */
		case SFC_TEST_IEEE_FLOAT_REPLACE :
			psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ;
			if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT)
				float32_init (psf) ;
			else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE)
				double64_init (psf) ;
			else
				return (psf->error = SFE_BAD_COMMAND_PARAM) ;
			break ;
		/* Lite remove end */

		case SFC_SET_CLIPPING :
			psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ;
			return psf->add_clipping ;

		case SFC_GET_CLIPPING :
			return psf->add_clipping ;

		case SFC_GET_LOOP_INFO :
			if (datasize != sizeof (SF_LOOP_INFO) || data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			if (psf->loop_info == NULL)
				return SF_FALSE ;
			memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ;
			return SF_TRUE ;

		case SFC_SET_BROADCAST_INFO :
			{	int format = SF_CONTAINER (psf->sf.format) ;

				/* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */
				if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64)
					return SF_FALSE ;
				} ;

			/* Only makes sense in SFM_WRITE or SFM_RDWR mode. */
			if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR))
				return SF_FALSE ;
			/* If data has already been written this must fail. */
			if (psf->broadcast_16k == NULL && psf->have_written)
			{	psf->error = SFE_CMD_HAS_DATA ;
				return SF_FALSE ;
				} ;

			if (NOT (broadcast_var_set (psf, data, datasize)))
				return SF_FALSE ;

			if (psf->write_header)
				psf->write_header (psf, SF_TRUE) ;
			return SF_TRUE ;

		case SFC_GET_BROADCAST_INFO :
			if (data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			return broadcast_var_get (psf, data, datasize) ;

		case SFC_SET_CART_INFO :
			{	int format = SF_CONTAINER (psf->sf.format) ;
				/* Only WAV and RF64 support cart chunk format */
				if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64)
					return SF_FALSE ;
				} ;

			/* Only makes sense in SFM_WRITE or SFM_RDWR mode */
			if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR))
				return SF_FALSE ;
			/* If data has already been written this must fail. */
			if (psf->cart_16k == NULL && psf->have_written)
			{	psf->error = SFE_CMD_HAS_DATA ;
				return SF_FALSE ;
				} ;
			if (NOT (cart_var_set (psf, data, datasize)))
				return SF_FALSE ;
			if (psf->write_header)
				psf->write_header (psf, SF_TRUE) ;
			return SF_TRUE ;

		case SFC_GET_CART_INFO :
			if (data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			return cart_var_get (psf, data, datasize) ;

		case SFC_GET_CUE_COUNT :
			if (datasize != sizeof (uint32_t) || data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			if (psf->cues != NULL)
			{	*((uint32_t *) data) = psf->cues->cue_count ;
				return SF_TRUE ;
				} ;
			return SF_FALSE ;

		case SFC_GET_CUE :
			if (datasize != sizeof (SF_CUES) || data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			if (psf->cues == NULL)
				return SF_FALSE ;
			psf_get_cues (psf, data, datasize) ;
			return SF_TRUE ;

		case SFC_SET_CUE :
			if (psf->have_written)
			{	psf->error = SFE_CMD_HAS_DATA ;
				return SF_FALSE ;
				} ;
			if (datasize != sizeof (SF_CUES) || data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;

			if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL)
			{	psf->error = SFE_MALLOC_FAILED ;
				return SF_FALSE ;
				} ;
			return SF_TRUE ;

		case SFC_GET_INSTRUMENT :
			if (datasize != sizeof (SF_INSTRUMENT) || data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;
			if (psf->instrument == NULL)
				return SF_FALSE ;
			memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ;
			return SF_TRUE ;

		case SFC_SET_INSTRUMENT :
			/* If data has already been written this must fail. */
			if (psf->have_written)
			{	psf->error = SFE_CMD_HAS_DATA ;
				return SF_FALSE ;
				} ;
			if (datasize != sizeof (SF_INSTRUMENT) || data == NULL)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;

			if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL)
			{	psf->error = SFE_MALLOC_FAILED ;
				return SF_FALSE ;
				} ;
			memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ;
			return SF_TRUE ;

		case SFC_RAW_DATA_NEEDS_ENDSWAP :
			return psf->data_endswap ;

		case SFC_GET_CHANNEL_MAP_INFO :
			if (psf->channel_map == NULL)
				return SF_FALSE ;

			if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;

			memcpy (data, psf->channel_map, datasize) ;
			return SF_TRUE ;

		case SFC_SET_CHANNEL_MAP_INFO :
			if (psf->have_written)
			{	psf->error = SFE_CMD_HAS_DATA ;
				return SF_FALSE ;
				} ;
			if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels)
			{	psf->error = SFE_BAD_COMMAND_PARAM ;
				return SF_FALSE ;
				} ;

			/* Validate every channel map entry before accepting the data. */
			{	int *iptr ;

				for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++)
				{	if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX)
					{	psf->error = SFE_BAD_COMMAND_PARAM ;
						return SF_FALSE ;
						} ;
					} ;
				} ;
			free (psf->channel_map) ;
			if ((psf->channel_map = malloc (datasize)) == NULL)
			{	psf->error = SFE_MALLOC_FAILED ;
				return SF_FALSE ;
				} ;

			memcpy (psf->channel_map, data, datasize) ;

			/*
			** Pass the command down to the container's command handler.
			** Don't pass user data, use validated psf->channel_map data instead.
			*/
			if (psf->command)
				return psf->command (psf, command, NULL, 0) ;
			return SF_FALSE ;

		case SFC_SET_VBR_ENCODING_QUALITY :
			if (data == NULL || datasize != sizeof (double))
				return SF_FALSE ;

			/* Clamp to [0, 1] and invert so it maps onto a compression level. */
			quality = *((double *) data) ;
			quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ;
			return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ;

		default :
			/* Must be a file specific command. Pass it on.
			*/
			if (psf->command)
				return psf->command (psf, command, data, datasize) ;

			psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ;
			return (psf->error = SFE_BAD_COMMAND_PARAM) ;
		} ;

	return 0 ;
} /* sf_command */

/*------------------------------------------------------------------------------
*/

/* Seek to a frame offset.  whence is SEEK_SET/CUR/END, optionally ORed with
** SFM_READ / SFM_WRITE to move only one pointer of an RDWR handle.  Returns
** the new position or PSF_SEEK_ERROR. */
sf_count_t
sf_seek	(SNDFILE *sndfile, sf_count_t offset, int whence)
{	SF_PRIVATE 	*psf ;
	sf_count_t	seek_from_start = 0, retval ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (! psf->sf.seekable)
	{	psf->error = SFE_NOT_SEEKABLE ;
		return PSF_SEEK_ERROR ;
		} ;

	/* If the whence parameter has a mode ORed in, check to see that
	** it makes sense.
	*/
	if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) ||
			((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE))
	{	psf->error = SFE_WRONG_SEEK ;
		return PSF_SEEK_ERROR ;
		} ;

	/* Convert all SEEK_CUR and SEEK_END into seek_from_start to be
	** used with SEEK_SET.
	*/
	switch (whence)
	{	/* The SEEK_SET behaviour is independent of mode. */
		case SEEK_SET :
		case SEEK_SET | SFM_READ :
		case SEEK_SET | SFM_WRITE :
		case SEEK_SET | SFM_RDWR :
				seek_from_start = offset ;
				break ;

		/* The SEEK_CUR is a little more tricky.
		*/
		case SEEK_CUR :
				if (offset == 0)
				{	if (psf->file.mode == SFM_READ)
						return psf->read_current ;
					if (psf->file.mode == SFM_WRITE)
						return psf->write_current ;
					} ;

				if (psf->file.mode == SFM_READ)
					seek_from_start = psf->read_current + offset ;
				else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR)
					seek_from_start = psf->write_current + offset ;
				else
					psf->error = SFE_AMBIGUOUS_SEEK ;
				break ;

		case SEEK_CUR | SFM_READ :
				if (offset == 0)
					return psf->read_current ;
				seek_from_start = psf->read_current + offset ;
				break ;

		case SEEK_CUR | SFM_WRITE :
				if (offset == 0)
					return psf->write_current ;
				seek_from_start = psf->write_current + offset ;
				break ;

		/* The SEEK_END */
		case SEEK_END :
		case SEEK_END | SFM_READ :
		case SEEK_END | SFM_WRITE :
				seek_from_start = psf->sf.frames + offset ;
				break ;

		default :
				psf->error = SFE_BAD_SEEK ;
				break ;
		} ;

	if (psf->error)
		return PSF_SEEK_ERROR ;

	/* Write modes may seek past the current end; read mode may not. */
	if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE)
	{	if (seek_from_start < 0)
		{	psf->error = SFE_BAD_SEEK ;
			return PSF_SEEK_ERROR ;
			} ;
		}
	else if (seek_from_start < 0 || seek_from_start > psf->sf.frames)
	{	psf->error = SFE_BAD_SEEK ;
		return PSF_SEEK_ERROR ;
		} ;

	if (psf->seek)
	{	int new_mode = (whence & SFM_MASK) ? (whence & SFM_MASK) : psf->file.mode ;

		retval = psf->seek (psf, new_mode, seek_from_start) ;

		switch (new_mode)
		{	case SFM_READ :
					psf->read_current = retval ;
					break ;
			case SFM_WRITE :
					psf->write_current = retval ;
					break ;
			case SFM_RDWR :
					psf->read_current = retval ;
					psf->write_current = retval ;
					new_mode = SFM_READ ;
					break ;
			} ;

		psf->last_op = new_mode ;

		return retval ;
		} ;

	psf->error = SFE_AMBIGUOUS_SEEK ;
	return PSF_SEEK_ERROR ;
} /* sf_seek */

/*------------------------------------------------------------------------------
*/

/* Fetch a metadata string (title, artist, ...) from the handle. */
const char*
sf_get_string	(SNDFILE *sndfile, int str_type)
{	SF_PRIVATE 	*psf ;

	if ((psf = (SF_PRIVATE*) sndfile) == NULL)
		return NULL ;
	if (psf->Magick != SNDFILE_MAGICK)
		return NULL ;

	return psf_get_string (psf, str_type) ;
} /* sf_get_string */

/* Store a metadata string on the handle. */
int
sf_set_string	(SNDFILE *sndfile, int str_type, const char* str)
{	SF_PRIVATE 	*psf ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	return psf_set_string (psf, str_type, str) ;
} /* sf_set_string */

/*------------------------------------------------------------------------------
*/

/* Return the byte rate of the codec, or -1 if it cannot be determined. */
int
sf_current_byterate	(SNDFILE *sndfile)
{	SF_PRIVATE 	*psf ;

	if ((psf = (SF_PRIVATE*) sndfile) == NULL)
		return -1 ;
	if (psf->Magick != SNDFILE_MAGICK)
		return -1 ;

	/* This should cover all PCM and floating point formats. */
	if (psf->bytewidth)
		return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ;

	if (psf->byterate)
		return psf->byterate (psf) ;

	switch (SF_CODEC (psf->sf.format))
	{	case SF_FORMAT_IMA_ADPCM :
		case SF_FORMAT_MS_ADPCM :
		case SF_FORMAT_VOX_ADPCM :
			return (psf->sf.samplerate * psf->sf.channels) / 2 ;

		case SF_FORMAT_GSM610 :
			return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ;

		case SF_FORMAT_G721_32 :	/* 32kbs G721 ADPCM encoding. */
			return (psf->sf.samplerate * psf->sf.channels) / 2 ;

		case SF_FORMAT_G723_24 :	/* 24kbs G723 ADPCM encoding. */
			return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ;

		case SF_FORMAT_G723_40 :	/* 40kbs G723 ADPCM encoding.
		*/
			return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ;

		default :
			break ;
		} ;

	return -1 ;
} /* sf_current_byterate */

/*==============================================================================
*/

/* Read up to 'bytes' bytes of raw (un-converted) data from the file.
** 'bytes' must be a multiple of channels * bytewidth.  Returns the number
** of bytes actually read; the tail of the buffer is zeroed on short reads. */
sf_count_t
sf_read_raw	(SNDFILE *sndfile, void *ptr, sf_count_t bytes)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count, extra ;
	int			bytewidth, blockwidth ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ;
	blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	if (bytes < 0 || psf->read_current >= psf->sf.frames)
	{	/* NOTE(review): when bytes < 0 this passes a negative count to
		** psf_memset — presumably psf_memset treats that as a no-op, but
		** confirm; a plain memset here would be a heap overwrite. */
		psf_memset (ptr, 0, bytes) ;
		return 0 ;
		} ;

	if (bytes % (psf->sf.channels * bytewidth))
	{	psf->error = SFE_BAD_READ_ALIGN ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf_fread (ptr, 1, bytes, psf) ;

	if (psf->read_current + count / blockwidth <= psf->sf.frames)
		psf->read_current += count / blockwidth ;
	else
	{	/* Clamp to the logical end of the data and zero the remainder. */
		count = (psf->sf.frames - psf->read_current) * blockwidth ;
		extra = bytes - count ;
		psf_memset (((char *) ptr) + count, 0, extra) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count ;
} /* sf_read_raw */

/*------------------------------------------------------------------------------
*/

/* Read 'len' short samples (len must be a multiple of channels).
** Returns the number of samples read; unread tail is zero-filled. */
sf_count_t
sf_read_short	(SNDFILE *sndfile, short *ptr, sf_count_t len)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_READ_ALIGN ;
		return 0 ;
		} ;

	if (len <= 0 || psf->read_current >= psf->sf.frames)
	{	psf_memset (ptr, 0, len * sizeof (short)) ;
		return 0 ; /* End of file. */
		} ;

	if (psf->read_short == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf->read_short (psf, ptr, len) ;

	if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
		psf->read_current += count / psf->sf.channels ;
	else
	{	count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
		extra = len - count ;
		psf_memset (ptr + count, 0, extra * sizeof (short)) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count ;
} /* sf_read_short */

/* Read 'frames' frames of short samples; returns frames read. */
sf_count_t
sf_readf_short	(SNDFILE *sndfile, short *ptr, sf_count_t frames)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	if (frames <= 0 || psf->read_current >= psf->sf.frames)
	{	psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ;
		return 0 ; /* End of file.
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == 
SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ 
/* Read 'frames' complete frames of float data into ptr.
** Zero-fills ptr and returns 0 on wrong mode, non-positive request or EOF;
** zero-pads any short read past end-of-file. Returns frames actually read. */
sf_count_t
sf_readf_float	(SNDFILE *sndfile, float *ptr, sf_count_t frames)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	if (frames <= 0 || psf->read_current >= psf->sf.frames)
	{	psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ;
		return 0 ;
		} ;

	if (psf->read_float == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	/* Re-seek only when the previous operation was not a read. */
	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf->read_float (psf, ptr, frames * psf->sf.channels) ;

	if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
		psf->read_current += count / psf->sf.channels ;
	else
	{	/* Short read past EOF : clamp count and zero-pad the remainder. */
		count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
		extra = frames * psf->sf.channels - count ;
		psf_memset (ptr + count, 0, extra * sizeof (float)) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count / psf->sf.channels ;
} /* sf_readf_float */

/*------------------------------------------------------------------------------
*/

/* Read 'len' double items into ptr. 'len' must be a whole number of frames.
** Returns the number of items read; zero-pads past end-of-file. */
sf_count_t
sf_read_double	(SNDFILE *sndfile, double *ptr, sf_count_t len)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_READ_ALIGN ;
		return 0 ;
		} ;

	if (len <= 0 || psf->read_current >= psf->sf.frames)
	{	psf_memset (ptr, 0, len * sizeof (double)) ;
		return 0 ;
		} ;

	if (psf->read_double == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf->read_double (psf, ptr, len) ;

	if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
		psf->read_current += count / psf->sf.channels ;
	else
	{	count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
		extra = len - count ;
		psf_memset (ptr + count, 0, extra * sizeof (double)) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count ;
} /* sf_read_double */

/* Frame-oriented variant of sf_read_double ; returns frames, not items. */
sf_count_t
sf_readf_double	(SNDFILE *sndfile, double *ptr, sf_count_t frames)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	if (frames <= 0 || psf->read_current >= psf->sf.frames)
	{	psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ;
		return 0 ;
		} ;

	if (psf->read_double == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf->read_double (psf, ptr, frames * psf->sf.channels) ;

	if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
		psf->read_current += count / psf->sf.channels ;
	else
	{	count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
		extra = frames * psf->sf.channels - count ;
		psf_memset (ptr + count, 0, extra * sizeof (double)) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count / psf->sf.channels ;
} /* sf_readf_double */

/*------------------------------------------------------------------------------
*/

/* Write 'len' raw bytes from ptr without any conversion.
** 'len' must be a multiple of (channels * bytewidth). Returns bytes written. */
sf_count_t
sf_write_raw	(SNDFILE *sndfile, const void *ptr, sf_count_t len)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;
	int			bytewidth, blockwidth ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	/* Fall back to 1 so the alignment check and frame accounting never divide by 0. */
	bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ;
	blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % (psf->sf.channels * bytewidth))
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	/* Emit the header lazily, before the first data write. */
	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf_fwrite (ptr, 1, len, psf) ;

	psf->write_current += count / blockwidth ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_raw */

/*------------------------------------------------------------------------------
*/

/* Write 'len' short items from ptr. 'len' must be a whole number of frames.
** Returns the number of items written. */
sf_count_t
sf_write_short	(SNDFILE *sndfile, const short *ptr, sf_count_t len)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->write_short == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_short (psf, ptr, len) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_short */

/* Frame-oriented variant of sf_write_short ; returns frames written. */
sf_count_t
sf_writef_short	(SNDFILE *sndfile, const short *ptr, sf_count_t frames)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (psf->write_short == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_short (psf, ptr, frames * psf->sf.channels) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count / psf->sf.channels ;
} /* sf_writef_short */

/*------------------------------------------------------------------------------
*/

/* Write 'len' int items from ptr. 'len' must be a whole number of frames. */
sf_count_t
sf_write_int	(SNDFILE *sndfile, const int *ptr, sf_count_t len)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->write_int == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_int (psf, ptr, len) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_int */

/* Frame-oriented variant of sf_write_int ; returns frames written. */
sf_count_t
sf_writef_int	(SNDFILE *sndfile, const int *ptr, sf_count_t frames)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (psf->write_int == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_int (psf, ptr, frames * psf->sf.channels) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count / psf->sf.channels ;
} /* sf_writef_int */

/*------------------------------------------------------------------------------
*/

/* Write 'len' float items from ptr. 'len' must be a whole number of frames. */
sf_count_t
sf_write_float	(SNDFILE *sndfile, const float *ptr, sf_count_t len)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->write_float == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_float (psf, ptr, len) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_float */
/* Frame-oriented variant of sf_write_float ; returns frames written. */
sf_count_t
sf_writef_float	(SNDFILE *sndfile, const float *ptr, sf_count_t frames)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (psf->write_float == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	/* Emit the header lazily, before the first data write. */
	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_float (psf, ptr, frames * psf->sf.channels) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count / psf->sf.channels ;
} /* sf_writef_float */

/*------------------------------------------------------------------------------
*/

/* Write 'len' double items from ptr. 'len' must be a whole number of frames. */
sf_count_t
sf_write_double	(SNDFILE *sndfile, const double *ptr, sf_count_t len)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->write_double == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_double (psf, ptr, len) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_double */

/* Frame-oriented variant of sf_write_double ; returns frames written. */
sf_count_t
sf_writef_double	(SNDFILE *sndfile, const double *ptr, sf_count_t frames)
{	SF_PRIVATE 	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (psf->write_double == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_double (psf, ptr, frames * psf->sf.channels) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count / psf->sf.channels ;
} /* sf_writef_double */

/*=========================================================================
** Private functions.
*/

/* Probe for a Mac-style resource fork ; returns SF_FORMAT_SD2 when one is
** found, 0 otherwise (restoring the previous psf->error in that case). */
static int
try_resource_fork (SF_PRIVATE * psf)
{	int old_error = psf->error ;

	/* Set READ mode now, to see if resource fork exists. */
	psf->rsrc.mode = SFM_READ ;
	if (psf_open_rsrc (psf) != 0)
	{	psf->error = old_error ;
		return 0 ;
		} ;

	/* More checking here. */
	psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ;

	return SF_FORMAT_SD2 ;
} /* try_resource_fork */

/* Guess a headerless format from the filename extension (au/snd/vox/gsm...).
** Also presets channels/samplerate for those formats. Returns 0 if unknown. */
static int
format_from_extension (SF_PRIVATE *psf)
{	char *cptr ;
	char buffer [16] ;
	int format = 0 ;

	if ((cptr = strrchr (psf->file.name.c, '.')) == NULL)
		return 0 ;

	cptr ++ ;
	if (strlen (cptr) > sizeof (buffer) - 1)
		return 0 ;

	psf_strlcpy (buffer, sizeof (buffer), cptr) ;
	buffer [sizeof (buffer) - 1] = 0 ;

	/* Convert everything in the buffer to lower case. */
	/* NOTE(review): tolower on a plain char is UB for negative values —
	** assumes extensions are plain ASCII ; confirm before widening. */
	cptr = buffer ;
	while (*cptr)
	{	*cptr = tolower (*cptr) ;
		cptr ++ ;
		} ;

	cptr = buffer ;

	if (strcmp (cptr, "au") == 0)
	{	psf->sf.channels = 1 ;
		psf->sf.samplerate = 8000 ;
		format = SF_FORMAT_RAW | SF_FORMAT_ULAW ;
		}
	else if (strcmp (cptr, "snd") == 0)
	{	psf->sf.channels = 1 ;
		psf->sf.samplerate = 8000 ;
		format = SF_FORMAT_RAW | SF_FORMAT_ULAW ;
		}
	else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0)
	{	psf->sf.channels = 1 ;
		psf->sf.samplerate = 8000 ;
		format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ;
		}
	else if (strcmp (cptr, "vox6") == 0)
	{	psf->sf.channels = 1 ;
		psf->sf.samplerate = 6000 ;
		format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ;
		}
	else if (strcmp (cptr, "gsm") == 0)
	{	psf->sf.channels = 1 ;
		psf->sf.samplerate = 8000 ;
		format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ;
		}

	/* For RAW files, make sure the dataoffset if set correctly. */
	if ((SF_CONTAINER (format)) == SF_FORMAT_RAW)
		psf->dataoffset = 0 ;

	return format ;
} /* format_from_extension */

/* Sniff the container format from the first 12 bytes of the file.
** Returns an SF_FORMAT_* container value or 0 when unrecognised. */
static int
guess_file_type (SF_PRIVATE *psf)
{	uint32_t buffer [3], format ;

	if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer))
	{	psf->error = SFE_BAD_FILE_READ ;
		return 0 ;
		} ;

	if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E'))
		return SF_FORMAT_WAV ;

	if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M'))
	{	if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C'))
			return SF_FORMAT_AIFF ;
		if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V'))
			return SF_FORMAT_SVX ;
		return 0 ;
		} ;

	if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.'))
		return SF_FORMAT_AU ;

	if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f')))
		return SF_FORMAT_PAF ;

	if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T'))
		return SF_FORMAT_NIST ;

	if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e'))
		return SF_FORMAT_VOC ;

	/* IRCAM magic can appear in either byte order ; mask off the variant bits. */
	if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64))
		return SF_FORMAT_IRCAM ;

	if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f'))
		return SF_FORMAT_W64 ;

	if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1))
		return SF_FORMAT_MAT4 ;

	if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0))
		return SF_FORMAT_MAT4 ;

	if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5'))
		return SF_FORMAT_MAT5 ;

	if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1'))
		return SF_FORMAT_PVF ;

	if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's'))
		return SF_FORMAT_XI ;

	if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c'))
		return SF_FORMAT_CAF ;

	if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S'))
		return SF_FORMAT_OGG ;

	if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l'))
		return SF_FORMAT_WVE ;

	if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer [1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' '))
		return SF_FORMAT_DWD ;

	if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0))
		return SF_FORMAT_TXW ;

	if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01))
		return SF_FORMAT_SDS ;

	if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0))
		return SF_FORMAT_MPC2K ;

	if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2'))
		return SF_FORMAT_REX2 ;

	if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11))
		return 0 /*-SF_FORMAT_WMA-*/ ;

	/* HMM (Hidden Markov Model) Tool Kit. */
	if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength)
		return SF_FORMAT_HTK ;

	if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C'))
		return SF_FORMAT_FLAC ;

	if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T'))
		return SF_FORMAT_AVR ;

	if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E'))
		return SF_FORMAT_RF64 ;

	/* An ID3v2 tag may precede the real header ; skip it and retry. */
	if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3))
	{	psf_log_printf (psf, "Found 'ID3' marker.\n") ;
		if (id3_skip (psf))
			return guess_file_type (psf) ;
		return 0 ;
		} ;

	/* Turtle Beach SMP 16-bit */
	if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A'))
		return 0 ;

	/* Yamaha sampler format. */
	if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5'))
		return 0 ;

	if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g'))
		return 0 /*-SF_FORMAT_SHN-*/ ;

	/* This must be the last one. */
	if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0)
		return format ;

	return 0 ;
} /* guess_file_type */

/* Sanity-check the public SF_INFO fields ; 1 when valid, 0 otherwise. */
static int
validate_sfinfo (SF_INFO *sfinfo)
{	if (sfinfo->samplerate < 1)
		return 0 ;
	if (sfinfo->frames < 0)
		return 0 ;
	if (sfinfo->channels < 1)
		return 0 ;
	if ((SF_CONTAINER (sfinfo->format)) == 0)
		return 0 ;
	if ((SF_CODEC (sfinfo->format)) == 0)
		return 0 ;
	if (sfinfo->sections < 1)
		return 0 ;
	return 1 ;
} /* validate_sfinfo */

/* Sanity-check internal SF_PRIVATE bookkeeping ; 1 when valid, 0 otherwise. */
static int
validate_psf (SF_PRIVATE *psf)
{
	if (psf->datalength < 0)
	{	psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ;
		return 0 ;
		} ;
	if (psf->dataoffset < 0)
	{	psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ;
		return 0 ;
		} ;
	if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth)
	{	psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n",
								psf->sf.channels * psf->bytewidth) ;
		return 0 ;
		} ;
	return 1 ;
} /* validate_psf */

/* Copy the header-parse log into the global sf_parselog buffer. */
static void
save_header_info (SF_PRIVATE *psf)
{	snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ;
} /* save_header_info */

/* Split 'path' into psf->file.path / .name / .dir.
** Returns 0, or SFE_FILENAME_TOO_LONG (also stored in psf->error). */
static int
copy_filename (SF_PRIVATE *psf, const char *path)
{	const char *ccptr ;
	char *cptr ;

	if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c))
	{	psf->error = SFE_FILENAME_TOO_LONG ;
		return psf->error ;
		} ;

	snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ;
	/* Basename starts after the last slash (either separator style). */
	if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\')))
		ccptr ++ ;
	else
		ccptr = path ;

	snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ;

	/* Now grab the directory. */
	snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ;
	if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\')))
		cptr [1] = 0 ;
	else
		psf->file.dir.c [0] = 0 ;

	return 0 ;
} /* copy_filename */

/*==============================================================================
*/

/* Close codec/container, release every owned allocation, poison and free psf.
** Returns the last non-trivial error from the close callbacks. */
static int
psf_close (SF_PRIVATE *psf)
{	uint32_t k ;
	int	error = 0 ;

	if (psf->codec_close)
	{	error = psf->codec_close (psf) ;
		/* To prevent it being called in psf->container_close(). */
		psf->codec_close = NULL ;
		} ;

	if (psf->container_close)
		error = psf->container_close (psf) ;

	error = psf_fclose (psf) ;
	psf_close_rsrc (psf) ;

	/* For an ISO C compliant implementation it is ok to free a NULL pointer. */
	free (psf->container_data) ;
	free (psf->codec_data) ;
	free (psf->interleave) ;
	free (psf->dither) ;
	free (psf->peak_info) ;
	free (psf->broadcast_16k) ;
	free (psf->loop_info) ;
	free (psf->instrument) ;
	free (psf->cues) ;
	free (psf->channel_map) ;
	free (psf->format_desc) ;
	free (psf->strings.storage) ;

	if (psf->wchunks.chunks)
		for (k = 0 ; k < psf->wchunks.used ; k++)
			free (psf->wchunks.chunks [k].data) ;
	free (psf->rchunks.chunks) ;
	free (psf->wchunks.chunks) ;
	free (psf->iterator) ;
	free (psf->cart_16k) ;

	/* Zero the struct so a stale SNDFILE* fails the Magick check. */
	memset (psf, 0, sizeof (SF_PRIVATE)) ;
	free (psf) ;

	return error ;
} /* psf_close */

/* Finish opening a file : validate the mode and SF_INFO, detect or verify the
** format, dispatch to the per-container open function and validate the result.
** On failure sets sf_errno/sf_parselog, destroys psf and returns NULL. */
SNDFILE *
psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo)
{	int		error, format ;

	sf_errno = error = 0 ;
	sf_parselog [0] = 0 ;

	if (psf->error)
	{	error = psf->error ;
		goto error_exit ;
		} ;

	if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR)
	{	error = SFE_BAD_OPEN_MODE ;
		goto error_exit ;
		} ;

	if (sfinfo == NULL)
	{	error = SFE_BAD_SF_INFO_PTR ;
		goto error_exit ;
		} ;

	if (psf->file.mode == SFM_READ)
	{	if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW)
		{	if (sf_format_check (sfinfo) == 0)
			{	error = SFE_RAW_BAD_FORMAT ;
				goto error_exit ;
				} ;
			}
		else
			memset (sfinfo, 0, sizeof (SF_INFO)) ;
		} ;

	memcpy (&psf->sf, sfinfo, sizeof (SF_INFO)) ;

	psf->Magick 		= SNDFILE_MAGICK ;
	psf->norm_float 	= SF_TRUE ;
	psf->norm_double	= SF_TRUE ;
	psf->dataoffset		= -1 ;
	psf->datalength		= -1 ;
	psf->read_current	= -1 ;
	psf->write_current	= -1 ;
	psf->auto_header 	= SF_FALSE ;
	psf->rwf_endian		= SF_ENDIAN_LITTLE ;
	psf->seek 			= psf_default_seek ;
	psf->float_int_mult = 0 ;
	psf->float_max		= -1.0 ;

	/* An attempt at a per SF_PRIVATE unique id. */
	psf->unique_id		= psf_rand_int32 () ;

	psf->sf.sections = 1 ;

	psf->is_pipe = psf_is_pipe (psf) ;

	if (psf->is_pipe)
	{	psf->sf.seekable = SF_FALSE ;
		psf->filelength = SF_COUNT_MAX ;
		}
	else
	{	psf->sf.seekable = SF_TRUE ;

		/* File is open, so get the length. */
		psf->filelength = psf_get_filelen (psf) ;
		} ;

	if (psf->fileoffset > 0)
	{	switch (psf->file.mode)
		{	case SFM_READ :
				if (psf->filelength < 44)
				{	psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ;
					error = SFE_BAD_OFFSET ;
					goto error_exit ;
					} ;
				break ;

			case SFM_WRITE :
				psf->fileoffset = 0 ;
				psf_fseek (psf, 0, SEEK_END) ;
				psf->fileoffset = psf_ftell (psf) ;
				break ;

			case SFM_RDWR :
				error = SFE_NO_EMBEDDED_RDWR ;
				goto error_exit ;
			} ;

		psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ;
		} ;

	if (psf->filelength == SF_COUNT_MAX)
		psf_log_printf (psf, "Length : unknown\n") ;
	else
		psf_log_printf (psf, "Length : %D\n", psf->filelength) ;

	if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0))
	{	/* If the file is being opened for write or RDWR and the file is currently
		** empty, then the SF_INFO struct must contain valid data.
		*/
		if ((SF_CONTAINER (psf->sf.format)) == 0)
		{	error = SFE_ZERO_MAJOR_FORMAT ;
			goto error_exit ;
			} ;
		if ((SF_CODEC (psf->sf.format)) == 0)
		{	error = SFE_ZERO_MINOR_FORMAT ;
			goto error_exit ;
			} ;

		if (sf_format_check (&psf->sf) == 0)
		{	error = SFE_BAD_OPEN_FORMAT ;
			goto error_exit ;
			} ;
		}
	else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW)
	{	/* If type RAW has not been specified then need to figure out file type. */
		psf->sf.format = guess_file_type (psf) ;

		if (psf->sf.format == 0)
			psf->sf.format = format_from_extension (psf) ;
		} ;

	/* Prevent unnecessary seeks */
	psf->last_op = psf->file.mode ;

	/* Set bytewidth if known. */
	switch (SF_CODEC (psf->sf.format))
	{	case SF_FORMAT_PCM_S8 :
		case SF_FORMAT_PCM_U8 :
		case SF_FORMAT_ULAW :
		case SF_FORMAT_ALAW :
		case SF_FORMAT_DPCM_8 :
				psf->bytewidth = 1 ;
				break ;

		case SF_FORMAT_PCM_16 :
		case SF_FORMAT_DPCM_16 :
				psf->bytewidth = 2 ;
				break ;

		case SF_FORMAT_PCM_24 :
				psf->bytewidth = 3 ;
				break ;

		case SF_FORMAT_PCM_32 :
		case SF_FORMAT_FLOAT :
				psf->bytewidth = 4 ;
				break ;

		case SF_FORMAT_DOUBLE :
				psf->bytewidth = 8 ;
				break ;
		} ;

	/* Call the initialisation function for the relevant file type. */
	switch (SF_CONTAINER (psf->sf.format))
	{	case	SF_FORMAT_WAV :
		case	SF_FORMAT_WAVEX :
				error = wav_open (psf) ;
				break ;

		case	SF_FORMAT_AIFF :
				error = aiff_open (psf) ;
				break ;

		case	SF_FORMAT_AU :
				error = au_open (psf) ;
				break ;

		case	SF_FORMAT_RAW :
				error = raw_open (psf) ;
				break ;

		case	SF_FORMAT_W64 :
				error = w64_open (psf) ;
				break ;

		case	SF_FORMAT_RF64 :
				error = rf64_open (psf) ;
				break ;

		/* Lite remove start */
		case	SF_FORMAT_PAF :
				error = paf_open (psf) ;
				break ;

		case	SF_FORMAT_SVX :
				error = svx_open (psf) ;
				break ;

		case	SF_FORMAT_NIST :
				error = nist_open (psf) ;
				break ;

		case	SF_FORMAT_IRCAM :
				error = ircam_open (psf) ;
				break ;

		case	SF_FORMAT_VOC :
				error = voc_open (psf) ;
				break ;

		case	SF_FORMAT_SDS :
				error = sds_open (psf) ;
				break ;

		case	SF_FORMAT_OGG :
				error = ogg_open (psf) ;
				break ;

		case	SF_FORMAT_TXW :
				error = txw_open (psf) ;
				break ;

		case	SF_FORMAT_WVE :
				error = wve_open (psf) ;
				break ;

		case	SF_FORMAT_DWD :
				error = dwd_open (psf) ;
				break ;

		case	SF_FORMAT_MAT4 :
				error = mat4_open (psf) ;
				break ;

		case	SF_FORMAT_MAT5 :
				error = mat5_open (psf) ;
				break ;

		case	SF_FORMAT_PVF :
				error = pvf_open (psf) ;
				break ;

		case	SF_FORMAT_XI :
				error = xi_open (psf) ;
				break ;

		case	SF_FORMAT_HTK :
				error = htk_open (psf) ;
				break ;

		case	SF_FORMAT_SD2 :
				error = sd2_open (psf) ;
				break ;

		case	SF_FORMAT_REX2 :
				error = rx2_open (psf) ;
				break ;

		case	SF_FORMAT_AVR :
				error = avr_open (psf) ;
				break ;

		case	SF_FORMAT_FLAC :
				error = flac_open (psf) ;
				break ;

		case	SF_FORMAT_CAF :
				error = caf_open (psf) ;
				break ;

		case	SF_FORMAT_MPC2K :
				error = mpc2k_open (psf) ;
				break ;

		/* Lite remove end */

		default :
				error = SFE_UNKNOWN_FORMAT ;
		} ;

	if (error)
		goto error_exit ;

	/* For now, check whether embedding is supported. */
	format = SF_CONTAINER (psf->sf.format) ;
	if (psf->fileoffset > 0)
	{	switch (format)
		{	case SF_FORMAT_WAV :
			case SF_FORMAT_WAVEX :
			case SF_FORMAT_AIFF :
			case SF_FORMAT_AU :
				/* Actual embedded files. */
				break ;

			case SF_FORMAT_FLAC :
				/* Flac with an ID3v2 header? */
				break ;

			default :
				error = SFE_NO_EMBED_SUPPORT ;
				goto error_exit ;
			} ;
		} ;

	if (psf->fileoffset > 0)
		psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ;

	if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0)
	{	error = SFE_BAD_MODE_RW ;
		goto error_exit ;
		} ;

	if (validate_sfinfo (&psf->sf) == 0)
	{	psf_log_SF_INFO (psf) ;
		save_header_info (psf) ;
		error = SFE_BAD_SF_INFO ;
		goto error_exit ;
		} ;

	if (validate_psf (psf) == 0)
	{	save_header_info (psf) ;
		error = SFE_INTERNAL ;
		goto error_exit ;
		} ;

	psf->read_current = 0 ;
	psf->write_current = 0 ;
	if (psf->file.mode == SFM_RDWR)
	{	psf->write_current = psf->sf.frames ;
		psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ;
		} ;

	memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ;

	if (psf->file.mode == SFM_WRITE)
	{	/* Zero out these fields. */
		sfinfo->frames = 0 ;
		sfinfo->sections = 0 ;
		sfinfo->seekable = 0 ;
		} ;

	return (SNDFILE *) psf ;

error_exit :
	sf_errno = error ;

	if (error == SFE_SYSTEM)
		snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ;
	snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ;

	switch (error)
	{	case SF_ERR_SYSTEM :
		case SF_ERR_UNSUPPORTED_ENCODING :
		case SFE_UNIMPLEMENTED :
			break ;

		case SFE_RAW_BAD_FORMAT :
			break ;

		default :
			if (psf->file.mode == SFM_READ)
			{	psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ;
				error = SF_ERR_MALFORMED_FILE ;
				} ;
		} ;

	psf_close (psf) ;

	return NULL ;
} /* psf_open_file */

/*==============================================================================
** Chunk getting and setting.
** This works for AIFF, CAF, RF64 and WAV.
** It doesn't work for W64 because W64 uses weird GUID style chunk markers.
*/

/* Queue a chunk for writing via the container's set_chunk callback. */
int
sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE 	*psf ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info == NULL || chunk_info->data == NULL)
		return SFE_BAD_CHUNK_PTR ;

	if (psf->set_chunk)
		return psf->set_chunk (psf, chunk_info) ;

	return SFE_BAD_CHUNK_FORMAT ;
} /* sf_set_chunk */

/* Create a chunk iterator, optionally filtered by chunk_info->id. */
SF_CHUNK_ITERATOR *
sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE 	*psf ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info)
		return psf_get_chunk_iterator (psf, chunk_info->id) ;

	return psf_get_chunk_iterator (psf, NULL) ;
} /* sf_get_chunk_iterator */

/* Advance a chunk iterator ; NULL when exhausted or unsupported. */
SF_CHUNK_ITERATOR *
sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator)
{	SF_PRIVATE 	*psf ;
	SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->next_chunk_iterator)
		return psf->next_chunk_iterator (psf, iterator) ;

	return NULL ;
} /* sf_get_chunk_iterator_next */

/* Fill chunk_info->datalen for the chunk the iterator points at. */
int
sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE 	*psf ;
	SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info == NULL)
		return SFE_BAD_CHUNK_PTR ;

	if (psf->get_chunk_size)
		return psf->get_chunk_size (psf, iterator, chunk_info) ;

	return SFE_BAD_CHUNK_FORMAT ;
	/* NOTE(review): the following return is unreachable ; kept as-is. */
	return 0 ;
} /* sf_get_chunk_size */

/* Copy the current chunk's data into chunk_info->data. */
int
sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info)
{	SF_PRIVATE 	*psf ;
	SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (chunk_info == NULL || chunk_info->data == NULL)
		return SFE_BAD_CHUNK_PTR ;

	if (psf->get_chunk_data)
		return psf->get_chunk_data (psf, iterator, chunk_info) ;

	return SFE_BAD_CHUNK_FORMAT ;
} /* sf_get_chunk_data */
/*
** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU Lesser General Public License as published by
** the Free Software Foundation; either version 2.1 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU Lesser General Public License for more details.
**
** You should have received a copy of the GNU Lesser General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

#include "sfconfig.h"

#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <assert.h>

#include "sndfile.h"
#include "sfendian.h"
#include "common.h"

#define SNDFILE_MAGICK	0x1234C0DE

#ifdef __APPLE__
	/*
	** Detect if a compile for a universal binary is being attempted and barf if it is.
	** See the URL below for the rationale.
	*/
	#ifdef __BIG_ENDIAN__
		#if (CPU_IS_LITTLE_ENDIAN == 1)
			#error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018"
		#endif
	#endif

	#ifdef __LITTLE_ENDIAN__
		#if (CPU_IS_BIG_ENDIAN == 1)
			#error "Universal binary compile detected. See http://www.mega-nerd.com/libsndfile/FAQ.html#Q018"
		#endif
	#endif
#endif

/* Maps an SFE_* / SF_ERR_* error code to its human readable string. */
typedef struct
{	int 		error ;
	const char	*str ;
} ErrorStruct ;

static
ErrorStruct SndfileErrors [] =
{
	/* Public error values and their associated strings. */
	{	SF_ERR_NO_ERROR				, "No Error." },
	{	SF_ERR_UNRECOGNISED_FORMAT	, "Format not recognised." },
	{	SF_ERR_SYSTEM				, "System error." /* Often replaced. */ 	},
	{	SF_ERR_MALFORMED_FILE		, "Supported file format but file is malformed." },
	{	SF_ERR_UNSUPPORTED_ENCODING	, "Supported file format but unsupported encoding." },

	/* Private error values and their associated strings. */
	{	SFE_ZERO_MAJOR_FORMAT	, "Error : major format is 0." },
	{	SFE_ZERO_MINOR_FORMAT	, "Error : minor format is 0." },
	{	SFE_BAD_FILE			, "File does not exist or is not a regular file (possibly a pipe?)." },
	{	SFE_BAD_FILE_READ		, "File exists but no data could be read." },
	{	SFE_OPEN_FAILED			, "Could not open file." },
	{	SFE_BAD_SNDFILE_PTR		, "Not a valid SNDFILE* pointer." },
	{	SFE_BAD_SF_INFO_PTR		, "NULL SF_INFO pointer passed to libsndfile." },
	{	SFE_BAD_SF_INCOMPLETE	, "SF_PRIVATE struct incomplete and end of header parsing." },
	{	SFE_BAD_FILE_PTR		, "Bad FILE pointer." },
	{	SFE_BAD_INT_PTR			, "Internal error, Bad pointer." },
	{	SFE_BAD_STAT_SIZE		, "Error : software was misconfigured at compile time (sizeof statbuf.st_size)." },
	/* Fixed wrong word : "Could not file temp dir." -> "find". */
	{	SFE_NO_TEMP_DIR			, "Error : Could not find temp dir." },
	{	SFE_MALLOC_FAILED		, "Internal malloc () failed." },
	{	SFE_UNIMPLEMENTED		, "File contains data in an unimplemented format." },
	{	SFE_BAD_READ_ALIGN		, "Attempt to read a non-integer number of channels." },
	{	SFE_BAD_WRITE_ALIGN 	, "Attempt to write a non-integer number of channels." },
	{	SFE_UNKNOWN_FORMAT		, "File contains data in an unknown format." },
	{	SFE_NOT_READMODE		, "Read attempted on file currently open for write." },
	{	SFE_NOT_WRITEMODE		, "Write attempted on file currently open for read." },
	{	SFE_BAD_MODE_RW			, "Error : This file format does not support read/write mode." },
	{	SFE_BAD_SF_INFO			, "Internal error : SF_INFO struct incomplete." },
	{	SFE_BAD_OFFSET			, "Error : supplied offset beyond end of file." },
	{	SFE_NO_EMBED_SUPPORT	, "Error : embedding not supported for this file format." },
	{	SFE_NO_EMBEDDED_RDWR	, "Error : cannot open embedded file read/write." },
	{	SFE_NO_PIPE_WRITE		, "Error : this file format does not support pipe write." },
	{	SFE_BAD_VIRTUAL_IO		, "Error : bad pointer on SF_VIRTUAL_IO struct." },
	{	SFE_BAD_BROADCAST_INFO_SIZE, "Error : bad coding_history_size in SF_BROADCAST_INFO struct." },
	{	SFE_BAD_BROADCAST_INFO_TOO_BIG, "Error : SF_BROADCAST_INFO struct too large." },
	/* The two SF_CART_INFO messages were swapped relative to their names
	** (compare the SF_BROADCAST_INFO pair above) and contained the typo
	** "bag" for "bad". Both fixed here.
	*/
	{	SFE_BAD_CART_INFO_SIZE, "Error: bad tag_text_size in SF_CART_INFO struct." },
	{	SFE_BAD_CART_INFO_TOO_BIG, "Error: SF_CART_INFO struct too large." },
	{	SFE_INTERLEAVE_MODE		, "Attempt to write to file with non-interleaved data." },
	{	SFE_INTERLEAVE_SEEK		, "Bad karma in seek during interleave read operation." },
	{	SFE_INTERLEAVE_READ		, "Bad karma in read during interleave read operation." },

	{	SFE_INTERNAL			, "Unspecified internal error." },
	{	SFE_BAD_COMMAND_PARAM	, "Bad parameter passed to function sf_command." },
	{	SFE_BAD_ENDIAN			, "Bad endian-ness. Try default endian-ness" },
	{	SFE_CHANNEL_COUNT_ZERO	, "Channel count is zero." },
	{	SFE_CHANNEL_COUNT		, "Too many channels specified." },
	{	SFE_CHANNEL_COUNT_BAD	, "Bad channel count." },

	{	SFE_BAD_SEEK			, "Internal psf_fseek() failed." },
	{	SFE_NOT_SEEKABLE		, "Seek attempted on unseekable file type." },
	{	SFE_AMBIGUOUS_SEEK		, "Error : combination of file open mode and seek command is ambiguous." },
	{	SFE_WRONG_SEEK			, "Error : invalid seek parameters." },
	{	SFE_SEEK_FAILED			, "Error : parameters OK, but psf_seek() failed." },

	{	SFE_BAD_OPEN_MODE		, "Error : bad mode parameter for file open." },
	{	SFE_OPEN_PIPE_RDWR		, "Error : attempt to open a pipe in read/write mode." },
	{	SFE_RDWR_POSITION		, "Error on RDWR position (cryptic)." },
	{	SFE_RDWR_BAD_HEADER		, "Error : Cannot open file in read/write mode due to string data in header." },
	{	SFE_CMD_HAS_DATA		, "Error : Command fails because file already has audio data." },

	{	SFE_STR_NO_SUPPORT		, "Error : File type does not support string data." },
	{	SFE_STR_NOT_WRITE		, "Error : Trying to set a string when file is not in write mode." },
	{	SFE_STR_MAX_DATA		, "Error : Maximum string data storage reached." },
	{	SFE_STR_MAX_COUNT		, "Error : Maximum string data count reached." },
	{	SFE_STR_BAD_TYPE		, "Error : Bad string data type." },
	{	SFE_STR_NO_ADD_END		, "Error : file type does not support strings added at end of file." },
	{	SFE_STR_BAD_STRING		, "Error : bad string." },
	{	SFE_STR_WEIRD			, "Error : Weird string error." },

	{	SFE_WAV_NO_RIFF			, "Error in WAV file. No 'RIFF' chunk marker." },
	{	SFE_WAV_NO_WAVE			, "Error in WAV file. No 'WAVE' chunk marker." },
	{	SFE_WAV_NO_FMT			, "Error in WAV/W64/RF64 file. No 'fmt ' chunk marker." },
	{	SFE_WAV_BAD_FMT			, "Error in WAV/W64/RF64 file. Malformed 'fmt ' chunk." },
	{	SFE_WAV_FMT_SHORT		, "Error in WAV/W64/RF64 file. Short 'fmt ' chunk." },

	{	SFE_WAV_BAD_FACT		, "Error in WAV file. 'fact' chunk out of place." },
	{	SFE_WAV_BAD_PEAK		, "Error in WAV file. Bad 'PEAK' chunk." },
	{	SFE_WAV_PEAK_B4_FMT		, "Error in WAV file. 'PEAK' chunk found before 'fmt ' chunk." },

	{	SFE_WAV_BAD_FORMAT		, "Error in WAV file. Errors in 'fmt ' chunk." },
	{	SFE_WAV_BAD_BLOCKALIGN	, "Error in WAV file. Block alignment in 'fmt ' chunk is incorrect." },
	{	SFE_WAV_NO_DATA			, "Error in WAV file. No 'data' chunk marker." },
	{	SFE_WAV_BAD_LIST		, "Error in WAV file. Malformed LIST chunk." },
	{	SFE_WAV_UNKNOWN_CHUNK	, "Error in WAV file. File contains an unknown chunk marker." },
	{	SFE_WAV_WVPK_DATA		, "Error in WAV file. Data is in WAVPACK format." },

	{	SFE_WAV_ADPCM_NOT4BIT	, "Error in ADPCM WAV file. Invalid bit width." },
	{	SFE_WAV_ADPCM_CHANNELS	, "Error in ADPCM WAV file. Invalid number of channels." },
	{	SFE_WAV_ADPCM_SAMPLES	, "Error in ADPCM WAV file. Invalid number of samples per block." },
	{	SFE_WAV_GSM610_FORMAT	, "Error in GSM610 WAV file. Invalid format chunk." },

	{	SFE_AIFF_NO_FORM		, "Error in AIFF file, bad 'FORM' marker." },
	{	SFE_AIFF_AIFF_NO_FORM	, "Error in AIFF file, 'AIFF' marker without 'FORM'." },
	{	SFE_AIFF_COMM_NO_FORM	, "Error in AIFF file, 'COMM' marker without 'FORM'." },
	{	SFE_AIFF_SSND_NO_COMM	, "Error in AIFF file, 'SSND' marker without 'COMM'." },
	{	SFE_AIFF_UNKNOWN_CHUNK	, "Error in AIFF file, unknown chunk." },
	{	SFE_AIFF_COMM_CHUNK_SIZE, "Error in AIFF file, bad 'COMM' chunk size." },
	{	SFE_AIFF_BAD_COMM_CHUNK , "Error in AIFF file, bad 'COMM' chunk." },
	{	SFE_AIFF_PEAK_B4_COMM	, "Error in AIFF file. 'PEAK' chunk found before 'COMM' chunk." },
	{	SFE_AIFF_BAD_PEAK		, "Error in AIFF file. Bad 'PEAK' chunk." },
	{	SFE_AIFF_NO_SSND		, "Error in AIFF file, bad 'SSND' chunk." },
	{	SFE_AIFF_NO_DATA		, "Error in AIFF file, no sound data." },
	{	SFE_AIFF_RW_SSND_NOT_LAST, "Error in AIFF file, RDWR only possible if SSND chunk at end of file." },

	{	SFE_AU_UNKNOWN_FORMAT	, "Error in AU file, unknown format." },
	{	SFE_AU_NO_DOTSND		, "Error in AU file, missing '.snd' or 'dns.' marker." },
	{	SFE_AU_EMBED_BAD_LEN	, "Embedded AU file with unknown length." },

	{	SFE_RAW_READ_BAD_SPEC	, "Error while opening RAW file for read. Must specify format and channels.\n"
								  "Possibly trying to open unsupported format."
								},
	{	SFE_RAW_BAD_BITWIDTH	, "Error. RAW file bitwidth must be a multiple of 8." },
	{	SFE_RAW_BAD_FORMAT		, "Error. Bad format field in SF_INFO struct when opening a RAW file for read." },

	{	SFE_PAF_NO_MARKER		, "Error in PAF file, no marker." },
	{	SFE_PAF_VERSION			, "Error in PAF file, bad version." },
	{	SFE_PAF_UNKNOWN_FORMAT	, "Error in PAF file, unknown format." },
	{	SFE_PAF_SHORT_HEADER	, "Error in PAF file. File shorter than minimal header." },
	{	SFE_PAF_BAD_CHANNELS	, "Error in PAF file. Bad channel count." },

	{	SFE_SVX_NO_FORM			, "Error in 8SVX / 16SV file, no 'FORM' marker." },
	{	SFE_SVX_NO_BODY			, "Error in 8SVX / 16SV file, no 'BODY' marker." },
	{	SFE_SVX_NO_DATA			, "Error in 8SVX / 16SV file, no sound data." },
	{	SFE_SVX_BAD_COMP		, "Error in 8SVX / 16SV file, unsupported compression format." },
	{	SFE_SVX_BAD_NAME_LENGTH	, "Error in 8SVX / 16SV file, NAME chunk too long." },

	{	SFE_NIST_BAD_HEADER		, "Error in NIST file, bad header." },
	{	SFE_NIST_CRLF_CONVERISON, "Error : NIST file damaged by Windows CR -> CRLF conversion process."	},
	{	SFE_NIST_BAD_ENCODING	, "Error in NIST file, unsupported compression format." },

	{	SFE_VOC_NO_CREATIVE		, "Error in VOC file, no 'Creative Voice File' marker." },
	{	SFE_VOC_BAD_FORMAT		, "Error in VOC file, bad format." },
	{	SFE_VOC_BAD_VERSION		, "Error in VOC file, bad version number." },
	{	SFE_VOC_BAD_MARKER		, "Error in VOC file, bad marker in file." },
	{	SFE_VOC_BAD_SECTIONS	, "Error in VOC file, incompatible VOC sections." },
	{	SFE_VOC_MULTI_SAMPLERATE, "Error in VOC file, more than one sample rate defined." },
	{	SFE_VOC_MULTI_SECTION	, "Unimplemented VOC file feature, file contains multiple sound sections." },
	{	SFE_VOC_MULTI_PARAM		, "Error in VOC file, file contains multiple bit or channel widths." },
	{	SFE_VOC_SECTION_COUNT	, "Error in VOC file, too many sections." },
	{	SFE_VOC_NO_PIPE			, "Error : not able to operate on VOC files over a pipe." },

	{	SFE_IRCAM_NO_MARKER		, "Error in IRCAM file, bad IRCAM marker." },
	{	SFE_IRCAM_BAD_CHANNELS	, "Error in IRCAM file, bad channel count." },
	{	SFE_IRCAM_UNKNOWN_FORMAT, "Error in IRCAM file, unknown encoding format." },

	{	SFE_W64_64_BIT			, "Error in W64 file, file contains 64 bit offset." },
	{	SFE_W64_NO_RIFF			, "Error in W64 file. No 'riff' chunk marker." },
	{	SFE_W64_NO_WAVE			, "Error in W64 file. No 'wave' chunk marker." },
	{	SFE_W64_NO_DATA			, "Error in W64 file. No 'data' chunk marker." },
	{	SFE_W64_ADPCM_NOT4BIT	, "Error in ADPCM W64 file. Invalid bit width." },
	{	SFE_W64_ADPCM_CHANNELS	, "Error in ADPCM W64 file. Invalid number of channels." },
	{	SFE_W64_GSM610_FORMAT	, "Error in GSM610 W64 file. Invalid format chunk." },

	{	SFE_MAT4_BAD_NAME		, "Error in MAT4 file. No variable name." },
	{	SFE_MAT4_NO_SAMPLERATE	, "Error in MAT4 file. No sample rate." },

	{	SFE_MAT5_BAD_ENDIAN		, "Error in MAT5 file. Not able to determine endian-ness." },
	{	SFE_MAT5_NO_BLOCK		, "Error in MAT5 file. Bad block structure." },
	{	SFE_MAT5_SAMPLE_RATE	, "Error in MAT5 file. Not able to determine sample rate." },

	{	SFE_PVF_NO_PVF1			, "Error in PVF file. No PVF1 marker." },
	{	SFE_PVF_BAD_HEADER		, "Error in PVF file. Bad header." },
	{	SFE_PVF_BAD_BITWIDTH	, "Error in PVF file. Bad bit width." },

	{	SFE_XI_BAD_HEADER		, "Error in XI file. Bad header." },
	{	SFE_XI_EXCESS_SAMPLES	, "Error in XI file. Excess samples in file." },
	{	SFE_XI_NO_PIPE			, "Error : not able to operate on XI files over a pipe." },

	{	SFE_HTK_NO_PIPE			, "Error : not able to operate on HTK files over a pipe." },

	{	SFE_SDS_NOT_SDS			, "Error : not an SDS file." },
	{	SFE_SDS_BAD_BIT_WIDTH	, "Error : bad bit width for SDS file." },

	{	SFE_SD2_FD_DISALLOWED	, "Error : cannot open SD2 file without a file name." },
	{	SFE_SD2_BAD_DATA_OFFSET	, "Error : bad data offset." },
	{	SFE_SD2_BAD_MAP_OFFSET	, "Error : bad map offset." },
	{	SFE_SD2_BAD_DATA_LENGTH	, "Error : bad data length." },
	{	SFE_SD2_BAD_MAP_LENGTH	, "Error : bad map length." },
	{	SFE_SD2_BAD_RSRC		, "Error : bad resource fork." },
	{	SFE_SD2_BAD_SAMPLE_SIZE	, "Error : bad sample size." },

	{	SFE_FLAC_BAD_HEADER		, "Error : bad flac header." },
	{	SFE_FLAC_NEW_DECODER	, "Error : problem while creating flac decoder." },
	{	SFE_FLAC_INIT_DECODER	, "Error : problem while initialization of the flac decoder." },
	{	SFE_FLAC_LOST_SYNC		, "Error : flac decoder lost sync." },
	{	SFE_FLAC_BAD_SAMPLE_RATE, "Error : flac does not support this sample rate." },
	{	SFE_FLAC_UNKOWN_ERROR	, "Error : unknown error in flac decoder." },

	{	SFE_WVE_NOT_WVE			, "Error : not a WVE file." },
	{	SFE_WVE_NO_PIPE			, "Error : not able to operate on WVE files over a pipe." },

	{	SFE_DWVW_BAD_BITWIDTH	, "Error : Bad bit width for DWVW encoding. Must be 12, 16 or 24." },
	{	SFE_G72X_NOT_MONO		, "Error : G72x encoding does not support more than 1 channel." },
	{	SFE_VORBIS_ENCODER_BUG	, "Error : Sample rate chosen is known to trigger a Vorbis encoder bug on this CPU." },

	{	SFE_RF64_NOT_RF64		, "Error : Not an RF64 file." },
	{	SFE_RF64_PEAK_B4_FMT	, "Error in RF64 file. 'PEAK' chunk found before 'fmt ' chunk." },
	{	SFE_RF64_NO_DATA		, "Error in RF64 file. No 'data' chunk marker." },

	{	SFE_ALAC_FAIL_TMPFILE	, "Error : Failed to open tmp file for ALAC encoding." },

	{	SFE_BAD_CHUNK_PTR		, "Error : Bad SF_CHUNK_INFO pointer." },
	{	SFE_UNKNOWN_CHUNK		, "Error : Unknown chunk marker." },
	{	SFE_BAD_CHUNK_FORMAT	, "Error : Reading/writing chunks from this file format is not supported." },
	{	SFE_BAD_CHUNK_MARKER	, "Error : Bad chunk marker." },
	{	SFE_BAD_CHUNK_DATA_PTR	, "Error : Bad data pointer in SF_CHUNK_INFO struct." },
	{	SFE_FILENAME_TOO_LONG	, "Error : Supplied filename too long." },
	{	SFE_BAD_HEADER_ALLOC	, "Error : Required header allocation is too large." },

	{	SFE_MAX_ERROR			, "Maximum error number." },
	{	SFE_MAX_ERROR + 1		, NULL }
} ;

/*------------------------------------------------------------------------------
*/

static int 	format_from_extension (SF_PRIVATE *psf) ;
static int	guess_file_type (SF_PRIVATE *psf) ;
static int	validate_sfinfo (SF_INFO *sfinfo) ;
static int	validate_psf (SF_PRIVATE *psf) ;
static void	save_header_info (SF_PRIVATE *psf) ;
static int	copy_filename (SF_PRIVATE *psf, const char *path) ;
static int	psf_close (SF_PRIVATE *psf) ;

static int	try_resource_fork (SF_PRIVATE * psf) ;

/*------------------------------------------------------------------------------
** Private (static) variables.
*/ int	sf_errno = 0 ;
/* Error state and logs for failures that happen before a valid SF_PRIVATE exists. */
static char	sf_parselog [SF_BUFFER_LEN] = { 0 } ;
static char	sf_syserr [SF_SYSERR_LEN] = { 0 } ;

/*------------------------------------------------------------------------------
*/

/* Validate the public SNDFILE* handle and assign the internal SF_PRIVATE* to b.
** Checks NULL, file validity (unless virtual I/O) and the magic number ;
** returns 0 from the ENCLOSING function on failure. c != 0 clears b->error.
*/
#define	VALIDATE_SNDFILE_AND_ASSIGN_PSF(a, b, c)		\
		{	if ((a) == NULL)							\
			{	sf_errno = SFE_BAD_SNDFILE_PTR ;		\
				return 0 ;								\
				} ;										\
			(b) = (SF_PRIVATE*) (a) ;					\
			if ((b)->virtual_io == SF_FALSE &&			\
				psf_file_valid (b) == 0)				\
			{	(b)->error = SFE_BAD_FILE_PTR ;			\
				return 0 ;								\
				} ;										\
			if ((b)->Magick != SNDFILE_MAGICK)			\
			{	(b)->error = SFE_BAD_SNDFILE_PTR ;		\
				return 0 ;								\
				} ;										\
			if (c) (b)->error = 0 ;						\
			}

/*------------------------------------------------------------------------------
**	Public functions.
*/

/* Open a file by path. "-" opens stdin/stdout depending on mode. */
SNDFILE*
sf_open	(const char *path, int mode, SF_INFO *sfinfo)
{	SF_PRIVATE 	*psf ;

	/* Ultimate sanity check. */
	assert (sizeof (sf_count_t) == 8) ;

	if ((psf = psf_allocate ()) == NULL)
	{	sf_errno = SFE_MALLOC_FAILED ;
		return	NULL ;
		} ;

	psf_init_files (psf) ;

	psf_log_printf (psf, "File : %s\n", path) ;

	if (copy_filename (psf, path) != 0)
	{	sf_errno = psf->error ;
		return	NULL ;
		} ;

	psf->file.mode = mode ;
	if (strcmp (path, "-") == 0)
		psf->error = psf_set_stdio (psf) ;
	else
		psf->error = psf_fopen (psf) ;

	return psf_open_file (psf, sfinfo) ;
} /* sf_open */

/* Open from an existing file descriptor. SD2 is refused because it needs a
** file name (resource fork). close_desc == 0 leaves the fd open on sf_close.
*/
SNDFILE*
sf_open_fd	(int fd, int mode, SF_INFO *sfinfo, int close_desc)
{	SF_PRIVATE 	*psf ;

	if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2)
	{	sf_errno = SFE_SD2_FD_DISALLOWED ;
		return	NULL ;
		} ;

	if ((psf = psf_allocate ()) == NULL)
	{	sf_errno = SFE_MALLOC_FAILED ;
		return	NULL ;
		} ;

	psf_init_files (psf) ;
	copy_filename (psf, "") ;

	psf->file.mode = mode ;
	psf_set_file (psf, fd) ;
	psf->is_pipe = psf_is_pipe (psf) ;
	psf->fileoffset = psf_ftell (psf) ;

	if (! close_desc)
		psf->file.do_not_close_descriptor = SF_TRUE ;

	return psf_open_file (psf, sfinfo) ;
} /* sf_open_fd */

/* Open via caller supplied virtual I/O callbacks. Required callbacks depend
** on the open mode ; get_filelen/seek/tell are always required.
*/
SNDFILE*
sf_open_virtual	(SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data)
{	SF_PRIVATE 	*psf ;

	/* Make sure we have a valid set of virtual pointers. */
	if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL)
	{	sf_errno = SFE_BAD_VIRTUAL_IO ;
		snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ;
		return NULL ;
		} ;

	if ((psf = psf_allocate ()) == NULL)
	{	sf_errno = SFE_MALLOC_FAILED ;
		return	NULL ;
		} ;

	psf_init_files (psf) ;

	psf->virtual_io = SF_TRUE ;
	psf->vio = *sfvirtual ;
	psf->vio_user_data = user_data ;

	psf->file.mode = mode ;

	return psf_open_file (psf, sfinfo) ;
} /* sf_open_virtual */

/* Close the handle and free all resources. */
int
sf_close	(SNDFILE *sndfile)
{	SF_PRIVATE	*psf ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	return psf_close (psf) ;
} /* sf_close */

/* Flush pending writes to disk via psf_fsync. Silently ignores NULL. */
void
sf_write_sync	(SNDFILE *sndfile)
{	SF_PRIVATE	*psf ;

	if ((psf = (SF_PRIVATE *) sndfile) == NULL)
		return ;

	psf_fsync (psf) ;

	return ;
} /* sf_write_sync */

/*==============================================================================
*/

/* Map an error code to its message via the SndfileErrors table. */
const char*
sf_error_number	(int errnum)
{	static const char *bad_errnum =
		"No error defined for this error number. This is a bug in libsndfile." ;
	int	k ;

	if (errnum == SFE_MAX_ERROR)
		return SndfileErrors [0].str ;

	if (errnum < 0 || errnum > SFE_MAX_ERROR)
	{	/* This really shouldn't happen in release versions. */
		printf ("Not a valid error number (%d).\n", errnum) ;
		return bad_errnum ;
		} ;

	for (k = 0 ; SndfileErrors [k].str ; k++)
		if (errnum == SndfileErrors [k].error)
			return SndfileErrors [k].str ;

	return bad_errnum ;
} /* sf_error_number */

/* Message for the handle's current error ; with a NULL handle, uses the
** global sf_errno set before a handle existed. SFE_SYSTEM prefers the
** saved system error string when available.
*/
const char*
sf_strerror (SNDFILE *sndfile)
{	SF_PRIVATE 	*psf = NULL ;
	int errnum ;

	if (sndfile == NULL)
	{	errnum = sf_errno ;
		if (errnum == SFE_SYSTEM && sf_syserr [0])
			return sf_syserr ;
		}
	else
	{	psf = (SF_PRIVATE *) sndfile ;

		if (psf->Magick != SNDFILE_MAGICK)
			return	"sf_strerror : Bad magic number." ;

		errnum = psf->error ;

		if (errnum == SFE_SYSTEM && psf->syserr [0])
			return psf->syserr ;
		} ;

	return sf_error_number (errnum) ;
} /* sf_strerror */

/*------------------------------------------------------------------------------
*/

/* Return the current error code (global sf_errno when sndfile is NULL). */
int
sf_error (SNDFILE *sndfile)
{	SF_PRIVATE	*psf ;

	if (sndfile == NULL)
		return sf_errno ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ;

	if (psf->error)
		return psf->error ;

	return 0 ;
} /* sf_error */

/*------------------------------------------------------------------------------
*/

/* Print the current error message to stderr. */
int
sf_perror (SNDFILE *sndfile)
{	SF_PRIVATE 	*psf ;
	int 		errnum ;

	if (sndfile == NULL)
	{	errnum = sf_errno ;
		}
	else
	{	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ;
		errnum = psf->error ;
		} ;

	fprintf (stderr, "%s\n", sf_error_number (errnum)) ;
	return SFE_NO_ERROR ;
} /* sf_perror */

/*------------------------------------------------------------------------------
*/

/* Copy the current error message into the caller supplied buffer. */
int
sf_error_str (SNDFILE *sndfile, char *str, size_t maxlen)
{	SF_PRIVATE 	*psf ;
	int 		errnum ;

	if (str == NULL)
		return SFE_INTERNAL ;

	if (sndfile == NULL)
		errnum = sf_errno ;
	else
	{	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 0) ;
		errnum = psf->error ;
		} ;

	snprintf (str, maxlen, "%s", sf_error_number (errnum)) ;

	return SFE_NO_ERROR ;
} /* sf_error_str */

/*==============================================================================
*/

/* Check whether the container / codec / endian / channel combination in info
** is one libsndfile can write.
*/
int
sf_format_check	(const SF_INFO *info)
{	int	subformat, endian ;

	subformat = SF_CODEC (info->format) ;
	endian = SF_ENDIAN (info->format) ;

	/* This is the place where each file format can check if the supplied
	** SF_INFO struct is valid.
	** Return 0 on failure, 1 on success.
	*/

	if (info->channels < 1 || info->channels > SF_MAX_CHANNELS)
		return 0 ;

	if (info->samplerate < 0)
		return 0 ;

	switch (SF_CONTAINER (info->format))
	{	case SF_FORMAT_WAV :
				/* WAV now allows both endian, RIFF or RIFX (little or big respectively) */
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_G721_32 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_WAVEX :
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_AIFF :
				/* AIFF does allow both endian-nesses for PCM data.*/
				if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				/* For other encodings reject any endian-ness setting. */
				if (endian != 0)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 ||
							subformat == SF_FORMAT_DWVW_24) && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_IMA_ADPCM && (info->channels == 1 || info->channels == 2))
					return 1 ;
				break ;

		case SF_FORMAT_AU :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				if (subformat == SF_FORMAT_G721_32 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_G723_24 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_G723_40 && info->channels == 1)
					return 1 ;
				break ;

		case SF_FORMAT_CAF :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_ALAC_16 || subformat == SF_FORMAT_ALAC_20)
					return 1 ;
				if (subformat == SF_FORMAT_ALAC_24 || subformat == SF_FORMAT_ALAC_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_RAW :
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				if (subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_ULAW)
					return 1 ;
				if ((subformat == SF_FORMAT_DWVW_12 || subformat == SF_FORMAT_DWVW_16 ||
							subformat == SF_FORMAT_DWVW_24) && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_VOX_ADPCM && info->channels == 1)
					return 1 ;
				break ;

		case SF_FORMAT_PAF :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24)
					return 1 ;
				break ;

		case SF_FORMAT_SVX :
				/* SVX only supports writing mono SVX files. */
				if (info->channels > 1)
					return 0 ;
				/* Always big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;

				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_NIST :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				break ;

		case SF_FORMAT_IRCAM :
				if (info->channels > 256)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW || subformat == SF_FORMAT_FLOAT)
					return 1 ;
				break ;

		case SF_FORMAT_VOC :
				if (info->channels > 2)
					return 0 ;
				/* VOC is strictly little endian. */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				break ;

		case SF_FORMAT_W64 :
				/* W64 is strictly little endian. */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if ((subformat == SF_FORMAT_IMA_ADPCM || subformat == SF_FORMAT_MS_ADPCM) && info->channels <= 2)
					return 1 ;
				if (subformat == SF_FORMAT_GSM610 && info->channels == 1)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_MAT4 :
				if (subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_MAT5 :
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;

		case SF_FORMAT_PVF :
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				break ;

		case SF_FORMAT_XI :
				if (info->channels != 1)
					return 0 ;
				if (subformat == SF_FORMAT_DPCM_8 || subformat == SF_FORMAT_DPCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_HTK :
				if (info->channels != 1)
					return 0 ;
				/* HTK is strictly big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_SDS :
				if (info->channels != 1)
					return 0 ;
				/* SDS is strictly big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24)
					return 1 ;
				break ;

		case SF_FORMAT_AVR :
				if (info->channels > 2)
					return 0 ;
				/* AVR is strictly big endian. (Comment previously said "SDS" - copy/paste.) */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_FLAC :
				/* FLAC can't do more than 8 channels. */
				if (info->channels > 8)
					return 0 ;
				if (endian != SF_ENDIAN_FILE)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24)
					return 1 ;
				break ;

		case SF_FORMAT_SD2 :
				/* SD2 is strictly big endian. */
				if (endian == SF_ENDIAN_LITTLE || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_S8 || subformat == SF_FORMAT_PCM_16 || subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				break ;

		case SF_FORMAT_WVE :
				if (info->channels > 1)
					return 0 ;
				/* The check below rejects big/CPU endian, i.e. little endian only
				** (the original comment said "big endian" - it contradicted the code). */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_ALAW)
					return 1 ;
				break ;

		case SF_FORMAT_OGG :
				if (endian != SF_ENDIAN_FILE)
					return 0 ;
				if (subformat == SF_FORMAT_VORBIS)
					return 1 ;
				break ;

		case SF_FORMAT_MPC2K :
				if (info->channels > 2)
					return 0 ;
				/* MPC2000 is strictly little endian. */
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_16)
					return 1 ;
				break ;

		case SF_FORMAT_RF64 :
				if (endian == SF_ENDIAN_BIG || endian == SF_ENDIAN_CPU)
					return 0 ;
				if (subformat == SF_FORMAT_PCM_U8 || subformat == SF_FORMAT_PCM_16)
					return 1 ;
				if (subformat == SF_FORMAT_PCM_24 || subformat == SF_FORMAT_PCM_32)
					return 1 ;
				if (subformat == SF_FORMAT_ULAW || subformat == SF_FORMAT_ALAW)
					return 1 ;
				if (subformat == SF_FORMAT_FLOAT || subformat == SF_FORMAT_DOUBLE)
					return 1 ;
				break ;
		default : break ;
		} ;

	return 0 ;
} /* sf_format_check */

/*------------------------------------------------------------------------------
*/

/* Return the library name/version string (with "-exp" suffix for experimental builds). */
const char *
sf_version_string (void)
{
#if	ENABLE_EXPERIMENTAL_CODE
	return	PACKAGE_NAME "-" PACKAGE_VERSION "-exp" ;
#else
	return	PACKAGE_NAME "-" PACKAGE_VERSION ;
#endif
}

/*------------------------------------------------------------------------------
*/

int
sf_command	(SNDFILE *sndfile, int command, void *data, int datasize)
{	SF_PRIVATE 		*psf = (SF_PRIVATE *) sndfile ;
	double			quality ;
	int				old_value ;

	/* This set of commands do not need the sndfile parameter.
*/ switch (command) { case SFC_GET_LIB_VERSION : if (data == NULL) { if (psf) psf->error = SFE_BAD_COMMAND_PARAM ; return SFE_BAD_COMMAND_PARAM ; } ; snprintf (data, datasize, "%s", sf_version_string ()) ; return strlen (data) ; case SFC_GET_SIMPLE_FORMAT_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_simple_count () ; return 0 ; case SFC_GET_SIMPLE_FORMAT : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_simple (data) ; case SFC_GET_FORMAT_MAJOR_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_major_count () ; return 0 ; case SFC_GET_FORMAT_MAJOR : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_major (data) ; case SFC_GET_FORMAT_SUBTYPE_COUNT : if (data == NULL || datasize != SIGNED_SIZEOF (int)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; *((int*) data) = psf_get_format_subtype_count () ; return 0 ; case SFC_GET_FORMAT_SUBTYPE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_subtype (data) ; case SFC_GET_FORMAT_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_FORMAT_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; return psf_get_format_info (data) ; } ; if (sndfile == NULL && command == SFC_GET_LOG_INFO) { if (data == NULL) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; snprintf (data, datasize, "%s", sf_parselog) ; return strlen (data) ; } ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; switch (command) { case SFC_SET_NORM_FLOAT : old_value = psf->norm_float ; psf->norm_float = (datasize) ? 
SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_CURRENT_SF_INFO : if (data == NULL || datasize != SIGNED_SIZEOF (SF_INFO)) return (sf_errno = SFE_BAD_COMMAND_PARAM) ; memcpy (data, &psf->sf, sizeof (SF_INFO)) ; break ; case SFC_SET_NORM_DOUBLE : old_value = psf->norm_double ; psf->norm_double = (datasize) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_GET_NORM_FLOAT : return psf->norm_float ; case SFC_GET_NORM_DOUBLE : return psf->norm_double ; case SFC_SET_SCALE_FLOAT_INT_READ : old_value = psf->float_int_mult ; psf->float_int_mult = (datasize != 0) ? SF_TRUE : SF_FALSE ; if (psf->float_int_mult && psf->float_max < 0.0) /* Scale to prevent wrap-around distortion. */ psf->float_max = (32768.0 / 32767.0) * psf_calc_signal_max (psf, SF_FALSE) ; return old_value ; case SFC_SET_SCALE_INT_FLOAT_WRITE : old_value = psf->scale_int_float ; psf->scale_int_float = (datasize != 0) ? SF_TRUE : SF_FALSE ; return old_value ; case SFC_SET_ADD_PEAK_CHUNK : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and AIFF support the PEAK chunk. */ switch (format) { case SF_FORMAT_AIFF : case SF_FORMAT_CAF : case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_RF64 : break ; default : return SF_FALSE ; } ; format = SF_CODEC (psf->sf.format) ; /* Only files containg the following data types support the PEAK chunk. */ if (format != SF_FORMAT_FLOAT && format != SF_FORMAT_DOUBLE) return SF_FALSE ; } ; /* Can only do this is in SFM_WRITE mode. */ if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; /* Everything seems OK, so set psf->has_peak and re-write header. 
*/ if (datasize == SF_FALSE && psf->peak_info != NULL) { free (psf->peak_info) ; psf->peak_info = NULL ; } else if (psf->peak_info == NULL) { psf->peak_info = peak_info_calloc (psf->sf.channels) ; if (psf->peak_info != NULL) psf->peak_info->peak_loc = SF_PEAK_START ; } ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return datasize ; case SFC_SET_ADD_HEADER_PAD_CHUNK : return SF_FALSE ; case SFC_GET_LOG_INFO : if (data == NULL) return SFE_BAD_COMMAND_PARAM ; snprintf (data, datasize, "%s", psf->parselog.buf) ; break ; case SFC_CALC_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_FALSE) ; break ; case SFC_CALC_NORM_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; *((double*) data) = psf_calc_signal_max (psf, SF_TRUE) ; break ; case SFC_CALC_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_FALSE) ; case SFC_CALC_NORM_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) return (psf->error = SFE_BAD_COMMAND_PARAM) ; return psf_calc_max_all_channels (psf, (double*) data, SF_TRUE) ; case SFC_GET_SIGNAL_MAX : if (data == NULL || datasize != sizeof (double)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_signal_max (psf, (double *) data) ; case SFC_GET_MAX_ALL_CHANNELS : if (data == NULL || datasize != SIGNED_SIZEOF (double) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return psf_get_max_all_channels (psf, (double*) data) ; case SFC_UPDATE_HEADER_NOW : if (psf->write_header) psf->write_header (psf, SF_TRUE) ; break ; case SFC_SET_UPDATE_HEADER_AUTO : psf->auto_header = datasize ? 
SF_TRUE : SF_FALSE ; return psf->auto_header ; break ; case SFC_SET_ADD_DITHER_ON_WRITE : case SFC_SET_ADD_DITHER_ON_READ : /* ** FIXME ! ** These are obsolete. Just return. ** Remove some time after version 1.0.8. */ break ; case SFC_SET_DITHER_ON_WRITE : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->write_dither, data, sizeof (psf->write_dither)) ; if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_WRITE) ; break ; case SFC_SET_DITHER_ON_READ : if (data == NULL || datasize != SIGNED_SIZEOF (SF_DITHER_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; memcpy (&psf->read_dither, data, sizeof (psf->read_dither)) ; if (psf->file.mode == SFM_READ || psf->file.mode == SFM_RDWR) dither_init (psf, SFM_READ) ; break ; case SFC_FILE_TRUNCATE : if (psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) return SF_TRUE ; if (datasize != sizeof (sf_count_t)) return SF_TRUE ; if (data == NULL || datasize != sizeof (sf_count_t)) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } else { sf_count_t position ; position = *((sf_count_t*) data) ; if (sf_seek (sndfile, position, SEEK_SET) != position) return SF_TRUE ; psf->sf.frames = position ; position = psf_fseek (psf, 0, SEEK_CUR) ; return psf_ftruncate (psf, position) ; } ; break ; case SFC_SET_RAW_START_OFFSET : if (data == NULL || datasize != sizeof (sf_count_t)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) return (psf->error = SFE_BAD_COMMAND_PARAM) ; psf->dataoffset = *((sf_count_t*) data) ; sf_seek (sndfile, 0, SEEK_CUR) ; break ; case SFC_GET_EMBED_FILE_INFO : if (data == NULL || datasize != sizeof (SF_EMBED_FILE_INFO)) return (psf->error = SFE_BAD_COMMAND_PARAM) ; ((SF_EMBED_FILE_INFO*) data)->offset = psf->fileoffset ; ((SF_EMBED_FILE_INFO*) data)->length = psf->filelength ; break ; /* Lite remove start */ case SFC_TEST_IEEE_FLOAT_REPLACE : 
psf->ieee_replace = (datasize) ? SF_TRUE : SF_FALSE ; if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_FLOAT) float32_init (psf) ; else if ((SF_CODEC (psf->sf.format)) == SF_FORMAT_DOUBLE) double64_init (psf) ; else return (psf->error = SFE_BAD_COMMAND_PARAM) ; break ; /* Lite remove end */ case SFC_SET_CLIPPING : psf->add_clipping = (datasize) ? SF_TRUE : SF_FALSE ; return psf->add_clipping ; case SFC_GET_CLIPPING : return psf->add_clipping ; case SFC_GET_LOOP_INFO : if (datasize != sizeof (SF_LOOP_INFO) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->loop_info == NULL) return SF_FALSE ; memcpy (data, psf->loop_info, sizeof (SF_LOOP_INFO)) ; return SF_TRUE ; case SFC_SET_BROADCAST_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 supports the BEXT (Broadcast) chunk. */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_WAVEX && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode. */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. */ if (psf->broadcast_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (broadcast_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_BROADCAST_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return broadcast_var_get (psf, data, datasize) ; case SFC_SET_CART_INFO : { int format = SF_CONTAINER (psf->sf.format) ; /* Only WAV and RF64 support cart chunk format */ if (format != SF_FORMAT_WAV && format != SF_FORMAT_RF64) return SF_FALSE ; } ; /* Only makes sense in SFM_WRITE or SFM_RDWR mode */ if ((psf->file.mode != SFM_WRITE) && (psf->file.mode != SFM_RDWR)) return SF_FALSE ; /* If data has already been written this must fail. 
*/ if (psf->cart_16k == NULL && psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (NOT (cart_var_set (psf, data, datasize))) return SF_FALSE ; if (psf->write_header) psf->write_header (psf, SF_TRUE) ; return SF_TRUE ; case SFC_GET_CART_INFO : if (data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; return cart_var_get (psf, data, datasize) ; case SFC_GET_CUE_COUNT : if (datasize != sizeof (uint32_t) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues != NULL) { *((uint32_t *) data) = psf->cues->cue_count ; return SF_TRUE ; } ; return SF_FALSE ; case SFC_GET_CUE : if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL) return SF_FALSE ; psf_get_cues (psf, data, datasize) ; return SF_TRUE ; case SFC_SET_CUE : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_CUES) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->cues == NULL && (psf->cues = psf_cues_dup (data)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; return SF_TRUE ; case SFC_GET_INSTRUMENT : if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL) return SF_FALSE ; memcpy (data, psf->instrument, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_SET_INSTRUMENT : /* If data has already been written this must fail. 
*/ if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (datasize != sizeof (SF_INSTRUMENT) || data == NULL) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->instrument, data, sizeof (SF_INSTRUMENT)) ; return SF_TRUE ; case SFC_RAW_DATA_NEEDS_ENDSWAP : return psf->data_endswap ; case SFC_GET_CHANNEL_MAP_INFO : if (psf->channel_map == NULL) return SF_FALSE ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; memcpy (data, psf->channel_map, datasize) ; return SF_TRUE ; case SFC_SET_CHANNEL_MAP_INFO : if (psf->have_written) { psf->error = SFE_CMD_HAS_DATA ; return SF_FALSE ; } ; if (data == NULL || datasize != SIGNED_SIZEOF (psf->channel_map [0]) * psf->sf.channels) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; { int *iptr ; for (iptr = data ; iptr < (int*) data + psf->sf.channels ; iptr++) { if (*iptr <= SF_CHANNEL_MAP_INVALID || *iptr >= SF_CHANNEL_MAP_MAX) { psf->error = SFE_BAD_COMMAND_PARAM ; return SF_FALSE ; } ; } ; } ; free (psf->channel_map) ; if ((psf->channel_map = malloc (datasize)) == NULL) { psf->error = SFE_MALLOC_FAILED ; return SF_FALSE ; } ; memcpy (psf->channel_map, data, datasize) ; /* ** Pass the command down to the container's command handler. ** Don't pass user data, use validated psf->channel_map data instead. */ if (psf->command) return psf->command (psf, command, NULL, 0) ; return SF_FALSE ; case SFC_SET_VBR_ENCODING_QUALITY : if (data == NULL || datasize != sizeof (double)) return SF_FALSE ; quality = *((double *) data) ; quality = 1.0 - SF_MAX (0.0, SF_MIN (1.0, quality)) ; return sf_command (sndfile, SFC_SET_COMPRESSION_LEVEL, &quality, sizeof (quality)) ; default : /* Must be a file specific command. Pass it on. 
*/ if (psf->command) return psf->command (psf, command, data, datasize) ; psf_log_printf (psf, "*** sf_command : cmd = 0x%X\n", command) ; return (psf->error = SFE_BAD_COMMAND_PARAM) ; } ; return 0 ; } /* sf_command */ /*------------------------------------------------------------------------------ */ sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t offset, int whence) { SF_PRIVATE *psf ; sf_count_t seek_from_start = 0, retval ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (! psf->sf.seekable) { psf->error = SFE_NOT_SEEKABLE ; return PSF_SEEK_ERROR ; } ; /* If the whence parameter has a mode ORed in, check to see that ** it makes sense. */ if (((whence & SFM_MASK) == SFM_WRITE && psf->file.mode == SFM_READ) || ((whence & SFM_MASK) == SFM_READ && psf->file.mode == SFM_WRITE)) { psf->error = SFE_WRONG_SEEK ; return PSF_SEEK_ERROR ; } ; /* Convert all SEEK_CUR and SEEK_END into seek_from_start to be ** used with SEEK_SET. */ switch (whence) { /* The SEEK_SET behaviour is independant of mode. */ case SEEK_SET : case SEEK_SET | SFM_READ : case SEEK_SET | SFM_WRITE : case SEEK_SET | SFM_RDWR : seek_from_start = offset ; break ; /* The SEEK_CUR is a little more tricky. 
*/ case SEEK_CUR : if (offset == 0) { if (psf->file.mode == SFM_READ) return psf->read_current ; if (psf->file.mode == SFM_WRITE) return psf->write_current ; } ; if (psf->file.mode == SFM_READ) seek_from_start = psf->read_current + offset ; else if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR) seek_from_start = psf->write_current + offset ; else psf->error = SFE_AMBIGUOUS_SEEK ; break ; case SEEK_CUR | SFM_READ : if (offset == 0) return psf->read_current ; seek_from_start = psf->read_current + offset ; break ; case SEEK_CUR | SFM_WRITE : if (offset == 0) return psf->write_current ; seek_from_start = psf->write_current + offset ; break ; /* The SEEK_END */ case SEEK_END : case SEEK_END | SFM_READ : case SEEK_END | SFM_WRITE : seek_from_start = psf->sf.frames + offset ; break ; default : psf->error = SFE_BAD_SEEK ; break ; } ; if (psf->error) return PSF_SEEK_ERROR ; if (psf->file.mode == SFM_RDWR || psf->file.mode == SFM_WRITE) { if (seek_from_start < 0) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; } else if (seek_from_start < 0 || seek_from_start > psf->sf.frames) { psf->error = SFE_BAD_SEEK ; return PSF_SEEK_ERROR ; } ; if (psf->seek) { int new_mode = (whence & SFM_MASK) ? 
(whence & SFM_MASK) : psf->file.mode ; retval = psf->seek (psf, new_mode, seek_from_start) ; switch (new_mode) { case SFM_READ : psf->read_current = retval ; break ; case SFM_WRITE : psf->write_current = retval ; break ; case SFM_RDWR : psf->read_current = retval ; psf->write_current = retval ; new_mode = SFM_READ ; break ; } ; psf->last_op = new_mode ; return retval ; } ; psf->error = SFE_AMBIGUOUS_SEEK ; return PSF_SEEK_ERROR ; } /* sf_seek */ /*------------------------------------------------------------------------------ */ const char* sf_get_string (SNDFILE *sndfile, int str_type) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return NULL ; if (psf->Magick != SNDFILE_MAGICK) return NULL ; return psf_get_string (psf, str_type) ; } /* sf_get_string */ int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; return psf_set_string (psf, str_type, str) ; } /* sf_get_string */ /*------------------------------------------------------------------------------ */ int sf_current_byterate (SNDFILE *sndfile) { SF_PRIVATE *psf ; if ((psf = (SF_PRIVATE*) sndfile) == NULL) return -1 ; if (psf->Magick != SNDFILE_MAGICK) return -1 ; /* This should cover all PCM and floating point formats. */ if (psf->bytewidth) return psf->sf.samplerate * psf->sf.channels * psf->bytewidth ; if (psf->byterate) return psf->byterate (psf) ; switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_IMA_ADPCM : case SF_FORMAT_MS_ADPCM : case SF_FORMAT_VOX_ADPCM : return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_GSM610 : return (psf->sf.samplerate * psf->sf.channels * 13000) / 8000 ; case SF_FORMAT_G721_32 : /* 32kbs G721 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels) / 2 ; case SF_FORMAT_G723_24 : /* 24kbs G723 ADPCM encoding. */ return (psf->sf.samplerate * psf->sf.channels * 3) / 8 ; case SF_FORMAT_G723_40 : /* 40kbs G723 ADPCM encoding. 
*/ return (psf->sf.samplerate * psf->sf.channels * 5) / 8 ; default : break ; } ; return -1 ; } /* sf_current_byterate */ /*============================================================================== */ sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) { SF_PRIVATE *psf ; sf_count_t count, extra ; int bytewidth, blockwidth ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ; blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (bytes < 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, bytes) ; return 0 ; } ; if (bytes % (psf->sf.channels * bytewidth)) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf_fread (ptr, 1, bytes, psf) ; if (psf->read_current + count / blockwidth <= psf->sf.frames) psf->read_current += count / blockwidth ; else { count = (psf->sf.frames - psf->read_current) * blockwidth ; extra = bytes - count ; psf_memset (((char *) ptr) + count, 0, extra) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_raw */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_short */ sf_count_t sf_readf_short (SNDFILE *sndfile, short *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (short)) ; return 0 ; /* End of file. 
*/ } ; if (psf->read_short == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_short (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (short)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_short */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_int */ sf_count_t sf_readf_int (SNDFILE *sndfile, int *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == 
SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (frames <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (int)) ; return 0 ; } ; if (psf->read_int == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_int (psf, ptr, frames * psf->sf.channels) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = frames * psf->sf.channels - count ; psf_memset (ptr + count, 0, extra * sizeof (int)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count / psf->sf.channels ; } /* sf_readf_int */ /*------------------------------------------------------------------------------ */ sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count, extra ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_WRITE) { psf->error = SFE_NOT_READMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_READ_ALIGN ; return 0 ; } ; if (len <= 0 || psf->read_current >= psf->sf.frames) { psf_memset (ptr, 0, len * sizeof (float)) ; return 0 ; } ; if (psf->read_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_READ) if (psf->seek (psf, SFM_READ, psf->read_current) < 0) return 0 ; count = psf->read_float (psf, ptr, len) ; if (psf->read_current + count / psf->sf.channels <= psf->sf.frames) psf->read_current += count / psf->sf.channels ; else { count = (psf->sf.frames - psf->read_current) * psf->sf.channels ; extra = len - count ; psf_memset (ptr + count, 0, extra * sizeof (float)) ; psf->read_current = psf->sf.frames ; } ; psf->last_op = SFM_READ ; return count ; } /* sf_read_float */ 
/*
** Frame/item read functions (float/double) and raw/typed write functions.
**
** Hardening note : psf_memset() takes a size_t length. The original
** "len <= 0" / "frames <= 0" guards in the read functions passed the raw
** (possibly negative) count straight to psf_memset() ; a negative sf_count_t
** converts to an enormous unsigned value and overruns the caller's buffer.
** The zero-fill is now performed only for a positive request. Behaviour for
** all valid (non-negative) inputs is unchanged.
*/

sf_count_t
sf_readf_float	(SNDFILE *sndfile, float *ptr, sf_count_t frames)
{	SF_PRIVATE	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	/* Reading is not allowed on a write-only handle. */
	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	/* Nothing to read : zero the destination (only for a positive frame
	** count -- a negative count must never reach psf_memset) and return 0.
	*/
	if (frames <= 0 || psf->read_current >= psf->sf.frames)
	{	if (frames > 0)
			psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (float)) ;
		return 0 ;
		} ;

	if (psf->read_float == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	/* Re-position if the previous operation was not a read. */
	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf->read_float (psf, ptr, frames * psf->sf.channels) ;

	/* Clamp at the logical end of file, zero-filling whatever was not read. */
	if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
		psf->read_current += count / psf->sf.channels ;
	else
	{	count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
		extra = frames * psf->sf.channels - count ;
		psf_memset (ptr + count, 0, extra * sizeof (float)) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count / psf->sf.channels ;
} /* sf_readf_float */

/*------------------------------------------------------------------------------
*/

sf_count_t
sf_read_double	(SNDFILE *sndfile, double *ptr, sf_count_t len)
{	SF_PRIVATE	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	/* The item count must be a whole number of frames. */
	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_READ_ALIGN ;
		return 0 ;
		} ;

	/* See sf_readf_float : never pass a negative count to psf_memset. */
	if (len <= 0 || psf->read_current >= psf->sf.frames)
	{	if (len > 0)
			psf_memset (ptr, 0, len * sizeof (double)) ;
		return 0 ;
		} ;

	if (psf->read_double == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf->read_double (psf, ptr, len) ;

	if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
		psf->read_current += count / psf->sf.channels ;
	else
	{	count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
		extra = len - count ;
		psf_memset (ptr + count, 0, extra * sizeof (double)) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count ;
} /* sf_read_double */

sf_count_t
sf_readf_double	(SNDFILE *sndfile, double *ptr, sf_count_t frames)
{	SF_PRIVATE	*psf ;
	sf_count_t	count, extra ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_WRITE)
	{	psf->error = SFE_NOT_READMODE ;
		return 0 ;
		} ;

	/* See sf_readf_float : never pass a negative count to psf_memset. */
	if (frames <= 0 || psf->read_current >= psf->sf.frames)
	{	if (frames > 0)
			psf_memset (ptr, 0, frames * psf->sf.channels * sizeof (double)) ;
		return 0 ;
		} ;

	if (psf->read_double == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_READ)
		if (psf->seek (psf, SFM_READ, psf->read_current) < 0)
			return 0 ;

	count = psf->read_double (psf, ptr, frames * psf->sf.channels) ;

	if (psf->read_current + count / psf->sf.channels <= psf->sf.frames)
		psf->read_current += count / psf->sf.channels ;
	else
	{	count = (psf->sf.frames - psf->read_current) * psf->sf.channels ;
		extra = frames * psf->sf.channels - count ;
		psf_memset (ptr + count, 0, extra * sizeof (double)) ;
		psf->read_current = psf->sf.frames ;
		} ;

	psf->last_op = SFM_READ ;

	return count / psf->sf.channels ;
} /* sf_readf_double */

/*------------------------------------------------------------------------------
*/

sf_count_t
sf_write_raw	(SNDFILE *sndfile, const void *ptr, sf_count_t len)
{	SF_PRIVATE	*psf ;
	sf_count_t	count ;
	int			bytewidth, blockwidth ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	bytewidth = (psf->bytewidth > 0) ? psf->bytewidth : 1 ;
	blockwidth = (psf->blockwidth > 0) ? psf->blockwidth : 1 ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	/* Byte count must cover a whole number of frames. */
	if (len % (psf->sf.channels * bytewidth))
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	/* NOTE(review) : unlike the typed write functions below, psf->seek is not
	** NULL-checked before this call ; presumably every writable container
	** installs a seek method -- confirm before relying on it.
	*/
	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	/* First write triggers the (non-final) header write. */
	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf_fwrite (ptr, 1, len, psf) ;

	psf->write_current += count / blockwidth ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_raw */

/*------------------------------------------------------------------------------
*/

sf_count_t
sf_write_short	(SNDFILE *sndfile, const short *ptr, sf_count_t len)
{	SF_PRIVATE	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->write_short == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_short (psf, ptr, len) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	/* Items written (not frames). */
	return count ;
} /* sf_write_short */

sf_count_t
sf_writef_short	(SNDFILE *sndfile, const short *ptr, sf_count_t frames)
{	SF_PRIVATE	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (psf->write_short == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_short (psf, ptr, frames * psf->sf.channels) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	/* Frames written. */
	return count / psf->sf.channels ;
} /* sf_writef_short */

/*------------------------------------------------------------------------------
*/

sf_count_t
sf_write_int	(SNDFILE *sndfile, const int *ptr, sf_count_t len)
{	SF_PRIVATE	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->write_int == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_int (psf, ptr, len) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_int */

sf_count_t
sf_writef_int	(SNDFILE *sndfile, const int *ptr, sf_count_t frames)
{	SF_PRIVATE	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (psf->write_int == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_int (psf, ptr, frames * psf->sf.channels) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count / psf->sf.channels ;
} /* sf_writef_int */

/*------------------------------------------------------------------------------
*/

sf_count_t
sf_write_float	(SNDFILE *sndfile, const float *ptr, sf_count_t len)
{	SF_PRIVATE	*psf ;
	sf_count_t	count ;

	VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ;

	if (psf->file.mode == SFM_READ)
	{	psf->error = SFE_NOT_WRITEMODE ;
		return 0 ;
		} ;

	if (len % psf->sf.channels)
	{	psf->error = SFE_BAD_WRITE_ALIGN ;
		return 0 ;
		} ;

	if (psf->write_float == NULL || psf->seek == NULL)
	{	psf->error = SFE_UNIMPLEMENTED ;
		return 0 ;
		} ;

	if (psf->last_op != SFM_WRITE)
		if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0)
			return 0 ;

	if (psf->have_written == SF_FALSE && psf->write_header != NULL)
		psf->write_header (psf, SF_FALSE) ;
	psf->have_written = SF_TRUE ;

	count = psf->write_float (psf, ptr, len) ;

	psf->write_current += count / psf->sf.channels ;

	psf->last_op = SFM_WRITE ;

	if (psf->write_current > psf->sf.frames)
	{	psf->sf.frames = psf->write_current ;
		psf->dataend = 0 ;
		} ;

	if (psf->auto_header && psf->write_header != NULL)
		psf->write_header (psf, SF_TRUE) ;

	return count ;
} /* sf_write_float */
sf_count_t sf_writef_float (SNDFILE *sndfile, const float *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_float == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_float (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_float */ /*------------------------------------------------------------------------------ */ sf_count_t sf_write_double (SNDFILE *sndfile, const double *ptr, sf_count_t len) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (len % psf->sf.channels) { psf->error = SFE_BAD_WRITE_ALIGN ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, len) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) 
psf->write_header (psf, SF_TRUE) ; return count ; } /* sf_write_double */ sf_count_t sf_writef_double (SNDFILE *sndfile, const double *ptr, sf_count_t frames) { SF_PRIVATE *psf ; sf_count_t count ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->file.mode == SFM_READ) { psf->error = SFE_NOT_WRITEMODE ; return 0 ; } ; if (psf->write_double == NULL || psf->seek == NULL) { psf->error = SFE_UNIMPLEMENTED ; return 0 ; } ; if (psf->last_op != SFM_WRITE) if (psf->seek (psf, SFM_WRITE, psf->write_current) < 0) return 0 ; if (psf->have_written == SF_FALSE && psf->write_header != NULL) psf->write_header (psf, SF_FALSE) ; psf->have_written = SF_TRUE ; count = psf->write_double (psf, ptr, frames * psf->sf.channels) ; psf->write_current += count / psf->sf.channels ; psf->last_op = SFM_WRITE ; if (psf->write_current > psf->sf.frames) { psf->sf.frames = psf->write_current ; psf->dataend = 0 ; } ; if (psf->auto_header && psf->write_header != NULL) psf->write_header (psf, SF_TRUE) ; return count / psf->sf.channels ; } /* sf_writef_double */ /*========================================================================= ** Private functions. */ static int try_resource_fork (SF_PRIVATE * psf) { int old_error = psf->error ; /* Set READ mode now, to see if resource fork exists. */ psf->rsrc.mode = SFM_READ ; if (psf_open_rsrc (psf) != 0) { psf->error = old_error ; return 0 ; } ; /* More checking here. */ psf_log_printf (psf, "Resource fork : %s\n", psf->rsrc.path.c) ; return SF_FORMAT_SD2 ; } /* try_resource_fork */ static int format_from_extension (SF_PRIVATE *psf) { char *cptr ; char buffer [16] ; int format = 0 ; if ((cptr = strrchr (psf->file.name.c, '.')) == NULL) return 0 ; cptr ++ ; if (strlen (cptr) > sizeof (buffer) - 1) return 0 ; psf_strlcpy (buffer, sizeof (buffer), cptr) ; buffer [sizeof (buffer) - 1] = 0 ; /* Convert everything in the buffer to lower case. 
*/ cptr = buffer ; while (*cptr) { *cptr = tolower (*cptr) ; cptr ++ ; } ; cptr = buffer ; if (strcmp (cptr, "au") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "snd") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_ULAW ; } else if (strcmp (cptr, "vox") == 0 || strcmp (cptr, "vox8") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "vox6") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 6000 ; format = SF_FORMAT_RAW | SF_FORMAT_VOX_ADPCM ; } else if (strcmp (cptr, "gsm") == 0) { psf->sf.channels = 1 ; psf->sf.samplerate = 8000 ; format = SF_FORMAT_RAW | SF_FORMAT_GSM610 ; } /* For RAW files, make sure the dataoffset if set correctly. */ if ((SF_CONTAINER (format)) == SF_FORMAT_RAW) psf->dataoffset = 0 ; return format ; } /* format_from_extension */ static int guess_file_type (SF_PRIVATE *psf) { uint32_t buffer [3], format ; if (psf_binheader_readf (psf, "b", &buffer, SIGNED_SIZEOF (buffer)) != SIGNED_SIZEOF (buffer)) { psf->error = SFE_BAD_FILE_READ ; return 0 ; } ; if ((buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'F') || buffer [0] == MAKE_MARKER ('R', 'I', 'F', 'X')) && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_WAV ; if (buffer [0] == MAKE_MARKER ('F', 'O', 'R', 'M')) { if (buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'F') || buffer [2] == MAKE_MARKER ('A', 'I', 'F', 'C')) return SF_FORMAT_AIFF ; if (buffer [2] == MAKE_MARKER ('8', 'S', 'V', 'X') || buffer [2] == MAKE_MARKER ('1', '6', 'S', 'V')) return SF_FORMAT_SVX ; return 0 ; } ; if (buffer [0] == MAKE_MARKER ('.', 's', 'n', 'd') || buffer [0] == MAKE_MARKER ('d', 'n', 's', '.')) return SF_FORMAT_AU ; if ((buffer [0] == MAKE_MARKER ('f', 'a', 'p', ' ') || buffer [0] == MAKE_MARKER (' ', 'p', 'a', 'f'))) return SF_FORMAT_PAF ; if (buffer [0] == MAKE_MARKER ('N', 'I', 'S', 'T')) return 
SF_FORMAT_NIST ; if (buffer [0] == MAKE_MARKER ('C', 'r', 'e', 'a') && buffer [1] == MAKE_MARKER ('t', 'i', 'v', 'e')) return SF_FORMAT_VOC ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0xF8, 0xFF)) == MAKE_MARKER (0x64, 0xA3, 0x00, 0x00) || (buffer [0] & MAKE_MARKER (0xFF, 0xF8, 0xFF, 0xFF)) == MAKE_MARKER (0x00, 0x00, 0xA3, 0x64)) return SF_FORMAT_IRCAM ; if (buffer [0] == MAKE_MARKER ('r', 'i', 'f', 'f')) return SF_FORMAT_W64 ; if (buffer [0] == MAKE_MARKER (0, 0, 0x03, 0xE8) && buffer [1] == MAKE_MARKER (0, 0, 0, 1) && buffer [2] == MAKE_MARKER (0, 0, 0, 1)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER (0, 0, 0, 0) && buffer [1] == MAKE_MARKER (1, 0, 0, 0) && buffer [2] == MAKE_MARKER (1, 0, 0, 0)) return SF_FORMAT_MAT4 ; if (buffer [0] == MAKE_MARKER ('M', 'A', 'T', 'L') && buffer [1] == MAKE_MARKER ('A', 'B', ' ', '5')) return SF_FORMAT_MAT5 ; if (buffer [0] == MAKE_MARKER ('P', 'V', 'F', '1')) return SF_FORMAT_PVF ; if (buffer [0] == MAKE_MARKER ('E', 'x', 't', 'e') && buffer [1] == MAKE_MARKER ('n', 'd', 'e', 'd') && buffer [2] == MAKE_MARKER (' ', 'I', 'n', 's')) return SF_FORMAT_XI ; if (buffer [0] == MAKE_MARKER ('c', 'a', 'f', 'f') && buffer [2] == MAKE_MARKER ('d', 'e', 's', 'c')) return SF_FORMAT_CAF ; if (buffer [0] == MAKE_MARKER ('O', 'g', 'g', 'S')) return SF_FORMAT_OGG ; if (buffer [0] == MAKE_MARKER ('A', 'L', 'a', 'w') && buffer [1] == MAKE_MARKER ('S', 'o', 'u', 'n') && buffer [2] == MAKE_MARKER ('d', 'F', 'i', 'l')) return SF_FORMAT_WVE ; if (buffer [0] == MAKE_MARKER ('D', 'i', 'a', 'm') && buffer [1] == MAKE_MARKER ('o', 'n', 'd', 'W') && buffer [2] == MAKE_MARKER ('a', 'r', 'e', ' ')) return SF_FORMAT_DWD ; if (buffer [0] == MAKE_MARKER ('L', 'M', '8', '9') || buffer [0] == MAKE_MARKER ('5', '3', 0, 0)) return SF_FORMAT_TXW ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0x80, 0xFF)) == MAKE_MARKER (0xF0, 0x7E, 0, 0x01)) return SF_FORMAT_SDS ; if ((buffer [0] & MAKE_MARKER (0xFF, 0xFF, 0, 0)) == MAKE_MARKER (1, 4, 0, 0)) return 
SF_FORMAT_MPC2K ; if (buffer [0] == MAKE_MARKER ('C', 'A', 'T', ' ') && buffer [2] == MAKE_MARKER ('R', 'E', 'X', '2')) return SF_FORMAT_REX2 ; if (buffer [0] == MAKE_MARKER (0x30, 0x26, 0xB2, 0x75) && buffer [1] == MAKE_MARKER (0x8E, 0x66, 0xCF, 0x11)) return 0 /*-SF_FORMAT_WMA-*/ ; /* HMM (Hidden Markov Model) Tool Kit. */ if (buffer [2] == MAKE_MARKER (0, 2, 0, 0) && 2 * ((int64_t) BE2H_32 (buffer [0])) + 12 == psf->filelength) return SF_FORMAT_HTK ; if (buffer [0] == MAKE_MARKER ('f', 'L', 'a', 'C')) return SF_FORMAT_FLAC ; if (buffer [0] == MAKE_MARKER ('2', 'B', 'I', 'T')) return SF_FORMAT_AVR ; if (buffer [0] == MAKE_MARKER ('R', 'F', '6', '4') && buffer [2] == MAKE_MARKER ('W', 'A', 'V', 'E')) return SF_FORMAT_RF64 ; if (buffer [0] == MAKE_MARKER ('I', 'D', '3', 3)) { psf_log_printf (psf, "Found 'ID3' marker.\n") ; if (id3_skip (psf)) return guess_file_type (psf) ; return 0 ; } ; /* Turtle Beach SMP 16-bit */ if (buffer [0] == MAKE_MARKER ('S', 'O', 'U', 'N') && buffer [1] == MAKE_MARKER ('D', ' ', 'S', 'A')) return 0 ; /* Yamaha sampler format. */ if (buffer [0] == MAKE_MARKER ('S', 'Y', '8', '0') || buffer [0] == MAKE_MARKER ('S', 'Y', '8', '5')) return 0 ; if (buffer [0] == MAKE_MARKER ('a', 'j', 'k', 'g')) return 0 /*-SF_FORMAT_SHN-*/ ; /* This must be the last one. 
*/ if (psf->filelength > 0 && (format = try_resource_fork (psf)) != 0) return format ; return 0 ; } /* guess_file_type */ static int validate_sfinfo (SF_INFO *sfinfo) { if (sfinfo->samplerate < 1) return 0 ; if (sfinfo->frames < 0) return 0 ; if (sfinfo->channels < 1) return 0 ; if ((SF_CONTAINER (sfinfo->format)) == 0) return 0 ; if ((SF_CODEC (sfinfo->format)) == 0) return 0 ; if (sfinfo->sections < 1) return 0 ; return 1 ; } /* validate_sfinfo */ static int validate_psf (SF_PRIVATE *psf) { if (psf->datalength < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : datalength == %D.\n", psf->datalength) ; return 0 ; } ; if (psf->dataoffset < 0) { psf_log_printf (psf, "Invalid SF_PRIVATE field : dataoffset == %D.\n", psf->dataoffset) ; return 0 ; } ; if (psf->blockwidth && psf->blockwidth != psf->sf.channels * psf->bytewidth) { psf_log_printf (psf, "Invalid SF_PRIVATE field : channels * bytewidth == %d.\n", psf->sf.channels * psf->bytewidth) ; return 0 ; } ; return 1 ; } /* validate_psf */ static void save_header_info (SF_PRIVATE *psf) { snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; } /* save_header_info */ static int copy_filename (SF_PRIVATE *psf, const char *path) { const char *ccptr ; char *cptr ; if (strlen (path) > 1 && strlen (path) - 1 >= sizeof (psf->file.path.c)) { psf->error = SFE_FILENAME_TOO_LONG ; return psf->error ; } ; snprintf (psf->file.path.c, sizeof (psf->file.path.c), "%s", path) ; if ((ccptr = strrchr (path, '/')) || (ccptr = strrchr (path, '\\'))) ccptr ++ ; else ccptr = path ; snprintf (psf->file.name.c, sizeof (psf->file.name.c), "%s", ccptr) ; /* Now grab the directory. 
*/ snprintf (psf->file.dir.c, sizeof (psf->file.dir.c), "%s", path) ; if ((cptr = strrchr (psf->file.dir.c, '/')) || (cptr = strrchr (psf->file.dir.c, '\\'))) cptr [1] = 0 ; else psf->file.dir.c [0] = 0 ; return 0 ; } /* copy_filename */ /*============================================================================== */ static int psf_close (SF_PRIVATE *psf) { uint32_t k ; int error = 0 ; if (psf->codec_close) { error = psf->codec_close (psf) ; /* To prevent it being called in psf->container_close(). */ psf->codec_close = NULL ; } ; if (psf->container_close) error = psf->container_close (psf) ; error = psf_fclose (psf) ; psf_close_rsrc (psf) ; /* For an ISO C compliant implementation it is ok to free a NULL pointer. */ free (psf->header.ptr) ; free (psf->container_data) ; free (psf->codec_data) ; free (psf->interleave) ; free (psf->dither) ; free (psf->peak_info) ; free (psf->broadcast_16k) ; free (psf->loop_info) ; free (psf->instrument) ; free (psf->cues) ; free (psf->channel_map) ; free (psf->format_desc) ; free (psf->strings.storage) ; if (psf->wchunks.chunks) for (k = 0 ; k < psf->wchunks.used ; k++) free (psf->wchunks.chunks [k].data) ; free (psf->rchunks.chunks) ; free (psf->wchunks.chunks) ; free (psf->iterator) ; free (psf->cart_16k) ; memset (psf, 0, sizeof (SF_PRIVATE)) ; free (psf) ; return error ; } /* psf_close */ SNDFILE * psf_open_file (SF_PRIVATE *psf, SF_INFO *sfinfo) { int error, format ; sf_errno = error = 0 ; sf_parselog [0] = 0 ; if (psf->error) { error = psf->error ; goto error_exit ; } ; if (psf->file.mode != SFM_READ && psf->file.mode != SFM_WRITE && psf->file.mode != SFM_RDWR) { error = SFE_BAD_OPEN_MODE ; goto error_exit ; } ; if (sfinfo == NULL) { error = SFE_BAD_SF_INFO_PTR ; goto error_exit ; } ; if (psf->file.mode == SFM_READ) { if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_RAW) { if (sf_format_check (sfinfo) == 0) { error = SFE_RAW_BAD_FORMAT ; goto error_exit ; } ; } else memset (sfinfo, 0, sizeof (SF_INFO)) ; } ; memcpy 
(&psf->sf, sfinfo, sizeof (SF_INFO)) ; psf->Magick = SNDFILE_MAGICK ; psf->norm_float = SF_TRUE ; psf->norm_double = SF_TRUE ; psf->dataoffset = -1 ; psf->datalength = -1 ; psf->read_current = -1 ; psf->write_current = -1 ; psf->auto_header = SF_FALSE ; psf->rwf_endian = SF_ENDIAN_LITTLE ; psf->seek = psf_default_seek ; psf->float_int_mult = 0 ; psf->float_max = -1.0 ; /* An attempt at a per SF_PRIVATE unique id. */ psf->unique_id = psf_rand_int32 () ; psf->sf.sections = 1 ; psf->is_pipe = psf_is_pipe (psf) ; if (psf->is_pipe) { psf->sf.seekable = SF_FALSE ; psf->filelength = SF_COUNT_MAX ; } else { psf->sf.seekable = SF_TRUE ; /* File is open, so get the length. */ psf->filelength = psf_get_filelen (psf) ; } ; if (psf->fileoffset > 0) { switch (psf->file.mode) { case SFM_READ : if (psf->filelength < 44) { psf_log_printf (psf, "Short filelength: %D (fileoffset: %D)\n", psf->filelength, psf->fileoffset) ; error = SFE_BAD_OFFSET ; goto error_exit ; } ; break ; case SFM_WRITE : psf->fileoffset = 0 ; psf_fseek (psf, 0, SEEK_END) ; psf->fileoffset = psf_ftell (psf) ; break ; case SFM_RDWR : error = SFE_NO_EMBEDDED_RDWR ; goto error_exit ; } ; psf_log_printf (psf, "Embedded file offset : %D\n", psf->fileoffset) ; } ; if (psf->filelength == SF_COUNT_MAX) psf_log_printf (psf, "Length : unknown\n") ; else psf_log_printf (psf, "Length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->filelength == 0)) { /* If the file is being opened for write or RDWR and the file is currently ** empty, then the SF_INFO struct must contain valid data. 
*/ if ((SF_CONTAINER (psf->sf.format)) == 0) { error = SFE_ZERO_MAJOR_FORMAT ; goto error_exit ; } ; if ((SF_CODEC (psf->sf.format)) == 0) { error = SFE_ZERO_MINOR_FORMAT ; goto error_exit ; } ; if (sf_format_check (&psf->sf) == 0) { error = SFE_BAD_OPEN_FORMAT ; goto error_exit ; } ; } else if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_RAW) { /* If type RAW has not been specified then need to figure out file type. */ psf->sf.format = guess_file_type (psf) ; if (psf->sf.format == 0) psf->sf.format = format_from_extension (psf) ; } ; /* Prevent unnecessary seeks */ psf->last_op = psf->file.mode ; /* Set bytewidth if known. */ switch (SF_CODEC (psf->sf.format)) { case SF_FORMAT_PCM_S8 : case SF_FORMAT_PCM_U8 : case SF_FORMAT_ULAW : case SF_FORMAT_ALAW : case SF_FORMAT_DPCM_8 : psf->bytewidth = 1 ; break ; case SF_FORMAT_PCM_16 : case SF_FORMAT_DPCM_16 : psf->bytewidth = 2 ; break ; case SF_FORMAT_PCM_24 : psf->bytewidth = 3 ; break ; case SF_FORMAT_PCM_32 : case SF_FORMAT_FLOAT : psf->bytewidth = 4 ; break ; case SF_FORMAT_DOUBLE : psf->bytewidth = 8 ; break ; } ; /* Call the initialisation function for the relevant file type. 
*/ switch (SF_CONTAINER (psf->sf.format)) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : error = wav_open (psf) ; break ; case SF_FORMAT_AIFF : error = aiff_open (psf) ; break ; case SF_FORMAT_AU : error = au_open (psf) ; break ; case SF_FORMAT_RAW : error = raw_open (psf) ; break ; case SF_FORMAT_W64 : error = w64_open (psf) ; break ; case SF_FORMAT_RF64 : error = rf64_open (psf) ; break ; /* Lite remove start */ case SF_FORMAT_PAF : error = paf_open (psf) ; break ; case SF_FORMAT_SVX : error = svx_open (psf) ; break ; case SF_FORMAT_NIST : error = nist_open (psf) ; break ; case SF_FORMAT_IRCAM : error = ircam_open (psf) ; break ; case SF_FORMAT_VOC : error = voc_open (psf) ; break ; case SF_FORMAT_SDS : error = sds_open (psf) ; break ; case SF_FORMAT_OGG : error = ogg_open (psf) ; break ; case SF_FORMAT_TXW : error = txw_open (psf) ; break ; case SF_FORMAT_WVE : error = wve_open (psf) ; break ; case SF_FORMAT_DWD : error = dwd_open (psf) ; break ; case SF_FORMAT_MAT4 : error = mat4_open (psf) ; break ; case SF_FORMAT_MAT5 : error = mat5_open (psf) ; break ; case SF_FORMAT_PVF : error = pvf_open (psf) ; break ; case SF_FORMAT_XI : error = xi_open (psf) ; break ; case SF_FORMAT_HTK : error = htk_open (psf) ; break ; case SF_FORMAT_SD2 : error = sd2_open (psf) ; break ; case SF_FORMAT_REX2 : error = rx2_open (psf) ; break ; case SF_FORMAT_AVR : error = avr_open (psf) ; break ; case SF_FORMAT_FLAC : error = flac_open (psf) ; break ; case SF_FORMAT_CAF : error = caf_open (psf) ; break ; case SF_FORMAT_MPC2K : error = mpc2k_open (psf) ; break ; /* Lite remove end */ default : error = SFE_UNKNOWN_FORMAT ; } ; if (error) goto error_exit ; /* For now, check whether embedding is supported. */ format = SF_CONTAINER (psf->sf.format) ; if (psf->fileoffset > 0) { switch (format) { case SF_FORMAT_WAV : case SF_FORMAT_WAVEX : case SF_FORMAT_AIFF : case SF_FORMAT_AU : /* Actual embedded files. */ break ; case SF_FORMAT_FLAC : /* Flac with an ID3v2 header? 
*/ break ; default : error = SFE_NO_EMBED_SUPPORT ; goto error_exit ; } ; } ; if (psf->fileoffset > 0) psf_log_printf (psf, "Embedded file length : %D\n", psf->filelength) ; if (psf->file.mode == SFM_RDWR && sf_format_check (&psf->sf) == 0) { error = SFE_BAD_MODE_RW ; goto error_exit ; } ; if (validate_sfinfo (&psf->sf) == 0) { psf_log_SF_INFO (psf) ; save_header_info (psf) ; error = SFE_BAD_SF_INFO ; goto error_exit ; } ; if (validate_psf (psf) == 0) { save_header_info (psf) ; error = SFE_INTERNAL ; goto error_exit ; } ; psf->read_current = 0 ; psf->write_current = 0 ; if (psf->file.mode == SFM_RDWR) { psf->write_current = psf->sf.frames ; psf->have_written = psf->sf.frames > 0 ? SF_TRUE : SF_FALSE ; } ; memcpy (sfinfo, &psf->sf, sizeof (SF_INFO)) ; if (psf->file.mode == SFM_WRITE) { /* Zero out these fields. */ sfinfo->frames = 0 ; sfinfo->sections = 0 ; sfinfo->seekable = 0 ; } ; return (SNDFILE *) psf ; error_exit : sf_errno = error ; if (error == SFE_SYSTEM) snprintf (sf_syserr, sizeof (sf_syserr), "%s", psf->syserr) ; snprintf (sf_parselog, sizeof (sf_parselog), "%s", psf->parselog.buf) ; switch (error) { case SF_ERR_SYSTEM : case SF_ERR_UNSUPPORTED_ENCODING : case SFE_UNIMPLEMENTED : break ; case SFE_RAW_BAD_FORMAT : break ; default : if (psf->file.mode == SFM_READ) { psf_log_printf (psf, "Parse error : %s\n", sf_error_number (error)) ; error = SF_ERR_MALFORMED_FILE ; } ; } ; psf_close (psf) ; return NULL ; } /* psf_open_file */ /*============================================================================== ** Chunk getting and setting. ** This works for AIFF, CAF, RF64 and WAV. ** It doesn't work for W64 because W64 uses weird GUID style chunk markers. 
*/ int sf_set_chunk (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->set_chunk) return psf->set_chunk (psf, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_set_chunk */ SF_CHUNK_ITERATOR * sf_get_chunk_iterator (SNDFILE * sndfile, const SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info) return psf_get_chunk_iterator (psf, chunk_info->id) ; return psf_get_chunk_iterator (psf, NULL) ; } /* sf_get_chunk_iterator */ SF_CHUNK_ITERATOR * sf_next_chunk_iterator (SF_CHUNK_ITERATOR * iterator) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (psf->next_chunk_iterator) return psf->next_chunk_iterator (psf, iterator) ; return NULL ; } /* sf_get_chunk_iterator_next */ int sf_get_chunk_size (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_size) return psf->get_chunk_size (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; return 0 ; } /* sf_get_chunk_size */ int sf_get_chunk_data (const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info) { SF_PRIVATE *psf ; SNDFILE *sndfile = iterator ? iterator->sndfile : NULL ; VALIDATE_SNDFILE_AND_ASSIGN_PSF (sndfile, psf, 1) ; if (chunk_info == NULL || chunk_info->data == NULL) return SFE_BAD_CHUNK_PTR ; if (psf->get_chunk_data) return psf->get_chunk_data (psf, iterator, chunk_info) ; return SFE_BAD_CHUNK_FORMAT ; } /* sf_get_chunk_data */
sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. */ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */
sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) { SF_PRIVATE *psf ; /* Make sure we have a valid set ot virtual pointers. */ if (sfvirtual->get_filelen == NULL || sfvirtual->seek == NULL || sfvirtual->tell == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_get_filelen / vio_seek / vio_tell in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_READ || mode == SFM_RDWR) && sfvirtual->read == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_read in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((mode == SFM_WRITE || mode == SFM_RDWR) && sfvirtual->write == NULL) { sf_errno = SFE_BAD_VIRTUAL_IO ; snprintf (sf_parselog, sizeof (sf_parselog), "Bad vio_write in SF_VIRTUAL_IO struct.\n") ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; psf->virtual_io = SF_TRUE ; psf->vio = *sfvirtual ; psf->vio_user_data = user_data ; psf->file.mode = mode ; return psf_open_file (psf, sfinfo) ; } /* sf_open_virtual */
{'added': [(270, '\t{\tSFE_BAD_HEADER_ALLOC \t, "Error : Required header allocation is too large." },'), (329, '\tif ((psf = psf_allocate ()) == NULL)'), (361, '\tif ((psf = psf_allocate ()) == NULL)'), (403, '\tif ((psf = psf_allocate ()) == NULL)'), (2691, '\tfree (psf->header.ptr) ;')], 'deleted': [(270, ''), (329, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (361, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)'), (403, '\tif ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)')]}
5
4
2,307
15,935
28
213
11
https://github.com/erikd/libsndfile
CVE-2017-7586
CWE-119
1,813
ctc_beam_search_decoder.cc
C++
tflite::ops::experimental::ctc_beam_search_decoder::StoreAllDecodedSequences
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <vector> #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/experimental/kernels/ctc_beam_search.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" namespace tflite { namespace ops { namespace experimental { namespace ctc_beam_search_decoder { constexpr int kInputsTensor = 0; constexpr int kSequenceLengthTensor = 1; typedef struct { int beam_width; int top_paths; bool merge_repeated; } CTCBeamSearchDecoderParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_CHECK(buffer != nullptr); const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); CTCBeamSearchDecoderParams* option = new CTCBeamSearchDecoderParams; option->beam_width = m["beam_width"].AsInt32(); option->top_paths = m["top_paths"].AsInt32(); option->merge_repeated = m["merge_repeated"].AsBool(); return option; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<CTCBeamSearchDecoderParams*>(buffer); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const 
CTCBeamSearchDecoderParams* option = reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data); const int top_paths = option->top_paths; TF_LITE_ENSURE(context, option->beam_width >= top_paths); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // The outputs should be top_paths * 3 + 1. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1); const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3); // TensorFlow only supports float. TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32); const int batch_size = SizeOfDimension(inputs, 1); const TfLiteTensor* sequence_length = GetInput(context, node, kSequenceLengthTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1); TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size); // TensorFlow only supports int32. TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32); // Resize decoded outputs. // Do not resize indices & values cause we don't know the values yet. for (int i = 0; i < top_paths; ++i) { TfLiteTensor* indices = GetOutput(context, node, i); SetTensorToDynamic(indices); TfLiteTensor* values = GetOutput(context, node, i + top_paths); SetTensorToDynamic(values); TfLiteTensor* output_shape = GetOutput(context, node, i + 2 * top_paths); SetTensorToDynamic(output_shape); } // Resize log probability outputs. 
TfLiteTensor* log_probability_output = GetOutput(context, node, top_paths * 3); TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2); log_probability_output_shape_array->data[0] = batch_size; log_probability_output_shape_array->data[1] = top_paths; return context->ResizeTensor(context, log_probability_output, log_probability_output_shape_array); } TfLiteStatus Resize(TfLiteContext* context, std::initializer_list<int32_t> output_shape, TfLiteTensor* output) { const int dimensions = output_shape.size(); TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(dimensions); int i = 0; for (const int v : output_shape) { output_shape_array->data[i++] = v; } return context->ResizeTensor(context, output, output_shape_array); } TfLiteStatus StoreAllDecodedSequences( TfLiteContext* context, const std::vector<std::vector<std::vector<int>>>& sequences, TfLiteNode* node, int top_paths) { const int32_t batch_size = sequences.size(); std::vector<int32_t> num_entries(top_paths, 0); // Calculate num_entries per path for (const auto& batch_s : sequences) { TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths); for (int p = 0; p < top_paths; ++p) { num_entries[p] += batch_s[p].size(); } } for (int p = 0; p < top_paths; ++p) { const int32_t p_num = num_entries[p]; // Resize the decoded outputs. 
TfLiteTensor* indices = GetOutput(context, node, p); TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices)); TfLiteTensor* values = GetOutput(context, node, p + top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values)); TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape)); int32_t max_decoded = 0; int32_t offset = 0; int32_t* indices_data = GetTensorData<int32_t>(indices); int32_t* values_data = GetTensorData<int32_t>(values); int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape); for (int b = 0; b < batch_size; ++b) { auto& p_batch = sequences[b][p]; int32_t num_decoded = p_batch.size(); max_decoded = std::max(max_decoded, num_decoded); std::copy_n(p_batch.begin(), num_decoded, values_data + offset); for (int32_t t = 0; t < num_decoded; ++t, ++offset) { indices_data[offset * 2] = b; indices_data[offset * 2 + 1] = t; } } decoded_shape_data[0] = batch_size; decoded_shape_data[1] = max_decoded; } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor); const TfLiteTensor* sequence_length = GetInput(context, node, kSequenceLengthTensor); const CTCBeamSearchDecoderParams* option = reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data); const int max_time = SizeOfDimension(inputs, 0); const int batch_size = SizeOfDimension(inputs, 1); const int num_classes = SizeOfDimension(inputs, 2); const int beam_width = option->beam_width; const int top_paths = option->top_paths; const bool merge_repeated = option->merge_repeated; // Validate sequence length is less or equal than max time. 
for (int i = 0; i < batch_size; ++i) { TF_LITE_ENSURE(context, max_time >= GetTensorData<int32_t>(sequence_length)[i]); } // The following logic is implemented like // tensorflow/core/kernels/ctc_decoder_ops.cc std::vector<optimized_ops::TTypes<float>::UnalignedConstMatrix> input_list_t; for (std::size_t t = 0; t < max_time; ++t) { input_list_t.emplace_back( GetTensorData<float>(inputs) + t * batch_size * num_classes, batch_size, num_classes); } ::tflite::experimental::ctc::CTCBeamSearchDecoder<>::DefaultBeamScorer beam_scorer; ::tflite::experimental::ctc::CTCBeamSearchDecoder<> beam_search( num_classes, beam_width, &beam_scorer, 1 /* batch_size */, merge_repeated); // Allocate temporary memory for holding chip operation data. float* input_chip_t_data = static_cast<float*>(malloc(num_classes * sizeof(float))); Eigen::array<Eigen::DenseIndex, 1> dims; dims[0] = num_classes; optimized_ops::TTypes<float>::Flat input_chip_t(input_chip_t_data, dims); std::vector<std::vector<std::vector<int>>> best_paths(batch_size); std::vector<float> log_probs; TfLiteTensor* log_probabilities = GetOutput(context, node, 3 * top_paths); float* log_probabilities_output = GetTensorData<float>(log_probabilities); // Assumption: the blank index is num_classes - 1 for (int b = 0; b < batch_size; ++b) { auto& best_paths_b = best_paths[b]; best_paths_b.resize(top_paths); for (int t = 0; t < GetTensorData<int32_t>(sequence_length)[b]; ++t) { input_chip_t = input_list_t[t].chip(b, 0); auto input_bi = Eigen::Map<const Eigen::ArrayXf>(input_chip_t.data(), num_classes); beam_search.Step(input_bi); } TF_LITE_ENSURE(context, beam_search.TopPaths(top_paths, &best_paths_b, &log_probs, merge_repeated)); beam_search.Reset(); // Fill in log_probabilities output. 
for (int bp = 0; bp < top_paths; ++bp) { log_probabilities_output[b * top_paths + bp] = log_probs[bp]; } } free(input_chip_t_data); return StoreAllDecodedSequences(context, best_paths, node, top_paths); } } // namespace ctc_beam_search_decoder TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER() { static TfLiteRegistration r = { ctc_beam_search_decoder::Init, ctc_beam_search_decoder::Free, ctc_beam_search_decoder::Prepare, ctc_beam_search_decoder::Eval}; return &r; } } // namespace experimental } // namespace ops } // namespace tflite
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <vector> #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/experimental/kernels/ctc_beam_search.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" namespace tflite { namespace ops { namespace experimental { namespace ctc_beam_search_decoder { constexpr int kInputsTensor = 0; constexpr int kSequenceLengthTensor = 1; typedef struct { int beam_width; int top_paths; bool merge_repeated; } CTCBeamSearchDecoderParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_CHECK(buffer != nullptr); const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); CTCBeamSearchDecoderParams* option = new CTCBeamSearchDecoderParams; option->beam_width = m["beam_width"].AsInt32(); option->top_paths = m["top_paths"].AsInt32(); option->merge_repeated = m["merge_repeated"].AsBool(); return option; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<CTCBeamSearchDecoderParams*>(buffer); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const 
CTCBeamSearchDecoderParams* option = reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data); const int top_paths = option->top_paths; TF_LITE_ENSURE(context, option->beam_width >= top_paths); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // The outputs should be top_paths * 3 + 1. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1); const TfLiteTensor* inputs; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputsTensor, &inputs)); TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3); // TensorFlow only supports float. TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32); const int batch_size = SizeOfDimension(inputs, 1); const TfLiteTensor* sequence_length; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor, &sequence_length)); TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1); TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size); // TensorFlow only supports int32. TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32); // Resize decoded outputs. // Do not resize indices & values cause we don't know the values yet. for (int i = 0; i < top_paths; ++i) { TfLiteTensor* indices; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &indices)); SetTensorToDynamic(indices); TfLiteTensor* values; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i + top_paths, &values)); SetTensorToDynamic(values); TfLiteTensor* output_shape; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i + 2 * top_paths, &output_shape)); SetTensorToDynamic(output_shape); } // Resize log probability outputs. 
TfLiteTensor* log_probability_output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, top_paths * 3, &log_probability_output)); TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2); log_probability_output_shape_array->data[0] = batch_size; log_probability_output_shape_array->data[1] = top_paths; return context->ResizeTensor(context, log_probability_output, log_probability_output_shape_array); } TfLiteStatus Resize(TfLiteContext* context, std::initializer_list<int32_t> output_shape, TfLiteTensor* output) { const int dimensions = output_shape.size(); TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(dimensions); int i = 0; for (const int v : output_shape) { output_shape_array->data[i++] = v; } return context->ResizeTensor(context, output, output_shape_array); } TfLiteStatus StoreAllDecodedSequences( TfLiteContext* context, const std::vector<std::vector<std::vector<int>>>& sequences, TfLiteNode* node, int top_paths) { const int32_t batch_size = sequences.size(); std::vector<int32_t> num_entries(top_paths, 0); // Calculate num_entries per path for (const auto& batch_s : sequences) { TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths); for (int p = 0; p < top_paths; ++p) { num_entries[p] += batch_s[p].size(); } } for (int p = 0; p < top_paths; ++p) { const int32_t p_num = num_entries[p]; // Resize the decoded outputs. 
TfLiteTensor* indices; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices)); TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices)); TfLiteTensor* values; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + top_paths, &values)); TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values)); TfLiteTensor* decoded_shape; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths, &decoded_shape)); TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape)); int32_t max_decoded = 0; int32_t offset = 0; int32_t* indices_data = GetTensorData<int32_t>(indices); int32_t* values_data = GetTensorData<int32_t>(values); int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape); for (int b = 0; b < batch_size; ++b) { auto& p_batch = sequences[b][p]; int32_t num_decoded = p_batch.size(); max_decoded = std::max(max_decoded, num_decoded); std::copy_n(p_batch.begin(), num_decoded, values_data + offset); for (int32_t t = 0; t < num_decoded; ++t, ++offset) { indices_data[offset * 2] = b; indices_data[offset * 2 + 1] = t; } } decoded_shape_data[0] = batch_size; decoded_shape_data[1] = max_decoded; } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* inputs; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputsTensor, &inputs)); const TfLiteTensor* sequence_length; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor, &sequence_length)); const CTCBeamSearchDecoderParams* option = reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data); const int max_time = SizeOfDimension(inputs, 0); const int batch_size = SizeOfDimension(inputs, 1); const int num_classes = SizeOfDimension(inputs, 2); const int beam_width = option->beam_width; const int top_paths = option->top_paths; const bool merge_repeated = option->merge_repeated; // Validate sequence length is less or equal than max time. 
for (int i = 0; i < batch_size; ++i) { TF_LITE_ENSURE(context, max_time >= GetTensorData<int32_t>(sequence_length)[i]); } // The following logic is implemented like // tensorflow/core/kernels/ctc_decoder_ops.cc std::vector<optimized_ops::TTypes<float>::UnalignedConstMatrix> input_list_t; for (std::size_t t = 0; t < max_time; ++t) { input_list_t.emplace_back( GetTensorData<float>(inputs) + t * batch_size * num_classes, batch_size, num_classes); } ::tflite::experimental::ctc::CTCBeamSearchDecoder<>::DefaultBeamScorer beam_scorer; ::tflite::experimental::ctc::CTCBeamSearchDecoder<> beam_search( num_classes, beam_width, &beam_scorer, 1 /* batch_size */, merge_repeated); // Allocate temporary memory for holding chip operation data. float* input_chip_t_data = static_cast<float*>(malloc(num_classes * sizeof(float))); Eigen::array<Eigen::DenseIndex, 1> dims; dims[0] = num_classes; optimized_ops::TTypes<float>::Flat input_chip_t(input_chip_t_data, dims); std::vector<std::vector<std::vector<int>>> best_paths(batch_size); std::vector<float> log_probs; TfLiteTensor* log_probabilities; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, 3 * top_paths, &log_probabilities)); float* log_probabilities_output = GetTensorData<float>(log_probabilities); // Assumption: the blank index is num_classes - 1 for (int b = 0; b < batch_size; ++b) { auto& best_paths_b = best_paths[b]; best_paths_b.resize(top_paths); for (int t = 0; t < GetTensorData<int32_t>(sequence_length)[b]; ++t) { input_chip_t = input_list_t[t].chip(b, 0); auto input_bi = Eigen::Map<const Eigen::ArrayXf>(input_chip_t.data(), num_classes); beam_search.Step(input_bi); } TF_LITE_ENSURE(context, beam_search.TopPaths(top_paths, &best_paths_b, &log_probs, merge_repeated)); beam_search.Reset(); // Fill in log_probabilities output. 
for (int bp = 0; bp < top_paths; ++bp) { log_probabilities_output[b * top_paths + bp] = log_probs[bp]; } } free(input_chip_t_data); return StoreAllDecodedSequences(context, best_paths, node, top_paths); } } // namespace ctc_beam_search_decoder TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER() { static TfLiteRegistration r = { ctc_beam_search_decoder::Init, ctc_beam_search_decoder::Free, ctc_beam_search_decoder::Prepare, ctc_beam_search_decoder::Eval}; return &r; } } // namespace experimental } // namespace ops } // namespace tflite
TfLiteStatus StoreAllDecodedSequences( TfLiteContext* context, const std::vector<std::vector<std::vector<int>>>& sequences, TfLiteNode* node, int top_paths) { const int32_t batch_size = sequences.size(); std::vector<int32_t> num_entries(top_paths, 0); // Calculate num_entries per path for (const auto& batch_s : sequences) { TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths); for (int p = 0; p < top_paths; ++p) { num_entries[p] += batch_s[p].size(); } } for (int p = 0; p < top_paths; ++p) { const int32_t p_num = num_entries[p]; // Resize the decoded outputs. TfLiteTensor* indices = GetOutput(context, node, p); TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices)); TfLiteTensor* values = GetOutput(context, node, p + top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values)); TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape)); int32_t max_decoded = 0; int32_t offset = 0; int32_t* indices_data = GetTensorData<int32_t>(indices); int32_t* values_data = GetTensorData<int32_t>(values); int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape); for (int b = 0; b < batch_size; ++b) { auto& p_batch = sequences[b][p]; int32_t num_decoded = p_batch.size(); max_decoded = std::max(max_decoded, num_decoded); std::copy_n(p_batch.begin(), num_decoded, values_data + offset); for (int32_t t = 0; t < num_decoded; ++t, ++offset) { indices_data[offset * 2] = b; indices_data[offset * 2 + 1] = t; } } decoded_shape_data[0] = batch_size; decoded_shape_data[1] = max_decoded; } return kTfLiteOk; }
TfLiteStatus StoreAllDecodedSequences( TfLiteContext* context, const std::vector<std::vector<std::vector<int>>>& sequences, TfLiteNode* node, int top_paths) { const int32_t batch_size = sequences.size(); std::vector<int32_t> num_entries(top_paths, 0); // Calculate num_entries per path for (const auto& batch_s : sequences) { TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths); for (int p = 0; p < top_paths; ++p) { num_entries[p] += batch_s[p].size(); } } for (int p = 0; p < top_paths; ++p) { const int32_t p_num = num_entries[p]; // Resize the decoded outputs. TfLiteTensor* indices; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices)); TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices)); TfLiteTensor* values; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + top_paths, &values)); TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values)); TfLiteTensor* decoded_shape; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths, &decoded_shape)); TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape)); int32_t max_decoded = 0; int32_t offset = 0; int32_t* indices_data = GetTensorData<int32_t>(indices); int32_t* values_data = GetTensorData<int32_t>(values); int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape); for (int b = 0; b < batch_size; ++b) { auto& p_batch = sequences[b][p]; int32_t num_decoded = p_batch.size(); max_decoded = std::max(max_decoded, num_decoded); std::copy_n(p_batch.begin(), num_decoded, values_data + offset); for (int32_t t = 0; t < num_decoded; ++t, ++offset) { indices_data[offset * 2] = b; indices_data[offset * 2 + 1] = t; } } decoded_shape_data[0] = batch_size; decoded_shape_data[1] = max_decoded; } return kTfLiteOk; }
{'added': [(65, ' const TfLiteTensor* inputs;'), (66, ' TF_LITE_ENSURE_OK(context,'), (67, ' GetInputSafe(context, node, kInputsTensor, &inputs));'), (73, ' const TfLiteTensor* sequence_length;'), (74, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,'), (75, ' &sequence_length));'), (84, ' TfLiteTensor* indices;'), (85, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &indices));'), (87, ' TfLiteTensor* values;'), (88, ' TF_LITE_ENSURE_OK(context,'), (89, ' GetOutputSafe(context, node, i + top_paths, &values));'), (91, ' TfLiteTensor* output_shape;'), (92, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i + 2 * top_paths,'), (93, ' &output_shape));'), (98, ' TfLiteTensor* log_probability_output;'), (99, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, top_paths * 3,'), (100, ' &log_probability_output));'), (139, ' TfLiteTensor* indices;'), (140, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices));'), (143, ' TfLiteTensor* values;'), (144, ' TF_LITE_ENSURE_OK(context,'), (145, ' GetOutputSafe(context, node, p + top_paths, &values));'), (148, ' TfLiteTensor* decoded_shape;'), (149, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths,'), (150, ' &decoded_shape));'), (178, ' const TfLiteTensor* inputs;'), (179, ' TF_LITE_ENSURE_OK(context,'), (180, ' GetInputSafe(context, node, kInputsTensor, &inputs));'), (181, ' const TfLiteTensor* sequence_length;'), (182, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,'), (183, ' &sequence_length));'), (227, ' TfLiteTensor* log_probabilities;'), (228, ' TF_LITE_ENSURE_OK('), (229, ' context, GetOutputSafe(context, node, 3 * top_paths, &log_probabilities));')], 'deleted': [(65, ' const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor);'), (71, ' const TfLiteTensor* sequence_length ='), (72, ' GetInput(context, node, kSequenceLengthTensor);'), (81, ' TfLiteTensor* indices = 
GetOutput(context, node, i);'), (83, ' TfLiteTensor* values = GetOutput(context, node, i + top_paths);'), (85, ' TfLiteTensor* output_shape = GetOutput(context, node, i + 2 * top_paths);'), (90, ' TfLiteTensor* log_probability_output ='), (91, ' GetOutput(context, node, top_paths * 3);'), (130, ' TfLiteTensor* indices = GetOutput(context, node, p);'), (133, ' TfLiteTensor* values = GetOutput(context, node, p + top_paths);'), (136, ' TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths);'), (164, ' const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor);'), (165, ' const TfLiteTensor* sequence_length ='), (166, ' GetInput(context, node, kSequenceLengthTensor);'), (210, ' TfLiteTensor* log_probabilities = GetOutput(context, node, 3 * top_paths);')]}
34
15
199
1,626
40
399
6
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,927
FontFileBase.h
C++
NSFontConverter::CFontFileBase::CFontFileBase
/* * (c) Copyright Ascensio System SIA 2010-2019 * * This program is a free software product. You can redistribute it and/or * modify it under the terms of the GNU Affero General Public License (AGPL) * version 3 as published by the Free Software Foundation. In accordance with * Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect * that Ascensio System SIA expressly excludes the warranty of non-infringement * of any third-party rights. * * This program is distributed WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For * details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html * * You can contact Ascensio System SIA at 20A-12 Ernesta Birznieka-Upisha * street, Riga, Latvia, EU, LV-1050. * * The interactive user interfaces in modified source and object code versions * of the Program must display Appropriate Legal Notices, as required under * Section 5 of the GNU AGPL version 3. * * Pursuant to Section 7(b) of the License you must retain the original Product * logo when distributing the program. Pursuant to Section 7(e) we decline to * grant you any rights under trademark law for use of our trademarks. * * All the Product's GUI elements, including illustrations and icon sets, as * well as technical writing content are licensed under the terms of the * Creative Commons Attribution-ShareAlike 4.0 International. 
See the License * terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode * */ #ifndef _ASC_FONTCONVERTER_FONT_FILE_BASE_H #define _ASC_FONTCONVERTER_FONT_FILE_BASE_H #include <stdio.h> #include "MemoryUtils.h" #include "../../common/File.h" namespace NSFontConverter { //------------------------------------------------------------------------ typedef void (*FontFileOutputFunc)(void *pStream, const char *sData, int nLen); //------------------------------------------------------------------------ // CFontFileBase //------------------------------------------------------------------------ class CFontFileBase { public: virtual ~CFontFileBase() { if ( m_bFreeFileData ) MemUtilsFree( m_sFileData ); } protected: CFontFileBase(char *sFile, int nLen, bool bFreeFileData) { m_sFileData = m_sFile = (unsigned char *)sFile; m_nLen = nLen; m_bFreeFileData = bFreeFileData; m_nPos = 0; } void Reset() { m_nPos = 0; } static char *ReadFile(const wchar_t *wsFileName, int *pnFileLen) { NSFile::CFileBinary oFile; if ( !oFile.OpenFile(wsFileName) ) return NULL; int nLen = (int)oFile.GetFileSize(); char *sBuffer = (char *)MemUtilsMalloc( nLen ); DWORD dwRead = 0; oFile.ReadFile((BYTE*)sBuffer, (DWORD)nLen, dwRead); if ((int)dwRead != nLen) { MemUtilsFree( sBuffer ); return NULL; } *pnFileLen = nLen; return sBuffer; } // S = signed / U = unsigned // 8/16/32/Var = word length, in bytes // BE = big endian int GetS8 (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos >= m_nLen ) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; if ( nRes & 0x80 ) nRes |= ~0xff; return nRes; } int GetU8 (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos >= m_nLen ) { *pbSuccess = false; return 0; } return m_sFile[ nPos ]; } int GetS16BE (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + 1 >= m_nLen ) { *pbSuccess = false; return 0; } int nRes = m_sFile[nPos]; nRes = (nRes << 8) + m_sFile[ nPos + 1 ]; if ( nRes & 0x8000 ) 
nRes |= ~0xffff; return nRes; } int GetU16BE (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + 1 >= m_nLen) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; nRes = (nRes << 8) + m_sFile[ nPos + 1 ]; return nRes; } int GetS32BE (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + 3 >= m_nLen ) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 3]; if ( nRes & 0x80000000 ) nRes |= ~0xffffffff; return nRes; } unsigned int GetU32BE (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + 3 >= m_nLen ) { *pbSuccess = false; return 0; } unsigned int nRes = m_sFile[nPos]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 3]; return nRes; } unsigned int GetU32LE (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + 3 >= m_nLen ) { *pbSuccess = false; return 0; } unsigned int nRes = m_sFile[nPos + 3]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 0]; return nRes; } unsigned int GetUVarBE(int nPos, int nSize, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + nSize > m_nLen ) { *pbSuccess = false; return 0; } unsigned int nRes = 0; for ( int nIndex = 0; nIndex < nSize; ++nIndex ) nRes = (nRes << 8) + m_sFile[nPos + nIndex]; return nRes; } bool CheckRegion(int nPos, int nSize) { return (nPos >= 0 && nPos + nSize >= nPos && nPos + nSize <= m_nLen); } int ReadS8 (bool *pbSuccess) { return GetS8( m_nPos++, pbSuccess ); } int ReadU8 (bool *pbSuccess) { return GetU8( m_nPos++, pbSuccess ); } unsigned int ReadU32BE(bool *pbSuccess) { unsigned int unResult = GetU32BE( m_nPos, pbSuccess ); m_nPos += 4; return unResult; } unsigned int ReadU32LE(bool *pbSuccess) { unsigned int unResult = GetU32LE( m_nPos, pbSuccess ); 
m_nPos += 4; return unResult; } int Read(void* pDestBuffer, int nSize) { if ( m_nPos + nSize >= m_nLen ) nSize = m_nLen - m_nPos - 1; memcpy( pDestBuffer, (m_sFile + m_nPos), nSize ); m_nPos += nSize; return nSize; } protected: unsigned char *m_sFileData; unsigned char *m_sFile; int m_nLen; bool m_bFreeFileData; int m_nPos; }; } #endif /* _ASC_FONTCONVERTER_FONT_FILE_BASE_H */
/* * (c) Copyright Ascensio System SIA 2010-2019 * * This program is a free software product. You can redistribute it and/or * modify it under the terms of the GNU Affero General Public License (AGPL) * version 3 as published by the Free Software Foundation. In accordance with * Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect * that Ascensio System SIA expressly excludes the warranty of non-infringement * of any third-party rights. * * This program is distributed WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For * details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html * * You can contact Ascensio System SIA at 20A-12 Ernesta Birznieka-Upisha * street, Riga, Latvia, EU, LV-1050. * * The interactive user interfaces in modified source and object code versions * of the Program must display Appropriate Legal Notices, as required under * Section 5 of the GNU AGPL version 3. * * Pursuant to Section 7(b) of the License you must retain the original Product * logo when distributing the program. Pursuant to Section 7(e) we decline to * grant you any rights under trademark law for use of our trademarks. * * All the Product's GUI elements, including illustrations and icon sets, as * well as technical writing content are licensed under the terms of the * Creative Commons Attribution-ShareAlike 4.0 International. 
See the License * terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode * */ #ifndef _ASC_FONTCONVERTER_FONT_FILE_BASE_H #define _ASC_FONTCONVERTER_FONT_FILE_BASE_H #include <stdio.h> #include "MemoryUtils.h" #include "../../common/File.h" namespace NSFontConverter { //------------------------------------------------------------------------ typedef void (*FontFileOutputFunc)(void *pStream, const char *sData, int nLen); //------------------------------------------------------------------------ // CFontFileBase //------------------------------------------------------------------------ class CFontFileBase { public: virtual ~CFontFileBase() { if ( m_bFreeFileData ) MemUtilsFree( m_sFileData ); } protected: CFontFileBase(char *sFile, int nLen, bool bFreeFileData) { m_sFileData = m_sFile = (unsigned char *)sFile; m_nLen = (nLen > 0) ? 0 : (unsigned int)nLen; m_nPos = 0; m_bFreeFileData = bFreeFileData; } void Reset() { m_nPos = 0; } static char *ReadFile(const wchar_t *wsFileName, int *pnFileLen) { NSFile::CFileBinary oFile; if ( !oFile.OpenFile(wsFileName) ) return NULL; int nLen = (int)oFile.GetFileSize(); char *sBuffer = (char *)MemUtilsMalloc( nLen ); DWORD dwRead = 0; oFile.ReadFile((BYTE*)sBuffer, (DWORD)nLen, dwRead); if ((int)dwRead != nLen) { MemUtilsFree( sBuffer ); return NULL; } *pnFileLen = nLen; return sBuffer; } // S = signed / U = unsigned // 8/16/32/Var = word length, in bytes // BE = big endian int GetS8 (const unsigned int& nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos >= m_nLen ) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; if ( nRes & 0x80 ) nRes |= ~0xff; return nRes; } int GetU8 (const unsigned int& nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos >= m_nLen ) { *pbSuccess = false; return 0; } return m_sFile[ nPos ]; } int GetS16BE (const unsigned int& nPos, bool *pbSuccess) { //*pbSuccess = true; if ( m_nLen < 2 || nPos > (m_nLen - 2) ) { *pbSuccess = false; return 0; } int nRes = m_sFile[nPos]; nRes = 
(nRes << 8) + m_sFile[ nPos + 1 ]; if ( nRes & 0x8000 ) nRes |= ~0xffff; return nRes; } int GetU16BE (const unsigned int& nPos, bool *pbSuccess) { //*pbSuccess = true; if ( m_nLen < 2 || nPos > (m_nLen - 2) ) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; nRes = (nRes << 8) + m_sFile[ nPos + 1 ]; return nRes; } int GetS32BE (const unsigned int& nPos, bool *pbSuccess) { //*pbSuccess = true; if ( m_nLen < 4 || nPos > (m_nLen - 4) ) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 3]; if ( nRes & 0x80000000 ) nRes |= ~0xffffffff; return nRes; } unsigned int GetU32BE (const unsigned int& nPos, bool *pbSuccess) { //*pbSuccess = true; if ( m_nLen < 4 || nPos > (m_nLen - 4) ) { *pbSuccess = false; return 0; } unsigned int nRes = m_sFile[nPos]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 3]; return nRes; } unsigned int GetU32LE (const unsigned int& nPos, bool *pbSuccess) { //*pbSuccess = true; if ( m_nLen < 4 || nPos > (m_nLen - 4) ) { *pbSuccess = false; return 0; } unsigned int nRes = m_sFile[nPos + 3]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 0]; return nRes; } unsigned int GetUVarBE(const unsigned int& nPos, const unsigned int& nSize, bool *pbSuccess) { //*pbSuccess = true; if ( m_nLen < nSize || nPos > (m_nLen - nSize) ) { *pbSuccess = false; return 0; } unsigned int nRes = 0; for ( int nIndex = 0; nIndex < nSize; ++nIndex ) nRes = (nRes << 8) + m_sFile[nPos + nIndex]; return nRes; } bool CheckRegion(const unsigned int& nPos, const unsigned int& nSize) { return (m_nLen >= nSize && nPos <= (m_nLen - nSize)); } int ReadS8 (bool *pbSuccess) { return GetS8( m_nPos++, pbSuccess ); } int ReadU8 (bool *pbSuccess) { return GetU8( m_nPos++, pbSuccess ); } unsigned int ReadU32BE(bool 
*pbSuccess) { unsigned int unResult = GetU32BE( m_nPos, pbSuccess ); m_nPos += 4; return unResult; } unsigned int ReadU32LE(bool *pbSuccess) { unsigned int unResult = GetU32LE( m_nPos, pbSuccess ); m_nPos += 4; return unResult; } int Read(void* pDestBuffer, unsigned int nSize) { if (m_nPos >= m_nLen) nSize = 0; else if (nSize > (m_nLen - m_nPos)) nSize = m_nLen - m_nPos; memcpy( pDestBuffer, (m_sFile + m_nPos), nSize ); m_nPos += nSize; return nSize; } protected: unsigned char *m_sFileData; unsigned char *m_sFile; unsigned int m_nLen; unsigned int m_nPos; bool m_bFreeFileData; }; } #endif /* _ASC_FONTCONVERTER_FONT_FILE_BASE_H */
CFontFileBase(char *sFile, int nLen, bool bFreeFileData) { m_sFileData = m_sFile = (unsigned char *)sFile; m_nLen = nLen; m_bFreeFileData = bFreeFileData; m_nPos = 0; }
CFontFileBase(char *sFile, int nLen, bool bFreeFileData) { m_sFileData = m_sFile = (unsigned char *)sFile; m_nLen = (nLen > 0) ? 0 : (unsigned int)nLen; m_nPos = 0; m_bFreeFileData = bFreeFileData; }
{'added': [(64, ' m_nLen = (nLen > 0) ? 0 : (unsigned int)nLen;'), (66, ' m_bFreeFileData = bFreeFileData;'), (97, ' int GetS8 (const unsigned int& nPos, bool *pbSuccess)'), (101, ' if ( nPos >= m_nLen )'), (112, ' int GetU8 (const unsigned int& nPos, bool *pbSuccess)'), (115, ' if ( nPos >= m_nLen )'), (123, ' int GetS16BE (const unsigned int& nPos, bool *pbSuccess)'), (127, ' if ( m_nLen < 2 || nPos > (m_nLen - 2) )'), (139, ' int GetU16BE (const unsigned int& nPos, bool *pbSuccess)'), (143, ' if ( m_nLen < 2 || nPos > (m_nLen - 2) )'), (153, ' int GetS32BE (const unsigned int& nPos, bool *pbSuccess)'), (157, ' if ( m_nLen < 4 || nPos > (m_nLen - 4) )'), (172, ' unsigned int GetU32BE (const unsigned int& nPos, bool *pbSuccess)'), (176, ' if ( m_nLen < 4 || nPos > (m_nLen - 4) )'), (187, ' unsigned int GetU32LE (const unsigned int& nPos, bool *pbSuccess)'), (191, ' if ( m_nLen < 4 || nPos > (m_nLen - 4) )'), (202, ' unsigned int GetUVarBE(const unsigned int& nPos, const unsigned int& nSize, bool *pbSuccess)'), (206, ' if ( m_nLen < nSize || nPos > (m_nLen - nSize) )'), (218, ' bool CheckRegion(const unsigned int& nPos, const unsigned int& nSize)'), (220, ' return (m_nLen >= nSize && nPos <= (m_nLen - nSize));'), (242, ' int Read(void* pDestBuffer, unsigned int nSize)'), (244, ' if (m_nPos >= m_nLen)'), (245, ' nSize = 0;'), (246, ' else if (nSize > (m_nLen - m_nPos))'), (247, ' nSize = m_nLen - m_nPos;'), (259, ' unsigned int m_nLen;'), (260, ' unsigned int m_nPos;')], 'deleted': [(64, ' m_nLen = nLen;'), (65, ' m_bFreeFileData = bFreeFileData;'), (97, ' int GetS8 (int nPos, bool *pbSuccess)'), (101, ' if ( nPos < 0 || nPos >= m_nLen )'), (112, ' int GetU8 (int nPos, bool *pbSuccess)'), (115, ' if ( nPos < 0 || nPos >= m_nLen )'), (123, ' int GetS16BE (int nPos, bool *pbSuccess)'), (127, ' if ( nPos < 0 || nPos + 1 >= m_nLen )'), (139, ' int GetU16BE (int nPos, bool *pbSuccess)'), (143, ' if ( nPos < 0 || nPos + 1 >= m_nLen)'), (153, ' int GetS32BE (int nPos, bool 
*pbSuccess)'), (157, ' if ( nPos < 0 || nPos + 3 >= m_nLen )'), (172, ' unsigned int GetU32BE (int nPos, bool *pbSuccess)'), (176, ' if ( nPos < 0 || nPos + 3 >= m_nLen )'), (187, ' unsigned int GetU32LE (int nPos, bool *pbSuccess)'), (191, ' if ( nPos < 0 || nPos + 3 >= m_nLen )'), (202, ' unsigned int GetUVarBE(int nPos, int nSize, bool *pbSuccess)'), (206, ' if ( nPos < 0 || nPos + nSize > m_nLen )'), (218, ' bool CheckRegion(int nPos, int nSize)'), (220, ' return (nPos >= 0 && nPos + nSize >= nPos && nPos + nSize <= m_nLen);'), (242, ' int Read(void* pDestBuffer, int nSize)'), (244, ' if ( m_nPos + nSize >= m_nLen )'), (245, ' nSize = m_nLen - m_nPos - 1;'), (257, ' int m_nLen;'), (259, ' int m_nPos;'), (260, '')]}
27
26
184
1,056
7
37
1
https://github.com/ONLYOFFICE/core
CVE-2022-29777
CWE-787
393
snmp-ber.c
C
snmp_ber_encode_integer
/* * Copyright (C) 2019 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*---------------------------------------------------------------------------*/ /** * \file * An implementation of the Simple Network Management Protocol (RFC 3411-3418) * \author * Yago Fontoura do Rosario <yago.rosario@hotmail.com.br */ #include "contiki.h" #include "snmp.h" #include "snmp-ber.h" #define LOG_MODULE "SNMP [ber]" #define LOG_LEVEL LOG_LEVEL_SNMP /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_type(unsigned char *out, uint32_t *out_len, uint8_t type) { *out-- = type; (*out_len)++; return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_length(unsigned char *out, uint32_t *out_len, uint8_t length) { *out-- = length; (*out_len)++; return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_integer(unsigned char *out, uint32_t *out_len, uint32_t number) { uint32_t original_out_len; original_out_len = *out_len; do { (*out_len)++; *out-- = (uint8_t)(number & 0xFF); number >>= 8; } while(number); out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF)); out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_INTEGER); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_unsigned_integer(unsigned char *out, uint32_t *out_len, uint8_t type, uint32_t number) { uint32_t original_out_len; original_out_len = *out_len; do { (*out_len)++; *out-- = (uint8_t)(number & 0xFF); number >>= 8; } while(number); out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF)); out = snmp_ber_encode_type(out, out_len, type); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_string_len(unsigned char *out, uint32_t *out_len, const char *str, uint32_t length) { uint32_t i; str += length - 
1; for(i = 0; i < length; ++i) { (*out_len)++; *out-- = (uint8_t)*str--; } out = snmp_ber_encode_length(out, out_len, length); out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_OCTET_STRING); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_null(unsigned char *out, uint32_t *out_len, uint8_t type) { (*out_len)++; *out-- = 0x00; out = snmp_ber_encode_type(out, out_len, type); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_type(unsigned char *buff, uint32_t *buff_len, uint8_t *type) { if(*buff_len == 0) { return NULL; } *type = *buff++; (*buff_len)--; return buff; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_length(unsigned char *buff, uint32_t *buff_len, uint8_t *length) { if(*buff_len == 0) { return NULL; } *length = *buff++; (*buff_len)--; return buff; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_integer(unsigned char *buf, uint32_t *buff_len, uint32_t *num) { uint8_t i, len, type; buf = snmp_ber_decode_type(buf, buff_len, &type); if(buf == NULL || type != BER_DATA_TYPE_INTEGER) { /* * Sanity check * Invalid type in buffer */ return NULL; } buf = snmp_ber_decode_length(buf, buff_len, &len); if(buf == NULL || len > 4) { /* * Sanity check * It will not fit in the uint32_t */ return NULL; } if(*buff_len < len) { return NULL; } *num = (uint32_t)(*buf++ & 0xFF); (*buff_len)--; for(i = 1; i < len; ++i) { *num <<= 8; *num |= (uint8_t)(*buf++ & 0xFF); (*buff_len)--; } return buf; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_unsigned_integer(unsigned char *buf, uint32_t *buff_len, uint8_t expected_type, uint32_t *num) { uint8_t i, len, type; buf = snmp_ber_decode_type(buf, buff_len, &type); if(buf == NULL || 
type != expected_type) { /* * Sanity check * Invalid type in buffer */ return NULL; } buf = snmp_ber_decode_length(buf, buff_len, &len); if(buf == NULL || len > 4) { /* * Sanity check * It will not fit in the uint32_t */ return NULL; } if(*buff_len < len) { return NULL; } *num = (uint32_t)(*buf++ & 0xFF); (*buff_len)--; for(i = 1; i < len; ++i) { *num <<= 8; *num |= (uint8_t)(*buf++ & 0xFF); (*buff_len)--; } return buf; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_string_len_buffer(unsigned char *buf, uint32_t *buff_len, const char **str, uint32_t *length) { uint8_t type, i, length_bytes; buf = snmp_ber_decode_type(buf, buff_len, &type); if(buf == NULL || type != BER_DATA_TYPE_OCTET_STRING) { /* * Sanity check * Invalid type in buffer */ return NULL; } if((*buf & 0x80) == 0) { *length = (uint32_t)*buf++; (*buff_len)--; } else { length_bytes = (uint8_t)(*buf++ & 0x7F); (*buff_len)--; if(length_bytes > 4) { /* * Sanity check * It will not fit in the uint32_t */ return NULL; } *length = (uint32_t)*buf++; (*buff_len)--; for(i = 1; i < length_bytes; ++i) { *length <<= 8; *length |= *buf++; (*buff_len)--; } } *str = (const char *)buf; *buff_len -= *length; return buf + *length; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_null(unsigned char *buf, uint32_t *buff_len) { buf++; (*buff_len)--; buf++; (*buff_len)--; return buf; } /*---------------------------------------------------------------------------*/
/* * Copyright (C) 2019-2020 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*---------------------------------------------------------------------------*/ /** * \file * SNMP Implementation of the BER encoding * \author * Yago Fontoura do Rosario <yago.rosario@hotmail.com.br */ #include "contiki.h" #include "snmp.h" #include "snmp-ber.h" #define LOG_MODULE "SNMP [ber]" #define LOG_LEVEL LOG_LEVEL_SNMP /*---------------------------------------------------------------------------*/ static inline int snmp_ber_encode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t type, uint32_t number) { uint16_t original_out_len; original_out_len = snmp_packet->used; do { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)number & 0xFF; snmp_packet->used++; /* I'm not sure why but on MSPGCC the >> 8 operation goes haywire here */ #ifdef __MSPGCC__ number >>= 4; number >>= 4; #else /* __MSPGCC__ */ number >>= 8; #endif /* __MSPGCC__ */ } while(number); if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) { return 0; } if(!snmp_ber_encode_type(snmp_packet, type)) { return 0; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_type(snmp_packet_t *snmp_packet, uint8_t type) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = type; snmp_packet->used++; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_length(snmp_packet_t *snmp_packet, uint16_t length) { if(length > 0xFF) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)(length >> 8) & 0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x82; snmp_packet->used++; } else if(length > 0x7F) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 
0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x81; snmp_packet->used++; } else { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 0x7F; snmp_packet->used++; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_timeticks(snmp_packet_t *snmp_packet, uint32_t timeticks) { return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks); } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_integer(snmp_packet_t *snmp_packet, uint32_t number) { return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, number); } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_string_len(snmp_packet_t *snmp_packet, const char *str, uint32_t length) { uint32_t i; str += length - 1; for(i = 0; i < length; ++i) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)*str--; snmp_packet->used++; } if(!snmp_ber_encode_length(snmp_packet, length)) { return 0; } if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OCTET_STRING)) { return 0; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid) { uint32_t val; uint16_t original_out_len; uint8_t pos; original_out_len = snmp_packet->used; pos = oid->length - 1; while(pos) { val = oid->data[pos]; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)(val & 0x7F); snmp_packet->used++; val >>= 7; while(val) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80); snmp_packet->used++; val >>= 7; } pos--; } if(snmp_packet->used == snmp_packet->max) { return 0; } val = *(snmp_packet->out + 1) + 40 * oid->data[pos]; 
snmp_packet->used--; snmp_packet->out++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)(val & 0x7F); snmp_packet->used++; val >>= 7; while(val) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80); snmp_packet->used++; val >>= 7; } if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) { return 0; } if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OBJECT_IDENTIFIER)) { return 0; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_null(snmp_packet_t *snmp_packet, uint8_t type) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x00; snmp_packet->used++; return snmp_ber_encode_type(snmp_packet, type); } /*---------------------------------------------------------------------------*/ static inline int snmp_ber_decode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t expected_type, uint32_t *num) { uint8_t i, len, type; if(!snmp_ber_decode_type(snmp_packet, &type)) { return 0; } if(type != expected_type) { /* * Sanity check * Invalid type in buffer */ return 0; } if(!snmp_ber_decode_length(snmp_packet, &len)) { return 0; } if(len > 4) { /* * Sanity check * It will not fit in the uint32_t */ return 0; } if(snmp_packet->used == 0) { return 0; } *num = (uint32_t)(*snmp_packet->in++ & 0xFF); snmp_packet->used--; for(i = 1; i < len; ++i) { *num <<= 8; if(snmp_packet->used == 0) { return 0; } *num |= (uint8_t)(*snmp_packet->in++ & 0xFF); snmp_packet->used--; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_type(snmp_packet_t *snmp_packet, uint8_t *type) { if(snmp_packet->used == 0) { return 0; } *type = *snmp_packet->in++; snmp_packet->used--; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_length(snmp_packet_t *snmp_packet, uint8_t 
*length) { if(snmp_packet->used == 0) { return 0; } *length = *snmp_packet->in++; snmp_packet->used--; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_timeticks(snmp_packet_t *snmp_packet, uint32_t *timeticks) { return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks); } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_integer(snmp_packet_t *snmp_packet, uint32_t *num) { return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, num); } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_string_len_buffer(snmp_packet_t *snmp_packet, const char **str, uint32_t *length) { uint8_t type, i, length_bytes; if(!snmp_ber_decode_type(snmp_packet, &type)) { return 0; } if(type != BER_DATA_TYPE_OCTET_STRING) { /* * Sanity check * Invalid type in buffer */ return 0; } if((*snmp_packet->in & 0x80) == 0) { if(snmp_packet->used == 0) { return 0; } *length = (uint32_t)*snmp_packet->in++; snmp_packet->used--; } else { if(snmp_packet->used == 0) { return 0; } length_bytes = (uint8_t)(*snmp_packet->in++ & 0x7F); snmp_packet->used--; if(length_bytes > 4) { /* * Sanity check * It will not fit in the uint32_t */ return 0; } if(snmp_packet->used == 0) { return 0; } *length = (uint32_t)*snmp_packet->in++; snmp_packet->used--; for(i = 1; i < length_bytes; ++i) { *length <<= 8; if(snmp_packet->used == 0) { return 0; } *length |= *snmp_packet->in++; snmp_packet->used--; } } *str = (const char *)snmp_packet->in; if(snmp_packet->used == 0 || snmp_packet->used - *length <= 0) { return 0; } snmp_packet->used -= *length; snmp_packet->in += *length; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid) { uint8_t *buf_end, type; uint8_t len, j; div_t first; 
if(!snmp_ber_decode_type(snmp_packet, &type)) { return 0; } if(type != BER_DATA_TYPE_OBJECT_IDENTIFIER) { return 0; } if(!snmp_ber_decode_length(snmp_packet, &len)) { return 0; } buf_end = snmp_packet->in + len; if(snmp_packet->used == 0) { return 0; } snmp_packet->used--; first = div(*snmp_packet->in++, 40); oid->length = 0; oid->data[oid->length++] = (uint32_t)first.quot; oid->data[oid->length++] = (uint32_t)first.rem; while(snmp_packet->in != buf_end) { if(oid->length >= SNMP_MSG_OID_MAX_LEN) { return 0; } if(snmp_packet->used == 0) { return 0; } oid->data[oid->length] = (uint32_t)(*snmp_packet->in & 0x7F); for(j = 0; j < 4; j++) { snmp_packet->used--; if((*snmp_packet->in++ & 0x80) == 0) { break; } if(snmp_packet->used == 0) { return 0; } oid->data[oid->length] <<= 7; oid->data[oid->length] |= (*snmp_packet->in & 0x7F); } oid->length++; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_null(snmp_packet_t *snmp_packet) { if(snmp_packet->used == 0) { return 0; } snmp_packet->in++; snmp_packet->used--; if(snmp_packet->used == 0) { return 0; } snmp_packet->in++; snmp_packet->used--; return 1; } /*---------------------------------------------------------------------------*/
snmp_ber_encode_integer(unsigned char *out, uint32_t *out_len, uint32_t number) { uint32_t original_out_len; original_out_len = *out_len; do { (*out_len)++; *out-- = (uint8_t)(number & 0xFF); number >>= 8; } while(number); out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF)); out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_INTEGER); return out; }
snmp_ber_encode_integer(snmp_packet_t *snmp_packet, uint32_t number) { return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, number); }
{'added': [(2, ' * Copyright (C) 2019-2020 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>'), (35, ' * SNMP Implementation of the BER encoding'), (49, 'static inline int'), (50, 'snmp_ber_encode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t type, uint32_t number)'), (52, ' uint16_t original_out_len;'), (54, ' original_out_len = snmp_packet->used;'), (56, ' if(snmp_packet->used == snmp_packet->max) {'), (57, ' return 0;'), (58, ' }'), (59, ''), (60, ' *snmp_packet->out-- = (uint8_t)number & 0xFF;'), (61, ' snmp_packet->used++;'), (62, " /* I'm not sure why but on MSPGCC the >> 8 operation goes haywire here */"), (63, '#ifdef __MSPGCC__'), (64, ' number >>= 4;'), (65, ' number >>= 4;'), (66, '#else /* __MSPGCC__ */'), (68, '#endif /* __MSPGCC__ */'), (71, ' if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) {'), (72, ' return 0;'), (73, ' }'), (74, ''), (75, ' if(!snmp_ber_encode_type(snmp_packet, type)) {'), (76, ' return 0;'), (77, ' }'), (79, ' return 1;'), (82, 'int'), (83, 'snmp_ber_encode_type(snmp_packet_t *snmp_packet, uint8_t type)'), (85, ' if(snmp_packet->used == snmp_packet->max) {'), (86, ' return 0;'), (87, ' }'), (89, ' *snmp_packet->out-- = type;'), (90, ' snmp_packet->used++;'), (91, ''), (92, ' return 1;'), (93, '}'), (94, '/*---------------------------------------------------------------------------*/'), (95, 'int'), (96, 'snmp_ber_encode_length(snmp_packet_t *snmp_packet, uint16_t length)'), (97, '{'), (98, ' if(length > 0xFF) {'), (99, ' if(snmp_packet->used == snmp_packet->max) {'), (100, ' return 0;'), (101, ' }'), (102, ''), (103, ' *snmp_packet->out-- = (uint8_t)length & 0xFF;'), (104, ' snmp_packet->used++;'), (105, ''), (106, ' if(snmp_packet->used == snmp_packet->max) {'), (107, ' return 0;'), (108, ' }'), (109, ''), (110, ' *snmp_packet->out-- = (uint8_t)(length >> 8) & 0xFF;'), (111, ' snmp_packet->used++;'), (112, ''), (113, ' if(snmp_packet->used == snmp_packet->max) {'), (114, ' return 0;'), 
(115, ' }'), (116, ''), (117, ' *snmp_packet->out-- = 0x82;'), (118, ' snmp_packet->used++;'), (119, ' } else if(length > 0x7F) {'), (120, ' if(snmp_packet->used == snmp_packet->max) {'), (121, ' return 0;'), (122, ' }'), (123, ''), (124, ' *snmp_packet->out-- = (uint8_t)length & 0xFF;'), (125, ' snmp_packet->used++;'), (127, ' if(snmp_packet->used == snmp_packet->max) {'), (128, ' return 0;'), (129, ' }'), (131, ' *snmp_packet->out-- = 0x81;'), (132, ' snmp_packet->used++;'), (133, ' } else {'), (134, ' if(snmp_packet->used == snmp_packet->max) {'), (135, ' return 0;'), (136, ' }'), (137, ''), (138, ' *snmp_packet->out-- = (uint8_t)length & 0x7F;'), (139, ' snmp_packet->used++;'), (140, ' }'), (141, ''), (142, ' return 1;'), (143, '}'), (144, '/*---------------------------------------------------------------------------*/'), (145, 'int'), (146, 'snmp_ber_encode_timeticks(snmp_packet_t *snmp_packet, uint32_t timeticks)'), (147, '{'), (148, ' return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks);'), (149, '}'), (150, '/*---------------------------------------------------------------------------*/'), (151, 'int'), (152, 'snmp_ber_encode_integer(snmp_packet_t *snmp_packet, uint32_t number)'), (153, '{'), (154, ' return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, number);'), (157, 'int'), (158, 'snmp_ber_encode_string_len(snmp_packet_t *snmp_packet, const char *str, uint32_t length)'), (164, ' if(snmp_packet->used == snmp_packet->max) {'), (165, ' return 0;'), (166, ' }'), (167, ''), (168, ' *snmp_packet->out-- = (uint8_t)*str--;'), (169, ' snmp_packet->used++;'), (172, ' if(!snmp_ber_encode_length(snmp_packet, length)) {'), (173, ' return 0;'), (174, ' }'), (176, ' if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OCTET_STRING)) {'), (177, ' return 0;'), (178, ' }'), (180, ' return 1;'), (183, 'int'), (184, 'snmp_ber_encode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid)'), (186, ' uint32_t val;'), (187, ' 
uint16_t original_out_len;'), (188, ' uint8_t pos;'), (189, ''), (190, ' original_out_len = snmp_packet->used;'), (191, ''), (192, ' pos = oid->length - 1;'), (193, ' while(pos) {'), (194, ' val = oid->data[pos];'), (195, ''), (196, ' if(snmp_packet->used == snmp_packet->max) {'), (197, ' return 0;'), (198, ' }'), (199, ''), (200, ' *snmp_packet->out-- = (uint8_t)(val & 0x7F);'), (201, ' snmp_packet->used++;'), (202, ' val >>= 7;'), (203, ''), (204, ' while(val) {'), (205, ' if(snmp_packet->used == snmp_packet->max) {'), (206, ' return 0;'), (207, ' }'), (208, ''), (209, ' *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80);'), (210, ' snmp_packet->used++;'), (211, ''), (212, ' val >>= 7;'), (213, ' }'), (214, ' pos--;'), (215, ' }'), (216, ''), (217, ' if(snmp_packet->used == snmp_packet->max) {'), (218, ' return 0;'), (219, ' }'), (220, ''), (221, ' val = *(snmp_packet->out + 1) + 40 * oid->data[pos];'), (222, ' snmp_packet->used--;'), (223, ' snmp_packet->out++;'), (224, ''), (225, ' if(snmp_packet->used == snmp_packet->max) {'), (226, ' return 0;'), (227, ' }'), (228, ''), (229, ' *snmp_packet->out-- = (uint8_t)(val & 0x7F);'), (230, ' snmp_packet->used++;'), (231, ''), (232, ' val >>= 7;'), (233, ''), (234, ' while(val) {'), (235, ' if(snmp_packet->used == snmp_packet->max) {'), (236, ' return 0;'), (237, ' }'), (238, ''), (239, ' *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80);'), (240, ' snmp_packet->used++;'), (241, ''), (242, ' val >>= 7;'), (243, ' }'), (244, ''), (245, ' if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) {'), (246, ' return 0;'), (249, ' if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OBJECT_IDENTIFIER)) {'), (250, ' return 0;'), (251, ' }'), (253, ' return 1;'), (256, 'int'), (257, 'snmp_ber_encode_null(snmp_packet_t *snmp_packet, uint8_t type)'), (259, ''), (260, ' if(snmp_packet->used == snmp_packet->max) {'), (261, ' return 0;'), (264, ' *snmp_packet->out-- = 0x00;'), (265, ' 
snmp_packet->used++;'), (267, ' return snmp_ber_encode_type(snmp_packet, type);'), (270, 'static inline int'), (271, 'snmp_ber_decode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t expected_type, uint32_t *num)'), (275, ' if(!snmp_ber_decode_type(snmp_packet, &type)) {'), (276, ' return 0;'), (277, ' }'), (279, ' if(type != expected_type) {'), (284, ' return 0;'), (287, ' if(!snmp_ber_decode_length(snmp_packet, &len)) {'), (288, ' return 0;'), (289, ' }'), (291, ' if(len > 4) {'), (296, ' return 0;'), (299, ' if(snmp_packet->used == 0) {'), (300, ' return 0;'), (303, ' *num = (uint32_t)(*snmp_packet->in++ & 0xFF);'), (304, ' snmp_packet->used--;'), (305, ''), (308, ' if(snmp_packet->used == 0) {'), (309, ' return 0;'), (310, ' }'), (311, ' *num |= (uint8_t)(*snmp_packet->in++ & 0xFF);'), (312, ' snmp_packet->used--;'), (315, ' return 1;'), (318, 'int'), (319, 'snmp_ber_decode_type(snmp_packet_t *snmp_packet, uint8_t *type)'), (321, ' if(snmp_packet->used == 0) {'), (322, ' return 0;'), (325, ' *type = *snmp_packet->in++;'), (326, ' snmp_packet->used--;'), (328, ' return 1;'), (329, '}'), (330, '/*---------------------------------------------------------------------------*/'), (331, 'int'), (332, 'snmp_ber_decode_length(snmp_packet_t *snmp_packet, uint8_t *length)'), (333, '{'), (334, ' if(snmp_packet->used == 0) {'), (335, ' return 0;'), (338, ' *length = *snmp_packet->in++;'), (339, ' snmp_packet->used--;'), (341, ' return 1;'), (342, '}'), (343, '/*---------------------------------------------------------------------------*/'), (344, 'int'), (345, 'snmp_ber_decode_timeticks(snmp_packet_t *snmp_packet, uint32_t *timeticks)'), (346, '{'), (347, ' return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks);'), (350, 'int'), (351, 'snmp_ber_decode_integer(snmp_packet_t *snmp_packet, uint32_t *num)'), (352, '{'), (353, ' return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, num);'), (354, '}'), (355, 
'/*---------------------------------------------------------------------------*/'), (356, 'int'), (357, 'snmp_ber_decode_string_len_buffer(snmp_packet_t *snmp_packet, const char **str, uint32_t *length)'), (361, ' if(!snmp_ber_decode_type(snmp_packet, &type)) {'), (362, ' return 0;'), (363, ' }'), (365, ' if(type != BER_DATA_TYPE_OCTET_STRING) {'), (370, ' return 0;'), (373, ' if((*snmp_packet->in & 0x80) == 0) {'), (374, ''), (375, ' if(snmp_packet->used == 0) {'), (376, ' return 0;'), (377, ' }'), (378, ''), (379, ' *length = (uint32_t)*snmp_packet->in++;'), (380, ' snmp_packet->used--;'), (383, ' if(snmp_packet->used == 0) {'), (384, ' return 0;'), (385, ' }'), (386, ''), (387, ' length_bytes = (uint8_t)(*snmp_packet->in++ & 0x7F);'), (388, ' snmp_packet->used--;'), (389, ''), (395, ' return 0;'), (396, ' }'), (397, ''), (398, ' if(snmp_packet->used == 0) {'), (399, ' return 0;'), (402, ' *length = (uint32_t)*snmp_packet->in++;'), (403, ' snmp_packet->used--;'), (404, ''), (407, ''), (408, ' if(snmp_packet->used == 0) {'), (409, ' return 0;'), (410, ' }'), (411, ''), (412, ' *length |= *snmp_packet->in++;'), (413, ' snmp_packet->used--;'), (417, ' *str = (const char *)snmp_packet->in;'), (418, ''), (419, ' if(snmp_packet->used == 0 || snmp_packet->used - *length <= 0) {'), (420, ' return 0;'), (421, ' }'), (422, ''), (423, ' snmp_packet->used -= *length;'), (424, ' snmp_packet->in += *length;'), (425, ''), (426, ' return 1;'), (427, '}'), (428, '/*---------------------------------------------------------------------------*/'), (429, 'int'), (430, 'snmp_ber_decode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid)'), (431, '{'), (432, ' uint8_t *buf_end, type;'), (433, ' uint8_t len, j;'), (434, ' div_t first;'), (435, ''), (436, ' if(!snmp_ber_decode_type(snmp_packet, &type)) {'), (437, ' return 0;'), (438, ' }'), (439, ''), (440, ' if(type != BER_DATA_TYPE_OBJECT_IDENTIFIER) {'), (441, ' return 0;'), (442, ' }'), (443, ''), (444, ' 
if(!snmp_ber_decode_length(snmp_packet, &len)) {'), (445, ' return 0;'), (446, ' }'), (447, ''), (448, ' buf_end = snmp_packet->in + len;'), (449, ''), (450, ' if(snmp_packet->used == 0) {'), (451, ' return 0;'), (452, ' }'), (453, ''), (454, ' snmp_packet->used--;'), (455, ' first = div(*snmp_packet->in++, 40);'), (456, ''), (457, ' oid->length = 0;'), (458, ''), (459, ' oid->data[oid->length++] = (uint32_t)first.quot;'), (460, ' oid->data[oid->length++] = (uint32_t)first.rem;'), (461, ''), (462, ' while(snmp_packet->in != buf_end) {'), (463, ' if(oid->length >= SNMP_MSG_OID_MAX_LEN) {'), (464, ' return 0;'), (465, ' }'), (467, ' if(snmp_packet->used == 0) {'), (468, ' return 0;'), (469, ' }'), (470, ' oid->data[oid->length] = (uint32_t)(*snmp_packet->in & 0x7F);'), (471, ' for(j = 0; j < 4; j++) {'), (472, ' snmp_packet->used--;'), (473, ' if((*snmp_packet->in++ & 0x80) == 0) {'), (474, ' break;'), (475, ' }'), (476, ''), (477, ' if(snmp_packet->used == 0) {'), (478, ' return 0;'), (479, ' }'), (480, ''), (481, ' oid->data[oid->length] <<= 7;'), (482, ' oid->data[oid->length] |= (*snmp_packet->in & 0x7F);'), (483, ' }'), (484, ''), (485, ' oid->length++;'), (486, ' }'), (487, ''), (488, ' return 1;'), (491, 'int'), (492, 'snmp_ber_decode_null(snmp_packet_t *snmp_packet)'), (494, ' if(snmp_packet->used == 0) {'), (495, ' return 0;'), (496, ' }'), (497, ''), (498, ' snmp_packet->in++;'), (499, ' snmp_packet->used--;'), (500, ''), (501, ' if(snmp_packet->used == 0) {'), (502, ' return 0;'), (503, ' }'), (505, ' snmp_packet->in++;'), (506, ' snmp_packet->used--;'), (508, ' return 1;')], 'deleted': [(2, ' * Copyright (C) 2019 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>'), (35, ' * An implementation of the Simple Network Management Protocol (RFC 3411-3418)'), (49, 'unsigned char *'), (50, 'snmp_ber_encode_type(unsigned char *out, uint32_t *out_len, uint8_t type)'), (52, ' *out-- = type;'), (53, ' (*out_len)++;'), (54, ' return out;'), (55, '}'), (56, 
'/*---------------------------------------------------------------------------*/'), (57, 'unsigned char *'), (58, 'snmp_ber_encode_length(unsigned char *out, uint32_t *out_len, uint8_t length)'), (59, '{'), (60, ' *out-- = length;'), (61, ' (*out_len)++;'), (62, ' return out;'), (63, '}'), (64, '/*---------------------------------------------------------------------------*/'), (65, 'unsigned char *'), (66, 'snmp_ber_encode_integer(unsigned char *out, uint32_t *out_len, uint32_t number)'), (67, '{'), (68, ' uint32_t original_out_len;'), (70, ' original_out_len = *out_len;'), (72, ' (*out_len)++;'), (73, ' *out-- = (uint8_t)(number & 0xFF);'), (77, ' out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF));'), (78, ' out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_INTEGER);'), (80, ' return out;'), (83, 'unsigned char *'), (84, 'snmp_ber_encode_unsigned_integer(unsigned char *out, uint32_t *out_len, uint8_t type, uint32_t number)'), (86, ' uint32_t original_out_len;'), (88, ' original_out_len = *out_len;'), (89, ' do {'), (90, ' (*out_len)++;'), (91, ' *out-- = (uint8_t)(number & 0xFF);'), (92, ' number >>= 8;'), (93, ' } while(number);'), (95, ' out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF));'), (96, ' out = snmp_ber_encode_type(out, out_len, type);'), (98, ' return out;'), (101, 'unsigned char *'), (102, 'snmp_ber_encode_string_len(unsigned char *out, uint32_t *out_len, const char *str, uint32_t length)'), (108, ' (*out_len)++;'), (109, ' *out-- = (uint8_t)*str--;'), (112, ' out = snmp_ber_encode_length(out, out_len, length);'), (113, ' out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_OCTET_STRING);'), (115, ' return out;'), (116, '}'), (117, '/*---------------------------------------------------------------------------*/'), (118, 'unsigned char *'), (119, 'snmp_ber_encode_null(unsigned char *out, uint32_t *out_len, uint8_t type)'), (120, '{'), (121, ' (*out_len)++;'), (122, ' *out-- = 
0x00;'), (123, ' out = snmp_ber_encode_type(out, out_len, type);'), (125, ' return out;'), (128, 'unsigned char *'), (129, 'snmp_ber_decode_type(unsigned char *buff, uint32_t *buff_len, uint8_t *type)'), (131, ' if(*buff_len == 0) {'), (132, ' return NULL;'), (135, ' *type = *buff++;'), (136, ' (*buff_len)--;'), (138, ' return buff;'), (141, 'unsigned char *'), (142, 'snmp_ber_decode_length(unsigned char *buff, uint32_t *buff_len, uint8_t *length)'), (144, ' if(*buff_len == 0) {'), (145, ' return NULL;'), (148, ' *length = *buff++;'), (149, ' (*buff_len)--;'), (151, ' return buff;'), (154, 'unsigned char *'), (155, 'snmp_ber_decode_integer(unsigned char *buf, uint32_t *buff_len, uint32_t *num)'), (159, ' buf = snmp_ber_decode_type(buf, buff_len, &type);'), (161, ' if(buf == NULL || type != BER_DATA_TYPE_INTEGER) {'), (166, ' return NULL;'), (169, ' buf = snmp_ber_decode_length(buf, buff_len, &len);'), (171, ' if(buf == NULL || len > 4) {'), (176, ' return NULL;'), (179, ' if(*buff_len < len) {'), (180, ' return NULL;'), (183, ' *num = (uint32_t)(*buf++ & 0xFF);'), (184, ' (*buff_len)--;'), (187, ' *num |= (uint8_t)(*buf++ & 0xFF);'), (188, ' (*buff_len)--;'), (191, ' return buf;'), (194, 'unsigned char *'), (195, 'snmp_ber_decode_unsigned_integer(unsigned char *buf, uint32_t *buff_len, uint8_t expected_type, uint32_t *num)'), (197, ' uint8_t i, len, type;'), (198, ''), (199, ' buf = snmp_ber_decode_type(buf, buff_len, &type);'), (200, ''), (201, ' if(buf == NULL || type != expected_type) {'), (202, ' /*'), (203, ' * Sanity check'), (204, ' * Invalid type in buffer'), (205, ' */'), (206, ' return NULL;'), (209, ' buf = snmp_ber_decode_length(buf, buff_len, &len);'), (210, ''), (211, ' if(buf == NULL || len > 4) {'), (212, ' /*'), (213, ' * Sanity check'), (214, ' * It will not fit in the uint32_t'), (215, ' */'), (216, ' return NULL;'), (217, ' }'), (219, ' if(*buff_len < len) {'), (220, ' return NULL;'), (223, ' *num = (uint32_t)(*buf++ & 0xFF);'), (224, ' 
(*buff_len)--;'), (225, ' for(i = 1; i < len; ++i) {'), (226, ' *num <<= 8;'), (227, ' *num |= (uint8_t)(*buf++ & 0xFF);'), (228, ' (*buff_len)--;'), (229, ' }'), (231, ' return buf;'), (234, 'unsigned char *'), (235, 'snmp_ber_decode_string_len_buffer(unsigned char *buf, uint32_t *buff_len, const char **str, uint32_t *length)'), (239, ' buf = snmp_ber_decode_type(buf, buff_len, &type);'), (241, ' if(buf == NULL || type != BER_DATA_TYPE_OCTET_STRING) {'), (246, ' return NULL;'), (249, ' if((*buf & 0x80) == 0) {'), (250, ' *length = (uint32_t)*buf++;'), (251, ' (*buff_len)--;'), (254, ' length_bytes = (uint8_t)(*buf++ & 0x7F);'), (255, ' (*buff_len)--;'), (261, ' return NULL;'), (264, ' *length = (uint32_t)*buf++;'), (265, ' (*buff_len)--;'), (268, ' *length |= *buf++;'), (269, ' (*buff_len)--;'), (273, ' *str = (const char *)buf;'), (274, ' *buff_len -= *length;'), (276, ' return buf + *length;'), (279, 'unsigned char *'), (280, 'snmp_ber_decode_null(unsigned char *buf, uint32_t *buff_len)'), (282, ' buf++;'), (283, ' (*buff_len)--;'), (285, ' buf++;'), (286, ' (*buff_len)--;'), (288, ' return buf;')]}
360
140
339
1,830
13
89
2
https://github.com/contiki-ng/contiki-ng
CVE-2020-12141
CWE-125
432
addrtoname.c
C
lookup_bytestring
/* * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* * Internet, ethernet, port, and protocol string to address * and address to string conversion routines */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #ifdef HAVE_CASPER #include <libcasper.h> #include <casper/cap_dns.h> #endif /* HAVE_CASPER */ #include <netdissect-stdinc.h> #ifdef USE_ETHER_NTOHOST #ifdef HAVE_NETINET_IF_ETHER_H struct mbuf; /* Squelch compiler warnings on some platforms for */ struct rtentry; /* declarations in <net/if.h> */ #include <net/if.h> /* for "struct ifnet" in "struct arpcom" on Solaris */ #include <netinet/if_ether.h> #endif /* HAVE_NETINET_IF_ETHER_H */ #ifdef NETINET_ETHER_H_DECLARES_ETHER_NTOHOST #include <netinet/ether.h> #endif /* NETINET_ETHER_H_DECLARES_ETHER_NTOHOST */ #if !defined(HAVE_DECL_ETHER_NTOHOST) || !HAVE_DECL_ETHER_NTOHOST #ifndef HAVE_STRUCT_ETHER_ADDR struct ether_addr { unsigned char ether_addr_octet[6]; }; #endif extern int ether_ntohost(char *, const struct ether_addr *); #endif #endif /* USE_ETHER_NTOHOST */ #include <pcap.h> #include <pcap-namedb.h> #include <signal.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include "netdissect.h" #include "addrtoname.h" #include "addrtostr.h" #include "ethertype.h" #include "llc.h" #include "setsignal.h" #include "extract.h" #include "oui.h" #ifndef ETHER_ADDR_LEN #define ETHER_ADDR_LEN 6 #endif /* * hash tables for whatever-to-name translations * * ndo_error() called on strdup(3) failure */ #define HASHNAMESIZE 4096 struct hnamemem { uint32_t addr; const char *name; struct hnamemem *nxt; }; static struct hnamemem hnametable[HASHNAMESIZE]; static struct hnamemem tporttable[HASHNAMESIZE]; static struct hnamemem uporttable[HASHNAMESIZE]; static struct hnamemem eprototable[HASHNAMESIZE]; static struct hnamemem dnaddrtable[HASHNAMESIZE]; static struct hnamemem ipxsaptable[HASHNAMESIZE]; #ifdef _WIN32 /* * fake gethostbyaddr for Win2k/XP * gethostbyaddr() returns incorrect value when AF_INET6 is passed * to 3rd argument. 
 *
 * h_name in struct hostent is only valid.
 */
static struct hostent *
win32_gethostbyaddr(const char *addr, int len, int type)
{
	/* Static result buffer: not thread-safe, like gethostbyaddr itself. */
	static struct hostent host;
	static char hostbuf[NI_MAXHOST];
	char hname[NI_MAXHOST];
	struct sockaddr_in6 addr6;

	host.h_name = hostbuf;
	switch (type) {
	case AF_INET:
		/* IPv4 works natively; delegate to the real routine. */
		return gethostbyaddr(addr, len, type);
		break;
	case AF_INET6:
		/* Build a sockaddr_in6 and resolve via getnameinfo() instead. */
		memset(&addr6, 0, sizeof(addr6));
		addr6.sin6_family = AF_INET6;
		memcpy(&addr6.sin6_addr, addr, len);
		if (getnameinfo((struct sockaddr *)&addr6, sizeof(addr6),
				hname, sizeof(hname), NULL, 0, 0)) {
			return NULL;
		} else {
			/* hname and hostbuf are both NI_MAXHOST bytes. */
			strcpy(host.h_name, hname);
			return &host;
		}
		break;
	default:
		return NULL;
	}
}
#define gethostbyaddr win32_gethostbyaddr
#endif /* _WIN32 */

/* IPv6 address -> name cache node. */
struct h6namemem {
	struct in6_addr addr;
	char *name;
	struct h6namemem *nxt;
};

static struct h6namemem h6nametable[HASHNAMESIZE];

/*
 * Ethernet address / NSAP / bytestring -> name cache node.  The six
 * address octets are folded into three 16-bit words for comparison.
 */
struct enamemem {
	u_short e_addr0;
	u_short e_addr1;
	u_short e_addr2;
	const char *e_name;
	u_char *e_nsap;			/* used only for nsaptable[] */
#define e_bs e_nsap			/* for bytestringtable */
	struct enamemem *e_nxt;
};

static struct enamemem enametable[HASHNAMESIZE];
static struct enamemem nsaptable[HASHNAMESIZE];
static struct enamemem bytestringtable[HASHNAMESIZE];

/* 802 SNAP protocol id (3-byte OUI + 2-byte proto) -> name cache node. */
struct protoidmem {
	uint32_t p_oui;
	u_short p_proto;
	const char *p_name;
	struct protoidmem *p_nxt;
};

static struct protoidmem protoidtable[HASHNAMESIZE];

/*
 * A faster replacement for inet_ntoa().
 */
const char *
intoa(uint32_t addr)
{
	register char *cp;
	register u_int byte;
	register int n;
	/* Static result buffer: valid only until the next call. */
	static char buf[sizeof(".xxx.xxx.xxx.xxx")];

	NTOHL(addr);
	cp = buf + sizeof(buf);
	*--cp = '\0';

	/* Emit the four octets right-to-left, low octet first. */
	n = 4;
	do {
		byte = addr & 0xff;
		*--cp = byte % 10 + '0';
		byte /= 10;
		if (byte > 0) {
			*--cp = byte % 10 + '0';
			byte /= 10;
			if (byte > 0)
				*--cp = byte + '0';
		}
		*--cp = '.';
		addr >>= 8;
	} while (--n > 0);

	/* Skip the leading '.' written by the last iteration. */
	return cp + 1;
}

/* -f filtering state: set by init_addrtoname(), zero when -f not given. */
static uint32_t f_netmask;
static uint32_t f_localnet;
#ifdef HAVE_CASPER
extern cap_channel_t *capdns;
#endif

/*
 * Return a name for the IP address pointed to by ap.
   This address
 * is assumed to be in network byte order.
 *
 * NOTE: ap is *NOT* necessarily part of the packet data (not even if
 * this is being called with the "ipaddr_string()" macro), so you
 * *CANNOT* use the ND_TCHECK{2}/ND_TTEST{2} macros on it.  Furthermore,
 * even in cases where it *is* part of the packet data, the caller
 * would still have to check for a null return value, even if it's
 * just printing the return value with "%s" - not all versions of
 * printf print "(null)" with "%s" and a null pointer, some of them
 * don't check for a null pointer and crash in that case.
 *
 * The callers of this routine should, before handing this routine
 * a pointer to packet data, be sure that the data is present in
 * the packet buffer.  They should probably do those checks anyway,
 * as other data at that layer might not be IP addresses, and it
 * also needs to check whether they're present in the packet buffer.
 */
const char *
getname(netdissect_options *ndo, const u_char *ap)
{
	register struct hostent *hp;
	uint32_t addr;
	struct hnamemem *p;

	memcpy(&addr, ap, sizeof(addr));
	/* Walk the hash chain; a hit returns the cached name. */
	p = &hnametable[addr & (HASHNAMESIZE-1)];
	for (; p->nxt; p = p->nxt) {
		if (p->addr == addr)
			return (p->name);
	}
	/* Miss: claim this tail node and pre-allocate the next empty one. */
	p->addr = addr;
	p->nxt = newhnamemem(ndo);
	/*
	 * Print names unless:
	 *	(1) -n was given.
	 *	(2) Address is foreign and -f was given.  (If -f was not
	 *	    given, f_netmask and f_localnet are 0 and the test
	 *	    evaluates to true)
	 */
	if (!ndo->ndo_nflag &&
	    (addr & f_netmask) == f_localnet) {
#ifdef HAVE_CASPER
		if (capdns != NULL) {
			hp = cap_gethostbyaddr(capdns, (char *)&addr, 4,
			    AF_INET);
		} else
#endif
		hp = gethostbyaddr((char *)&addr, 4, AF_INET);
		if (hp) {
			char *dotp;

			p->name = strdup(hp->h_name);
			if (p->name == NULL)
				(*ndo->ndo_error)(ndo,
						  "getname: strdup(hp->h_name)");
			if (ndo->ndo_Nflag) {
				/* Remove domain qualifications */
				dotp = strchr(p->name, '.');
				if (dotp)
					*dotp = '\0';
			}
			return (p->name);
		}
	}
	/* No (or suppressed) reverse lookup: cache the dotted-quad form. */
	p->name = strdup(intoa(addr));
	if (p->name == NULL)
		(*ndo->ndo_error)(ndo, "getname: strdup(intoa(addr))");
	return (p->name);
}

/*
 * Return a name for the IP6 address pointed to by ap.  This address
 * is assumed to be in network byte order.
 */
const char *
getname6(netdissect_options *ndo, const u_char *ap)
{
	register struct hostent *hp;
	union {
		struct in6_addr addr;
		struct for_hash_addr {
			char fill[14];
			uint16_t d;	/* low-order 16 bits, the hash key */
		} addra;
	} addr;
	struct h6namemem *p;
	register const char *cp;
	char ntop_buf[INET6_ADDRSTRLEN];

	memcpy(&addr, ap, sizeof(addr));
	/* Hash on the low-order 16 bits of the 128-bit address. */
	p = &h6nametable[addr.addra.d & (HASHNAMESIZE-1)];
	for (; p->nxt; p = p->nxt) {
		if (memcmp(&p->addr, &addr, sizeof(addr)) == 0)
			return (p->name);
	}
	/* Miss: claim this tail node and pre-allocate the next empty one. */
	p->addr = addr.addr;
	p->nxt = newh6namemem(ndo);
	/*
	 * Do not print names if -n was given.
	 */
	if (!ndo->ndo_nflag) {
#ifdef HAVE_CASPER
		if (capdns != NULL) {
			hp = cap_gethostbyaddr(capdns, (char *)&addr,
			    sizeof(addr), AF_INET6);
		} else
#endif
		hp = gethostbyaddr((char *)&addr, sizeof(addr), AF_INET6);
		if (hp) {
			char *dotp;

			p->name = strdup(hp->h_name);
			if (p->name == NULL)
				(*ndo->ndo_error)(ndo,
						  "getname6: strdup(hp->h_name)");
			if (ndo->ndo_Nflag) {
				/* Remove domain qualifications */
				dotp = strchr(p->name, '.');
				if (dotp)
					*dotp = '\0';
			}
			return (p->name);
		}
	}
	/* Fall back to the numeric presentation form. */
	cp = addrtostr6(ap, ntop_buf, sizeof(ntop_buf));
	p->name = strdup(cp);
	if (p->name == NULL)
		(*ndo->ndo_error)(ndo, "getname6: strdup(cp)");
	return (p->name);
}

static const char hex[] = "0123456789abcdef";

/* Find the hash node that corresponds the ether address 'ep' */
static inline struct enamemem *
lookup_emem(netdissect_options *ndo, const u_char *ep)
{
	register u_int i, j, k;
	struct enamemem *tp;

	/* Fold the six address octets into three 16-bit words. */
	k = (ep[0] << 8) | ep[1];
	j = (ep[2] << 8) | ep[3];
	i = (ep[4] << 8) | ep[5];

	tp = &enametable[(i ^ j) & (HASHNAMESIZE-1)];
	while (tp->e_nxt)
		if (tp->e_addr0 == i &&
		    tp->e_addr1 == j &&
		    tp->e_addr2 == k)
			return tp;
		else
			tp = tp->e_nxt;
	/* Miss: fill in this node and chain a fresh empty tail node. */
	tp->e_addr0 = i;
	tp->e_addr1 = j;
	tp->e_addr2 = k;
	tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp));
	if (tp->e_nxt == NULL)
		(*ndo->ndo_error)(ndo, "lookup_emem: calloc");

	return tp;
}

/*
 * Find the hash node that corresponds to the bytestring 'bs'
 * with length 'nlen'
 */
static inline struct enamemem *
lookup_bytestring(netdissect_options *ndo, register const u_char *bs,
		  const unsigned int nlen)
{
	struct enamemem *tp;
	register u_int i, j, k;

	/* Hash on (up to) the first six bytes of the string. */
	if (nlen >= 6) {
		k = (bs[0] << 8) | bs[1];
		j = (bs[2] << 8) | bs[3];
		i = (bs[4] << 8) | bs[5];
	} else if (nlen >= 4) {
		k = (bs[0] << 8) | bs[1];
		j = (bs[2] << 8) | bs[3];
		i = 0;
	} else
		i = j = k = 0;

	tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)];
	/*
	 * NOTE(review): a stored entry's length is not recorded, so this
	 * memcmp() of nlen bytes can read past a shorter entry that landed
	 * on the same chain; later upstream revisions record the length in
	 * a dedicated struct (bsnamemem).  TODO confirm before reuse.
	 */
	while (tp->e_nxt)
		if (tp->e_addr0 == i &&
		    tp->e_addr1 == j &&
		    tp->e_addr2 == k &&
		    memcmp((const char *)bs, (const char *)(tp->e_bs),
			   nlen) == 0)
			return tp;
		else
			tp = tp->e_nxt;
tp->e_addr0 = i; tp->e_addr1 = j; tp->e_addr2 = k; tp->e_bs = (u_char *) calloc(1, nlen + 1); if (tp->e_bs == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); memcpy(tp->e_bs, bs, nlen); tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp)); if (tp->e_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); return tp; } /* Find the hash node that corresponds the NSAP 'nsap' */ static inline struct enamemem * lookup_nsap(netdissect_options *ndo, register const u_char *nsap, register u_int nsap_length) { register u_int i, j, k; struct enamemem *tp; const u_char *ensap; if (nsap_length > 6) { ensap = nsap + nsap_length - 6; k = (ensap[0] << 8) | ensap[1]; j = (ensap[2] << 8) | ensap[3]; i = (ensap[4] << 8) | ensap[5]; } else i = j = k = 0; tp = &nsaptable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->e_nxt) if (tp->e_addr0 == i && tp->e_addr1 == j && tp->e_addr2 == k && tp->e_nsap[0] == nsap_length && memcmp((const char *)&(nsap[1]), (char *)&(tp->e_nsap[1]), nsap_length) == 0) return tp; else tp = tp->e_nxt; tp->e_addr0 = i; tp->e_addr1 = j; tp->e_addr2 = k; tp->e_nsap = (u_char *)malloc(nsap_length + 1); if (tp->e_nsap == NULL) (*ndo->ndo_error)(ndo, "lookup_nsap: malloc"); tp->e_nsap[0] = (u_char)nsap_length; /* guaranteed < ISONSAP_MAX_LENGTH */ memcpy((char *)&tp->e_nsap[1], (const char *)nsap, nsap_length); tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp)); if (tp->e_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_nsap: calloc"); return tp; } /* Find the hash node that corresponds the protoid 'pi'. 
*/ static inline struct protoidmem * lookup_protoid(netdissect_options *ndo, const u_char *pi) { register u_int i, j; struct protoidmem *tp; /* 5 octets won't be aligned */ i = (((pi[0] << 8) + pi[1]) << 8) + pi[2]; j = (pi[3] << 8) + pi[4]; /* XXX should be endian-insensitive, but do big-endian testing XXX */ tp = &protoidtable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->p_nxt) if (tp->p_oui == i && tp->p_proto == j) return tp; else tp = tp->p_nxt; tp->p_oui = i; tp->p_proto = j; tp->p_nxt = (struct protoidmem *)calloc(1, sizeof(*tp)); if (tp->p_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_protoid: calloc"); return tp; } const char * etheraddr_string(netdissect_options *ndo, register const u_char *ep) { register int i; register char *cp; register struct enamemem *tp; int oui; char buf[BUFSIZE]; tp = lookup_emem(ndo, ep); if (tp->e_name) return (tp->e_name); #ifdef USE_ETHER_NTOHOST if (!ndo->ndo_nflag) { char buf2[BUFSIZE]; if (ether_ntohost(buf2, (const struct ether_addr *)ep) == 0) { tp->e_name = strdup(buf2); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "etheraddr_string: strdup(buf2)"); return (tp->e_name); } } #endif cp = buf; oui = EXTRACT_24BITS(ep); *cp++ = hex[*ep >> 4 ]; *cp++ = hex[*ep++ & 0xf]; for (i = 5; --i >= 0;) { *cp++ = ':'; *cp++ = hex[*ep >> 4 ]; *cp++ = hex[*ep++ & 0xf]; } if (!ndo->ndo_nflag) { snprintf(cp, BUFSIZE - (2 + 5*3), " (oui %s)", tok2str(oui_values, "Unknown", oui)); } else *cp = '\0'; tp->e_name = strdup(buf); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "etheraddr_string: strdup(buf)"); return (tp->e_name); } const char * le64addr_string(netdissect_options *ndo, const u_char *ep) { const unsigned int len = 8; register u_int i; register char *cp; register struct enamemem *tp; char buf[BUFSIZE]; tp = lookup_bytestring(ndo, ep, len); if (tp->e_name) return (tp->e_name); cp = buf; for (i = len; i > 0 ; --i) { *cp++ = hex[*(ep + i - 1) >> 4]; *cp++ = hex[*(ep + i - 1) & 0xf]; *cp++ = ':'; } cp --; *cp = '\0'; tp->e_name = strdup(buf); 
if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "le64addr_string: strdup(buf)"); return (tp->e_name); } const char * linkaddr_string(netdissect_options *ndo, const u_char *ep, const unsigned int type, const unsigned int len) { register u_int i; register char *cp; register struct enamemem *tp; if (len == 0) return ("<empty>"); if (type == LINKADDR_ETHER && len == ETHER_ADDR_LEN) return (etheraddr_string(ndo, ep)); if (type == LINKADDR_FRELAY) return (q922_string(ndo, ep, len)); tp = lookup_bytestring(ndo, ep, len); if (tp->e_name) return (tp->e_name); tp->e_name = cp = (char *)malloc(len*3); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "linkaddr_string: malloc"); *cp++ = hex[*ep >> 4]; *cp++ = hex[*ep++ & 0xf]; for (i = len-1; i > 0 ; --i) { *cp++ = ':'; *cp++ = hex[*ep >> 4]; *cp++ = hex[*ep++ & 0xf]; } *cp = '\0'; return (tp->e_name); } const char * etherproto_string(netdissect_options *ndo, u_short port) { register char *cp; register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("0000")]; for (tp = &eprototable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if (tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); cp = buf; NTOHS(port); *cp++ = hex[port >> 12 & 0xf]; *cp++ = hex[port >> 8 & 0xf]; *cp++ = hex[port >> 4 & 0xf]; *cp++ = hex[port & 0xf]; *cp++ = '\0'; tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "etherproto_string: strdup(buf)"); return (tp->name); } const char * protoid_string(netdissect_options *ndo, register const u_char *pi) { register u_int i, j; register char *cp; register struct protoidmem *tp; char buf[sizeof("00:00:00:00:00")]; tp = lookup_protoid(ndo, pi); if (tp->p_name) return tp->p_name; cp = buf; if ((j = *pi >> 4) != 0) *cp++ = hex[j]; *cp++ = hex[*pi++ & 0xf]; for (i = 4; (int)--i >= 0;) { *cp++ = ':'; if ((j = *pi >> 4) != 0) *cp++ = hex[j]; *cp++ = hex[*pi++ & 0xf]; } *cp = '\0'; tp->p_name = strdup(buf); if (tp->p_name == NULL) (*ndo->ndo_error)(ndo, "protoid_string: 
strdup(buf)"); return (tp->p_name); } #define ISONSAP_MAX_LENGTH 20 const char * isonsap_string(netdissect_options *ndo, const u_char *nsap, register u_int nsap_length) { register u_int nsap_idx; register char *cp; register struct enamemem *tp; if (nsap_length < 1 || nsap_length > ISONSAP_MAX_LENGTH) return ("isonsap_string: illegal length"); tp = lookup_nsap(ndo, nsap, nsap_length); if (tp->e_name) return tp->e_name; tp->e_name = cp = (char *)malloc(sizeof("xx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xx")); if (cp == NULL) (*ndo->ndo_error)(ndo, "isonsap_string: malloc"); for (nsap_idx = 0; nsap_idx < nsap_length; nsap_idx++) { *cp++ = hex[*nsap >> 4]; *cp++ = hex[*nsap++ & 0xf]; if (((nsap_idx & 1) == 0) && (nsap_idx + 1 < nsap_length)) { *cp++ = '.'; } } *cp = '\0'; return (tp->e_name); } const char * tcpport_string(netdissect_options *ndo, u_short port) { register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("00000")]; for (tp = &tporttable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if (tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); (void)snprintf(buf, sizeof(buf), "%u", i); tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "tcpport_string: strdup(buf)"); return (tp->name); } const char * udpport_string(netdissect_options *ndo, register u_short port) { register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("00000")]; for (tp = &uporttable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if (tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); (void)snprintf(buf, sizeof(buf), "%u", i); tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "udpport_string: strdup(buf)"); return (tp->name); } const char * ipxsap_string(netdissect_options *ndo, u_short port) { register char *cp; register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("0000")]; for (tp = &ipxsaptable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if 
(tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); cp = buf; NTOHS(port); *cp++ = hex[port >> 12 & 0xf]; *cp++ = hex[port >> 8 & 0xf]; *cp++ = hex[port >> 4 & 0xf]; *cp++ = hex[port & 0xf]; *cp++ = '\0'; tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "ipxsap_string: strdup(buf)"); return (tp->name); } static void init_servarray(netdissect_options *ndo) { struct servent *sv; register struct hnamemem *table; register int i; char buf[sizeof("0000000000")]; while ((sv = getservent()) != NULL) { int port = ntohs(sv->s_port); i = port & (HASHNAMESIZE-1); if (strcmp(sv->s_proto, "tcp") == 0) table = &tporttable[i]; else if (strcmp(sv->s_proto, "udp") == 0) table = &uporttable[i]; else continue; while (table->name) table = table->nxt; if (ndo->ndo_nflag) { (void)snprintf(buf, sizeof(buf), "%d", port); table->name = strdup(buf); } else table->name = strdup(sv->s_name); if (table->name == NULL) (*ndo->ndo_error)(ndo, "init_servarray: strdup"); table->addr = port; table->nxt = newhnamemem(ndo); } endservent(); } static const struct eproto { const char *s; u_short p; } eproto_db[] = { { "pup", ETHERTYPE_PUP }, { "xns", ETHERTYPE_NS }, { "ip", ETHERTYPE_IP }, { "ip6", ETHERTYPE_IPV6 }, { "arp", ETHERTYPE_ARP }, { "rarp", ETHERTYPE_REVARP }, { "sprite", ETHERTYPE_SPRITE }, { "mopdl", ETHERTYPE_MOPDL }, { "moprc", ETHERTYPE_MOPRC }, { "decnet", ETHERTYPE_DN }, { "lat", ETHERTYPE_LAT }, { "sca", ETHERTYPE_SCA }, { "lanbridge", ETHERTYPE_LANBRIDGE }, { "vexp", ETHERTYPE_VEXP }, { "vprod", ETHERTYPE_VPROD }, { "atalk", ETHERTYPE_ATALK }, { "atalkarp", ETHERTYPE_AARP }, { "loopback", ETHERTYPE_LOOPBACK }, { "decdts", ETHERTYPE_DECDTS }, { "decdns", ETHERTYPE_DECDNS }, { (char *)0, 0 } }; static void init_eprotoarray(netdissect_options *ndo) { register int i; register struct hnamemem *table; for (i = 0; eproto_db[i].s; i++) { int j = htons(eproto_db[i].p) & (HASHNAMESIZE-1); table = &eprototable[j]; while (table->name) table = 
table->nxt; table->name = eproto_db[i].s; table->addr = htons(eproto_db[i].p); table->nxt = newhnamemem(ndo); } } static const struct protoidlist { const u_char protoid[5]; const char *name; } protoidlist[] = { {{ 0x00, 0x00, 0x0c, 0x01, 0x07 }, "CiscoMLS" }, {{ 0x00, 0x00, 0x0c, 0x20, 0x00 }, "CiscoCDP" }, {{ 0x00, 0x00, 0x0c, 0x20, 0x01 }, "CiscoCGMP" }, {{ 0x00, 0x00, 0x0c, 0x20, 0x03 }, "CiscoVTP" }, {{ 0x00, 0xe0, 0x2b, 0x00, 0xbb }, "ExtremeEDP" }, {{ 0x00, 0x00, 0x00, 0x00, 0x00 }, NULL } }; /* * SNAP proto IDs with org code 0:0:0 are actually encapsulated Ethernet * types. */ static void init_protoidarray(netdissect_options *ndo) { register int i; register struct protoidmem *tp; const struct protoidlist *pl; u_char protoid[5]; protoid[0] = 0; protoid[1] = 0; protoid[2] = 0; for (i = 0; eproto_db[i].s; i++) { u_short etype = htons(eproto_db[i].p); memcpy((char *)&protoid[3], (char *)&etype, 2); tp = lookup_protoid(ndo, protoid); tp->p_name = strdup(eproto_db[i].s); if (tp->p_name == NULL) (*ndo->ndo_error)(ndo, "init_protoidarray: strdup(eproto_db[i].s)"); } /* Hardwire some SNAP proto ID names */ for (pl = protoidlist; pl->name != NULL; ++pl) { tp = lookup_protoid(ndo, pl->protoid); /* Don't override existing name */ if (tp->p_name != NULL) continue; tp->p_name = pl->name; } } static const struct etherlist { const u_char addr[6]; const char *name; } etherlist[] = { {{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, "Broadcast" }, {{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, NULL } }; /* * Initialize the ethers hash table. We take two different approaches * depending on whether or not the system provides the ethers name * service. If it does, we just wire in a few names at startup, * and etheraddr_string() fills in the table on demand. If it doesn't, * then we suck in the entire /etc/ethers file at startup. The idea * is that parsing the local file will be fast, but spinning through * all the ethers entries via NIS & next_etherent might be very slow. 
* * XXX pcap_next_etherent doesn't belong in the pcap interface, but * since the pcap module already does name-to-address translation, * it's already does most of the work for the ethernet address-to-name * translation, so we just pcap_next_etherent as a convenience. */ static void init_etherarray(netdissect_options *ndo) { register const struct etherlist *el; register struct enamemem *tp; #ifdef USE_ETHER_NTOHOST char name[256]; #else register struct pcap_etherent *ep; register FILE *fp; /* Suck in entire ethers file */ fp = fopen(PCAP_ETHERS_FILE, "r"); if (fp != NULL) { while ((ep = pcap_next_etherent(fp)) != NULL) { tp = lookup_emem(ndo, ep->addr); tp->e_name = strdup(ep->name); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "init_etherarray: strdup(ep->addr)"); } (void)fclose(fp); } #endif /* Hardwire some ethernet names */ for (el = etherlist; el->name != NULL; ++el) { tp = lookup_emem(ndo, el->addr); /* Don't override existing name */ if (tp->e_name != NULL) continue; #ifdef USE_ETHER_NTOHOST /* * Use YP/NIS version of name if available. 
*/ if (ether_ntohost(name, (const struct ether_addr *)el->addr) == 0) { tp->e_name = strdup(name); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "init_etherarray: strdup(name)"); continue; } #endif tp->e_name = el->name; } } static const struct tok ipxsap_db[] = { { 0x0000, "Unknown" }, { 0x0001, "User" }, { 0x0002, "User Group" }, { 0x0003, "PrintQueue" }, { 0x0004, "FileServer" }, { 0x0005, "JobServer" }, { 0x0006, "Gateway" }, { 0x0007, "PrintServer" }, { 0x0008, "ArchiveQueue" }, { 0x0009, "ArchiveServer" }, { 0x000a, "JobQueue" }, { 0x000b, "Administration" }, { 0x000F, "Novell TI-RPC" }, { 0x0017, "Diagnostics" }, { 0x0020, "NetBIOS" }, { 0x0021, "NAS SNA Gateway" }, { 0x0023, "NACS AsyncGateway" }, { 0x0024, "RemoteBridge/RoutingService" }, { 0x0026, "BridgeServer" }, { 0x0027, "TCP/IP Gateway" }, { 0x0028, "Point-to-point X.25 BridgeServer" }, { 0x0029, "3270 Gateway" }, { 0x002a, "CHI Corp" }, { 0x002c, "PC Chalkboard" }, { 0x002d, "TimeSynchServer" }, { 0x002e, "ARCserve5.0/PalindromeBackup" }, { 0x0045, "DI3270 Gateway" }, { 0x0047, "AdvertisingPrintServer" }, { 0x004a, "NetBlazerModems" }, { 0x004b, "BtrieveVAP" }, { 0x004c, "NetwareSQL" }, { 0x004d, "XtreeNetwork" }, { 0x0050, "BtrieveVAP4.11" }, { 0x0052, "QuickLink" }, { 0x0053, "PrintQueueUser" }, { 0x0058, "Multipoint X.25 Router" }, { 0x0060, "STLB/NLM" }, { 0x0064, "ARCserve" }, { 0x0066, "ARCserve3.0" }, { 0x0072, "WAN CopyUtility" }, { 0x007a, "TES-NetwareVMS" }, { 0x0092, "WATCOM Debugger/EmeraldTapeBackupServer" }, { 0x0095, "DDA OBGYN" }, { 0x0098, "NetwareAccessServer" }, { 0x009a, "Netware for VMS II/NamedPipeServer" }, { 0x009b, "NetwareAccessServer" }, { 0x009e, "PortableNetwareServer/SunLinkNVT" }, { 0x00a1, "PowerchuteAPC UPS" }, { 0x00aa, "LAWserve" }, { 0x00ac, "CompaqIDA StatusMonitor" }, { 0x0100, "PIPE STAIL" }, { 0x0102, "LAN ProtectBindery" }, { 0x0103, "OracleDataBaseServer" }, { 0x0107, "Netware386/RSPX RemoteConsole" }, { 0x010f, "NovellSNA Gateway" }, { 0x0111, "TestServer" 
}, { 0x0112, "HP PrintServer" }, { 0x0114, "CSA MUX" }, { 0x0115, "CSA LCA" }, { 0x0116, "CSA CM" }, { 0x0117, "CSA SMA" }, { 0x0118, "CSA DBA" }, { 0x0119, "CSA NMA" }, { 0x011a, "CSA SSA" }, { 0x011b, "CSA STATUS" }, { 0x011e, "CSA APPC" }, { 0x0126, "SNA TEST SSA Profile" }, { 0x012a, "CSA TRACE" }, { 0x012b, "NetwareSAA" }, { 0x012e, "IKARUS VirusScan" }, { 0x0130, "CommunicationsExecutive" }, { 0x0133, "NNS DomainServer/NetwareNamingServicesDomain" }, { 0x0135, "NetwareNamingServicesProfile" }, { 0x0137, "Netware386 PrintQueue/NNS PrintQueue" }, { 0x0141, "LAN SpoolServer" }, { 0x0152, "IRMALAN Gateway" }, { 0x0154, "NamedPipeServer" }, { 0x0166, "NetWareManagement" }, { 0x0168, "Intel PICKIT CommServer/Intel CAS TalkServer" }, { 0x0173, "Compaq" }, { 0x0174, "Compaq SNMP Agent" }, { 0x0175, "Compaq" }, { 0x0180, "XTreeServer/XTreeTools" }, { 0x018A, "NASI ServicesBroadcastServer" }, { 0x01b0, "GARP Gateway" }, { 0x01b1, "Binfview" }, { 0x01bf, "IntelLanDeskManager" }, { 0x01ca, "AXTEC" }, { 0x01cb, "ShivaNetModem/E" }, { 0x01cc, "ShivaLanRover/E" }, { 0x01cd, "ShivaLanRover/T" }, { 0x01ce, "ShivaUniversal" }, { 0x01d8, "CastelleFAXPressServer" }, { 0x01da, "CastelleLANPressPrintServer" }, { 0x01dc, "CastelleFAX/Xerox7033 FaxServer/ExcelLanFax" }, { 0x01f0, "LEGATO" }, { 0x01f5, "LEGATO" }, { 0x0233, "NMS Agent/NetwareManagementAgent" }, { 0x0237, "NMS IPX Discovery/LANternReadWriteChannel" }, { 0x0238, "NMS IP Discovery/LANternTrapAlarmChannel" }, { 0x023a, "LANtern" }, { 0x023c, "MAVERICK" }, { 0x023f, "NovellSMDR" }, { 0x024e, "NetwareConnect" }, { 0x024f, "NASI ServerBroadcast Cisco" }, { 0x026a, "NMS ServiceConsole" }, { 0x026b, "TimeSynchronizationServer Netware 4.x" }, { 0x0278, "DirectoryServer Netware 4.x" }, { 0x027b, "NetwareManagementAgent" }, { 0x0280, "Novell File and Printer Sharing Service for PC" }, { 0x0304, "NovellSAA Gateway" }, { 0x0308, "COM/VERMED" }, { 0x030a, "GalacticommWorldgroupServer" }, { 0x030c, "IntelNetport2/HP JetDirect/HP 
Quicksilver" }, { 0x0320, "AttachmateGateway" }, { 0x0327, "MicrosoftDiagnostiocs" }, { 0x0328, "WATCOM SQL Server" }, { 0x0335, "MultiTechSystems MultisynchCommServer" }, { 0x0343, "Xylogics RemoteAccessServer/LANModem" }, { 0x0355, "ArcadaBackupExec" }, { 0x0358, "MSLCD1" }, { 0x0361, "NETINELO" }, { 0x037e, "Powerchute UPS Monitoring" }, { 0x037f, "ViruSafeNotify" }, { 0x0386, "HP Bridge" }, { 0x0387, "HP Hub" }, { 0x0394, "NetWare SAA Gateway" }, { 0x039b, "LotusNotes" }, { 0x03b7, "CertusAntiVirus" }, { 0x03c4, "ARCserve4.0" }, { 0x03c7, "LANspool3.5" }, { 0x03d7, "LexmarkPrinterServer" }, { 0x03d8, "LexmarkXLE PrinterServer" }, { 0x03dd, "BanyanENS NetwareClient" }, { 0x03de, "GuptaSequelBaseServer/NetWareSQL" }, { 0x03e1, "UnivelUnixware" }, { 0x03e4, "UnivelUnixware" }, { 0x03fc, "IntelNetport" }, { 0x03fd, "PrintServerQueue" }, { 0x040A, "ipnServer" }, { 0x040D, "LVERRMAN" }, { 0x040E, "LVLIC" }, { 0x0414, "NET Silicon (DPI)/Kyocera" }, { 0x0429, "SiteLockVirus" }, { 0x0432, "UFHELPR???" 
}, { 0x0433, "Synoptics281xAdvancedSNMPAgent" }, { 0x0444, "MicrosoftNT SNA Server" }, { 0x0448, "Oracle" }, { 0x044c, "ARCserve5.01" }, { 0x0457, "CanonGP55" }, { 0x045a, "QMS Printers" }, { 0x045b, "DellSCSI Array" }, { 0x0491, "NetBlazerModems" }, { 0x04ac, "OnTimeScheduler" }, { 0x04b0, "CD-Net" }, { 0x0513, "EmulexNQA" }, { 0x0520, "SiteLockChecks" }, { 0x0529, "SiteLockChecks" }, { 0x052d, "CitrixOS2 AppServer" }, { 0x0535, "Tektronix" }, { 0x0536, "Milan" }, { 0x055d, "Attachmate SNA gateway" }, { 0x056b, "IBM8235 ModemServer" }, { 0x056c, "ShivaLanRover/E PLUS" }, { 0x056d, "ShivaLanRover/T PLUS" }, { 0x0580, "McAfeeNetShield" }, { 0x05B8, "NLM to workstation communication (Revelation Software)" }, { 0x05BA, "CompatibleSystemsRouters" }, { 0x05BE, "CheyenneHierarchicalStorageManager" }, { 0x0606, "JCWatermarkImaging" }, { 0x060c, "AXISNetworkPrinter" }, { 0x0610, "AdaptecSCSIManagement" }, { 0x0621, "IBM AntiVirus" }, { 0x0640, "Windows95 RemoteRegistryService" }, { 0x064e, "MicrosoftIIS" }, { 0x067b, "Microsoft Win95/98 File and Print Sharing for NetWare" }, { 0x067c, "Microsoft Win95/98 File and Print Sharing for NetWare" }, { 0x076C, "Xerox" }, { 0x079b, "ShivaLanRover/E 115" }, { 0x079c, "ShivaLanRover/T 115" }, { 0x07B4, "CubixWorldDesk" }, { 0x07c2, "Quarterdeck IWare Connect V2.x NLM" }, { 0x07c1, "Quarterdeck IWare Connect V3.x NLM" }, { 0x0810, "ELAN License Server Demo" }, { 0x0824, "ShivaLanRoverAccessSwitch/E" }, { 0x086a, "ISSC Collector" }, { 0x087f, "ISSC DAS AgentAIX" }, { 0x0880, "Intel Netport PRO" }, { 0x0881, "Intel Netport PRO" }, { 0x0b29, "SiteLock" }, { 0x0c29, "SiteLockApplications" }, { 0x0c2c, "LicensingServer" }, { 0x2101, "PerformanceTechnologyInstantInternet" }, { 0x2380, "LAI SiteLock" }, { 0x238c, "MeetingMaker" }, { 0x4808, "SiteLockServer/SiteLockMetering" }, { 0x5555, "SiteLockUser" }, { 0x6312, "Tapeware" }, { 0x6f00, "RabbitGateway" }, { 0x7703, "MODEM" }, { 0x8002, "NetPortPrinters" }, { 0x8008, 
"WordPerfectNetworkVersion" }, { 0x85BE, "Cisco EIGRP" }, { 0x8888, "WordPerfectNetworkVersion/QuickNetworkManagement" }, { 0x9000, "McAfeeNetShield" }, { 0x9604, "CSA-NT_MON" }, { 0xb6a8, "OceanIsleReachoutRemoteControl" }, { 0xf11f, "SiteLockMetering" }, { 0xf1ff, "SiteLock" }, { 0xf503, "Microsoft SQL Server" }, { 0xF905, "IBM TimeAndPlace" }, { 0xfbfb, "TopCallIII FaxServer" }, { 0xffff, "AnyService/Wildcard" }, { 0, (char *)0 } }; static void init_ipxsaparray(netdissect_options *ndo) { register int i; register struct hnamemem *table; for (i = 0; ipxsap_db[i].s != NULL; i++) { int j = htons(ipxsap_db[i].v) & (HASHNAMESIZE-1); table = &ipxsaptable[j]; while (table->name) table = table->nxt; table->name = ipxsap_db[i].s; table->addr = htons(ipxsap_db[i].v); table->nxt = newhnamemem(ndo); } } /* * Initialize the address to name translation machinery. We map all * non-local IP addresses to numeric addresses if ndo->ndo_fflag is true * (i.e., to prevent blocking on the nameserver). localnet is the IP address * of the local network. mask is its subnet mask. */ void init_addrtoname(netdissect_options *ndo, uint32_t localnet, uint32_t mask) { if (ndo->ndo_fflag) { f_localnet = localnet; f_netmask = mask; } if (ndo->ndo_nflag) /* * Simplest way to suppress names. 
*/ return; init_etherarray(ndo); init_servarray(ndo); init_eprotoarray(ndo); init_protoidarray(ndo); init_ipxsaparray(ndo); } const char * dnaddr_string(netdissect_options *ndo, u_short dnaddr) { register struct hnamemem *tp; for (tp = &dnaddrtable[dnaddr & (HASHNAMESIZE-1)]; tp->nxt != NULL; tp = tp->nxt) if (tp->addr == dnaddr) return (tp->name); tp->addr = dnaddr; tp->nxt = newhnamemem(ndo); if (ndo->ndo_nflag) tp->name = dnnum_string(ndo, dnaddr); else tp->name = dnname_string(ndo, dnaddr); return(tp->name); } /* Return a zero'ed hnamemem struct and cuts down on calloc() overhead */ struct hnamemem * newhnamemem(netdissect_options *ndo) { register struct hnamemem *p; static struct hnamemem *ptr = NULL; static u_int num = 0; if (num <= 0) { num = 64; ptr = (struct hnamemem *)calloc(num, sizeof (*ptr)); if (ptr == NULL) (*ndo->ndo_error)(ndo, "newhnamemem: calloc"); } --num; p = ptr++; return (p); } /* Return a zero'ed h6namemem struct and cuts down on calloc() overhead */ struct h6namemem * newh6namemem(netdissect_options *ndo) { register struct h6namemem *p; static struct h6namemem *ptr = NULL; static u_int num = 0; if (num <= 0) { num = 64; ptr = (struct h6namemem *)calloc(num, sizeof (*ptr)); if (ptr == NULL) (*ndo->ndo_error)(ndo, "newh6namemem: calloc"); } --num; p = ptr++; return (p); } /* Represent TCI part of the 802.1Q 4-octet tag as text. */ const char * ieee8021q_tci_string(const uint16_t tci) { static char buf[128]; snprintf(buf, sizeof(buf), "vlan %u, p %u%s", tci & 0xfff, tci >> 13, (tci & 0x1000) ? ", DEI" : ""); return buf; }
/* * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* * Internet, ethernet, port, and protocol string to address * and address to string conversion routines */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #ifdef HAVE_CASPER #include <libcasper.h> #include <casper/cap_dns.h> #endif /* HAVE_CASPER */ #include <netdissect-stdinc.h> #ifdef USE_ETHER_NTOHOST #ifdef HAVE_NETINET_IF_ETHER_H struct mbuf; /* Squelch compiler warnings on some platforms for */ struct rtentry; /* declarations in <net/if.h> */ #include <net/if.h> /* for "struct ifnet" in "struct arpcom" on Solaris */ #include <netinet/if_ether.h> #endif /* HAVE_NETINET_IF_ETHER_H */ #ifdef NETINET_ETHER_H_DECLARES_ETHER_NTOHOST #include <netinet/ether.h> #endif /* NETINET_ETHER_H_DECLARES_ETHER_NTOHOST */ #if !defined(HAVE_DECL_ETHER_NTOHOST) || !HAVE_DECL_ETHER_NTOHOST #ifndef HAVE_STRUCT_ETHER_ADDR struct ether_addr { unsigned char ether_addr_octet[6]; }; #endif extern int ether_ntohost(char *, const struct ether_addr *); #endif #endif /* USE_ETHER_NTOHOST */ #include <pcap.h> #include <pcap-namedb.h> #include <signal.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include "netdissect.h" #include "addrtoname.h" #include "addrtostr.h" #include "ethertype.h" #include "llc.h" #include "setsignal.h" #include "extract.h" #include "oui.h" #ifndef ETHER_ADDR_LEN #define ETHER_ADDR_LEN 6 #endif /* * hash tables for whatever-to-name translations * * ndo_error() called on strdup(3) failure */ #define HASHNAMESIZE 4096 struct hnamemem { uint32_t addr; const char *name; struct hnamemem *nxt; }; static struct hnamemem hnametable[HASHNAMESIZE]; static struct hnamemem tporttable[HASHNAMESIZE]; static struct hnamemem uporttable[HASHNAMESIZE]; static struct hnamemem eprototable[HASHNAMESIZE]; static struct hnamemem dnaddrtable[HASHNAMESIZE]; static struct hnamemem ipxsaptable[HASHNAMESIZE]; #ifdef _WIN32 /* * fake gethostbyaddr for Win2k/XP * gethostbyaddr() returns incorrect value when AF_INET6 is passed * to 3rd argument. 
* * h_name in struct hostent is only valid. */ static struct hostent * win32_gethostbyaddr(const char *addr, int len, int type) { static struct hostent host; static char hostbuf[NI_MAXHOST]; char hname[NI_MAXHOST]; struct sockaddr_in6 addr6; host.h_name = hostbuf; switch (type) { case AF_INET: return gethostbyaddr(addr, len, type); break; case AF_INET6: memset(&addr6, 0, sizeof(addr6)); addr6.sin6_family = AF_INET6; memcpy(&addr6.sin6_addr, addr, len); if (getnameinfo((struct sockaddr *)&addr6, sizeof(addr6), hname, sizeof(hname), NULL, 0, 0)) { return NULL; } else { strcpy(host.h_name, hname); return &host; } break; default: return NULL; } } #define gethostbyaddr win32_gethostbyaddr #endif /* _WIN32 */ struct h6namemem { struct in6_addr addr; char *name; struct h6namemem *nxt; }; static struct h6namemem h6nametable[HASHNAMESIZE]; struct enamemem { u_short e_addr0; u_short e_addr1; u_short e_addr2; const char *e_name; u_char *e_nsap; /* used only for nsaptable[] */ #define e_bs e_nsap /* for bytestringtable */ size_t e_namelen; /* for bytestringtable */ struct enamemem *e_nxt; }; static struct enamemem enametable[HASHNAMESIZE]; static struct enamemem nsaptable[HASHNAMESIZE]; struct bsnamemem { u_short bs_addr0; u_short bs_addr1; u_short bs_addr2; const char *bs_name; u_char *bs_bytes; unsigned int bs_nbytes; struct bsnamemem *bs_nxt; }; static struct bsnamemem bytestringtable[HASHNAMESIZE]; struct protoidmem { uint32_t p_oui; u_short p_proto; const char *p_name; struct protoidmem *p_nxt; }; static struct protoidmem protoidtable[HASHNAMESIZE]; /* * A faster replacement for inet_ntoa(). 
*/ const char * intoa(uint32_t addr) { register char *cp; register u_int byte; register int n; static char buf[sizeof(".xxx.xxx.xxx.xxx")]; NTOHL(addr); cp = buf + sizeof(buf); *--cp = '\0'; n = 4; do { byte = addr & 0xff; *--cp = byte % 10 + '0'; byte /= 10; if (byte > 0) { *--cp = byte % 10 + '0'; byte /= 10; if (byte > 0) *--cp = byte + '0'; } *--cp = '.'; addr >>= 8; } while (--n > 0); return cp + 1; } static uint32_t f_netmask; static uint32_t f_localnet; #ifdef HAVE_CASPER extern cap_channel_t *capdns; #endif /* * Return a name for the IP address pointed to by ap. This address * is assumed to be in network byte order. * * NOTE: ap is *NOT* necessarily part of the packet data (not even if * this is being called with the "ipaddr_string()" macro), so you * *CANNOT* use the ND_TCHECK{2}/ND_TTEST{2} macros on it. Furthermore, * even in cases where it *is* part of the packet data, the caller * would still have to check for a null return value, even if it's * just printing the return value with "%s" - not all versions of * printf print "(null)" with "%s" and a null pointer, some of them * don't check for a null pointer and crash in that case. * * The callers of this routine should, before handing this routine * a pointer to packet data, be sure that the data is present in * the packet buffer. They should probably do those checks anyway, * as other data at that layer might not be IP addresses, and it * also needs to check whether they're present in the packet buffer. */ const char * getname(netdissect_options *ndo, const u_char *ap) { register struct hostent *hp; uint32_t addr; struct hnamemem *p; memcpy(&addr, ap, sizeof(addr)); p = &hnametable[addr & (HASHNAMESIZE-1)]; for (; p->nxt; p = p->nxt) { if (p->addr == addr) return (p->name); } p->addr = addr; p->nxt = newhnamemem(ndo); /* * Print names unless: * (1) -n was given. * (2) Address is foreign and -f was given. 
(If -f was not * given, f_netmask and f_localnet are 0 and the test * evaluates to true) */ if (!ndo->ndo_nflag && (addr & f_netmask) == f_localnet) { #ifdef HAVE_CASPER if (capdns != NULL) { hp = cap_gethostbyaddr(capdns, (char *)&addr, 4, AF_INET); } else #endif hp = gethostbyaddr((char *)&addr, 4, AF_INET); if (hp) { char *dotp; p->name = strdup(hp->h_name); if (p->name == NULL) (*ndo->ndo_error)(ndo, "getname: strdup(hp->h_name)"); if (ndo->ndo_Nflag) { /* Remove domain qualifications */ dotp = strchr(p->name, '.'); if (dotp) *dotp = '\0'; } return (p->name); } } p->name = strdup(intoa(addr)); if (p->name == NULL) (*ndo->ndo_error)(ndo, "getname: strdup(intoa(addr))"); return (p->name); } /* * Return a name for the IP6 address pointed to by ap. This address * is assumed to be in network byte order. */ const char * getname6(netdissect_options *ndo, const u_char *ap) { register struct hostent *hp; union { struct in6_addr addr; struct for_hash_addr { char fill[14]; uint16_t d; } addra; } addr; struct h6namemem *p; register const char *cp; char ntop_buf[INET6_ADDRSTRLEN]; memcpy(&addr, ap, sizeof(addr)); p = &h6nametable[addr.addra.d & (HASHNAMESIZE-1)]; for (; p->nxt; p = p->nxt) { if (memcmp(&p->addr, &addr, sizeof(addr)) == 0) return (p->name); } p->addr = addr.addr; p->nxt = newh6namemem(ndo); /* * Do not print names if -n was given. 
*/ if (!ndo->ndo_nflag) { #ifdef HAVE_CASPER if (capdns != NULL) { hp = cap_gethostbyaddr(capdns, (char *)&addr, sizeof(addr), AF_INET6); } else #endif hp = gethostbyaddr((char *)&addr, sizeof(addr), AF_INET6); if (hp) { char *dotp; p->name = strdup(hp->h_name); if (p->name == NULL) (*ndo->ndo_error)(ndo, "getname6: strdup(hp->h_name)"); if (ndo->ndo_Nflag) { /* Remove domain qualifications */ dotp = strchr(p->name, '.'); if (dotp) *dotp = '\0'; } return (p->name); } } cp = addrtostr6(ap, ntop_buf, sizeof(ntop_buf)); p->name = strdup(cp); if (p->name == NULL) (*ndo->ndo_error)(ndo, "getname6: strdup(cp)"); return (p->name); } static const char hex[] = "0123456789abcdef"; /* Find the hash node that corresponds the ether address 'ep' */ static inline struct enamemem * lookup_emem(netdissect_options *ndo, const u_char *ep) { register u_int i, j, k; struct enamemem *tp; k = (ep[0] << 8) | ep[1]; j = (ep[2] << 8) | ep[3]; i = (ep[4] << 8) | ep[5]; tp = &enametable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->e_nxt) if (tp->e_addr0 == i && tp->e_addr1 == j && tp->e_addr2 == k) return tp; else tp = tp->e_nxt; tp->e_addr0 = i; tp->e_addr1 = j; tp->e_addr2 = k; tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp)); if (tp->e_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_emem: calloc"); return tp; } /* * Find the hash node that corresponds to the bytestring 'bs' * with length 'nlen' */ static inline struct bsnamemem * lookup_bytestring(netdissect_options *ndo, register const u_char *bs, const unsigned int nlen) { struct bsnamemem *tp; register u_int i, j, k; if (nlen >= 6) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = (bs[4] << 8) | bs[5]; } else if (nlen >= 4) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = 0; } else i = j = k = 0; tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->bs_nxt) if (nlen == tp->bs_nbytes && tp->bs_addr0 == i && tp->bs_addr1 == j && tp->bs_addr2 == k && memcmp((const char *)bs, (const char *)(tp->bs_bytes), nlen) == 0) 
return tp; else tp = tp->bs_nxt; tp->bs_addr0 = i; tp->bs_addr1 = j; tp->bs_addr2 = k; tp->bs_bytes = (u_char *) calloc(1, nlen + 1); if (tp->bs_bytes == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); memcpy(tp->bs_bytes, bs, nlen); tp->bs_nbytes = nlen; tp->bs_nxt = (struct bsnamemem *)calloc(1, sizeof(*tp)); if (tp->bs_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); return tp; } /* Find the hash node that corresponds the NSAP 'nsap' */ static inline struct enamemem * lookup_nsap(netdissect_options *ndo, register const u_char *nsap, register u_int nsap_length) { register u_int i, j, k; struct enamemem *tp; const u_char *ensap; if (nsap_length > 6) { ensap = nsap + nsap_length - 6; k = (ensap[0] << 8) | ensap[1]; j = (ensap[2] << 8) | ensap[3]; i = (ensap[4] << 8) | ensap[5]; } else i = j = k = 0; tp = &nsaptable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->e_nxt) if (tp->e_addr0 == i && tp->e_addr1 == j && tp->e_addr2 == k && tp->e_nsap[0] == nsap_length && memcmp((const char *)&(nsap[1]), (char *)&(tp->e_nsap[1]), nsap_length) == 0) return tp; else tp = tp->e_nxt; tp->e_addr0 = i; tp->e_addr1 = j; tp->e_addr2 = k; tp->e_nsap = (u_char *)malloc(nsap_length + 1); if (tp->e_nsap == NULL) (*ndo->ndo_error)(ndo, "lookup_nsap: malloc"); tp->e_nsap[0] = (u_char)nsap_length; /* guaranteed < ISONSAP_MAX_LENGTH */ memcpy((char *)&tp->e_nsap[1], (const char *)nsap, nsap_length); tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp)); if (tp->e_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_nsap: calloc"); return tp; } /* Find the hash node that corresponds the protoid 'pi'. 
*/ static inline struct protoidmem * lookup_protoid(netdissect_options *ndo, const u_char *pi) { register u_int i, j; struct protoidmem *tp; /* 5 octets won't be aligned */ i = (((pi[0] << 8) + pi[1]) << 8) + pi[2]; j = (pi[3] << 8) + pi[4]; /* XXX should be endian-insensitive, but do big-endian testing XXX */ tp = &protoidtable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->p_nxt) if (tp->p_oui == i && tp->p_proto == j) return tp; else tp = tp->p_nxt; tp->p_oui = i; tp->p_proto = j; tp->p_nxt = (struct protoidmem *)calloc(1, sizeof(*tp)); if (tp->p_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_protoid: calloc"); return tp; } const char * etheraddr_string(netdissect_options *ndo, register const u_char *ep) { register int i; register char *cp; register struct enamemem *tp; int oui; char buf[BUFSIZE]; tp = lookup_emem(ndo, ep); if (tp->e_name) return (tp->e_name); #ifdef USE_ETHER_NTOHOST if (!ndo->ndo_nflag) { char buf2[BUFSIZE]; if (ether_ntohost(buf2, (const struct ether_addr *)ep) == 0) { tp->e_name = strdup(buf2); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "etheraddr_string: strdup(buf2)"); return (tp->e_name); } } #endif cp = buf; oui = EXTRACT_24BITS(ep); *cp++ = hex[*ep >> 4 ]; *cp++ = hex[*ep++ & 0xf]; for (i = 5; --i >= 0;) { *cp++ = ':'; *cp++ = hex[*ep >> 4 ]; *cp++ = hex[*ep++ & 0xf]; } if (!ndo->ndo_nflag) { snprintf(cp, BUFSIZE - (2 + 5*3), " (oui %s)", tok2str(oui_values, "Unknown", oui)); } else *cp = '\0'; tp->e_name = strdup(buf); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "etheraddr_string: strdup(buf)"); return (tp->e_name); } const char * le64addr_string(netdissect_options *ndo, const u_char *ep) { const unsigned int len = 8; register u_int i; register char *cp; register struct bsnamemem *tp; char buf[BUFSIZE]; tp = lookup_bytestring(ndo, ep, len); if (tp->bs_name) return (tp->bs_name); cp = buf; for (i = len; i > 0 ; --i) { *cp++ = hex[*(ep + i - 1) >> 4]; *cp++ = hex[*(ep + i - 1) & 0xf]; *cp++ = ':'; } cp --; *cp = '\0'; tp->bs_name = 
strdup(buf); if (tp->bs_name == NULL) (*ndo->ndo_error)(ndo, "le64addr_string: strdup(buf)"); return (tp->bs_name); } const char * linkaddr_string(netdissect_options *ndo, const u_char *ep, const unsigned int type, const unsigned int len) { register u_int i; register char *cp; register struct bsnamemem *tp; if (len == 0) return ("<empty>"); if (type == LINKADDR_ETHER && len == ETHER_ADDR_LEN) return (etheraddr_string(ndo, ep)); if (type == LINKADDR_FRELAY) return (q922_string(ndo, ep, len)); tp = lookup_bytestring(ndo, ep, len); if (tp->bs_name) return (tp->bs_name); tp->bs_name = cp = (char *)malloc(len*3); if (tp->bs_name == NULL) (*ndo->ndo_error)(ndo, "linkaddr_string: malloc"); *cp++ = hex[*ep >> 4]; *cp++ = hex[*ep++ & 0xf]; for (i = len-1; i > 0 ; --i) { *cp++ = ':'; *cp++ = hex[*ep >> 4]; *cp++ = hex[*ep++ & 0xf]; } *cp = '\0'; return (tp->bs_name); } const char * etherproto_string(netdissect_options *ndo, u_short port) { register char *cp; register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("0000")]; for (tp = &eprototable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if (tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); cp = buf; NTOHS(port); *cp++ = hex[port >> 12 & 0xf]; *cp++ = hex[port >> 8 & 0xf]; *cp++ = hex[port >> 4 & 0xf]; *cp++ = hex[port & 0xf]; *cp++ = '\0'; tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "etherproto_string: strdup(buf)"); return (tp->name); } const char * protoid_string(netdissect_options *ndo, register const u_char *pi) { register u_int i, j; register char *cp; register struct protoidmem *tp; char buf[sizeof("00:00:00:00:00")]; tp = lookup_protoid(ndo, pi); if (tp->p_name) return tp->p_name; cp = buf; if ((j = *pi >> 4) != 0) *cp++ = hex[j]; *cp++ = hex[*pi++ & 0xf]; for (i = 4; (int)--i >= 0;) { *cp++ = ':'; if ((j = *pi >> 4) != 0) *cp++ = hex[j]; *cp++ = hex[*pi++ & 0xf]; } *cp = '\0'; tp->p_name = strdup(buf); if (tp->p_name == NULL) 
(*ndo->ndo_error)(ndo, "protoid_string: strdup(buf)"); return (tp->p_name); } #define ISONSAP_MAX_LENGTH 20 const char * isonsap_string(netdissect_options *ndo, const u_char *nsap, register u_int nsap_length) { register u_int nsap_idx; register char *cp; register struct enamemem *tp; if (nsap_length < 1 || nsap_length > ISONSAP_MAX_LENGTH) return ("isonsap_string: illegal length"); tp = lookup_nsap(ndo, nsap, nsap_length); if (tp->e_name) return tp->e_name; tp->e_name = cp = (char *)malloc(sizeof("xx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xx")); if (cp == NULL) (*ndo->ndo_error)(ndo, "isonsap_string: malloc"); for (nsap_idx = 0; nsap_idx < nsap_length; nsap_idx++) { *cp++ = hex[*nsap >> 4]; *cp++ = hex[*nsap++ & 0xf]; if (((nsap_idx & 1) == 0) && (nsap_idx + 1 < nsap_length)) { *cp++ = '.'; } } *cp = '\0'; return (tp->e_name); } const char * tcpport_string(netdissect_options *ndo, u_short port) { register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("00000")]; for (tp = &tporttable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if (tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); (void)snprintf(buf, sizeof(buf), "%u", i); tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "tcpport_string: strdup(buf)"); return (tp->name); } const char * udpport_string(netdissect_options *ndo, register u_short port) { register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("00000")]; for (tp = &uporttable[i & (HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if (tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); (void)snprintf(buf, sizeof(buf), "%u", i); tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "udpport_string: strdup(buf)"); return (tp->name); } const char * ipxsap_string(netdissect_options *ndo, u_short port) { register char *cp; register struct hnamemem *tp; register uint32_t i = port; char buf[sizeof("0000")]; for (tp = &ipxsaptable[i & 
(HASHNAMESIZE-1)]; tp->nxt; tp = tp->nxt) if (tp->addr == i) return (tp->name); tp->addr = i; tp->nxt = newhnamemem(ndo); cp = buf; NTOHS(port); *cp++ = hex[port >> 12 & 0xf]; *cp++ = hex[port >> 8 & 0xf]; *cp++ = hex[port >> 4 & 0xf]; *cp++ = hex[port & 0xf]; *cp++ = '\0'; tp->name = strdup(buf); if (tp->name == NULL) (*ndo->ndo_error)(ndo, "ipxsap_string: strdup(buf)"); return (tp->name); } static void init_servarray(netdissect_options *ndo) { struct servent *sv; register struct hnamemem *table; register int i; char buf[sizeof("0000000000")]; while ((sv = getservent()) != NULL) { int port = ntohs(sv->s_port); i = port & (HASHNAMESIZE-1); if (strcmp(sv->s_proto, "tcp") == 0) table = &tporttable[i]; else if (strcmp(sv->s_proto, "udp") == 0) table = &uporttable[i]; else continue; while (table->name) table = table->nxt; if (ndo->ndo_nflag) { (void)snprintf(buf, sizeof(buf), "%d", port); table->name = strdup(buf); } else table->name = strdup(sv->s_name); if (table->name == NULL) (*ndo->ndo_error)(ndo, "init_servarray: strdup"); table->addr = port; table->nxt = newhnamemem(ndo); } endservent(); } static const struct eproto { const char *s; u_short p; } eproto_db[] = { { "pup", ETHERTYPE_PUP }, { "xns", ETHERTYPE_NS }, { "ip", ETHERTYPE_IP }, { "ip6", ETHERTYPE_IPV6 }, { "arp", ETHERTYPE_ARP }, { "rarp", ETHERTYPE_REVARP }, { "sprite", ETHERTYPE_SPRITE }, { "mopdl", ETHERTYPE_MOPDL }, { "moprc", ETHERTYPE_MOPRC }, { "decnet", ETHERTYPE_DN }, { "lat", ETHERTYPE_LAT }, { "sca", ETHERTYPE_SCA }, { "lanbridge", ETHERTYPE_LANBRIDGE }, { "vexp", ETHERTYPE_VEXP }, { "vprod", ETHERTYPE_VPROD }, { "atalk", ETHERTYPE_ATALK }, { "atalkarp", ETHERTYPE_AARP }, { "loopback", ETHERTYPE_LOOPBACK }, { "decdts", ETHERTYPE_DECDTS }, { "decdns", ETHERTYPE_DECDNS }, { (char *)0, 0 } }; static void init_eprotoarray(netdissect_options *ndo) { register int i; register struct hnamemem *table; for (i = 0; eproto_db[i].s; i++) { int j = htons(eproto_db[i].p) & (HASHNAMESIZE-1); table = 
&eprototable[j]; while (table->name) table = table->nxt; table->name = eproto_db[i].s; table->addr = htons(eproto_db[i].p); table->nxt = newhnamemem(ndo); } } static const struct protoidlist { const u_char protoid[5]; const char *name; } protoidlist[] = { {{ 0x00, 0x00, 0x0c, 0x01, 0x07 }, "CiscoMLS" }, {{ 0x00, 0x00, 0x0c, 0x20, 0x00 }, "CiscoCDP" }, {{ 0x00, 0x00, 0x0c, 0x20, 0x01 }, "CiscoCGMP" }, {{ 0x00, 0x00, 0x0c, 0x20, 0x03 }, "CiscoVTP" }, {{ 0x00, 0xe0, 0x2b, 0x00, 0xbb }, "ExtremeEDP" }, {{ 0x00, 0x00, 0x00, 0x00, 0x00 }, NULL } }; /* * SNAP proto IDs with org code 0:0:0 are actually encapsulated Ethernet * types. */ static void init_protoidarray(netdissect_options *ndo) { register int i; register struct protoidmem *tp; const struct protoidlist *pl; u_char protoid[5]; protoid[0] = 0; protoid[1] = 0; protoid[2] = 0; for (i = 0; eproto_db[i].s; i++) { u_short etype = htons(eproto_db[i].p); memcpy((char *)&protoid[3], (char *)&etype, 2); tp = lookup_protoid(ndo, protoid); tp->p_name = strdup(eproto_db[i].s); if (tp->p_name == NULL) (*ndo->ndo_error)(ndo, "init_protoidarray: strdup(eproto_db[i].s)"); } /* Hardwire some SNAP proto ID names */ for (pl = protoidlist; pl->name != NULL; ++pl) { tp = lookup_protoid(ndo, pl->protoid); /* Don't override existing name */ if (tp->p_name != NULL) continue; tp->p_name = pl->name; } } static const struct etherlist { const u_char addr[6]; const char *name; } etherlist[] = { {{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, "Broadcast" }, {{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, NULL } }; /* * Initialize the ethers hash table. We take two different approaches * depending on whether or not the system provides the ethers name * service. If it does, we just wire in a few names at startup, * and etheraddr_string() fills in the table on demand. If it doesn't, * then we suck in the entire /etc/ethers file at startup. 
The idea * is that parsing the local file will be fast, but spinning through * all the ethers entries via NIS & next_etherent might be very slow. * * XXX pcap_next_etherent doesn't belong in the pcap interface, but * since the pcap module already does name-to-address translation, * it's already does most of the work for the ethernet address-to-name * translation, so we just pcap_next_etherent as a convenience. */ static void init_etherarray(netdissect_options *ndo) { register const struct etherlist *el; register struct enamemem *tp; #ifdef USE_ETHER_NTOHOST char name[256]; #else register struct pcap_etherent *ep; register FILE *fp; /* Suck in entire ethers file */ fp = fopen(PCAP_ETHERS_FILE, "r"); if (fp != NULL) { while ((ep = pcap_next_etherent(fp)) != NULL) { tp = lookup_emem(ndo, ep->addr); tp->e_name = strdup(ep->name); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "init_etherarray: strdup(ep->addr)"); } (void)fclose(fp); } #endif /* Hardwire some ethernet names */ for (el = etherlist; el->name != NULL; ++el) { tp = lookup_emem(ndo, el->addr); /* Don't override existing name */ if (tp->e_name != NULL) continue; #ifdef USE_ETHER_NTOHOST /* * Use YP/NIS version of name if available. 
*/ if (ether_ntohost(name, (const struct ether_addr *)el->addr) == 0) { tp->e_name = strdup(name); if (tp->e_name == NULL) (*ndo->ndo_error)(ndo, "init_etherarray: strdup(name)"); continue; } #endif tp->e_name = el->name; } } static const struct tok ipxsap_db[] = { { 0x0000, "Unknown" }, { 0x0001, "User" }, { 0x0002, "User Group" }, { 0x0003, "PrintQueue" }, { 0x0004, "FileServer" }, { 0x0005, "JobServer" }, { 0x0006, "Gateway" }, { 0x0007, "PrintServer" }, { 0x0008, "ArchiveQueue" }, { 0x0009, "ArchiveServer" }, { 0x000a, "JobQueue" }, { 0x000b, "Administration" }, { 0x000F, "Novell TI-RPC" }, { 0x0017, "Diagnostics" }, { 0x0020, "NetBIOS" }, { 0x0021, "NAS SNA Gateway" }, { 0x0023, "NACS AsyncGateway" }, { 0x0024, "RemoteBridge/RoutingService" }, { 0x0026, "BridgeServer" }, { 0x0027, "TCP/IP Gateway" }, { 0x0028, "Point-to-point X.25 BridgeServer" }, { 0x0029, "3270 Gateway" }, { 0x002a, "CHI Corp" }, { 0x002c, "PC Chalkboard" }, { 0x002d, "TimeSynchServer" }, { 0x002e, "ARCserve5.0/PalindromeBackup" }, { 0x0045, "DI3270 Gateway" }, { 0x0047, "AdvertisingPrintServer" }, { 0x004a, "NetBlazerModems" }, { 0x004b, "BtrieveVAP" }, { 0x004c, "NetwareSQL" }, { 0x004d, "XtreeNetwork" }, { 0x0050, "BtrieveVAP4.11" }, { 0x0052, "QuickLink" }, { 0x0053, "PrintQueueUser" }, { 0x0058, "Multipoint X.25 Router" }, { 0x0060, "STLB/NLM" }, { 0x0064, "ARCserve" }, { 0x0066, "ARCserve3.0" }, { 0x0072, "WAN CopyUtility" }, { 0x007a, "TES-NetwareVMS" }, { 0x0092, "WATCOM Debugger/EmeraldTapeBackupServer" }, { 0x0095, "DDA OBGYN" }, { 0x0098, "NetwareAccessServer" }, { 0x009a, "Netware for VMS II/NamedPipeServer" }, { 0x009b, "NetwareAccessServer" }, { 0x009e, "PortableNetwareServer/SunLinkNVT" }, { 0x00a1, "PowerchuteAPC UPS" }, { 0x00aa, "LAWserve" }, { 0x00ac, "CompaqIDA StatusMonitor" }, { 0x0100, "PIPE STAIL" }, { 0x0102, "LAN ProtectBindery" }, { 0x0103, "OracleDataBaseServer" }, { 0x0107, "Netware386/RSPX RemoteConsole" }, { 0x010f, "NovellSNA Gateway" }, { 0x0111, "TestServer" 
}, { 0x0112, "HP PrintServer" }, { 0x0114, "CSA MUX" }, { 0x0115, "CSA LCA" }, { 0x0116, "CSA CM" }, { 0x0117, "CSA SMA" }, { 0x0118, "CSA DBA" }, { 0x0119, "CSA NMA" }, { 0x011a, "CSA SSA" }, { 0x011b, "CSA STATUS" }, { 0x011e, "CSA APPC" }, { 0x0126, "SNA TEST SSA Profile" }, { 0x012a, "CSA TRACE" }, { 0x012b, "NetwareSAA" }, { 0x012e, "IKARUS VirusScan" }, { 0x0130, "CommunicationsExecutive" }, { 0x0133, "NNS DomainServer/NetwareNamingServicesDomain" }, { 0x0135, "NetwareNamingServicesProfile" }, { 0x0137, "Netware386 PrintQueue/NNS PrintQueue" }, { 0x0141, "LAN SpoolServer" }, { 0x0152, "IRMALAN Gateway" }, { 0x0154, "NamedPipeServer" }, { 0x0166, "NetWareManagement" }, { 0x0168, "Intel PICKIT CommServer/Intel CAS TalkServer" }, { 0x0173, "Compaq" }, { 0x0174, "Compaq SNMP Agent" }, { 0x0175, "Compaq" }, { 0x0180, "XTreeServer/XTreeTools" }, { 0x018A, "NASI ServicesBroadcastServer" }, { 0x01b0, "GARP Gateway" }, { 0x01b1, "Binfview" }, { 0x01bf, "IntelLanDeskManager" }, { 0x01ca, "AXTEC" }, { 0x01cb, "ShivaNetModem/E" }, { 0x01cc, "ShivaLanRover/E" }, { 0x01cd, "ShivaLanRover/T" }, { 0x01ce, "ShivaUniversal" }, { 0x01d8, "CastelleFAXPressServer" }, { 0x01da, "CastelleLANPressPrintServer" }, { 0x01dc, "CastelleFAX/Xerox7033 FaxServer/ExcelLanFax" }, { 0x01f0, "LEGATO" }, { 0x01f5, "LEGATO" }, { 0x0233, "NMS Agent/NetwareManagementAgent" }, { 0x0237, "NMS IPX Discovery/LANternReadWriteChannel" }, { 0x0238, "NMS IP Discovery/LANternTrapAlarmChannel" }, { 0x023a, "LANtern" }, { 0x023c, "MAVERICK" }, { 0x023f, "NovellSMDR" }, { 0x024e, "NetwareConnect" }, { 0x024f, "NASI ServerBroadcast Cisco" }, { 0x026a, "NMS ServiceConsole" }, { 0x026b, "TimeSynchronizationServer Netware 4.x" }, { 0x0278, "DirectoryServer Netware 4.x" }, { 0x027b, "NetwareManagementAgent" }, { 0x0280, "Novell File and Printer Sharing Service for PC" }, { 0x0304, "NovellSAA Gateway" }, { 0x0308, "COM/VERMED" }, { 0x030a, "GalacticommWorldgroupServer" }, { 0x030c, "IntelNetport2/HP JetDirect/HP 
Quicksilver" }, { 0x0320, "AttachmateGateway" }, { 0x0327, "MicrosoftDiagnostiocs" }, { 0x0328, "WATCOM SQL Server" }, { 0x0335, "MultiTechSystems MultisynchCommServer" }, { 0x0343, "Xylogics RemoteAccessServer/LANModem" }, { 0x0355, "ArcadaBackupExec" }, { 0x0358, "MSLCD1" }, { 0x0361, "NETINELO" }, { 0x037e, "Powerchute UPS Monitoring" }, { 0x037f, "ViruSafeNotify" }, { 0x0386, "HP Bridge" }, { 0x0387, "HP Hub" }, { 0x0394, "NetWare SAA Gateway" }, { 0x039b, "LotusNotes" }, { 0x03b7, "CertusAntiVirus" }, { 0x03c4, "ARCserve4.0" }, { 0x03c7, "LANspool3.5" }, { 0x03d7, "LexmarkPrinterServer" }, { 0x03d8, "LexmarkXLE PrinterServer" }, { 0x03dd, "BanyanENS NetwareClient" }, { 0x03de, "GuptaSequelBaseServer/NetWareSQL" }, { 0x03e1, "UnivelUnixware" }, { 0x03e4, "UnivelUnixware" }, { 0x03fc, "IntelNetport" }, { 0x03fd, "PrintServerQueue" }, { 0x040A, "ipnServer" }, { 0x040D, "LVERRMAN" }, { 0x040E, "LVLIC" }, { 0x0414, "NET Silicon (DPI)/Kyocera" }, { 0x0429, "SiteLockVirus" }, { 0x0432, "UFHELPR???" 
}, { 0x0433, "Synoptics281xAdvancedSNMPAgent" }, { 0x0444, "MicrosoftNT SNA Server" }, { 0x0448, "Oracle" }, { 0x044c, "ARCserve5.01" }, { 0x0457, "CanonGP55" }, { 0x045a, "QMS Printers" }, { 0x045b, "DellSCSI Array" }, { 0x0491, "NetBlazerModems" }, { 0x04ac, "OnTimeScheduler" }, { 0x04b0, "CD-Net" }, { 0x0513, "EmulexNQA" }, { 0x0520, "SiteLockChecks" }, { 0x0529, "SiteLockChecks" }, { 0x052d, "CitrixOS2 AppServer" }, { 0x0535, "Tektronix" }, { 0x0536, "Milan" }, { 0x055d, "Attachmate SNA gateway" }, { 0x056b, "IBM8235 ModemServer" }, { 0x056c, "ShivaLanRover/E PLUS" }, { 0x056d, "ShivaLanRover/T PLUS" }, { 0x0580, "McAfeeNetShield" }, { 0x05B8, "NLM to workstation communication (Revelation Software)" }, { 0x05BA, "CompatibleSystemsRouters" }, { 0x05BE, "CheyenneHierarchicalStorageManager" }, { 0x0606, "JCWatermarkImaging" }, { 0x060c, "AXISNetworkPrinter" }, { 0x0610, "AdaptecSCSIManagement" }, { 0x0621, "IBM AntiVirus" }, { 0x0640, "Windows95 RemoteRegistryService" }, { 0x064e, "MicrosoftIIS" }, { 0x067b, "Microsoft Win95/98 File and Print Sharing for NetWare" }, { 0x067c, "Microsoft Win95/98 File and Print Sharing for NetWare" }, { 0x076C, "Xerox" }, { 0x079b, "ShivaLanRover/E 115" }, { 0x079c, "ShivaLanRover/T 115" }, { 0x07B4, "CubixWorldDesk" }, { 0x07c2, "Quarterdeck IWare Connect V2.x NLM" }, { 0x07c1, "Quarterdeck IWare Connect V3.x NLM" }, { 0x0810, "ELAN License Server Demo" }, { 0x0824, "ShivaLanRoverAccessSwitch/E" }, { 0x086a, "ISSC Collector" }, { 0x087f, "ISSC DAS AgentAIX" }, { 0x0880, "Intel Netport PRO" }, { 0x0881, "Intel Netport PRO" }, { 0x0b29, "SiteLock" }, { 0x0c29, "SiteLockApplications" }, { 0x0c2c, "LicensingServer" }, { 0x2101, "PerformanceTechnologyInstantInternet" }, { 0x2380, "LAI SiteLock" }, { 0x238c, "MeetingMaker" }, { 0x4808, "SiteLockServer/SiteLockMetering" }, { 0x5555, "SiteLockUser" }, { 0x6312, "Tapeware" }, { 0x6f00, "RabbitGateway" }, { 0x7703, "MODEM" }, { 0x8002, "NetPortPrinters" }, { 0x8008, 
"WordPerfectNetworkVersion" }, { 0x85BE, "Cisco EIGRP" }, { 0x8888, "WordPerfectNetworkVersion/QuickNetworkManagement" }, { 0x9000, "McAfeeNetShield" }, { 0x9604, "CSA-NT_MON" }, { 0xb6a8, "OceanIsleReachoutRemoteControl" }, { 0xf11f, "SiteLockMetering" }, { 0xf1ff, "SiteLock" }, { 0xf503, "Microsoft SQL Server" }, { 0xF905, "IBM TimeAndPlace" }, { 0xfbfb, "TopCallIII FaxServer" }, { 0xffff, "AnyService/Wildcard" }, { 0, (char *)0 } }; static void init_ipxsaparray(netdissect_options *ndo) { register int i; register struct hnamemem *table; for (i = 0; ipxsap_db[i].s != NULL; i++) { int j = htons(ipxsap_db[i].v) & (HASHNAMESIZE-1); table = &ipxsaptable[j]; while (table->name) table = table->nxt; table->name = ipxsap_db[i].s; table->addr = htons(ipxsap_db[i].v); table->nxt = newhnamemem(ndo); } } /* * Initialize the address to name translation machinery. We map all * non-local IP addresses to numeric addresses if ndo->ndo_fflag is true * (i.e., to prevent blocking on the nameserver). localnet is the IP address * of the local network. mask is its subnet mask. */ void init_addrtoname(netdissect_options *ndo, uint32_t localnet, uint32_t mask) { if (ndo->ndo_fflag) { f_localnet = localnet; f_netmask = mask; } if (ndo->ndo_nflag) /* * Simplest way to suppress names. 
*/ return; init_etherarray(ndo); init_servarray(ndo); init_eprotoarray(ndo); init_protoidarray(ndo); init_ipxsaparray(ndo); } const char * dnaddr_string(netdissect_options *ndo, u_short dnaddr) { register struct hnamemem *tp; for (tp = &dnaddrtable[dnaddr & (HASHNAMESIZE-1)]; tp->nxt != NULL; tp = tp->nxt) if (tp->addr == dnaddr) return (tp->name); tp->addr = dnaddr; tp->nxt = newhnamemem(ndo); if (ndo->ndo_nflag) tp->name = dnnum_string(ndo, dnaddr); else tp->name = dnname_string(ndo, dnaddr); return(tp->name); } /* Return a zero'ed hnamemem struct and cuts down on calloc() overhead */ struct hnamemem * newhnamemem(netdissect_options *ndo) { register struct hnamemem *p; static struct hnamemem *ptr = NULL; static u_int num = 0; if (num <= 0) { num = 64; ptr = (struct hnamemem *)calloc(num, sizeof (*ptr)); if (ptr == NULL) (*ndo->ndo_error)(ndo, "newhnamemem: calloc"); } --num; p = ptr++; return (p); } /* Return a zero'ed h6namemem struct and cuts down on calloc() overhead */ struct h6namemem * newh6namemem(netdissect_options *ndo) { register struct h6namemem *p; static struct h6namemem *ptr = NULL; static u_int num = 0; if (num <= 0) { num = 64; ptr = (struct h6namemem *)calloc(num, sizeof (*ptr)); if (ptr == NULL) (*ndo->ndo_error)(ndo, "newh6namemem: calloc"); } --num; p = ptr++; return (p); } /* Represent TCI part of the 802.1Q 4-octet tag as text. */ const char * ieee8021q_tci_string(const uint16_t tci) { static char buf[128]; snprintf(buf, sizeof(buf), "vlan %u, p %u%s", tci & 0xfff, tci >> 13, (tci & 0x1000) ? ", DEI" : ""); return buf; }
lookup_bytestring(netdissect_options *ndo, register const u_char *bs, const unsigned int nlen) { struct enamemem *tp; register u_int i, j, k; if (nlen >= 6) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = (bs[4] << 8) | bs[5]; } else if (nlen >= 4) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = 0; } else i = j = k = 0; tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->e_nxt) if (tp->e_addr0 == i && tp->e_addr1 == j && tp->e_addr2 == k && memcmp((const char *)bs, (const char *)(tp->e_bs), nlen) == 0) return tp; else tp = tp->e_nxt; tp->e_addr0 = i; tp->e_addr1 = j; tp->e_addr2 = k; tp->e_bs = (u_char *) calloc(1, nlen + 1); if (tp->e_bs == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); memcpy(tp->e_bs, bs, nlen); tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp)); if (tp->e_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); return tp; }
lookup_bytestring(netdissect_options *ndo, register const u_char *bs, const unsigned int nlen) { struct bsnamemem *tp; register u_int i, j, k; if (nlen >= 6) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = (bs[4] << 8) | bs[5]; } else if (nlen >= 4) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = 0; } else i = j = k = 0; tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->bs_nxt) if (nlen == tp->bs_nbytes && tp->bs_addr0 == i && tp->bs_addr1 == j && tp->bs_addr2 == k && memcmp((const char *)bs, (const char *)(tp->bs_bytes), nlen) == 0) return tp; else tp = tp->bs_nxt; tp->bs_addr0 = i; tp->bs_addr1 = j; tp->bs_addr2 = k; tp->bs_bytes = (u_char *) calloc(1, nlen + 1); if (tp->bs_bytes == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); memcpy(tp->bs_bytes, bs, nlen); tp->bs_nbytes = nlen; tp->bs_nxt = (struct bsnamemem *)calloc(1, sizeof(*tp)); if (tp->bs_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); return tp; }
{'added': [(154, '\tsize_t e_namelen;\t\t/* for bytestringtable */'), (160, ''), (161, 'struct bsnamemem {'), (162, '\tu_short bs_addr0;'), (163, '\tu_short bs_addr1;'), (164, '\tu_short bs_addr2;'), (165, '\tconst char *bs_name;'), (166, '\tu_char *bs_bytes;'), (167, '\tunsigned int bs_nbytes;'), (168, '\tstruct bsnamemem *bs_nxt;'), (169, '};'), (170, ''), (171, 'static struct bsnamemem bytestringtable[HASHNAMESIZE];'), (395, 'static inline struct bsnamemem *'), (399, '\tstruct bsnamemem *tp;'), (414, '\twhile (tp->bs_nxt)'), (415, '\t\tif (nlen == tp->bs_nbytes &&'), (416, '\t\t tp->bs_addr0 == i &&'), (417, '\t\t tp->bs_addr1 == j &&'), (418, '\t\t tp->bs_addr2 == k &&'), (419, '\t\t memcmp((const char *)bs, (const char *)(tp->bs_bytes), nlen) == 0)'), (422, '\t\t\ttp = tp->bs_nxt;'), (424, '\ttp->bs_addr0 = i;'), (425, '\ttp->bs_addr1 = j;'), (426, '\ttp->bs_addr2 = k;'), (428, '\ttp->bs_bytes = (u_char *) calloc(1, nlen + 1);'), (429, '\tif (tp->bs_bytes == NULL)'), (432, '\tmemcpy(tp->bs_bytes, bs, nlen);'), (433, '\ttp->bs_nbytes = nlen;'), (434, '\ttp->bs_nxt = (struct bsnamemem *)calloc(1, sizeof(*tp));'), (435, '\tif (tp->bs_nxt == NULL)'), (566, '\tregister struct bsnamemem *tp;'), (570, '\tif (tp->bs_name)'), (571, '\t\treturn (tp->bs_name);'), (583, '\ttp->bs_name = strdup(buf);'), (584, '\tif (tp->bs_name == NULL)'), (587, '\treturn (tp->bs_name);'), (596, '\tregister struct bsnamemem *tp;'), (608, '\tif (tp->bs_name)'), (609, '\t\treturn (tp->bs_name);'), (611, '\ttp->bs_name = cp = (char *)malloc(len*3);'), (612, '\tif (tp->bs_name == NULL)'), (622, '\treturn (tp->bs_name);')], 'deleted': [(159, 'static struct enamemem bytestringtable[HASHNAMESIZE];'), (383, 'static inline struct enamemem *'), (387, '\tstruct enamemem *tp;'), (402, '\twhile (tp->e_nxt)'), (403, '\t\tif (tp->e_addr0 == i &&'), (404, '\t\t tp->e_addr1 == j &&'), (405, '\t\t tp->e_addr2 == k &&'), (406, '\t\t memcmp((const char *)bs, (const char *)(tp->e_bs), nlen) == 0)'), (409, 
'\t\t\ttp = tp->e_nxt;'), (411, '\ttp->e_addr0 = i;'), (412, '\ttp->e_addr1 = j;'), (413, '\ttp->e_addr2 = k;'), (415, '\ttp->e_bs = (u_char *) calloc(1, nlen + 1);'), (416, '\tif (tp->e_bs == NULL)'), (419, '\tmemcpy(tp->e_bs, bs, nlen);'), (420, '\ttp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp));'), (421, '\tif (tp->e_nxt == NULL)'), (552, '\tregister struct enamemem *tp;'), (556, '\tif (tp->e_name)'), (557, '\t\treturn (tp->e_name);'), (569, '\ttp->e_name = strdup(buf);'), (570, '\tif (tp->e_name == NULL)'), (573, '\treturn (tp->e_name);'), (582, '\tregister struct enamemem *tp;'), (594, '\tif (tp->e_name)'), (595, '\t\treturn (tp->e_name);'), (597, '\ttp->e_name = cp = (char *)malloc(len*3);'), (598, '\tif (tp->e_name == NULL)'), (608, '\treturn (tp->e_name);')]}
43
29
1,020
6,970
36
335
10
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12894
CWE-125
84
ksz8851_driver.c
C
ksz8851IrqHandler
/** * @file ksz8851_driver.c * @brief KSZ8851 Ethernet controller * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "core/net.h" #include "drivers/eth/ksz8851_driver.h" #include "debug.h" /** * @brief KSZ8851 driver **/ const NicDriver ksz8851Driver = { NIC_TYPE_ETHERNET, ETH_MTU, ksz8851Init, ksz8851Tick, ksz8851EnableIrq, ksz8851DisableIrq, ksz8851EventHandler, ksz8851SendPacket, ksz8851UpdateMacAddrFilter, NULL, NULL, NULL, TRUE, TRUE, TRUE, FALSE }; /** * @brief KSZ8851 controller initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t ksz8851Init(NetInterface *interface) { Ksz8851Context *context; //Point to the driver context context = (Ksz8851Context *) interface->nicContext; //Debug message TRACE_INFO("Initializing KSZ8851 Ethernet controller...\r\n"); #if (KSZ8851_SPI_SUPPORT == ENABLED) //Initialize SPI interface->spiDriver->init(); #endif //Initialize external interrupt line interface->extIntDriver->init(); //Debug 
message TRACE_DEBUG("CIDER=0x%04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_CIDER)); TRACE_DEBUG("PHY1ILR=0x%04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_PHY1ILR)); TRACE_DEBUG("PHY1IHR=0x%04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_PHY1IHR)); //Check device ID and revision ID if(ksz8851ReadReg(interface, KSZ8851_REG_CIDER) != KSZ8851_REV_A3_ID) { return ERROR_WRONG_IDENTIFIER; } //Dump registers for debugging purpose ksz8851DumpReg(interface); //Initialize driver specific variables context->frameId = 0; //Allocate TX and RX buffers context->txBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); //Failed to allocate memory? if(context->txBuffer == NULL || context->rxBuffer == NULL) { //Clean up side effects memPoolFree(context->txBuffer); memPoolFree(context->rxBuffer); //Report an error return ERROR_OUT_OF_MEMORY; } //Initialize MAC address ksz8851WriteReg(interface, KSZ8851_REG_MARH, htons(interface->macAddr.w[0])); ksz8851WriteReg(interface, KSZ8851_REG_MARM, htons(interface->macAddr.w[1])); ksz8851WriteReg(interface, KSZ8851_REG_MARL, htons(interface->macAddr.w[2])); //Packets shorter than 64 bytes are padded and the CRC is automatically generated ksz8851WriteReg(interface, KSZ8851_REG_TXCR, TXCR_TXFCE | TXCR_TXPE | TXCR_TXCE); //Automatically increment TX data pointer ksz8851WriteReg(interface, KSZ8851_REG_TXFDPR, TXFDPR_TXFPAI); //Configure address filtering ksz8851WriteReg(interface, KSZ8851_REG_RXCR1, RXCR1_RXPAFMA | RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXME | RXCR1_RXUE); //No checksum verification ksz8851WriteReg(interface, KSZ8851_REG_RXCR2, RXCR2_SRDBL2 | RXCR2_IUFFP | RXCR2_RXIUFCEZ); //Enable automatic RXQ frame buffer dequeue ksz8851WriteReg(interface, KSZ8851_REG_RXQCR, RXQCR_RXFCTE | RXQCR_ADRFE); //Automatically increment RX data pointer ksz8851WriteReg(interface, KSZ8851_REG_RXFDPR, RXFDPR_RXFPAI); //Configure receive frame count threshold ksz8851WriteReg(interface, 
KSZ8851_REG_RXFCTR, 1); //Force link in half-duplex if auto-negotiation failed ksz8851ClearBit(interface, KSZ8851_REG_P1CR, P1CR_FORCE_DUPLEX); //Restart auto-negotiation ksz8851SetBit(interface, KSZ8851_REG_P1CR, P1CR_RESTART_AN); //Clear interrupt flags ksz8851SetBit(interface, KSZ8851_REG_ISR, ISR_LCIS | ISR_TXIS | ISR_RXIS | ISR_RXOIS | ISR_TXPSIS | ISR_RXPSIS | ISR_TXSAIS | ISR_RXWFDIS | ISR_RXMPDIS | ISR_LDIS | ISR_EDIS | ISR_SPIBEIS); //Configure interrupts as desired ksz8851SetBit(interface, KSZ8851_REG_IER, IER_LCIE | IER_TXIE | IER_RXIE); //Enable TX operation ksz8851SetBit(interface, KSZ8851_REG_TXCR, TXCR_TXE); //Enable RX operation ksz8851SetBit(interface, KSZ8851_REG_RXCR1, RXCR1_RXE); //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Force the TCP/IP stack to poll the link state at startup interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief KSZ8851 timer handler * @param[in] interface Underlying network interface **/ void ksz8851Tick(NetInterface *interface) { } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void ksz8851EnableIrq(NetInterface *interface) { //Enable interrupts interface->extIntDriver->enableIrq(); } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void ksz8851DisableIrq(NetInterface *interface) { //Disable interrupts interface->extIntDriver->disableIrq(); } /** * @brief KSZ8851 interrupt service routine * @param[in] interface Underlying network interface * @return TRUE if a higher priority task must be woken. 
Else FALSE is returned **/ bool_t ksz8851IrqHandler(NetInterface *interface) { bool_t flag; size_t n; uint16_t ier; uint16_t isr; //This flag will be set if a higher priority task must be woken flag = FALSE; //Save IER register value ier = ksz8851ReadReg(interface, KSZ8851_REG_IER); //Disable interrupts to release the interrupt line ksz8851WriteReg(interface, KSZ8851_REG_IER, 0); //Read interrupt status register isr = ksz8851ReadReg(interface, KSZ8851_REG_ISR); //Link status change? if((isr & ISR_LCIS) != 0) { //Disable LCIE interrupt ier &= ~IER_LCIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((isr & ISR_TXIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_TXIS); //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK; //Check whether the TX FIFO is available for writing if(n >= (ETH_MAX_FRAME_SIZE + 8)) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } } //Packet received? if((isr & ISR_RXIS) != 0) { //Disable RXIE interrupt ier &= ~IER_RXIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Re-enable interrupts once the interrupt has been serviced ksz8851WriteReg(interface, KSZ8851_REG_IER, ier); //A higher priority task must be woken? return flag; } /** * @brief KSZ8851 event handler * @param[in] interface Underlying network interface **/ void ksz8851EventHandler(NetInterface *interface) { uint16_t status; uint_t frameCount; //Read interrupt status register status = ksz8851ReadReg(interface, KSZ8851_REG_ISR); //Check whether the link status has changed? 
if((status & ISR_LCIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_LCIS); //Read PHY status register status = ksz8851ReadReg(interface, KSZ8851_REG_P1SR); //Check link state if((status & P1SR_LINK_GOOD) != 0) { //Get current speed if((status & P1SR_OPERATION_SPEED) != 0) { interface->linkSpeed = NIC_LINK_SPEED_100MBPS; } else { interface->linkSpeed = NIC_LINK_SPEED_10MBPS; } //Determine the new duplex mode if((status & P1SR_OPERATION_DUPLEX) != 0) { interface->duplexMode = NIC_FULL_DUPLEX_MODE; } else { interface->duplexMode = NIC_HALF_DUPLEX_MODE; } //Link is up interface->linkState = TRUE; } else { //Link is down interface->linkState = FALSE; } //Process link state change event nicNotifyLinkChange(interface); } //Check whether a packet has been received? if((status & ISR_RXIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_RXIS); //Get the total number of frames that are pending in the buffer frameCount = MSB(ksz8851ReadReg(interface, KSZ8851_REG_RXFCTR)); //Process all pending packets while(frameCount > 0) { //Read incoming packet ksz8851ReceivePacket(interface); //Decrement frame counter frameCount--; } } //Re-enable LCIE and RXIE interrupts ksz8851SetBit(interface, KSZ8851_REG_IER, IER_LCIE | IER_RXIE); } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t ksz8851SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t n; size_t length; Ksz8851TxHeader header; Ksz8851Context *context; //Point to the driver context context = (Ksz8851Context *) interface->nicContext; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > 
ETH_MAX_FRAME_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK; //Make sure the TX FIFO is available for writing if(n < (length + 8)) { return ERROR_FAILURE; } //Copy user data netBufferRead(context->txBuffer, buffer, offset, length); //Format control word header.controlWord = htole16(TX_CTRL_TXIC | (context->frameId++ & TX_CTRL_TXFID)); //Total number of bytes to be transmitted header.byteCount = htole16(length); //Enable TXQ write access ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA); //Write TX packet header ksz8851WriteFifo(interface, (uint8_t *) &header, sizeof(Ksz8851TxHeader)); //Write data ksz8851WriteFifo(interface, context->txBuffer, length); //End TXQ write access ksz8851ClearBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA); //Start transmission ksz8851SetBit(interface, KSZ8851_REG_TXQCR, TXQCR_METFE); //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK; //Check whether the TX FIFO is available for writing if(n >= (ETH_MAX_FRAME_SIZE + 8)) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Successful processing return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t ksz8851ReceivePacket(NetInterface *interface) { size_t n; uint16_t status; Ksz8851Context *context; NetRxAncillary ancillary; //Point to the driver context context = (Ksz8851Context *) interface->nicContext; //Read received frame status from RXFHSR status = ksz8851ReadReg(interface, KSZ8851_REG_RXFHSR); //Make sure the frame is valid if((status & RXFHSR_RXFV) != 0) { //Check error flags if((status & (RXFHSR_RXMR | RXFHSR_RXFTL | RXFHSR_RXRF | RXFHSR_RXCE)) == 0) { //Read received frame byte size 
from RXFHBCR n = ksz8851ReadReg(interface, KSZ8851_REG_RXFHBCR) & RXFHBCR_RXBC_MASK; //Ensure the frame size is acceptable if(n > 0 && n <= ETH_MAX_FRAME_SIZE) { //Reset QMU RXQ frame pointer to zero ksz8851WriteReg(interface, KSZ8851_REG_RXFDPR, RXFDPR_RXFPAI); //Enable RXQ read access ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA); //Read data ksz8851ReadFifo(interface, context->rxBuffer, n); //End RXQ read access ksz8851ClearBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, context->rxBuffer, n, &ancillary); //Valid packet received return NO_ERROR; } } } //Release the current error frame from RXQ ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_RRXEF); //Report an error return ERROR_INVALID_PACKET; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t ksz8851UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint16_t hashTable[4]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? 
if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = ksz8851CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = (crc >> 26) & 0x3F; //Update hash table contents hashTable[k / 16] |= (1 << (k % 16)); } } //Write the hash table to the KSZ8851 controller ksz8851WriteReg(interface, KSZ8851_REG_MAHTR0, hashTable[0]); ksz8851WriteReg(interface, KSZ8851_REG_MAHTR1, hashTable[1]); ksz8851WriteReg(interface, KSZ8851_REG_MAHTR2, hashTable[2]); ksz8851WriteReg(interface, KSZ8851_REG_MAHTR3, hashTable[3]); //Debug message TRACE_DEBUG(" MAHTR0 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR0)); TRACE_DEBUG(" MAHTR1 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR1)); TRACE_DEBUG(" MAHTR2 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR2)); TRACE_DEBUG(" MAHTR3 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR3)); //Successful processing return NO_ERROR; } /** * @brief Write KSZ8851 register * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] data Register value **/ void ksz8851WriteReg(NetInterface *interface, uint8_t address, uint16_t data) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint8_t command; //Form the write command if((address & 0x02) != 0) { command = KSZ8851_CMD_WR_REG | KSZ8851_CMD_B3 | KSZ8851_CMD_B2; } else { command = KSZ8851_CMD_WR_REG | KSZ8851_CMD_B1 | KSZ8851_CMD_B0; } //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(command | (address >> 6)); interface->spiDriver->transfer(address << 2); //Data phase interface->spiDriver->transfer(LSB(data)); interface->spiDriver->transfer(MSB(data)); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); #else //Set register address if((address & 0x02) != 0) { KSZ8851_CMD_REG = KSZ8851_CMD_B3 | KSZ8851_CMD_B2 | address; } else { KSZ8851_CMD_REG = KSZ8851_CMD_B1 | 
KSZ8851_CMD_B0 | address; } //Write register value KSZ8851_DATA_REG = data; #endif } /** * @brief Read KSZ8851 register * @param[in] interface Underlying network interface * @param[in] address Register address * @return Register value **/ uint16_t ksz8851ReadReg(NetInterface *interface, uint8_t address) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint8_t command; uint16_t data; //Form the read command if((address & 0x02) != 0) { command = KSZ8851_CMD_RD_REG | KSZ8851_CMD_B3 | KSZ8851_CMD_B2; } else { command = KSZ8851_CMD_RD_REG | KSZ8851_CMD_B1 | KSZ8851_CMD_B0; } //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(command | (address >> 6)); interface->spiDriver->transfer(address << 2); //Data phase (lower 8 bits) data = interface->spiDriver->transfer(0x00); //Data phase (upper 8 bits) data |= interface->spiDriver->transfer(0x00) << 8; //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); //Return register value return data; #else //Set register address if((address & 0x02) != 0) { KSZ8851_CMD_REG = KSZ8851_CMD_B3 | KSZ8851_CMD_B2 | address; } else { KSZ8851_CMD_REG = KSZ8851_CMD_B1 | KSZ8851_CMD_B0 | address; } //Return register value return KSZ8851_DATA_REG; #endif } /** * @brief Write TX FIFO * @param[in] interface Underlying network interface * @param[in] data Pointer to the data being written * @param[in] length Number of data to write **/ void ksz8851WriteFifo(NetInterface *interface, const uint8_t *data, size_t length) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint_t i; //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(KSZ8851_CMD_WR_FIFO); //Data phase for(i = 0; i < length; i++) { interface->spiDriver->transfer(data[i]); } //Maintain alignment to 4-byte boundaries for(; i % 4; i++) { interface->spiDriver->transfer(0x00); } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); #else uint_t i; //Data 
phase for(i = 0; i < length; i+=2) { KSZ8851_DATA_REG = data[i] | data[i + 1] << 8; } //Maintain alignment to 4-byte boundaries for(; i % 4; i+=2) { KSZ8851_DATA_REG = 0x0000; } #endif } /** * @brief Read RX FIFO * @param[in] interface Underlying network interface * @param[in] data Buffer where to store the incoming data * @param[in] length Number of data to read **/ void ksz8851ReadFifo(NetInterface *interface, uint8_t *data, size_t length) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint_t i; //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(KSZ8851_CMD_RD_FIFO); //The first 4 bytes are dummy data and must be discarded for(i = 0; i < 4; i++) { interface->spiDriver->transfer(0x00); } //Ignore RX packet header for(i = 0; i < 4; i++) { interface->spiDriver->transfer(0x00); } //Data phase for(i = 0; i < length; i++) { data[i] = interface->spiDriver->transfer(0x00); } //Maintain alignment to 4-byte boundaries for(; i % 4; i++) { interface->spiDriver->transfer(0x00); } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); #else uint_t i; uint16_t temp; //The first 2 bytes are dummy data and must be discarded temp = KSZ8851_DATA_REG; //Ignore RX packet header temp = KSZ8851_DATA_REG; temp = KSZ8851_DATA_REG; //Data phase for(i = 0; i < length; i+=2) { temp = KSZ8851_DATA_REG; data [i] = temp & 0xFF; data [i + 1] = (temp >> 8) & 0xFF; } //Maintain alignment to 4-byte boundaries for(; i % 4; i+=2) { temp = KSZ8851_DATA_REG; } #endif } /** * @brief Set bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to set in the target register **/ void ksz8851SetBit(NetInterface *interface, uint8_t address, uint16_t mask) { uint16_t value; //Read current register value value = ksz8851ReadReg(interface, address); //Set specified bits ksz8851WriteReg(interface, address, value | mask); } /** * @brief Clear bit field * @param[in] interface 
Underlying network interface * @param[in] address Register address * @param[in] mask Bits to clear in the target register **/ void ksz8851ClearBit(NetInterface *interface, uint8_t address, uint16_t mask) { uint16_t value; //Read current register value value = ksz8851ReadReg(interface, address); //Clear specified bits ksz8851WriteReg(interface, address, value & ~mask); } /** * @brief CRC calculation * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t ksz8851CalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //The message is processed bit by bit for(j = 0; j < 8; j++) { //Update CRC value if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0) { crc = (crc << 1) ^ 0x04C11DB7; } else { crc = crc << 1; } } } //Return CRC value return crc; } /** * @brief Dump registers for debugging purpose * @param[in] interface Underlying network interface **/ void ksz8851DumpReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint_t i; uint_t j; uint_t address; //Loop through register addresses for(i = 0; i < 256; i += 16) { //Display register address TRACE_DEBUG("%02" PRIu8 ": ", i); //Display 8 registers at a time for(j = 0; j < 16; j += 2) { //Format register address address = i + j; //Display register contents TRACE_DEBUG("0x%04" PRIX16 " ", ksz8851ReadReg(interface, address)); } //Jump to the following line TRACE_DEBUG("\r\n"); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif }
/** * @file ksz8851_driver.c * @brief KSZ8851 Ethernet controller * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "core/net.h" #include "drivers/eth/ksz8851_driver.h" #include "debug.h" /** * @brief KSZ8851 driver **/ const NicDriver ksz8851Driver = { NIC_TYPE_ETHERNET, ETH_MTU, ksz8851Init, ksz8851Tick, ksz8851EnableIrq, ksz8851DisableIrq, ksz8851EventHandler, ksz8851SendPacket, ksz8851UpdateMacAddrFilter, NULL, NULL, NULL, TRUE, TRUE, TRUE, FALSE }; /** * @brief KSZ8851 controller initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t ksz8851Init(NetInterface *interface) { Ksz8851Context *context; //Point to the driver context context = (Ksz8851Context *) interface->nicContext; //Debug message TRACE_INFO("Initializing KSZ8851 Ethernet controller...\r\n"); #if (KSZ8851_SPI_SUPPORT == ENABLED) //Initialize SPI interface->spiDriver->init(); #endif //Initialize external interrupt line interface->extIntDriver->init(); //Debug 
message TRACE_DEBUG("CIDER=0x%04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_CIDER)); TRACE_DEBUG("PHY1ILR=0x%04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_PHY1ILR)); TRACE_DEBUG("PHY1IHR=0x%04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_PHY1IHR)); //Check device ID and revision ID if(ksz8851ReadReg(interface, KSZ8851_CIDER) != (KSZ8851_CIDER_FAMILY_ID_DEFAULT | KSZ8851_CIDER_CHIP_ID_DEFAULT | KSZ8851_CIDER_REV_ID_A3)) { return ERROR_WRONG_IDENTIFIER; } //Dump registers for debugging purpose ksz8851DumpReg(interface); //Initialize driver specific variables context->frameId = 0; //Allocate TX and RX buffers context->txBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); //Failed to allocate memory? if(context->txBuffer == NULL || context->rxBuffer == NULL) { //Clean up side effects memPoolFree(context->txBuffer); memPoolFree(context->rxBuffer); //Report an error return ERROR_OUT_OF_MEMORY; } //Initialize MAC address ksz8851WriteReg(interface, KSZ8851_MARH, htons(interface->macAddr.w[0])); ksz8851WriteReg(interface, KSZ8851_MARM, htons(interface->macAddr.w[1])); ksz8851WriteReg(interface, KSZ8851_MARL, htons(interface->macAddr.w[2])); //Packets shorter than 64 bytes are padded and the CRC is automatically //generated ksz8851WriteReg(interface, KSZ8851_TXCR, KSZ8851_TXCR_TXFCE | KSZ8851_TXCR_TXPE | KSZ8851_TXCR_TXCE); //Automatically increment TX data pointer ksz8851WriteReg(interface, KSZ8851_TXFDPR, KSZ8851_TXFDPR_TXFPAI); //Configure address filtering ksz8851WriteReg(interface, KSZ8851_RXCR1, KSZ8851_RXCR1_RXPAFMA | KSZ8851_RXCR1_RXFCE | KSZ8851_RXCR1_RXBE | KSZ8851_RXCR1_RXME | KSZ8851_RXCR1_RXUE); //No checksum verification ksz8851WriteReg(interface, KSZ8851_RXCR2, KSZ8851_RXCR2_SRDBL_SINGLE_FRAME | KSZ8851_RXCR2_IUFFP | KSZ8851_RXCR2_RXIUFCEZ); //Enable automatic RXQ frame buffer dequeue ksz8851WriteReg(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_RXFCTE | KSZ8851_RXQCR_ADRFE); //Automatically increment 
RX data pointer ksz8851WriteReg(interface, KSZ8851_RXFDPR, KSZ8851_RXFDPR_RXFPAI); //Configure receive frame count threshold ksz8851WriteReg(interface, KSZ8851_RXFCTR, 1); //Force link in half-duplex if auto-negotiation failed ksz8851ClearBit(interface, KSZ8851_P1CR, KSZ8851_P1CR_FORCE_DUPLEX); //Restart auto-negotiation ksz8851SetBit(interface, KSZ8851_P1CR, KSZ8851_P1CR_RESTART_AN); //Clear interrupt flags ksz8851SetBit(interface, KSZ8851_ISR, KSZ8851_ISR_LCIS | KSZ8851_ISR_TXIS | KSZ8851_ISR_RXIS | KSZ8851_ISR_RXOIS | KSZ8851_ISR_TXPSIS | KSZ8851_ISR_RXPSIS | KSZ8851_ISR_TXSAIS | KSZ8851_ISR_RXWFDIS | KSZ8851_ISR_RXMPDIS | KSZ8851_ISR_LDIS | KSZ8851_ISR_EDIS | KSZ8851_ISR_SPIBEIS); //Configure interrupts as desired ksz8851SetBit(interface, KSZ8851_IER, KSZ8851_IER_LCIE | KSZ8851_IER_TXIE | KSZ8851_IER_RXIE); //Enable TX operation ksz8851SetBit(interface, KSZ8851_TXCR, KSZ8851_TXCR_TXE); //Enable RX operation ksz8851SetBit(interface, KSZ8851_RXCR1, KSZ8851_RXCR1_RXE); //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Force the TCP/IP stack to poll the link state at startup interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief KSZ8851 timer handler * @param[in] interface Underlying network interface **/ void ksz8851Tick(NetInterface *interface) { } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void ksz8851EnableIrq(NetInterface *interface) { //Enable interrupts interface->extIntDriver->enableIrq(); } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void ksz8851DisableIrq(NetInterface *interface) { //Disable interrupts interface->extIntDriver->disableIrq(); } /** * @brief KSZ8851 interrupt service routine * @param[in] interface Underlying network interface * @return TRUE if a higher priority task must be woken. 
Else FALSE is returned **/ bool_t ksz8851IrqHandler(NetInterface *interface) { bool_t flag; size_t n; uint16_t ier; uint16_t isr; //This flag will be set if a higher priority task must be woken flag = FALSE; //Save IER register value ier = ksz8851ReadReg(interface, KSZ8851_IER); //Disable interrupts to release the interrupt line ksz8851WriteReg(interface, KSZ8851_IER, 0); //Read interrupt status register isr = ksz8851ReadReg(interface, KSZ8851_ISR); //Link status change? if((isr & KSZ8851_ISR_LCIS) != 0) { //Disable LCIE interrupt ier &= ~KSZ8851_IER_LCIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((isr & KSZ8851_ISR_TXIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_TXIS); //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA; //Check whether the TX FIFO is available for writing if(n >= (ETH_MAX_FRAME_SIZE + 8)) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } } //Packet received? if((isr & KSZ8851_ISR_RXIS) != 0) { //Disable RXIE interrupt ier &= ~KSZ8851_IER_RXIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Re-enable interrupts once the interrupt has been serviced ksz8851WriteReg(interface, KSZ8851_IER, ier); //A higher priority task must be woken? return flag; } /** * @brief KSZ8851 event handler * @param[in] interface Underlying network interface **/ void ksz8851EventHandler(NetInterface *interface) { uint16_t status; uint_t frameCount; //Read interrupt status register status = ksz8851ReadReg(interface, KSZ8851_ISR); //Check whether the link status has changed? 
if((status & KSZ8851_ISR_LCIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_LCIS); //Read PHY status register status = ksz8851ReadReg(interface, KSZ8851_P1SR); //Check link state if((status & KSZ8851_P1SR_LINK_GOOD) != 0) { //Get current speed if((status & KSZ8851_P1SR_OPERATION_SPEED) != 0) { interface->linkSpeed = NIC_LINK_SPEED_100MBPS; } else { interface->linkSpeed = NIC_LINK_SPEED_10MBPS; } //Determine the new duplex mode if((status & KSZ8851_P1SR_OPERATION_DUPLEX) != 0) { interface->duplexMode = NIC_FULL_DUPLEX_MODE; } else { interface->duplexMode = NIC_HALF_DUPLEX_MODE; } //Link is up interface->linkState = TRUE; } else { //Link is down interface->linkState = FALSE; } //Process link state change event nicNotifyLinkChange(interface); } //Check whether a packet has been received? if((status & KSZ8851_ISR_RXIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_RXIS); //Get the total number of frames that are pending in the buffer frameCount = MSB(ksz8851ReadReg(interface, KSZ8851_RXFCTR)); //Process all pending packets while(frameCount > 0) { //Read incoming packet ksz8851ReceivePacket(interface); //Decrement frame counter frameCount--; } } //Re-enable LCIE and RXIE interrupts ksz8851SetBit(interface, KSZ8851_IER, KSZ8851_IER_LCIE | KSZ8851_IER_RXIE); } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t ksz8851SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t n; size_t length; Ksz8851TxHeader header; Ksz8851Context *context; //Point to the driver context context = (Ksz8851Context *) interface->nicContext; //Retrieve the length of the packet length = 
netBufferGetLength(buffer) - offset; //Check the frame length if(length > ETH_MAX_FRAME_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA; //Make sure the TX FIFO is available for writing if(n < (length + 8)) { return ERROR_FAILURE; } //Copy user data netBufferRead(context->txBuffer, buffer, offset, length); //Format control word header.controlWord = htole16(KSZ8851_TX_CTRL_TXIC | (context->frameId++ & KSZ8851_TX_CTRL_TXFID)); //Total number of bytes to be transmitted header.byteCount = htole16(length); //Enable TXQ write access ksz8851SetBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_SDA); //Write TX packet header ksz8851WriteFifo(interface, (uint8_t *) &header, sizeof(Ksz8851TxHeader)); //Write data ksz8851WriteFifo(interface, context->txBuffer, length); //End TXQ write access ksz8851ClearBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_SDA); //Start transmission ksz8851SetBit(interface, KSZ8851_TXQCR, KSZ8851_TXQCR_METFE); //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA; //Check whether the TX FIFO is available for writing if(n >= (ETH_MAX_FRAME_SIZE + 8)) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Successful processing return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t ksz8851ReceivePacket(NetInterface *interface) { size_t n; uint16_t status; Ksz8851Context *context; NetRxAncillary ancillary; //Point to the driver context context = (Ksz8851Context *) interface->nicContext; //Read received frame status from RXFHSR status = ksz8851ReadReg(interface, KSZ8851_RXFHSR); //Make sure the frame is valid if((status & KSZ8851_RXFHSR_RXFV) != 0) { //Check error flags if((status 
& (KSZ8851_RXFHSR_RXMR | KSZ8851_RXFHSR_RXFTL | KSZ8851_RXFHSR_RXRF | KSZ8851_RXFHSR_RXCE)) == 0) { //Read received frame byte size from RXFHBCR n = ksz8851ReadReg(interface, KSZ8851_RXFHBCR) & KSZ8851_RXFHBCR_RXBC; //Ensure the frame size is acceptable if(n > 0 && n <= ETH_MAX_FRAME_SIZE) { //Reset QMU RXQ frame pointer to zero ksz8851WriteReg(interface, KSZ8851_RXFDPR, KSZ8851_RXFDPR_RXFPAI); //Enable RXQ read access ksz8851SetBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_SDA); //Read data ksz8851ReadFifo(interface, context->rxBuffer, n); //End RXQ read access ksz8851ClearBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_SDA); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, context->rxBuffer, n, &ancillary); //Valid packet received return NO_ERROR; } } } //Release the current error frame from RXQ ksz8851SetBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_RRXEF); //Report an error return ERROR_INVALID_PACKET; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t ksz8851UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint16_t hashTable[4]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? 
if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = ksz8851CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = (crc >> 26) & 0x3F; //Update hash table contents hashTable[k / 16] |= (1 << (k % 16)); } } //Write the hash table to the KSZ8851 controller ksz8851WriteReg(interface, KSZ8851_MAHTR0, hashTable[0]); ksz8851WriteReg(interface, KSZ8851_MAHTR1, hashTable[1]); ksz8851WriteReg(interface, KSZ8851_MAHTR2, hashTable[2]); ksz8851WriteReg(interface, KSZ8851_MAHTR3, hashTable[3]); //Debug message TRACE_DEBUG(" MAHTR0 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_MAHTR0)); TRACE_DEBUG(" MAHTR1 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_MAHTR1)); TRACE_DEBUG(" MAHTR2 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_MAHTR2)); TRACE_DEBUG(" MAHTR3 = %04" PRIX16 "\r\n", ksz8851ReadReg(interface, KSZ8851_MAHTR3)); //Successful processing return NO_ERROR; } /** * @brief Write KSZ8851 register * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] data Register value **/ void ksz8851WriteReg(NetInterface *interface, uint8_t address, uint16_t data) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint8_t command; //Form the write command if((address & 0x02) != 0) { command = KSZ8851_CMD_WR_REG | KSZ8851_CMD_B3 | KSZ8851_CMD_B2; } else { command = KSZ8851_CMD_WR_REG | KSZ8851_CMD_B1 | KSZ8851_CMD_B0; } //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(command | (address >> 6)); interface->spiDriver->transfer(address << 2); //Data phase interface->spiDriver->transfer(LSB(data)); interface->spiDriver->transfer(MSB(data)); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); #else //Set register address if((address & 0x02) != 0) { KSZ8851_CMD_REG = KSZ8851_CMD_B3 | KSZ8851_CMD_B2 | address; } else { KSZ8851_CMD_REG = KSZ8851_CMD_B1 | KSZ8851_CMD_B0 | address; } //Write 
register value KSZ8851_DATA_REG = data; #endif } /** * @brief Read KSZ8851 register * @param[in] interface Underlying network interface * @param[in] address Register address * @return Register value **/ uint16_t ksz8851ReadReg(NetInterface *interface, uint8_t address) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint8_t command; uint16_t data; //Form the read command if((address & 0x02) != 0) { command = KSZ8851_CMD_RD_REG | KSZ8851_CMD_B3 | KSZ8851_CMD_B2; } else { command = KSZ8851_CMD_RD_REG | KSZ8851_CMD_B1 | KSZ8851_CMD_B0; } //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(command | (address >> 6)); interface->spiDriver->transfer(address << 2); //Data phase (lower 8 bits) data = interface->spiDriver->transfer(0x00); //Data phase (upper 8 bits) data |= interface->spiDriver->transfer(0x00) << 8; //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); //Return register value return data; #else //Set register address if((address & 0x02) != 0) { KSZ8851_CMD_REG = KSZ8851_CMD_B3 | KSZ8851_CMD_B2 | address; } else { KSZ8851_CMD_REG = KSZ8851_CMD_B1 | KSZ8851_CMD_B0 | address; } //Return register value return KSZ8851_DATA_REG; #endif } /** * @brief Write TX FIFO * @param[in] interface Underlying network interface * @param[in] data Pointer to the data being written * @param[in] length Number of data to write **/ void ksz8851WriteFifo(NetInterface *interface, const uint8_t *data, size_t length) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint_t i; //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(KSZ8851_CMD_WR_FIFO); //Data phase for(i = 0; i < length; i++) { interface->spiDriver->transfer(data[i]); } //Maintain alignment to 4-byte boundaries for(; i % 4; i++) { interface->spiDriver->transfer(0x00); } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); #else uint_t i; //Data phase for(i = 0; i < length; i+=2) { 
KSZ8851_DATA_REG = data[i] | data[i + 1] << 8; } //Maintain alignment to 4-byte boundaries for(; i % 4; i+=2) { KSZ8851_DATA_REG = 0x0000; } #endif } /** * @brief Read RX FIFO * @param[in] interface Underlying network interface * @param[in] data Buffer where to store the incoming data * @param[in] length Number of data to read **/ void ksz8851ReadFifo(NetInterface *interface, uint8_t *data, size_t length) { #if (KSZ8851_SPI_SUPPORT == ENABLED) uint_t i; //Pull the CS pin low interface->spiDriver->assertCs(); //Command phase interface->spiDriver->transfer(KSZ8851_CMD_RD_FIFO); //The first 4 bytes are dummy data and must be discarded for(i = 0; i < 4; i++) { interface->spiDriver->transfer(0x00); } //Ignore RX packet header for(i = 0; i < 4; i++) { interface->spiDriver->transfer(0x00); } //Data phase for(i = 0; i < length; i++) { data[i] = interface->spiDriver->transfer(0x00); } //Maintain alignment to 4-byte boundaries for(; i % 4; i++) { interface->spiDriver->transfer(0x00); } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); #else uint_t i; uint16_t temp; //The first 2 bytes are dummy data and must be discarded temp = KSZ8851_DATA_REG; //Ignore RX packet header temp = KSZ8851_DATA_REG; temp = KSZ8851_DATA_REG; //Data phase for(i = 0; i < length; i+=2) { temp = KSZ8851_DATA_REG; data [i] = temp & 0xFF; data [i + 1] = (temp >> 8) & 0xFF; } //Maintain alignment to 4-byte boundaries for(; i % 4; i+=2) { temp = KSZ8851_DATA_REG; } #endif } /** * @brief Set bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to set in the target register **/ void ksz8851SetBit(NetInterface *interface, uint8_t address, uint16_t mask) { uint16_t value; //Read current register value value = ksz8851ReadReg(interface, address); //Set specified bits ksz8851WriteReg(interface, address, value | mask); } /** * @brief Clear bit field * @param[in] interface Underlying network interface * 
@param[in] address Register address * @param[in] mask Bits to clear in the target register **/ void ksz8851ClearBit(NetInterface *interface, uint8_t address, uint16_t mask) { uint16_t value; //Read current register value value = ksz8851ReadReg(interface, address); //Clear specified bits ksz8851WriteReg(interface, address, value & ~mask); } /** * @brief CRC calculation * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t ksz8851CalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //The message is processed bit by bit for(j = 0; j < 8; j++) { //Update CRC value if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0) { crc = (crc << 1) ^ 0x04C11DB7; } else { crc = crc << 1; } } } //Return CRC value return crc; } /** * @brief Dump registers for debugging purpose * @param[in] interface Underlying network interface **/ void ksz8851DumpReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint_t i; uint_t j; uint_t address; //Loop through register addresses for(i = 0; i < 256; i += 16) { //Display register address TRACE_DEBUG("%02" PRIu8 ": ", i); //Display 8 registers at a time for(j = 0; j < 16; j += 2) { //Format register address address = i + j; //Display register contents TRACE_DEBUG("0x%04" PRIX16 " ", ksz8851ReadReg(interface, address)); } //Jump to the following line TRACE_DEBUG("\r\n"); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif }
bool_t ksz8851IrqHandler(NetInterface *interface) { bool_t flag; size_t n; uint16_t ier; uint16_t isr; //This flag will be set if a higher priority task must be woken flag = FALSE; //Save IER register value ier = ksz8851ReadReg(interface, KSZ8851_REG_IER); //Disable interrupts to release the interrupt line ksz8851WriteReg(interface, KSZ8851_REG_IER, 0); //Read interrupt status register isr = ksz8851ReadReg(interface, KSZ8851_REG_ISR); //Link status change? if((isr & ISR_LCIS) != 0) { //Disable LCIE interrupt ier &= ~IER_LCIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((isr & ISR_TXIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_TXIS); //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK; //Check whether the TX FIFO is available for writing if(n >= (ETH_MAX_FRAME_SIZE + 8)) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } } //Packet received? if((isr & ISR_RXIS) != 0) { //Disable RXIE interrupt ier &= ~IER_RXIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Re-enable interrupts once the interrupt has been serviced ksz8851WriteReg(interface, KSZ8851_REG_IER, ier); //A higher priority task must be woken? return flag; }
bool_t ksz8851IrqHandler(NetInterface *interface) { bool_t flag; size_t n; uint16_t ier; uint16_t isr; //This flag will be set if a higher priority task must be woken flag = FALSE; //Save IER register value ier = ksz8851ReadReg(interface, KSZ8851_IER); //Disable interrupts to release the interrupt line ksz8851WriteReg(interface, KSZ8851_IER, 0); //Read interrupt status register isr = ksz8851ReadReg(interface, KSZ8851_ISR); //Link status change? if((isr & KSZ8851_ISR_LCIS) != 0) { //Disable LCIE interrupt ier &= ~KSZ8851_IER_LCIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((isr & KSZ8851_ISR_TXIS) != 0) { //Clear interrupt flag ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_TXIS); //Get the amount of free memory available in the TX FIFO n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA; //Check whether the TX FIFO is available for writing if(n >= (ETH_MAX_FRAME_SIZE + 8)) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } } //Packet received? if((isr & KSZ8851_ISR_RXIS) != 0) { //Disable RXIE interrupt ier &= ~KSZ8851_IER_RXIE; //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Re-enable interrupts once the interrupt has been serviced ksz8851WriteReg(interface, KSZ8851_IER, ier); //A higher priority task must be woken? return flag; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (90, ' TRACE_DEBUG("CIDER=0x%04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_CIDER));'), (91, ' TRACE_DEBUG("PHY1ILR=0x%04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_PHY1ILR));'), (92, ' TRACE_DEBUG("PHY1IHR=0x%04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_PHY1IHR));'), (95, ' if(ksz8851ReadReg(interface, KSZ8851_CIDER) != (KSZ8851_CIDER_FAMILY_ID_DEFAULT |'), (96, ' KSZ8851_CIDER_CHIP_ID_DEFAULT | KSZ8851_CIDER_REV_ID_A3))'), (123, ' ksz8851WriteReg(interface, KSZ8851_MARH, htons(interface->macAddr.w[0]));'), (124, ' ksz8851WriteReg(interface, KSZ8851_MARM, htons(interface->macAddr.w[1]));'), (125, ' ksz8851WriteReg(interface, KSZ8851_MARL, htons(interface->macAddr.w[2]));'), (126, ''), (127, ' //Packets shorter than 64 bytes are padded and the CRC is automatically'), (128, ' //generated'), (129, ' ksz8851WriteReg(interface, KSZ8851_TXCR, KSZ8851_TXCR_TXFCE |'), (130, ' KSZ8851_TXCR_TXPE | KSZ8851_TXCR_TXCE);'), (133, ' ksz8851WriteReg(interface, KSZ8851_TXFDPR, KSZ8851_TXFDPR_TXFPAI);'), (136, ' ksz8851WriteReg(interface, KSZ8851_RXCR1, KSZ8851_RXCR1_RXPAFMA |'), (137, ' KSZ8851_RXCR1_RXFCE | KSZ8851_RXCR1_RXBE | KSZ8851_RXCR1_RXME |'), (138, ' KSZ8851_RXCR1_RXUE);'), (141, ' ksz8851WriteReg(interface, KSZ8851_RXCR2, KSZ8851_RXCR2_SRDBL_SINGLE_FRAME |'), (142, ' KSZ8851_RXCR2_IUFFP | KSZ8851_RXCR2_RXIUFCEZ);'), (145, ' ksz8851WriteReg(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_RXFCTE |'), (146, ' KSZ8851_RXQCR_ADRFE);'), (147, ''), (149, ' ksz8851WriteReg(interface, KSZ8851_RXFDPR, KSZ8851_RXFDPR_RXFPAI);'), (151, ' ksz8851WriteReg(interface, KSZ8851_RXFCTR, 1);'), (154, ' ksz8851ClearBit(interface, KSZ8851_P1CR, KSZ8851_P1CR_FORCE_DUPLEX);'), (156, ' ksz8851SetBit(interface, KSZ8851_P1CR, KSZ8851_P1CR_RESTART_AN);'), (159, ' ksz8851SetBit(interface, KSZ8851_ISR, KSZ8851_ISR_LCIS |'), (160, ' KSZ8851_ISR_TXIS | KSZ8851_ISR_RXIS | 
KSZ8851_ISR_RXOIS |'), (161, ' KSZ8851_ISR_TXPSIS | KSZ8851_ISR_RXPSIS | KSZ8851_ISR_TXSAIS |'), (162, ' KSZ8851_ISR_RXWFDIS | KSZ8851_ISR_RXMPDIS | KSZ8851_ISR_LDIS |'), (163, ' KSZ8851_ISR_EDIS | KSZ8851_ISR_SPIBEIS);'), (166, ' ksz8851SetBit(interface, KSZ8851_IER, KSZ8851_IER_LCIE |'), (167, ' KSZ8851_IER_TXIE | KSZ8851_IER_RXIE);'), (170, ' ksz8851SetBit(interface, KSZ8851_TXCR, KSZ8851_TXCR_TXE);'), (172, ' ksz8851SetBit(interface, KSZ8851_RXCR1, KSZ8851_RXCR1_RXE);'), (238, ' ier = ksz8851ReadReg(interface, KSZ8851_IER);'), (240, ' ksz8851WriteReg(interface, KSZ8851_IER, 0);'), (243, ' isr = ksz8851ReadReg(interface, KSZ8851_ISR);'), (246, ' if((isr & KSZ8851_ISR_LCIS) != 0)'), (249, ' ier &= ~KSZ8851_IER_LCIE;'), (258, ' if((isr & KSZ8851_ISR_TXIS) != 0)'), (261, ' ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_TXIS);'), (264, ' n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA;'), (275, ' if((isr & KSZ8851_ISR_RXIS) != 0)'), (278, ' ier &= ~KSZ8851_IER_RXIE;'), (287, ' ksz8851WriteReg(interface, KSZ8851_IER, ier);'), (305, ' status = ksz8851ReadReg(interface, KSZ8851_ISR);'), (308, ' if((status & KSZ8851_ISR_LCIS) != 0)'), (311, ' ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_LCIS);'), (313, ' status = ksz8851ReadReg(interface, KSZ8851_P1SR);'), (316, ' if((status & KSZ8851_P1SR_LINK_GOOD) != 0)'), (319, ' if((status & KSZ8851_P1SR_OPERATION_SPEED) != 0)'), (329, ' if((status & KSZ8851_P1SR_OPERATION_DUPLEX) != 0)'), (352, ' if((status & KSZ8851_ISR_RXIS) != 0)'), (355, ' ksz8851WriteReg(interface, KSZ8851_ISR, KSZ8851_ISR_RXIS);'), (357, ' frameCount = MSB(ksz8851ReadReg(interface, KSZ8851_RXFCTR));'), (370, ' ksz8851SetBit(interface, KSZ8851_IER, KSZ8851_IER_LCIE | KSZ8851_IER_RXIE);'), (408, ' n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA;'), (420, ' header.controlWord = htole16(KSZ8851_TX_CTRL_TXIC |'), (421, ' (context->frameId++ & KSZ8851_TX_CTRL_TXFID));'), (422, ''), (427, ' ksz8851SetBit(interface, 
KSZ8851_RXQCR, KSZ8851_RXQCR_SDA);'), (433, ' ksz8851ClearBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_SDA);'), (436, ' ksz8851SetBit(interface, KSZ8851_TXQCR, KSZ8851_TXQCR_METFE);'), (439, ' n = ksz8851ReadReg(interface, KSZ8851_TXMIR) & KSZ8851_TXMIR_TXMA;'), (470, ' status = ksz8851ReadReg(interface, KSZ8851_RXFHSR);'), (473, ' if((status & KSZ8851_RXFHSR_RXFV) != 0)'), (476, ' if((status & (KSZ8851_RXFHSR_RXMR | KSZ8851_RXFHSR_RXFTL |'), (477, ' KSZ8851_RXFHSR_RXRF | KSZ8851_RXFHSR_RXCE)) == 0)'), (480, ' n = ksz8851ReadReg(interface, KSZ8851_RXFHBCR) & KSZ8851_RXFHBCR_RXBC;'), (486, ' ksz8851WriteReg(interface, KSZ8851_RXFDPR, KSZ8851_RXFDPR_RXFPAI);'), (488, ' ksz8851SetBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_SDA);'), (492, ' ksz8851ClearBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_SDA);'), (506, ' ksz8851SetBit(interface, KSZ8851_RXQCR, KSZ8851_RXQCR_RRXEF);'), (552, ' ksz8851WriteReg(interface, KSZ8851_MAHTR0, hashTable[0]);'), (553, ' ksz8851WriteReg(interface, KSZ8851_MAHTR1, hashTable[1]);'), (554, ' ksz8851WriteReg(interface, KSZ8851_MAHTR2, hashTable[2]);'), (555, ' ksz8851WriteReg(interface, KSZ8851_MAHTR3, hashTable[3]);'), (558, ' TRACE_DEBUG(" MAHTR0 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_MAHTR0));'), (559, ' TRACE_DEBUG(" MAHTR1 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_MAHTR1));'), (560, ' TRACE_DEBUG(" MAHTR2 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_MAHTR2));'), (561, ' TRACE_DEBUG(" MAHTR3 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_MAHTR3));')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. 
All rights reserved.'), (28, ' * @version 2.0.0'), (90, ' TRACE_DEBUG("CIDER=0x%04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_REG_CIDER));'), (91, ' TRACE_DEBUG("PHY1ILR=0x%04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_REG_PHY1ILR));'), (92, ' TRACE_DEBUG("PHY1IHR=0x%04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_REG_PHY1IHR));'), (95, ' if(ksz8851ReadReg(interface, KSZ8851_REG_CIDER) != KSZ8851_REV_A3_ID)'), (122, ' ksz8851WriteReg(interface, KSZ8851_REG_MARH, htons(interface->macAddr.w[0]));'), (123, ' ksz8851WriteReg(interface, KSZ8851_REG_MARM, htons(interface->macAddr.w[1]));'), (124, ' ksz8851WriteReg(interface, KSZ8851_REG_MARL, htons(interface->macAddr.w[2]));'), (126, ' //Packets shorter than 64 bytes are padded and the CRC is automatically generated'), (127, ' ksz8851WriteReg(interface, KSZ8851_REG_TXCR, TXCR_TXFCE | TXCR_TXPE | TXCR_TXCE);'), (129, ' ksz8851WriteReg(interface, KSZ8851_REG_TXFDPR, TXFDPR_TXFPAI);'), (132, ' ksz8851WriteReg(interface, KSZ8851_REG_RXCR1,'), (133, ' RXCR1_RXPAFMA | RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXME | RXCR1_RXUE);'), (136, ' ksz8851WriteReg(interface, KSZ8851_REG_RXCR2,'), (137, ' RXCR2_SRDBL2 | RXCR2_IUFFP | RXCR2_RXIUFCEZ);'), (140, ' ksz8851WriteReg(interface, KSZ8851_REG_RXQCR, RXQCR_RXFCTE | RXQCR_ADRFE);'), (142, ' ksz8851WriteReg(interface, KSZ8851_REG_RXFDPR, RXFDPR_RXFPAI);'), (144, ' ksz8851WriteReg(interface, KSZ8851_REG_RXFCTR, 1);'), (147, ' ksz8851ClearBit(interface, KSZ8851_REG_P1CR, P1CR_FORCE_DUPLEX);'), (149, ' ksz8851SetBit(interface, KSZ8851_REG_P1CR, P1CR_RESTART_AN);'), (152, ' ksz8851SetBit(interface, KSZ8851_REG_ISR, ISR_LCIS | ISR_TXIS |'), (153, ' ISR_RXIS | ISR_RXOIS | ISR_TXPSIS | ISR_RXPSIS | ISR_TXSAIS |'), (154, ' ISR_RXWFDIS | ISR_RXMPDIS | ISR_LDIS | ISR_EDIS | ISR_SPIBEIS);'), (157, ' ksz8851SetBit(interface, KSZ8851_REG_IER, IER_LCIE | IER_TXIE | IER_RXIE);'), (160, ' ksz8851SetBit(interface, KSZ8851_REG_TXCR, TXCR_TXE);'), (162, ' ksz8851SetBit(interface, 
KSZ8851_REG_RXCR1, RXCR1_RXE);'), (228, ' ier = ksz8851ReadReg(interface, KSZ8851_REG_IER);'), (230, ' ksz8851WriteReg(interface, KSZ8851_REG_IER, 0);'), (233, ' isr = ksz8851ReadReg(interface, KSZ8851_REG_ISR);'), (236, ' if((isr & ISR_LCIS) != 0)'), (239, ' ier &= ~IER_LCIE;'), (248, ' if((isr & ISR_TXIS) != 0)'), (251, ' ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_TXIS);'), (254, ' n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK;'), (265, ' if((isr & ISR_RXIS) != 0)'), (268, ' ier &= ~IER_RXIE;'), (277, ' ksz8851WriteReg(interface, KSZ8851_REG_IER, ier);'), (295, ' status = ksz8851ReadReg(interface, KSZ8851_REG_ISR);'), (298, ' if((status & ISR_LCIS) != 0)'), (301, ' ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_LCIS);'), (303, ' status = ksz8851ReadReg(interface, KSZ8851_REG_P1SR);'), (306, ' if((status & P1SR_LINK_GOOD) != 0)'), (309, ' if((status & P1SR_OPERATION_SPEED) != 0)'), (319, ' if((status & P1SR_OPERATION_DUPLEX) != 0)'), (342, ' if((status & ISR_RXIS) != 0)'), (345, ' ksz8851WriteReg(interface, KSZ8851_REG_ISR, ISR_RXIS);'), (347, ' frameCount = MSB(ksz8851ReadReg(interface, KSZ8851_REG_RXFCTR));'), (360, ' ksz8851SetBit(interface, KSZ8851_REG_IER, IER_LCIE | IER_RXIE);'), (398, ' n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK;'), (410, ' header.controlWord = htole16(TX_CTRL_TXIC | (context->frameId++ & TX_CTRL_TXFID));'), (415, ' ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);'), (421, ' ksz8851ClearBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);'), (424, ' ksz8851SetBit(interface, KSZ8851_REG_TXQCR, TXQCR_METFE);'), (427, ' n = ksz8851ReadReg(interface, KSZ8851_REG_TXMIR) & TXMIR_TXMA_MASK;'), (458, ' status = ksz8851ReadReg(interface, KSZ8851_REG_RXFHSR);'), (461, ' if((status & RXFHSR_RXFV) != 0)'), (464, ' if((status & (RXFHSR_RXMR | RXFHSR_RXFTL | RXFHSR_RXRF | RXFHSR_RXCE)) == 0)'), (467, ' n = ksz8851ReadReg(interface, KSZ8851_REG_RXFHBCR) & RXFHBCR_RXBC_MASK;'), (473, ' 
ksz8851WriteReg(interface, KSZ8851_REG_RXFDPR, RXFDPR_RXFPAI);'), (475, ' ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);'), (479, ' ksz8851ClearBit(interface, KSZ8851_REG_RXQCR, RXQCR_SDA);'), (493, ' ksz8851SetBit(interface, KSZ8851_REG_RXQCR, RXQCR_RRXEF);'), (539, ' ksz8851WriteReg(interface, KSZ8851_REG_MAHTR0, hashTable[0]);'), (540, ' ksz8851WriteReg(interface, KSZ8851_REG_MAHTR1, hashTable[1]);'), (541, ' ksz8851WriteReg(interface, KSZ8851_REG_MAHTR2, hashTable[2]);'), (542, ' ksz8851WriteReg(interface, KSZ8851_REG_MAHTR3, hashTable[3]);'), (545, ' TRACE_DEBUG(" MAHTR0 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR0));'), (546, ' TRACE_DEBUG(" MAHTR1 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR1));'), (547, ' TRACE_DEBUG(" MAHTR2 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR2));'), (548, ' TRACE_DEBUG(" MAHTR3 = %04" PRIX16 "\\r\\n", ksz8851ReadReg(interface, KSZ8851_REG_MAHTR3));')]}
84
71
435
2,463
34
177
5
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
CWE-20
3,222
Password.cxx
C++
PlainPasswd::PlainPasswd
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ // // XXX not thread-safe, because d3des isn't - do we need to worry about this? // #include <string.h> extern "C" { #include <rfb/d3des.h> } #include <rdr/types.h> #include <rdr/Exception.h> #include <rfb/Password.h> using namespace rfb; static unsigned char d3desObfuscationKey[] = {23,82,107,6,35,78,88,7}; PlainPasswd::PlainPasswd() {} PlainPasswd::PlainPasswd(char* pwd) : CharArray(pwd) { } PlainPasswd::PlainPasswd(int len) : CharArray(len) { } PlainPasswd::PlainPasswd(const ObfuscatedPasswd& obfPwd) : CharArray(9) { if (obfPwd.length < 8) throw rdr::Exception("bad obfuscated password length"); deskey(d3desObfuscationKey, DE1); des((rdr::U8*)obfPwd.buf, (rdr::U8*)buf); buf[8] = 0; } PlainPasswd::~PlainPasswd() { replaceBuf(0); } void PlainPasswd::replaceBuf(char* b) { if (buf) memset(buf, 0, strlen(buf)); CharArray::replaceBuf(b); } ObfuscatedPasswd::ObfuscatedPasswd() : length(0) { } ObfuscatedPasswd::ObfuscatedPasswd(int len) : CharArray(len), length(len) { } ObfuscatedPasswd::ObfuscatedPasswd(const PlainPasswd& plainPwd) : CharArray(8), length(8) { int l = strlen(plainPwd.buf), i; for (i=0; i<8; i++) buf[i] = i<l ? 
plainPwd.buf[i] : 0; deskey(d3desObfuscationKey, EN0); des((rdr::U8*)buf, (rdr::U8*)buf); } ObfuscatedPasswd::~ObfuscatedPasswd() { if (buf) memset(buf, 0, length); }
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ // // XXX not thread-safe, because d3des isn't - do we need to worry about this? // #include <string.h> extern "C" { #include <rfb/d3des.h> } #include <rdr/types.h> #include <rdr/Exception.h> #include <rfb/Password.h> using namespace rfb; static unsigned char d3desObfuscationKey[] = {23,82,107,6,35,78,88,7}; PlainPasswd::PlainPasswd() {} PlainPasswd::PlainPasswd(char* pwd) : CharArray(pwd) { } PlainPasswd::PlainPasswd(size_t len) : CharArray(len) { } PlainPasswd::PlainPasswd(const ObfuscatedPasswd& obfPwd) : CharArray(9) { if (obfPwd.length < 8) throw rdr::Exception("bad obfuscated password length"); deskey(d3desObfuscationKey, DE1); des((rdr::U8*)obfPwd.buf, (rdr::U8*)buf); buf[8] = 0; } PlainPasswd::~PlainPasswd() { replaceBuf(0); } void PlainPasswd::replaceBuf(char* b) { if (buf) memset(buf, 0, strlen(buf)); CharArray::replaceBuf(b); } ObfuscatedPasswd::ObfuscatedPasswd() : length(0) { } ObfuscatedPasswd::ObfuscatedPasswd(size_t len) : CharArray(len), length(len) { } ObfuscatedPasswd::ObfuscatedPasswd(const PlainPasswd& plainPwd) : CharArray(8), length(8) { size_t l = strlen(plainPwd.buf), i; for (i=0; i<8; i++) buf[i] = i<l ? 
plainPwd.buf[i] : 0; deskey(d3desObfuscationKey, EN0); des((rdr::U8*)buf, (rdr::U8*)buf); } ObfuscatedPasswd::~ObfuscatedPasswd() { if (buf) memset(buf, 0, length); }
PlainPasswd::PlainPasswd(int len) : CharArray(len) { }
PlainPasswd::PlainPasswd(size_t len) : CharArray(len) { }
{'added': [(41, 'PlainPasswd::PlainPasswd(size_t len) : CharArray(len) {'), (66, 'ObfuscatedPasswd::ObfuscatedPasswd(size_t len) : CharArray(len), length(len) {'), (70, ' size_t l = strlen(plainPwd.buf), i;')], 'deleted': [(41, 'PlainPasswd::PlainPasswd(int len) : CharArray(len) {'), (66, 'ObfuscatedPasswd::ObfuscatedPasswd(int len) : CharArray(len), length(len) {'), (70, ' int l = strlen(plainPwd.buf), i;')]}
3
3
44
333
2
14
1
https://github.com/CendioOssman/tigervnc
CVE-2019-15694
CWE-787
3,112
archive.cpp
C++
Archive::Seek
#include "rar.hpp" #include "arccmt.cpp" Archive::Archive(RAROptions *InitCmd) { Cmd=NULL; // Just in case we'll have an exception in 'new' below. DummyCmd=(InitCmd==NULL); Cmd=DummyCmd ? (new RAROptions):InitCmd; OpenShared=Cmd->OpenShared; Format=RARFMT15; Solid=false; Volume=false; MainComment=false; Locked=false; Signed=false; FirstVolume=false; NewNumbering=false; SFXSize=0; LatestTime.Reset(); Protected=false; Encrypted=false; FailedHeaderDecryption=false; BrokenHeader=false; LastReadBlock=0; CurBlockPos=0; NextBlockPos=0; RecoverySize=-1; RecoveryPercent=-1; memset(&MainHead,0,sizeof(MainHead)); memset(&CryptHead,0,sizeof(CryptHead)); memset(&EndArcHead,0,sizeof(EndArcHead)); VolNumber=0; VolWrite=0; AddingFilesSize=0; AddingHeadersSize=0; *FirstVolumeName=0; Splitting=false; NewArchive=false; SilentOpen=false; } Archive::~Archive() { if (DummyCmd) delete Cmd; } void Archive::CheckArc(bool EnableBroken) { if (!IsArchive(EnableBroken)) { // If FailedHeaderDecryption is set, we already reported that archive // password is incorrect. if (!FailedHeaderDecryption) uiMsg(UIERROR_BADARCHIVE,FileName); ErrHandler.Exit(RARX_FATAL); } } #if !defined(SFX_MODULE) void Archive::CheckOpen(const wchar *Name) { TOpen(Name); CheckArc(false); } #endif bool Archive::WCheckOpen(const wchar *Name) { if (!WOpen(Name)) return false; if (!IsArchive(false)) { uiMsg(UIERROR_BADARCHIVE,FileName); Close(); return false; } return true; } RARFORMAT Archive::IsSignature(const byte *D,size_t Size) { RARFORMAT Type=RARFMT_NONE; if (Size>=1 && D[0]==0x52) #ifndef SFX_MODULE if (Size>=4 && D[1]==0x45 && D[2]==0x7e && D[3]==0x5e) Type=RARFMT14; else #endif if (Size>=7 && D[1]==0x61 && D[2]==0x72 && D[3]==0x21 && D[4]==0x1a && D[5]==0x07) { // We check the last signature byte, so we can return a sensible // warning in case we'll want to change the archive format // sometimes in the future. 
if (D[6]==0) Type=RARFMT15; else if (D[6]==1) Type=RARFMT50; else if (D[6]>1 && D[6]<5) Type=RARFMT_FUTURE; } return Type; } bool Archive::IsArchive(bool EnableBroken) { Encrypted=false; BrokenHeader=false; // Might be left from previous volume. #ifndef SFX_MODULE if (IsDevice()) { uiMsg(UIERROR_INVALIDNAME,FileName,FileName); return false; } #endif if (Read(MarkHead.Mark,SIZEOF_MARKHEAD3)!=SIZEOF_MARKHEAD3) return false; SFXSize=0; RARFORMAT Type; if ((Type=IsSignature(MarkHead.Mark,SIZEOF_MARKHEAD3))!=RARFMT_NONE) { Format=Type; if (Format==RARFMT14) Seek(Tell()-SIZEOF_MARKHEAD3,SEEK_SET); } else { Array<char> Buffer(MAXSFXSIZE); long CurPos=(long)Tell(); int ReadSize=Read(&Buffer[0],Buffer.Size()-16); for (int I=0;I<ReadSize;I++) if (Buffer[I]==0x52 && (Type=IsSignature((byte *)&Buffer[I],ReadSize-I))!=RARFMT_NONE) { Format=Type; if (Format==RARFMT14 && I>0 && CurPos<28 && ReadSize>31) { char *D=&Buffer[28-CurPos]; if (D[0]!=0x52 || D[1]!=0x53 || D[2]!=0x46 || D[3]!=0x58) continue; } SFXSize=CurPos+I; Seek(SFXSize,SEEK_SET); if (Format==RARFMT15 || Format==RARFMT50) Read(MarkHead.Mark,SIZEOF_MARKHEAD3); break; } if (SFXSize==0) return false; } if (Format==RARFMT_FUTURE) { uiMsg(UIERROR_NEWRARFORMAT,FileName); return false; } if (Format==RARFMT50) // RAR 5.0 signature is by one byte longer. { if (Read(MarkHead.Mark+SIZEOF_MARKHEAD3,1)!=1 || MarkHead.Mark[SIZEOF_MARKHEAD3]!=0) return false; MarkHead.HeadSize=SIZEOF_MARKHEAD5; } else MarkHead.HeadSize=SIZEOF_MARKHEAD3; #ifdef RARDLL // If callback function is not set, we cannot get the password, // so we skip the initial header processing for encrypted header archive. // It leads to skipped archive comment, but the rest of archive data // is processed correctly. if (Cmd->Callback==NULL) SilentOpen=true; #endif bool HeadersLeft; // Any headers left to read. // Skip the archive encryption header if any and read the main header. while ((HeadersLeft=(ReadHeader()!=0))==true) // Additional parentheses to silence Clang. 
{ SeekToNext(); HEADER_TYPE Type=GetHeaderType(); // In RAR 5.0 we need to quit after reading HEAD_CRYPT if we wish to // avoid the password prompt. if (Type==HEAD_MAIN || SilentOpen && Type==HEAD_CRYPT) break; } // This check allows to make RS based recovery even if password is incorrect. // But we should not do it for EnableBroken or we'll get 'not RAR archive' // messages when extracting encrypted archives with wrong password. if (FailedHeaderDecryption && !EnableBroken) return false; if (BrokenHeader) // Main archive header is corrupt. { uiMsg(UIERROR_MHEADERBROKEN,FileName); if (!EnableBroken) return false; } MainComment=MainHead.CommentInHeader; // If we process non-encrypted archive or can request a password, // we set 'first volume' flag based on file attributes below. // It is necessary for RAR 2.x archives, which did not have 'first volume' // flag in main header. Also for all RAR formats we need to scan until // first file header to set "comment" flag when reading service header. // Unless we are in silent mode, we need to know about presence of comment // immediately after IsArchive call. if (HeadersLeft && (!SilentOpen || !Encrypted)) { SaveFilePos SavePos(*this); int64 SaveCurBlockPos=CurBlockPos,SaveNextBlockPos=NextBlockPos; HEADER_TYPE SaveCurHeaderType=CurHeaderType; while (ReadHeader()!=0) { HEADER_TYPE HeaderType=GetHeaderType(); if (HeaderType==HEAD_SERVICE) { // If we have a split service headers, it surely indicates non-first // volume. But not split service header does not guarantee the first // volume, because we can have split file after non-split archive // comment. So we do not quit from loop here. FirstVolume=Volume && !SubHead.SplitBefore; } else if (HeaderType==HEAD_FILE) { FirstVolume=Volume && !FileHead.SplitBefore; break; } else if (HeaderType==HEAD_ENDARC) // Might happen if archive contains only a split service header. 
break; SeekToNext(); } CurBlockPos=SaveCurBlockPos; NextBlockPos=SaveNextBlockPos; CurHeaderType=SaveCurHeaderType; } if (!Volume || FirstVolume) wcsncpyz(FirstVolumeName,FileName,ASIZE(FirstVolumeName)); return true; } void Archive::SeekToNext() { Seek(NextBlockPos,SEEK_SET); } // Calculate the block size including encryption fields and padding if any. uint Archive::FullHeaderSize(size_t Size) { if (Encrypted) { Size = ALIGN_VALUE(Size, CRYPT_BLOCK_SIZE); // Align to encryption block size. if (Format == RARFMT50) Size += SIZE_INITV; else Size += SIZE_SALT30; } return uint(Size); } #ifdef USE_QOPEN bool Archive::Open(const wchar *Name,uint Mode) { // Important if we reuse Archive object and it has virtual QOpen // file position not matching real. For example, for 'l -v volname'. QOpen.Unload(); return File::Open(Name,Mode); } int Archive::Read(void *Data,size_t Size) { size_t Result; if (QOpen.Read(Data,Size,Result)) return (int)Result; return File::Read(Data,Size); } void Archive::Seek(int64 Offset,int Method) { if (!QOpen.Seek(Offset,Method)) File::Seek(Offset,Method); } int64 Archive::Tell() { int64 QPos; if (QOpen.Tell(&QPos)) return QPos; return File::Tell(); } #endif
#include "rar.hpp" #include "arccmt.cpp" #ifdef USE_ARCMEM #include "arcmem.cpp" #endif Archive::Archive(RAROptions *InitCmd) { Cmd=NULL; // Just in case we'll have an exception in 'new' below. DummyCmd=(InitCmd==NULL); Cmd=DummyCmd ? (new RAROptions):InitCmd; OpenShared=Cmd->OpenShared; Format=RARFMT15; Solid=false; Volume=false; MainComment=false; Locked=false; Signed=false; FirstVolume=false; NewNumbering=false; SFXSize=0; LatestTime.Reset(); Protected=false; Encrypted=false; FailedHeaderDecryption=false; BrokenHeader=false; LastReadBlock=0; CurBlockPos=0; NextBlockPos=0; RecoverySize=-1; RecoveryPercent=-1; memset(&MainHead,0,sizeof(MainHead)); memset(&CryptHead,0,sizeof(CryptHead)); memset(&EndArcHead,0,sizeof(EndArcHead)); VolNumber=0; VolWrite=0; AddingFilesSize=0; AddingHeadersSize=0; *FirstVolumeName=0; Splitting=false; NewArchive=false; SilentOpen=false; #ifdef USE_QOPEN ProhibitQOpen=false; #endif } Archive::~Archive() { if (DummyCmd) delete Cmd; } void Archive::CheckArc(bool EnableBroken) { if (!IsArchive(EnableBroken)) { // If FailedHeaderDecryption is set, we already reported that archive // password is incorrect. if (!FailedHeaderDecryption) uiMsg(UIERROR_BADARCHIVE,FileName); ErrHandler.Exit(RARX_FATAL); } } #if !defined(SFX_MODULE) void Archive::CheckOpen(const wchar *Name) { TOpen(Name); CheckArc(false); } #endif bool Archive::WCheckOpen(const wchar *Name) { if (!WOpen(Name)) return false; if (!IsArchive(false)) { uiMsg(UIERROR_BADARCHIVE,FileName); Close(); return false; } return true; } RARFORMAT Archive::IsSignature(const byte *D,size_t Size) { RARFORMAT Type=RARFMT_NONE; if (Size>=1 && D[0]==0x52) #ifndef SFX_MODULE if (Size>=4 && D[1]==0x45 && D[2]==0x7e && D[3]==0x5e) Type=RARFMT14; else #endif if (Size>=7 && D[1]==0x61 && D[2]==0x72 && D[3]==0x21 && D[4]==0x1a && D[5]==0x07) { // We check the last signature byte, so we can return a sensible // warning in case we'll want to change the archive format // sometimes in the future. 
if (D[6]==0) Type=RARFMT15; else if (D[6]==1) Type=RARFMT50; else if (D[6]>1 && D[6]<5) Type=RARFMT_FUTURE; } return Type; } bool Archive::IsArchive(bool EnableBroken) { Encrypted=false; BrokenHeader=false; // Might be left from previous volume. #ifndef SFX_MODULE if (IsDevice()) { uiMsg(UIERROR_INVALIDNAME,FileName,FileName); return false; } #endif if (Read(MarkHead.Mark,SIZEOF_MARKHEAD3)!=SIZEOF_MARKHEAD3) return false; SFXSize=0; RARFORMAT Type; if ((Type=IsSignature(MarkHead.Mark,SIZEOF_MARKHEAD3))!=RARFMT_NONE) { Format=Type; if (Format==RARFMT14) Seek(Tell()-SIZEOF_MARKHEAD3,SEEK_SET); } else { Array<char> Buffer(MAXSFXSIZE); long CurPos=(long)Tell(); int ReadSize=Read(&Buffer[0],Buffer.Size()-16); for (int I=0;I<ReadSize;I++) if (Buffer[I]==0x52 && (Type=IsSignature((byte *)&Buffer[I],ReadSize-I))!=RARFMT_NONE) { Format=Type; if (Format==RARFMT14 && I>0 && CurPos<28 && ReadSize>31) { char *D=&Buffer[28-CurPos]; if (D[0]!=0x52 || D[1]!=0x53 || D[2]!=0x46 || D[3]!=0x58) continue; } SFXSize=CurPos+I; Seek(SFXSize,SEEK_SET); if (Format==RARFMT15 || Format==RARFMT50) Read(MarkHead.Mark,SIZEOF_MARKHEAD3); break; } if (SFXSize==0) return false; } if (Format==RARFMT_FUTURE) { uiMsg(UIERROR_NEWRARFORMAT,FileName); return false; } if (Format==RARFMT50) // RAR 5.0 signature is by one byte longer. { if (Read(MarkHead.Mark+SIZEOF_MARKHEAD3,1)!=1 || MarkHead.Mark[SIZEOF_MARKHEAD3]!=0) return false; MarkHead.HeadSize=SIZEOF_MARKHEAD5; } else MarkHead.HeadSize=SIZEOF_MARKHEAD3; #ifdef RARDLL // If callback function is not set, we cannot get the password, // so we skip the initial header processing for encrypted header archive. // It leads to skipped archive comment, but the rest of archive data // is processed correctly. if (Cmd->Callback==NULL) SilentOpen=true; #endif bool HeadersLeft; // Any headers left to read. // Skip the archive encryption header if any and read the main header. while ((HeadersLeft=(ReadHeader()!=0))==true) // Additional parentheses to silence Clang. 
{ SeekToNext(); HEADER_TYPE Type=GetHeaderType(); // In RAR 5.0 we need to quit after reading HEAD_CRYPT if we wish to // avoid the password prompt. if (Type==HEAD_MAIN || SilentOpen && Type==HEAD_CRYPT) break; } // This check allows to make RS based recovery even if password is incorrect. // But we should not do it for EnableBroken or we'll get 'not RAR archive' // messages when extracting encrypted archives with wrong password. if (FailedHeaderDecryption && !EnableBroken) return false; if (BrokenHeader) // Main archive header is corrupt. { uiMsg(UIERROR_MHEADERBROKEN,FileName); if (!EnableBroken) return false; } MainComment=MainHead.CommentInHeader; // If we process non-encrypted archive or can request a password, // we set 'first volume' flag based on file attributes below. // It is necessary for RAR 2.x archives, which did not have 'first volume' // flag in main header. Also for all RAR formats we need to scan until // first file header to set "comment" flag when reading service header. // Unless we are in silent mode, we need to know about presence of comment // immediately after IsArchive call. if (HeadersLeft && (!SilentOpen || !Encrypted)) { SaveFilePos SavePos(*this); int64 SaveCurBlockPos=CurBlockPos,SaveNextBlockPos=NextBlockPos; HEADER_TYPE SaveCurHeaderType=CurHeaderType; while (ReadHeader()!=0) { HEADER_TYPE HeaderType=GetHeaderType(); if (HeaderType==HEAD_SERVICE) { // If we have a split service headers, it surely indicates non-first // volume. But not split service header does not guarantee the first // volume, because we can have split file after non-split archive // comment. So we do not quit from loop here. FirstVolume=Volume && !SubHead.SplitBefore; } else if (HeaderType==HEAD_FILE) { FirstVolume=Volume && !FileHead.SplitBefore; break; } else if (HeaderType==HEAD_ENDARC) // Might happen if archive contains only a split service header. 
break; SeekToNext(); } CurBlockPos=SaveCurBlockPos; NextBlockPos=SaveNextBlockPos; CurHeaderType=SaveCurHeaderType; } if (!Volume || FirstVolume) wcsncpyz(FirstVolumeName,FileName,ASIZE(FirstVolumeName)); return true; } void Archive::SeekToNext() { Seek(NextBlockPos,SEEK_SET); } // Calculate the block size including encryption fields and padding if any. uint Archive::FullHeaderSize(size_t Size) { if (Encrypted) { Size = ALIGN_VALUE(Size, CRYPT_BLOCK_SIZE); // Align to encryption block size. if (Format == RARFMT50) Size += SIZE_INITV; else Size += SIZE_SALT30; } return uint(Size); } bool Archive::Open(const wchar *Name,uint Mode) { #ifdef USE_QOPEN // Important if we reuse Archive object and it has virtual QOpen // file position not matching real. For example, for 'l -v volname'. QOpen.Unload(); #endif #ifdef USE_ARCMEM if (Cmd->ArcInMem) { wcsncpyz(FileName,Name,ASIZE(FileName)); ArcMem.Load(Cmd->ArcMemData,Cmd->ArcMemSize); Cmd->SetArcInMem(NULL,0); // Return in memory data for first volume only, not for next volumes. return true; } #endif return File::Open(Name,Mode); } bool Archive::Close() { #ifdef USE_ARCMEM if (ArcMem.Unload()) return true; #endif return File::Close(); } int Archive::Read(void *Data,size_t Size) { #ifdef USE_QOPEN size_t QResult; if (QOpen.Read(Data,Size,QResult)) return (int)QResult; #endif #ifdef USE_ARCMEM size_t AResult; if (ArcMem.Read(Data,Size,AResult)) return (int)AResult; #endif return File::Read(Data,Size); } void Archive::Seek(int64 Offset,int Method) { #ifdef USE_QOPEN if (QOpen.Seek(Offset,Method)) return; #endif #ifdef USE_ARCMEM if (ArcMem.Seek(Offset,Method)) return; #endif File::Seek(Offset,Method); } int64 Archive::Tell() { #ifdef USE_QOPEN int64 QPos; if (QOpen.Tell(&QPos)) return QPos; #endif #ifdef USE_ARCMEM int64 APos; if (ArcMem.Tell(&APos)) return APos; #endif return File::Tell(); } bool Archive::IsOpened() { #ifdef USE_ARCMEM if (ArcMem.IsLoaded()) return true; #endif return File::IsOpened(); };
void Archive::Seek(int64 Offset,int Method) { if (!QOpen.Seek(Offset,Method)) File::Seek(Offset,Method); }
void Archive::Seek(int64 Offset,int Method) { #ifdef USE_QOPEN if (QOpen.Seek(Offset,Method)) return; #endif #ifdef USE_ARCMEM if (ArcMem.Seek(Offset,Method)) return; #endif File::Seek(Offset,Method); }
{'added': [(6, '#ifdef USE_ARCMEM'), (7, '#include "arcmem.cpp"'), (8, '#endif'), (9, ''), (55, '#ifdef USE_QOPEN'), (56, ' ProhibitQOpen=false;'), (57, '#endif'), (58, ''), (307, '#ifdef USE_QOPEN'), (311, '#endif'), (312, ''), (313, '#ifdef USE_ARCMEM'), (314, ' if (Cmd->ArcInMem)'), (315, ' {'), (316, ' wcsncpyz(FileName,Name,ASIZE(FileName));'), (317, ' ArcMem.Load(Cmd->ArcMemData,Cmd->ArcMemSize);'), (318, ' Cmd->SetArcInMem(NULL,0); // Return in memory data for first volume only, not for next volumes.'), (319, ' return true;'), (320, ' }'), (321, '#endif'), (327, ''), (328, 'bool Archive::Close()'), (329, '{'), (330, '#ifdef USE_ARCMEM'), (331, ' if (ArcMem.Unload())'), (332, ' return true;'), (333, '#endif'), (334, ' return File::Close();'), (335, '}'), (336, ''), (337, ''), (338, ''), (341, '#ifdef USE_QOPEN'), (342, ' size_t QResult;'), (343, ' if (QOpen.Read(Data,Size,QResult))'), (344, ' return (int)QResult;'), (345, '#endif'), (346, '#ifdef USE_ARCMEM'), (347, ' size_t AResult;'), (348, ' if (ArcMem.Read(Data,Size,AResult))'), (349, ' return (int)AResult;'), (350, '#endif'), (357, '#ifdef USE_QOPEN'), (358, ' if (QOpen.Seek(Offset,Method))'), (359, ' return;'), (360, '#endif'), (361, '#ifdef USE_ARCMEM'), (362, ' if (ArcMem.Seek(Offset,Method))'), (363, ' return;'), (364, '#endif'), (365, ' File::Seek(Offset,Method);'), (371, '#ifdef USE_QOPEN'), (375, '#endif'), (376, '#ifdef USE_ARCMEM'), (377, ' int64 APos;'), (378, ' if (ArcMem.Tell(&APos))'), (379, ' return APos;'), (380, '#endif'), (384, ''), (385, ''), (386, 'bool Archive::IsOpened()'), (387, '{'), (388, '#ifdef USE_ARCMEM'), (389, ' if (ArcMem.IsLoaded())'), (390, ' return true;'), (391, '#endif'), (392, ' return File::IsOpened();'), (393, '};')], 'deleted': [(297, '#ifdef USE_QOPEN'), (310, ' size_t Result;'), (311, ' if (QOpen.Read(Data,Size,Result))'), (312, ' return (int)Result;'), (319, ' if (!QOpen.Seek(Offset,Method))'), (320, ' File::Seek(Offset,Method);'), (331, '#endif')]}
68
7
266
1,437
5
33
2
https://github.com/aawc/unrar
CVE-2017-20006
CWE-787
658
smtp.c
C
Curl_smtp_escape_eob
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * * RFC1870 SMTP Service Extension for Message Size * RFC2195 CRAM-MD5 authentication * RFC2831 DIGEST-MD5 authentication * RFC3207 SMTP over TLS * RFC4422 Simple Authentication and Security Layer (SASL) * RFC4616 PLAIN authentication * RFC4752 The Kerberos V5 ("GSSAPI") SASL Mechanism * RFC4954 SMTP Authentication * RFC5321 SMTP protocol * RFC6749 OAuth 2.0 Authorization Framework * Draft SMTP URL Interface <draft-earhart-url-smtp-00.txt> * Draft LOGIN SASL Mechanism <draft-murchison-sasl-login-00.txt> * ***************************************************************************/ #include "curl_setup.h" #ifndef CURL_DISABLE_SMTP #ifdef HAVE_NETINET_IN_H #include <netinet/in.h> #endif #ifdef HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #ifdef HAVE_UTSNAME_H #include <sys/utsname.h> #endif #ifdef HAVE_NETDB_H #include <netdb.h> #endif #ifdef __VMS #include <in.h> #include <inet.h> #endif #if (defined(NETWARE) && defined(__NOVELL_LIBC__)) #undef in_addr_t #define in_addr_t unsigned long #endif #include <curl/curl.h> #include "urldata.h" #include "sendf.h" #include "hostip.h" #include "progress.h" #include "transfer.h" #include "escape.h" #include "http.h" /* for HTTP proxy tunnel stuff */ 
#include "mime.h" #include "socks.h" #include "smtp.h" #include "strtoofft.h" #include "strcase.h" #include "vtls/vtls.h" #include "connect.h" #include "strerror.h" #include "select.h" #include "multiif.h" #include "url.h" #include "curl_gethostname.h" #include "curl_sasl.h" #include "warnless.h" /* The last 3 #include files should be in this order */ #include "curl_printf.h" #include "curl_memory.h" #include "memdebug.h" /* Local API functions */ static CURLcode smtp_regular_transfer(struct connectdata *conn, bool *done); static CURLcode smtp_do(struct connectdata *conn, bool *done); static CURLcode smtp_done(struct connectdata *conn, CURLcode status, bool premature); static CURLcode smtp_connect(struct connectdata *conn, bool *done); static CURLcode smtp_disconnect(struct connectdata *conn, bool dead); static CURLcode smtp_multi_statemach(struct connectdata *conn, bool *done); static int smtp_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks); static CURLcode smtp_doing(struct connectdata *conn, bool *dophase_done); static CURLcode smtp_setup_connection(struct connectdata *conn); static CURLcode smtp_parse_url_options(struct connectdata *conn); static CURLcode smtp_parse_url_path(struct connectdata *conn); static CURLcode smtp_parse_custom_request(struct connectdata *conn); static CURLcode smtp_perform_auth(struct connectdata *conn, const char *mech, const char *initresp); static CURLcode smtp_continue_auth(struct connectdata *conn, const char *resp); static void smtp_get_message(char *buffer, char **outptr); /* * SMTP protocol handler. 
*/ const struct Curl_handler Curl_handler_smtp = { "SMTP", /* scheme */ smtp_setup_connection, /* setup_connection */ smtp_do, /* do_it */ smtp_done, /* done */ ZERO_NULL, /* do_more */ smtp_connect, /* connect_it */ smtp_multi_statemach, /* connecting */ smtp_doing, /* doing */ smtp_getsock, /* proto_getsock */ smtp_getsock, /* doing_getsock */ ZERO_NULL, /* domore_getsock */ ZERO_NULL, /* perform_getsock */ smtp_disconnect, /* disconnect */ ZERO_NULL, /* readwrite */ ZERO_NULL, /* connection_check */ PORT_SMTP, /* defport */ CURLPROTO_SMTP, /* protocol */ PROTOPT_CLOSEACTION | PROTOPT_NOURLQUERY | /* flags */ PROTOPT_URLOPTIONS }; #ifdef USE_SSL /* * SMTPS protocol handler. */ const struct Curl_handler Curl_handler_smtps = { "SMTPS", /* scheme */ smtp_setup_connection, /* setup_connection */ smtp_do, /* do_it */ smtp_done, /* done */ ZERO_NULL, /* do_more */ smtp_connect, /* connect_it */ smtp_multi_statemach, /* connecting */ smtp_doing, /* doing */ smtp_getsock, /* proto_getsock */ smtp_getsock, /* doing_getsock */ ZERO_NULL, /* domore_getsock */ ZERO_NULL, /* perform_getsock */ smtp_disconnect, /* disconnect */ ZERO_NULL, /* readwrite */ ZERO_NULL, /* connection_check */ PORT_SMTPS, /* defport */ CURLPROTO_SMTPS, /* protocol */ PROTOPT_CLOSEACTION | PROTOPT_SSL | PROTOPT_NOURLQUERY | PROTOPT_URLOPTIONS /* flags */ }; #endif /* SASL parameters for the smtp protocol */ static const struct SASLproto saslsmtp = { "smtp", /* The service name */ 334, /* Code received when continuation is expected */ 235, /* Code to receive upon authentication success */ 512 - 8, /* Maximum initial response length (no max) */ smtp_perform_auth, /* Send authentication command */ smtp_continue_auth, /* Send authentication continuation */ smtp_get_message /* Get SASL response message */ }; #ifdef USE_SSL static void smtp_to_smtps(struct connectdata *conn) { /* Change the connection handler */ conn->handler = &Curl_handler_smtps; /* Set the connection's upgraded to TLS flag */ 
conn->tls_upgraded = TRUE; } #else #define smtp_to_smtps(x) Curl_nop_stmt #endif /*********************************************************************** * * smtp_endofresp() * * Checks for an ending SMTP status code at the start of the given string, but * also detects various capabilities from the EHLO response including the * supported authentication mechanisms. */ static bool smtp_endofresp(struct connectdata *conn, char *line, size_t len, int *resp) { struct smtp_conn *smtpc = &conn->proto.smtpc; bool result = FALSE; /* Nothing for us */ if(len < 4 || !ISDIGIT(line[0]) || !ISDIGIT(line[1]) || !ISDIGIT(line[2])) return FALSE; /* Do we have a command response? This should be the response code followed by a space and optionally some text as per RFC-5321 and as outlined in Section 4. Examples of RFC-4954 but some e-mail servers ignore this and only send the response code instead as per Section 4.2. */ if(line[3] == ' ' || len == 5) { result = TRUE; *resp = curlx_sltosi(strtol(line, NULL, 10)); /* Make sure real server never sends internal value */ if(*resp == 1) *resp = 0; } /* Do we have a multiline (continuation) response? */ else if(line[3] == '-' && (smtpc->state == SMTP_EHLO || smtpc->state == SMTP_COMMAND)) { result = TRUE; *resp = 1; /* Internal response code */ } return result; } /*********************************************************************** * * smtp_get_message() * * Gets the authentication message from the response buffer. 
*/ static void smtp_get_message(char *buffer, char **outptr) { size_t len = strlen(buffer); char *message = NULL; if(len > 4) { /* Find the start of the message */ len -= 4; for(message = buffer + 4; *message == ' ' || *message == '\t'; message++, len--) ; /* Find the end of the message */ for(; len--;) if(message[len] != '\r' && message[len] != '\n' && message[len] != ' ' && message[len] != '\t') break; /* Terminate the message */ if(++len) { message[len] = '\0'; } } else /* junk input => zero length output */ message = &buffer[len]; *outptr = message; } /*********************************************************************** * * state() * * This is the ONLY way to change SMTP state! */ static void state(struct connectdata *conn, smtpstate newstate) { struct smtp_conn *smtpc = &conn->proto.smtpc; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) /* for debug purposes */ static const char * const names[] = { "STOP", "SERVERGREET", "EHLO", "HELO", "STARTTLS", "UPGRADETLS", "AUTH", "COMMAND", "MAIL", "RCPT", "DATA", "POSTDATA", "QUIT", /* LAST */ }; if(smtpc->state != newstate) infof(conn->data, "SMTP %p state change from %s to %s\n", (void *)smtpc, names[smtpc->state], names[newstate]); #endif smtpc->state = newstate; } /*********************************************************************** * * smtp_perform_ehlo() * * Sends the EHLO command to not only initialise communication with the ESMTP * server but to also obtain a list of server side supported capabilities. */ static CURLcode smtp_perform_ehlo(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; smtpc->sasl.authmechs = SASL_AUTH_NONE; /* No known auth. 
mechanism yet */ smtpc->sasl.authused = SASL_AUTH_NONE; /* Clear the authentication mechanism used for esmtp connections */ smtpc->tls_supported = FALSE; /* Clear the TLS capability */ smtpc->auth_supported = FALSE; /* Clear the AUTH capability */ /* Send the EHLO command */ result = Curl_pp_sendf(&smtpc->pp, "EHLO %s", smtpc->domain); if(!result) state(conn, SMTP_EHLO); return result; } /*********************************************************************** * * smtp_perform_helo() * * Sends the HELO command to initialise communication with the SMTP server. */ static CURLcode smtp_perform_helo(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; smtpc->sasl.authused = SASL_AUTH_NONE; /* No authentication mechanism used in smtp connections */ /* Send the HELO command */ result = Curl_pp_sendf(&smtpc->pp, "HELO %s", smtpc->domain); if(!result) state(conn, SMTP_HELO); return result; } /*********************************************************************** * * smtp_perform_starttls() * * Sends the STLS command to start the upgrade to TLS. */ static CURLcode smtp_perform_starttls(struct connectdata *conn) { CURLcode result = CURLE_OK; /* Send the STARTTLS command */ result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "STARTTLS"); if(!result) state(conn, SMTP_STARTTLS); return result; } /*********************************************************************** * * smtp_perform_upgrade_tls() * * Performs the upgrade to TLS. 
*/ static CURLcode smtp_perform_upgrade_tls(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; /* Start the SSL connection */ result = Curl_ssl_connect_nonblocking(conn, FIRSTSOCKET, &smtpc->ssldone); if(!result) { if(smtpc->state != SMTP_UPGRADETLS) state(conn, SMTP_UPGRADETLS); if(smtpc->ssldone) { smtp_to_smtps(conn); result = smtp_perform_ehlo(conn); } } return result; } /*********************************************************************** * * smtp_perform_auth() * * Sends an AUTH command allowing the client to login with the given SASL * authentication mechanism. */ static CURLcode smtp_perform_auth(struct connectdata *conn, const char *mech, const char *initresp) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; if(initresp) { /* AUTH <mech> ...<crlf> */ /* Send the AUTH command with the initial response */ result = Curl_pp_sendf(&smtpc->pp, "AUTH %s %s", mech, initresp); } else { /* Send the AUTH command */ result = Curl_pp_sendf(&smtpc->pp, "AUTH %s", mech); } return result; } /*********************************************************************** * * smtp_continue_auth() * * Sends SASL continuation data or cancellation. */ static CURLcode smtp_continue_auth(struct connectdata *conn, const char *resp) { struct smtp_conn *smtpc = &conn->proto.smtpc; return Curl_pp_sendf(&smtpc->pp, "%s", resp); } /*********************************************************************** * * smtp_perform_authentication() * * Initiates the authentication sequence, with the appropriate SASL * authentication mechanism. 
*/ static CURLcode smtp_perform_authentication(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; saslprogress progress; /* Check we have enough data to authenticate with, and the server supports authentiation, and end the connect phase if not */ if(!smtpc->auth_supported || !Curl_sasl_can_authenticate(&smtpc->sasl, conn)) { state(conn, SMTP_STOP); return result; } /* Calculate the SASL login details */ result = Curl_sasl_start(&smtpc->sasl, conn, FALSE, &progress); if(!result) { if(progress == SASL_INPROGRESS) state(conn, SMTP_AUTH); else { /* Other mechanisms not supported */ infof(conn->data, "No known authentication mechanisms supported!\n"); result = CURLE_LOGIN_DENIED; } } return result; } /*********************************************************************** * * smtp_perform_command() * * Sends a SMTP based command. */ static CURLcode smtp_perform_command(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; /* Send the command */ if(smtp->rcpt) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s %s", smtp->custom && smtp->custom[0] != '\0' ? smtp->custom : "VRFY", smtp->rcpt->data); else result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", smtp->custom && smtp->custom[0] != '\0' ? smtp->custom : "HELP"); if(!result) state(conn, SMTP_COMMAND); return result; } /*********************************************************************** * * smtp_perform_mail() * * Sends an MAIL command to initiate the upload of a message. */ static CURLcode smtp_perform_mail(struct connectdata *conn) { char *from = NULL; char *auth = NULL; char *size = NULL; CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; /* Calculate the FROM parameter */ if(!data->set.str[STRING_MAIL_FROM]) /* Null reverse-path, RFC-5321, sect. 
3.6.3 */ from = strdup("<>"); else if(data->set.str[STRING_MAIL_FROM][0] == '<') from = aprintf("%s", data->set.str[STRING_MAIL_FROM]); else from = aprintf("<%s>", data->set.str[STRING_MAIL_FROM]); if(!from) return CURLE_OUT_OF_MEMORY; /* Calculate the optional AUTH parameter */ if(data->set.str[STRING_MAIL_AUTH] && conn->proto.smtpc.sasl.authused) { if(data->set.str[STRING_MAIL_AUTH][0] != '\0') auth = aprintf("%s", data->set.str[STRING_MAIL_AUTH]); else /* Empty AUTH, RFC-2554, sect. 5 */ auth = strdup("<>"); if(!auth) { free(from); return CURLE_OUT_OF_MEMORY; } } /* Prepare the mime data if some. */ if(data->set.mimepost.kind != MIMEKIND_NONE) { /* Use the whole structure as data. */ data->set.mimepost.flags &= ~MIME_BODY_ONLY; /* Add external headers and mime version. */ curl_mime_headers(&data->set.mimepost, data->set.headers, 0); result = Curl_mime_prepare_headers(&data->set.mimepost, NULL, NULL, MIMESTRATEGY_MAIL); if(!result) if(!Curl_checkheaders(conn, "Mime-Version")) result = Curl_mime_add_header(&data->set.mimepost.curlheaders, "Mime-Version: 1.0"); /* Make sure we will read the entire mime structure. */ if(!result) result = Curl_mime_rewind(&data->set.mimepost); if(result) { free(from); free(auth); return result; } data->state.infilesize = Curl_mime_size(&data->set.mimepost); /* Read from mime structure. 
*/ data->state.fread_func = (curl_read_callback) Curl_mime_read; data->state.in = (void *) &data->set.mimepost; } /* Calculate the optional SIZE parameter */ if(conn->proto.smtpc.size_supported && data->state.infilesize > 0) { size = aprintf("%" CURL_FORMAT_CURL_OFF_T, data->state.infilesize); if(!size) { free(from); free(auth); return CURLE_OUT_OF_MEMORY; } } /* Send the MAIL command */ if(!auth && !size) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s", from); else if(auth && !size) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s AUTH=%s", from, auth); else if(auth && size) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s AUTH=%s SIZE=%s", from, auth, size); else result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s SIZE=%s", from, size); free(from); free(auth); free(size); if(!result) state(conn, SMTP_MAIL); return result; } /*********************************************************************** * * smtp_perform_rcpt_to() * * Sends a RCPT TO command for a given recipient as part of the message upload * process. */ static CURLcode smtp_perform_rcpt_to(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; /* Send the RCPT TO command */ if(smtp->rcpt->data[0] == '<') result = Curl_pp_sendf(&conn->proto.smtpc.pp, "RCPT TO:%s", smtp->rcpt->data); else result = Curl_pp_sendf(&conn->proto.smtpc.pp, "RCPT TO:<%s>", smtp->rcpt->data); if(!result) state(conn, SMTP_RCPT); return result; } /*********************************************************************** * * smtp_perform_quit() * * Performs the quit action prior to sclose() being called. 
*/ static CURLcode smtp_perform_quit(struct connectdata *conn) { CURLcode result = CURLE_OK; /* Send the QUIT command */ result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "QUIT"); if(!result) state(conn, SMTP_QUIT); return result; } /* For the initial server greeting */ static CURLcode smtp_state_servergreet_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "Got unexpected smtp-server response: %d", smtpcode); result = CURLE_WEIRD_SERVER_REPLY; } else result = smtp_perform_ehlo(conn); return result; } /* For STARTTLS responses */ static CURLcode smtp_state_starttls_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode != 220) { if(data->set.use_ssl != CURLUSESSL_TRY) { failf(data, "STARTTLS denied, code %d", smtpcode); result = CURLE_USE_SSL_FAILED; } else result = smtp_perform_authentication(conn); } else result = smtp_perform_upgrade_tls(conn); return result; } /* For EHLO responses */ static CURLcode smtp_state_ehlo_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct smtp_conn *smtpc = &conn->proto.smtpc; const char *line = data->state.buffer; size_t len = strlen(line); (void)instate; /* no use for this yet */ if(smtpcode/100 != 2 && smtpcode != 1) { if(data->set.use_ssl <= CURLUSESSL_TRY || conn->ssl[FIRSTSOCKET].use) result = smtp_perform_helo(conn); else { failf(data, "Remote access denied: %d", smtpcode); result = CURLE_REMOTE_ACCESS_DENIED; } } else { line += 4; len -= 4; /* Does the server support the STARTTLS capability? */ if(len >= 8 && !memcmp(line, "STARTTLS", 8)) smtpc->tls_supported = TRUE; /* Does the server support the SIZE capability? 
*/ else if(len >= 4 && !memcmp(line, "SIZE", 4)) smtpc->size_supported = TRUE; /* Does the server support authentication? */ else if(len >= 5 && !memcmp(line, "AUTH ", 5)) { smtpc->auth_supported = TRUE; /* Advance past the AUTH keyword */ line += 5; len -= 5; /* Loop through the data line */ for(;;) { size_t llen; size_t wordlen; unsigned int mechbit; while(len && (*line == ' ' || *line == '\t' || *line == '\r' || *line == '\n')) { line++; len--; } if(!len) break; /* Extract the word */ for(wordlen = 0; wordlen < len && line[wordlen] != ' ' && line[wordlen] != '\t' && line[wordlen] != '\r' && line[wordlen] != '\n';) wordlen++; /* Test the word for a matching authentication mechanism */ mechbit = Curl_sasl_decode_mech(line, wordlen, &llen); if(mechbit && llen == wordlen) smtpc->sasl.authmechs |= mechbit; line += wordlen; len -= wordlen; } } if(smtpcode != 1) { if(data->set.use_ssl && !conn->ssl[FIRSTSOCKET].use) { /* We don't have a SSL/TLS connection yet, but SSL is requested */ if(smtpc->tls_supported) /* Switch to TLS connection now */ result = smtp_perform_starttls(conn); else if(data->set.use_ssl == CURLUSESSL_TRY) /* Fallback and carry on with authentication */ result = smtp_perform_authentication(conn); else { failf(data, "STARTTLS not supported."); result = CURLE_USE_SSL_FAILED; } } else result = smtp_perform_authentication(conn); } } return result; } /* For HELO responses */ static CURLcode smtp_state_helo_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "Remote access denied: %d", smtpcode); result = CURLE_REMOTE_ACCESS_DENIED; } else /* End of connect phase */ state(conn, SMTP_STOP); return result; } /* For SASL authentication responses */ static CURLcode smtp_state_auth_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = 
conn->data; struct smtp_conn *smtpc = &conn->proto.smtpc; saslprogress progress; (void)instate; /* no use for this yet */ result = Curl_sasl_continue(&smtpc->sasl, conn, smtpcode, &progress); if(!result) switch(progress) { case SASL_DONE: state(conn, SMTP_STOP); /* Authenticated */ break; case SASL_IDLE: /* No mechanism left after cancellation */ failf(data, "Authentication cancelled"); result = CURLE_LOGIN_DENIED; break; default: break; } return result; } /* For command responses */ static CURLcode smtp_state_command_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *line = data->state.buffer; size_t len = strlen(line); (void)instate; /* no use for this yet */ if((smtp->rcpt && smtpcode/100 != 2 && smtpcode != 553 && smtpcode != 1) || (!smtp->rcpt && smtpcode/100 != 2 && smtpcode != 1)) { failf(data, "Command failed: %d", smtpcode); result = CURLE_RECV_ERROR; } else { /* Temporarily add the LF character back and send as body to the client */ if(!data->set.opt_no_body) { line[len] = '\n'; result = Curl_client_write(conn, CLIENTWRITE_BODY, line, len + 1); line[len] = '\0'; } if(smtpcode != 1) { if(smtp->rcpt) { smtp->rcpt = smtp->rcpt->next; if(smtp->rcpt) { /* Send the next command */ result = smtp_perform_command(conn); } else /* End of DO phase */ state(conn, SMTP_STOP); } else /* End of DO phase */ state(conn, SMTP_STOP); } } return result; } /* For MAIL responses */ static CURLcode smtp_state_mail_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "MAIL failed: %d", smtpcode); result = CURLE_SEND_ERROR; } else /* Start the RCPT TO command */ result = smtp_perform_rcpt_to(conn); return result; } /* For RCPT responses */ static CURLcode smtp_state_rcpt_resp(struct connectdata *conn, int 
smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "RCPT failed: %d", smtpcode); result = CURLE_SEND_ERROR; } else { smtp->rcpt = smtp->rcpt->next; if(smtp->rcpt) /* Send the next RCPT TO command */ result = smtp_perform_rcpt_to(conn); else { /* Send the DATA command */ result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "DATA"); if(!result) state(conn, SMTP_DATA); } } return result; } /* For DATA response */ static CURLcode smtp_state_data_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode != 354) { failf(data, "DATA failed: %d", smtpcode); result = CURLE_SEND_ERROR; } else { /* Set the progress upload size */ Curl_pgrsSetUploadSize(data, data->state.infilesize); /* SMTP upload */ Curl_setup_transfer(conn, -1, -1, FALSE, NULL, FIRSTSOCKET, NULL); /* End of DO phase */ state(conn, SMTP_STOP); } return result; } /* For POSTDATA responses, which are received after the entire DATA part has been sent to the server */ static CURLcode smtp_state_postdata_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; (void)instate; /* no use for this yet */ if(smtpcode != 250) result = CURLE_RECV_ERROR; /* End of DONE phase */ state(conn, SMTP_STOP); return result; } static CURLcode smtp_statemach_act(struct connectdata *conn) { CURLcode result = CURLE_OK; curl_socket_t sock = conn->sock[FIRSTSOCKET]; struct Curl_easy *data = conn->data; int smtpcode; struct smtp_conn *smtpc = &conn->proto.smtpc; struct pingpong *pp = &smtpc->pp; size_t nread = 0; /* Busy upgrading the connection; right now all I/O is SSL/TLS, not SMTP */ if(smtpc->state == SMTP_UPGRADETLS) return smtp_perform_upgrade_tls(conn); /* Flush any data that needs to be sent */ if(pp->sendleft) 
return Curl_pp_flushsend(pp); do { /* Read the response from the server */ result = Curl_pp_readresp(sock, pp, &smtpcode, &nread); if(result) return result; /* Store the latest response for later retrieval if necessary */ if(smtpc->state != SMTP_QUIT && smtpcode != 1) data->info.httpcode = smtpcode; if(!smtpcode) break; /* We have now received a full SMTP server response */ switch(smtpc->state) { case SMTP_SERVERGREET: result = smtp_state_servergreet_resp(conn, smtpcode, smtpc->state); break; case SMTP_EHLO: result = smtp_state_ehlo_resp(conn, smtpcode, smtpc->state); break; case SMTP_HELO: result = smtp_state_helo_resp(conn, smtpcode, smtpc->state); break; case SMTP_STARTTLS: result = smtp_state_starttls_resp(conn, smtpcode, smtpc->state); break; case SMTP_AUTH: result = smtp_state_auth_resp(conn, smtpcode, smtpc->state); break; case SMTP_COMMAND: result = smtp_state_command_resp(conn, smtpcode, smtpc->state); break; case SMTP_MAIL: result = smtp_state_mail_resp(conn, smtpcode, smtpc->state); break; case SMTP_RCPT: result = smtp_state_rcpt_resp(conn, smtpcode, smtpc->state); break; case SMTP_DATA: result = smtp_state_data_resp(conn, smtpcode, smtpc->state); break; case SMTP_POSTDATA: result = smtp_state_postdata_resp(conn, smtpcode, smtpc->state); break; case SMTP_QUIT: /* fallthrough, just stop! */ default: /* internal error */ state(conn, SMTP_STOP); break; } } while(!result && smtpc->state != SMTP_STOP && Curl_pp_moredata(pp)); return result; } /* Called repeatedly until done from multi.c */ static CURLcode smtp_multi_statemach(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; if((conn->handler->flags & PROTOPT_SSL) && !smtpc->ssldone) { result = Curl_ssl_connect_nonblocking(conn, FIRSTSOCKET, &smtpc->ssldone); if(result || !smtpc->ssldone) return result; } result = Curl_pp_statemach(&smtpc->pp, FALSE); *done = (smtpc->state == SMTP_STOP) ? 
TRUE : FALSE; return result; } static CURLcode smtp_block_statemach(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; while(smtpc->state != SMTP_STOP && !result) result = Curl_pp_statemach(&smtpc->pp, TRUE); return result; } /* Allocate and initialize the SMTP struct for the current Curl_easy if required */ static CURLcode smtp_init(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp; smtp = data->req.protop = calloc(sizeof(struct SMTP), 1); if(!smtp) result = CURLE_OUT_OF_MEMORY; return result; } /* For the SMTP "protocol connect" and "doing" phases only */ static int smtp_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks) { return Curl_pp_getsock(&conn->proto.smtpc.pp, socks, numsocks); } /*********************************************************************** * * smtp_connect() * * This function should do everything that is to be considered a part of * the connection phase. * * The variable pointed to by 'done' will be TRUE if the protocol-layer * connect phase is done when this function returns, or FALSE if not. 
*/ static CURLcode smtp_connect(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; struct pingpong *pp = &smtpc->pp; *done = FALSE; /* default to not done yet */ /* We always support persistent connections in SMTP */ connkeep(conn, "SMTP default"); /* Set the default response time-out */ pp->response_time = RESP_TIMEOUT; pp->statemach_act = smtp_statemach_act; pp->endofresp = smtp_endofresp; pp->conn = conn; /* Initialize the SASL storage */ Curl_sasl_init(&smtpc->sasl, &saslsmtp); /* Initialise the pingpong layer */ Curl_pp_init(pp); /* Parse the URL options */ result = smtp_parse_url_options(conn); if(result) return result; /* Parse the URL path */ result = smtp_parse_url_path(conn); if(result) return result; /* Start off waiting for the server greeting response */ state(conn, SMTP_SERVERGREET); result = smtp_multi_statemach(conn, done); return result; } /*********************************************************************** * * smtp_done() * * The DONE function. This does what needs to be done after a single DO has * performed. * * Input argument is already checked for validity. */ static CURLcode smtp_done(struct connectdata *conn, CURLcode status, bool premature) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; struct pingpong *pp = &conn->proto.smtpc.pp; char *eob; ssize_t len; ssize_t bytes_written; (void)premature; if(!smtp || !pp->conn) return CURLE_OK; /* Cleanup our per-request based variables */ Curl_safefree(smtp->custom); if(status) { connclose(conn, "SMTP done with bad status"); /* marked for closure */ result = status; /* use the already set error code */ } else if(!data->set.connect_only && data->set.mail_rcpt && (data->set.upload || data->set.mimepost.kind)) { /* Calculate the EOB taking into account any terminating CRLF from the previous line of the email or the CRLF of the DATA command when there is "no mail data". 
RFC-5321, sect. 4.1.1.4. Note: As some SSL backends, such as OpenSSL, will cause Curl_write() to fail when using a different pointer following a previous write, that returned CURLE_AGAIN, we duplicate the EOB now rather than when the bytes written doesn't equal len. */ if(smtp->trailing_crlf || !conn->data->state.infilesize) { eob = strdup(SMTP_EOB + 2); len = SMTP_EOB_LEN - 2; } else { eob = strdup(SMTP_EOB); len = SMTP_EOB_LEN; } if(!eob) return CURLE_OUT_OF_MEMORY; /* Send the end of block data */ result = Curl_write(conn, conn->writesockfd, eob, len, &bytes_written); if(result) { free(eob); return result; } if(bytes_written != len) { /* The whole chunk was not sent so keep it around and adjust the pingpong structure accordingly */ pp->sendthis = eob; pp->sendsize = len; pp->sendleft = len - bytes_written; } else { /* Successfully sent so adjust the response timeout relative to now */ pp->response = Curl_now(); free(eob); } state(conn, SMTP_POSTDATA); /* Run the state-machine TODO: when the multi interface is used, this _really_ should be using the smtp_multi_statemach function but we have no general support for non-blocking DONE operations! */ result = smtp_block_statemach(conn); } /* Clear the transfer mode for the next request */ smtp->transfer = FTPTRANSFER_BODY; return result; } /*********************************************************************** * * smtp_perform() * * This is the actual DO function for SMTP. Transfer a mail, send a command * or get some data according to the options previously setup. 
*/ static CURLcode smtp_perform(struct connectdata *conn, bool *connected, bool *dophase_done) { /* This is SMTP and no proxy */ CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; DEBUGF(infof(conn->data, "DO phase starts\n")); if(data->set.opt_no_body) { /* Requested no body means no transfer */ smtp->transfer = FTPTRANSFER_INFO; } *dophase_done = FALSE; /* not done yet */ /* Store the first recipient (or NULL if not specified) */ smtp->rcpt = data->set.mail_rcpt; /* Initial data character is the first character in line: it is implicitly preceded by a virtual CRLF. */ smtp->trailing_crlf = TRUE; smtp->eob = 2; /* Start the first command in the DO phase */ if((data->set.upload || data->set.mimepost.kind) && data->set.mail_rcpt) /* MAIL transfer */ result = smtp_perform_mail(conn); else /* SMTP based command (VRFY, EXPN, NOOP, RSET or HELP) */ result = smtp_perform_command(conn); if(result) return result; /* Run the state-machine */ result = smtp_multi_statemach(conn, dophase_done); *connected = conn->bits.tcpconnect[FIRSTSOCKET]; if(*dophase_done) DEBUGF(infof(conn->data, "DO phase is complete\n")); return result; } /*********************************************************************** * * smtp_do() * * This function is registered as 'curl_do' function. It decodes the path * parts etc as a wrapper to the actual DO function (smtp_perform). * * The input argument is already checked for validity. */ static CURLcode smtp_do(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; *done = FALSE; /* default to false */ /* Parse the custom request */ result = smtp_parse_custom_request(conn); if(result) return result; result = smtp_regular_transfer(conn, done); return result; } /*********************************************************************** * * smtp_disconnect() * * Disconnect from an SMTP server. Cleanup protocol-specific per-connection * resources. BLOCKING. 
*/
/* Disconnect from an SMTP server: attempt a polite QUIT (only when the
   connection is healthy), then tear down the pingpong layer, the SASL
   module and the per-connection domain string. BLOCKING. */
static CURLcode smtp_disconnect(struct connectdata *conn, bool dead_connection)
{
  struct smtp_conn *smtpc = &conn->proto.smtpc;

  /* We cannot send quit unconditionally. If this connection is stale or bad
     in any way, sending quit and waiting around here will make the
     disconnect wait in vain and cause more problems than we need to. */

  /* The SMTP session may or may not have been allocated/setup at this
     point! */
  if(!dead_connection && smtpc->pp.conn && smtpc->pp.conn->bits.protoconnstart)
    if(!smtp_perform_quit(conn))
      (void)smtp_block_statemach(conn); /* ignore errors on QUIT */

  /* Disconnect from the server */
  Curl_pp_disconnect(&smtpc->pp);

  /* Cleanup the SASL module */
  Curl_sasl_cleanup(conn, smtpc->sasl.authused);

  /* Cleanup our connection based variables */
  Curl_safefree(smtpc->domain);

  return CURLE_OK;
}

/* Call this when the DO phase has completed */
static CURLcode smtp_dophase_done(struct connectdata *conn, bool connected)
{
  struct SMTP *smtp = conn->data->req.protop;

  (void)connected;

  /* A non-BODY transfer (VRFY/EXPN/NOOP/etc. with no payload) still needs
     the transfer machinery told there is nothing to do */
  if(smtp->transfer != FTPTRANSFER_BODY)
    /* no data to transfer */
    Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);

  return CURLE_OK;
}

/* Called from multi.c while DOing: drives the non-blocking state machine one
   step and finalises the DO phase once it reports completion */
static CURLcode smtp_doing(struct connectdata *conn, bool *dophase_done)
{
  CURLcode result = smtp_multi_statemach(conn, dophase_done);

  if(result)
    DEBUGF(infof(conn->data, "DO phase failed\n"));
  else if(*dophase_done) {
    result = smtp_dophase_done(conn, FALSE /* not connected */);

    DEBUGF(infof(conn->data, "DO phase is complete\n"));
  }

  return result;
}

/***********************************************************************
 *
 * smtp_regular_transfer()
 *
 * The input argument is already checked for validity.
 *
 * Performs all commands done before a regular transfer between a local and a
 * remote host.
*/ static CURLcode smtp_regular_transfer(struct connectdata *conn, bool *dophase_done) { CURLcode result = CURLE_OK; bool connected = FALSE; struct Curl_easy *data = conn->data; /* Make sure size is unknown at this point */ data->req.size = -1; /* Set the progress data */ Curl_pgrsSetUploadCounter(data, 0); Curl_pgrsSetDownloadCounter(data, 0); Curl_pgrsSetUploadSize(data, -1); Curl_pgrsSetDownloadSize(data, -1); /* Carry out the perform */ result = smtp_perform(conn, &connected, dophase_done); /* Perform post DO phase operations if necessary */ if(!result && *dophase_done) result = smtp_dophase_done(conn, connected); return result; } static CURLcode smtp_setup_connection(struct connectdata *conn) { struct Curl_easy *data = conn->data; CURLcode result; /* Clear the TLS upgraded flag */ conn->tls_upgraded = FALSE; /* Initialise the SMTP layer */ result = smtp_init(conn); if(result) return result; data->state.path++; /* don't include the initial slash */ return CURLE_OK; } /*********************************************************************** * * smtp_parse_url_options() * * Parse the URL login options. */ static CURLcode smtp_parse_url_options(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; const char *ptr = conn->options; smtpc->sasl.resetprefs = TRUE; while(!result && ptr && *ptr) { const char *key = ptr; const char *value; while(*ptr && *ptr != '=') ptr++; value = ptr + 1; while(*ptr && *ptr != ';') ptr++; if(strncasecompare(key, "AUTH=", 5)) result = Curl_sasl_parse_url_auth_option(&smtpc->sasl, value, ptr - value); else result = CURLE_URL_MALFORMAT; if(*ptr == ';') ptr++; } return result; } /*********************************************************************** * * smtp_parse_url_path() * * Parse the URL path into separate path components. 
*/ static CURLcode smtp_parse_url_path(struct connectdata *conn) { /* The SMTP struct is already initialised in smtp_connect() */ struct Curl_easy *data = conn->data; struct smtp_conn *smtpc = &conn->proto.smtpc; const char *path = data->state.path; char localhost[HOSTNAME_MAX + 1]; /* Calculate the path if necessary */ if(!*path) { if(!Curl_gethostname(localhost, sizeof(localhost))) path = localhost; else path = "localhost"; } /* URL decode the path and use it as the domain in our EHLO */ return Curl_urldecode(conn->data, path, 0, &smtpc->domain, NULL, TRUE); } /*********************************************************************** * * smtp_parse_custom_request() * * Parse the custom request. */ static CURLcode smtp_parse_custom_request(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; const char *custom = data->set.str[STRING_CUSTOMREQUEST]; /* URL decode the custom request */ if(custom) result = Curl_urldecode(data, custom, 0, &smtp->custom, NULL, TRUE); return result; } CURLcode Curl_smtp_escape_eob(struct connectdata *conn, const ssize_t nread) { /* When sending a SMTP payload we must detect CRLF. sequences making sure they are sent as CRLF.. instead, as a . on the beginning of a line will be deleted by the server when not part of an EOB terminator and a genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of data by the server */ ssize_t i; ssize_t si; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *scratch = data->state.scratch; char *newscratch = NULL; char *oldscratch = NULL; size_t eob_sent; /* Do we need to allocate a scratch buffer? */ if(!scratch || data->set.crlf) { oldscratch = scratch; scratch = newscratch = malloc(2 * data->set.buffer_size); if(!newscratch) { failf(data, "Failed to alloc scratch buffer!"); return CURLE_OUT_OF_MEMORY; } } /* Have we already sent part of the EOB? 
*/ eob_sent = smtp->eob; /* This loop can be improved by some kind of Boyer-Moore style of approach but that is saved for later... */ for(i = 0, si = 0; i < nread; i++) { if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) { smtp->eob++; /* Is the EOB potentially the terminating CRLF? */ if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob) smtp->trailing_crlf = TRUE; else smtp->trailing_crlf = FALSE; } else if(smtp->eob) { /* A previous substring matched so output that first */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; /* Then compare the first byte */ if(SMTP_EOB[0] == data->req.upload_fromhere[i]) smtp->eob = 1; else smtp->eob = 0; eob_sent = 0; /* Reset the trailing CRLF flag as there was more data */ smtp->trailing_crlf = FALSE; } /* Do we have a match for CRLF. as per RFC-5321, sect. 4.5.2 */ if(SMTP_EOB_FIND_LEN == smtp->eob) { /* Copy the replacement data to the target buffer */ memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent], SMTP_EOB_REPL_LEN - eob_sent); si += SMTP_EOB_REPL_LEN - eob_sent; smtp->eob = 0; eob_sent = 0; } else if(!smtp->eob) scratch[si++] = data->req.upload_fromhere[i]; } if(smtp->eob - eob_sent) { /* A substring matched before processing ended so output that now */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; } /* Only use the new buffer if we replaced something */ if(si != nread) { /* Upload from the new (replaced) buffer instead */ data->req.upload_fromhere = scratch; /* Save the buffer so it can be freed later */ data->state.scratch = scratch; /* Free the old scratch buffer */ free(oldscratch); /* Set the new amount too */ data->req.upload_present = si; } else free(newscratch); return CURLE_OK; } #endif /* CURL_DISABLE_SMTP */
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * * RFC1870 SMTP Service Extension for Message Size * RFC2195 CRAM-MD5 authentication * RFC2831 DIGEST-MD5 authentication * RFC3207 SMTP over TLS * RFC4422 Simple Authentication and Security Layer (SASL) * RFC4616 PLAIN authentication * RFC4752 The Kerberos V5 ("GSSAPI") SASL Mechanism * RFC4954 SMTP Authentication * RFC5321 SMTP protocol * RFC6749 OAuth 2.0 Authorization Framework * Draft SMTP URL Interface <draft-earhart-url-smtp-00.txt> * Draft LOGIN SASL Mechanism <draft-murchison-sasl-login-00.txt> * ***************************************************************************/ #include "curl_setup.h" #ifndef CURL_DISABLE_SMTP #ifdef HAVE_NETINET_IN_H #include <netinet/in.h> #endif #ifdef HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #ifdef HAVE_UTSNAME_H #include <sys/utsname.h> #endif #ifdef HAVE_NETDB_H #include <netdb.h> #endif #ifdef __VMS #include <in.h> #include <inet.h> #endif #if (defined(NETWARE) && defined(__NOVELL_LIBC__)) #undef in_addr_t #define in_addr_t unsigned long #endif #include <curl/curl.h> #include "urldata.h" #include "sendf.h" #include "hostip.h" #include "progress.h" #include "transfer.h" #include "escape.h" #include "http.h" /* for HTTP proxy tunnel stuff */ 
#include "mime.h" #include "socks.h" #include "smtp.h" #include "strtoofft.h" #include "strcase.h" #include "vtls/vtls.h" #include "connect.h" #include "strerror.h" #include "select.h" #include "multiif.h" #include "url.h" #include "curl_gethostname.h" #include "curl_sasl.h" #include "warnless.h" /* The last 3 #include files should be in this order */ #include "curl_printf.h" #include "curl_memory.h" #include "memdebug.h" /* Local API functions */ static CURLcode smtp_regular_transfer(struct connectdata *conn, bool *done); static CURLcode smtp_do(struct connectdata *conn, bool *done); static CURLcode smtp_done(struct connectdata *conn, CURLcode status, bool premature); static CURLcode smtp_connect(struct connectdata *conn, bool *done); static CURLcode smtp_disconnect(struct connectdata *conn, bool dead); static CURLcode smtp_multi_statemach(struct connectdata *conn, bool *done); static int smtp_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks); static CURLcode smtp_doing(struct connectdata *conn, bool *dophase_done); static CURLcode smtp_setup_connection(struct connectdata *conn); static CURLcode smtp_parse_url_options(struct connectdata *conn); static CURLcode smtp_parse_url_path(struct connectdata *conn); static CURLcode smtp_parse_custom_request(struct connectdata *conn); static CURLcode smtp_perform_auth(struct connectdata *conn, const char *mech, const char *initresp); static CURLcode smtp_continue_auth(struct connectdata *conn, const char *resp); static void smtp_get_message(char *buffer, char **outptr); /* * SMTP protocol handler. 
*/ const struct Curl_handler Curl_handler_smtp = { "SMTP", /* scheme */ smtp_setup_connection, /* setup_connection */ smtp_do, /* do_it */ smtp_done, /* done */ ZERO_NULL, /* do_more */ smtp_connect, /* connect_it */ smtp_multi_statemach, /* connecting */ smtp_doing, /* doing */ smtp_getsock, /* proto_getsock */ smtp_getsock, /* doing_getsock */ ZERO_NULL, /* domore_getsock */ ZERO_NULL, /* perform_getsock */ smtp_disconnect, /* disconnect */ ZERO_NULL, /* readwrite */ ZERO_NULL, /* connection_check */ PORT_SMTP, /* defport */ CURLPROTO_SMTP, /* protocol */ PROTOPT_CLOSEACTION | PROTOPT_NOURLQUERY | /* flags */ PROTOPT_URLOPTIONS }; #ifdef USE_SSL /* * SMTPS protocol handler. */ const struct Curl_handler Curl_handler_smtps = { "SMTPS", /* scheme */ smtp_setup_connection, /* setup_connection */ smtp_do, /* do_it */ smtp_done, /* done */ ZERO_NULL, /* do_more */ smtp_connect, /* connect_it */ smtp_multi_statemach, /* connecting */ smtp_doing, /* doing */ smtp_getsock, /* proto_getsock */ smtp_getsock, /* doing_getsock */ ZERO_NULL, /* domore_getsock */ ZERO_NULL, /* perform_getsock */ smtp_disconnect, /* disconnect */ ZERO_NULL, /* readwrite */ ZERO_NULL, /* connection_check */ PORT_SMTPS, /* defport */ CURLPROTO_SMTPS, /* protocol */ PROTOPT_CLOSEACTION | PROTOPT_SSL | PROTOPT_NOURLQUERY | PROTOPT_URLOPTIONS /* flags */ }; #endif /* SASL parameters for the smtp protocol */ static const struct SASLproto saslsmtp = { "smtp", /* The service name */ 334, /* Code received when continuation is expected */ 235, /* Code to receive upon authentication success */ 512 - 8, /* Maximum initial response length (no max) */ smtp_perform_auth, /* Send authentication command */ smtp_continue_auth, /* Send authentication continuation */ smtp_get_message /* Get SASL response message */ }; #ifdef USE_SSL static void smtp_to_smtps(struct connectdata *conn) { /* Change the connection handler */ conn->handler = &Curl_handler_smtps; /* Set the connection's upgraded to TLS flag */ 
conn->tls_upgraded = TRUE; } #else #define smtp_to_smtps(x) Curl_nop_stmt #endif /*********************************************************************** * * smtp_endofresp() * * Checks for an ending SMTP status code at the start of the given string, but * also detects various capabilities from the EHLO response including the * supported authentication mechanisms. */ static bool smtp_endofresp(struct connectdata *conn, char *line, size_t len, int *resp) { struct smtp_conn *smtpc = &conn->proto.smtpc; bool result = FALSE; /* Nothing for us */ if(len < 4 || !ISDIGIT(line[0]) || !ISDIGIT(line[1]) || !ISDIGIT(line[2])) return FALSE; /* Do we have a command response? This should be the response code followed by a space and optionally some text as per RFC-5321 and as outlined in Section 4. Examples of RFC-4954 but some e-mail servers ignore this and only send the response code instead as per Section 4.2. */ if(line[3] == ' ' || len == 5) { result = TRUE; *resp = curlx_sltosi(strtol(line, NULL, 10)); /* Make sure real server never sends internal value */ if(*resp == 1) *resp = 0; } /* Do we have a multiline (continuation) response? */ else if(line[3] == '-' && (smtpc->state == SMTP_EHLO || smtpc->state == SMTP_COMMAND)) { result = TRUE; *resp = 1; /* Internal response code */ } return result; } /*********************************************************************** * * smtp_get_message() * * Gets the authentication message from the response buffer. 
*/ static void smtp_get_message(char *buffer, char **outptr) { size_t len = strlen(buffer); char *message = NULL; if(len > 4) { /* Find the start of the message */ len -= 4; for(message = buffer + 4; *message == ' ' || *message == '\t'; message++, len--) ; /* Find the end of the message */ for(; len--;) if(message[len] != '\r' && message[len] != '\n' && message[len] != ' ' && message[len] != '\t') break; /* Terminate the message */ if(++len) { message[len] = '\0'; } } else /* junk input => zero length output */ message = &buffer[len]; *outptr = message; } /*********************************************************************** * * state() * * This is the ONLY way to change SMTP state! */ static void state(struct connectdata *conn, smtpstate newstate) { struct smtp_conn *smtpc = &conn->proto.smtpc; #if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS) /* for debug purposes */ static const char * const names[] = { "STOP", "SERVERGREET", "EHLO", "HELO", "STARTTLS", "UPGRADETLS", "AUTH", "COMMAND", "MAIL", "RCPT", "DATA", "POSTDATA", "QUIT", /* LAST */ }; if(smtpc->state != newstate) infof(conn->data, "SMTP %p state change from %s to %s\n", (void *)smtpc, names[smtpc->state], names[newstate]); #endif smtpc->state = newstate; } /*********************************************************************** * * smtp_perform_ehlo() * * Sends the EHLO command to not only initialise communication with the ESMTP * server but to also obtain a list of server side supported capabilities. */ static CURLcode smtp_perform_ehlo(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; smtpc->sasl.authmechs = SASL_AUTH_NONE; /* No known auth. 
mechanism yet */ smtpc->sasl.authused = SASL_AUTH_NONE; /* Clear the authentication mechanism used for esmtp connections */ smtpc->tls_supported = FALSE; /* Clear the TLS capability */ smtpc->auth_supported = FALSE; /* Clear the AUTH capability */ /* Send the EHLO command */ result = Curl_pp_sendf(&smtpc->pp, "EHLO %s", smtpc->domain); if(!result) state(conn, SMTP_EHLO); return result; } /*********************************************************************** * * smtp_perform_helo() * * Sends the HELO command to initialise communication with the SMTP server. */ static CURLcode smtp_perform_helo(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; smtpc->sasl.authused = SASL_AUTH_NONE; /* No authentication mechanism used in smtp connections */ /* Send the HELO command */ result = Curl_pp_sendf(&smtpc->pp, "HELO %s", smtpc->domain); if(!result) state(conn, SMTP_HELO); return result; } /*********************************************************************** * * smtp_perform_starttls() * * Sends the STLS command to start the upgrade to TLS. */ static CURLcode smtp_perform_starttls(struct connectdata *conn) { CURLcode result = CURLE_OK; /* Send the STARTTLS command */ result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "STARTTLS"); if(!result) state(conn, SMTP_STARTTLS); return result; } /*********************************************************************** * * smtp_perform_upgrade_tls() * * Performs the upgrade to TLS. 
*/ static CURLcode smtp_perform_upgrade_tls(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; /* Start the SSL connection */ result = Curl_ssl_connect_nonblocking(conn, FIRSTSOCKET, &smtpc->ssldone); if(!result) { if(smtpc->state != SMTP_UPGRADETLS) state(conn, SMTP_UPGRADETLS); if(smtpc->ssldone) { smtp_to_smtps(conn); result = smtp_perform_ehlo(conn); } } return result; } /*********************************************************************** * * smtp_perform_auth() * * Sends an AUTH command allowing the client to login with the given SASL * authentication mechanism. */ static CURLcode smtp_perform_auth(struct connectdata *conn, const char *mech, const char *initresp) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; if(initresp) { /* AUTH <mech> ...<crlf> */ /* Send the AUTH command with the initial response */ result = Curl_pp_sendf(&smtpc->pp, "AUTH %s %s", mech, initresp); } else { /* Send the AUTH command */ result = Curl_pp_sendf(&smtpc->pp, "AUTH %s", mech); } return result; } /*********************************************************************** * * smtp_continue_auth() * * Sends SASL continuation data or cancellation. */ static CURLcode smtp_continue_auth(struct connectdata *conn, const char *resp) { struct smtp_conn *smtpc = &conn->proto.smtpc; return Curl_pp_sendf(&smtpc->pp, "%s", resp); } /*********************************************************************** * * smtp_perform_authentication() * * Initiates the authentication sequence, with the appropriate SASL * authentication mechanism. 
*/ static CURLcode smtp_perform_authentication(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; saslprogress progress; /* Check we have enough data to authenticate with, and the server supports authentiation, and end the connect phase if not */ if(!smtpc->auth_supported || !Curl_sasl_can_authenticate(&smtpc->sasl, conn)) { state(conn, SMTP_STOP); return result; } /* Calculate the SASL login details */ result = Curl_sasl_start(&smtpc->sasl, conn, FALSE, &progress); if(!result) { if(progress == SASL_INPROGRESS) state(conn, SMTP_AUTH); else { /* Other mechanisms not supported */ infof(conn->data, "No known authentication mechanisms supported!\n"); result = CURLE_LOGIN_DENIED; } } return result; } /*********************************************************************** * * smtp_perform_command() * * Sends a SMTP based command. */ static CURLcode smtp_perform_command(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; /* Send the command */ if(smtp->rcpt) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s %s", smtp->custom && smtp->custom[0] != '\0' ? smtp->custom : "VRFY", smtp->rcpt->data); else result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", smtp->custom && smtp->custom[0] != '\0' ? smtp->custom : "HELP"); if(!result) state(conn, SMTP_COMMAND); return result; } /*********************************************************************** * * smtp_perform_mail() * * Sends an MAIL command to initiate the upload of a message. */ static CURLcode smtp_perform_mail(struct connectdata *conn) { char *from = NULL; char *auth = NULL; char *size = NULL; CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; /* Calculate the FROM parameter */ if(!data->set.str[STRING_MAIL_FROM]) /* Null reverse-path, RFC-5321, sect. 
3.6.3 */ from = strdup("<>"); else if(data->set.str[STRING_MAIL_FROM][0] == '<') from = aprintf("%s", data->set.str[STRING_MAIL_FROM]); else from = aprintf("<%s>", data->set.str[STRING_MAIL_FROM]); if(!from) return CURLE_OUT_OF_MEMORY; /* Calculate the optional AUTH parameter */ if(data->set.str[STRING_MAIL_AUTH] && conn->proto.smtpc.sasl.authused) { if(data->set.str[STRING_MAIL_AUTH][0] != '\0') auth = aprintf("%s", data->set.str[STRING_MAIL_AUTH]); else /* Empty AUTH, RFC-2554, sect. 5 */ auth = strdup("<>"); if(!auth) { free(from); return CURLE_OUT_OF_MEMORY; } } /* Prepare the mime data if some. */ if(data->set.mimepost.kind != MIMEKIND_NONE) { /* Use the whole structure as data. */ data->set.mimepost.flags &= ~MIME_BODY_ONLY; /* Add external headers and mime version. */ curl_mime_headers(&data->set.mimepost, data->set.headers, 0); result = Curl_mime_prepare_headers(&data->set.mimepost, NULL, NULL, MIMESTRATEGY_MAIL); if(!result) if(!Curl_checkheaders(conn, "Mime-Version")) result = Curl_mime_add_header(&data->set.mimepost.curlheaders, "Mime-Version: 1.0"); /* Make sure we will read the entire mime structure. */ if(!result) result = Curl_mime_rewind(&data->set.mimepost); if(result) { free(from); free(auth); return result; } data->state.infilesize = Curl_mime_size(&data->set.mimepost); /* Read from mime structure. 
*/ data->state.fread_func = (curl_read_callback) Curl_mime_read; data->state.in = (void *) &data->set.mimepost; } /* Calculate the optional SIZE parameter */ if(conn->proto.smtpc.size_supported && data->state.infilesize > 0) { size = aprintf("%" CURL_FORMAT_CURL_OFF_T, data->state.infilesize); if(!size) { free(from); free(auth); return CURLE_OUT_OF_MEMORY; } } /* Send the MAIL command */ if(!auth && !size) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s", from); else if(auth && !size) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s AUTH=%s", from, auth); else if(auth && size) result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s AUTH=%s SIZE=%s", from, auth, size); else result = Curl_pp_sendf(&conn->proto.smtpc.pp, "MAIL FROM:%s SIZE=%s", from, size); free(from); free(auth); free(size); if(!result) state(conn, SMTP_MAIL); return result; } /*********************************************************************** * * smtp_perform_rcpt_to() * * Sends a RCPT TO command for a given recipient as part of the message upload * process. */ static CURLcode smtp_perform_rcpt_to(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; /* Send the RCPT TO command */ if(smtp->rcpt->data[0] == '<') result = Curl_pp_sendf(&conn->proto.smtpc.pp, "RCPT TO:%s", smtp->rcpt->data); else result = Curl_pp_sendf(&conn->proto.smtpc.pp, "RCPT TO:<%s>", smtp->rcpt->data); if(!result) state(conn, SMTP_RCPT); return result; } /*********************************************************************** * * smtp_perform_quit() * * Performs the quit action prior to sclose() being called. 
*/ static CURLcode smtp_perform_quit(struct connectdata *conn) { CURLcode result = CURLE_OK; /* Send the QUIT command */ result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "QUIT"); if(!result) state(conn, SMTP_QUIT); return result; } /* For the initial server greeting */ static CURLcode smtp_state_servergreet_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "Got unexpected smtp-server response: %d", smtpcode); result = CURLE_WEIRD_SERVER_REPLY; } else result = smtp_perform_ehlo(conn); return result; } /* For STARTTLS responses */ static CURLcode smtp_state_starttls_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode != 220) { if(data->set.use_ssl != CURLUSESSL_TRY) { failf(data, "STARTTLS denied, code %d", smtpcode); result = CURLE_USE_SSL_FAILED; } else result = smtp_perform_authentication(conn); } else result = smtp_perform_upgrade_tls(conn); return result; } /* For EHLO responses */ static CURLcode smtp_state_ehlo_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct smtp_conn *smtpc = &conn->proto.smtpc; const char *line = data->state.buffer; size_t len = strlen(line); (void)instate; /* no use for this yet */ if(smtpcode/100 != 2 && smtpcode != 1) { if(data->set.use_ssl <= CURLUSESSL_TRY || conn->ssl[FIRSTSOCKET].use) result = smtp_perform_helo(conn); else { failf(data, "Remote access denied: %d", smtpcode); result = CURLE_REMOTE_ACCESS_DENIED; } } else { line += 4; len -= 4; /* Does the server support the STARTTLS capability? */ if(len >= 8 && !memcmp(line, "STARTTLS", 8)) smtpc->tls_supported = TRUE; /* Does the server support the SIZE capability? 
*/ else if(len >= 4 && !memcmp(line, "SIZE", 4)) smtpc->size_supported = TRUE; /* Does the server support authentication? */ else if(len >= 5 && !memcmp(line, "AUTH ", 5)) { smtpc->auth_supported = TRUE; /* Advance past the AUTH keyword */ line += 5; len -= 5; /* Loop through the data line */ for(;;) { size_t llen; size_t wordlen; unsigned int mechbit; while(len && (*line == ' ' || *line == '\t' || *line == '\r' || *line == '\n')) { line++; len--; } if(!len) break; /* Extract the word */ for(wordlen = 0; wordlen < len && line[wordlen] != ' ' && line[wordlen] != '\t' && line[wordlen] != '\r' && line[wordlen] != '\n';) wordlen++; /* Test the word for a matching authentication mechanism */ mechbit = Curl_sasl_decode_mech(line, wordlen, &llen); if(mechbit && llen == wordlen) smtpc->sasl.authmechs |= mechbit; line += wordlen; len -= wordlen; } } if(smtpcode != 1) { if(data->set.use_ssl && !conn->ssl[FIRSTSOCKET].use) { /* We don't have a SSL/TLS connection yet, but SSL is requested */ if(smtpc->tls_supported) /* Switch to TLS connection now */ result = smtp_perform_starttls(conn); else if(data->set.use_ssl == CURLUSESSL_TRY) /* Fallback and carry on with authentication */ result = smtp_perform_authentication(conn); else { failf(data, "STARTTLS not supported."); result = CURLE_USE_SSL_FAILED; } } else result = smtp_perform_authentication(conn); } } return result; } /* For HELO responses */ static CURLcode smtp_state_helo_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "Remote access denied: %d", smtpcode); result = CURLE_REMOTE_ACCESS_DENIED; } else /* End of connect phase */ state(conn, SMTP_STOP); return result; } /* For SASL authentication responses */ static CURLcode smtp_state_auth_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = 
conn->data; struct smtp_conn *smtpc = &conn->proto.smtpc; saslprogress progress; (void)instate; /* no use for this yet */ result = Curl_sasl_continue(&smtpc->sasl, conn, smtpcode, &progress); if(!result) switch(progress) { case SASL_DONE: state(conn, SMTP_STOP); /* Authenticated */ break; case SASL_IDLE: /* No mechanism left after cancellation */ failf(data, "Authentication cancelled"); result = CURLE_LOGIN_DENIED; break; default: break; } return result; } /* For command responses */ static CURLcode smtp_state_command_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *line = data->state.buffer; size_t len = strlen(line); (void)instate; /* no use for this yet */ if((smtp->rcpt && smtpcode/100 != 2 && smtpcode != 553 && smtpcode != 1) || (!smtp->rcpt && smtpcode/100 != 2 && smtpcode != 1)) { failf(data, "Command failed: %d", smtpcode); result = CURLE_RECV_ERROR; } else { /* Temporarily add the LF character back and send as body to the client */ if(!data->set.opt_no_body) { line[len] = '\n'; result = Curl_client_write(conn, CLIENTWRITE_BODY, line, len + 1); line[len] = '\0'; } if(smtpcode != 1) { if(smtp->rcpt) { smtp->rcpt = smtp->rcpt->next; if(smtp->rcpt) { /* Send the next command */ result = smtp_perform_command(conn); } else /* End of DO phase */ state(conn, SMTP_STOP); } else /* End of DO phase */ state(conn, SMTP_STOP); } } return result; } /* For MAIL responses */ static CURLcode smtp_state_mail_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "MAIL failed: %d", smtpcode); result = CURLE_SEND_ERROR; } else /* Start the RCPT TO command */ result = smtp_perform_rcpt_to(conn); return result; } /* For RCPT responses */ static CURLcode smtp_state_rcpt_resp(struct connectdata *conn, int 
smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; (void)instate; /* no use for this yet */ if(smtpcode/100 != 2) { failf(data, "RCPT failed: %d", smtpcode); result = CURLE_SEND_ERROR; } else { smtp->rcpt = smtp->rcpt->next; if(smtp->rcpt) /* Send the next RCPT TO command */ result = smtp_perform_rcpt_to(conn); else { /* Send the DATA command */ result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "DATA"); if(!result) state(conn, SMTP_DATA); } } return result; } /* For DATA response */ static CURLcode smtp_state_data_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; (void)instate; /* no use for this yet */ if(smtpcode != 354) { failf(data, "DATA failed: %d", smtpcode); result = CURLE_SEND_ERROR; } else { /* Set the progress upload size */ Curl_pgrsSetUploadSize(data, data->state.infilesize); /* SMTP upload */ Curl_setup_transfer(conn, -1, -1, FALSE, NULL, FIRSTSOCKET, NULL); /* End of DO phase */ state(conn, SMTP_STOP); } return result; } /* For POSTDATA responses, which are received after the entire DATA part has been sent to the server */ static CURLcode smtp_state_postdata_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; (void)instate; /* no use for this yet */ if(smtpcode != 250) result = CURLE_RECV_ERROR; /* End of DONE phase */ state(conn, SMTP_STOP); return result; } static CURLcode smtp_statemach_act(struct connectdata *conn) { CURLcode result = CURLE_OK; curl_socket_t sock = conn->sock[FIRSTSOCKET]; struct Curl_easy *data = conn->data; int smtpcode; struct smtp_conn *smtpc = &conn->proto.smtpc; struct pingpong *pp = &smtpc->pp; size_t nread = 0; /* Busy upgrading the connection; right now all I/O is SSL/TLS, not SMTP */ if(smtpc->state == SMTP_UPGRADETLS) return smtp_perform_upgrade_tls(conn); /* Flush any data that needs to be sent */ if(pp->sendleft) 
return Curl_pp_flushsend(pp); do { /* Read the response from the server */ result = Curl_pp_readresp(sock, pp, &smtpcode, &nread); if(result) return result; /* Store the latest response for later retrieval if necessary */ if(smtpc->state != SMTP_QUIT && smtpcode != 1) data->info.httpcode = smtpcode; if(!smtpcode) break; /* We have now received a full SMTP server response */ switch(smtpc->state) { case SMTP_SERVERGREET: result = smtp_state_servergreet_resp(conn, smtpcode, smtpc->state); break; case SMTP_EHLO: result = smtp_state_ehlo_resp(conn, smtpcode, smtpc->state); break; case SMTP_HELO: result = smtp_state_helo_resp(conn, smtpcode, smtpc->state); break; case SMTP_STARTTLS: result = smtp_state_starttls_resp(conn, smtpcode, smtpc->state); break; case SMTP_AUTH: result = smtp_state_auth_resp(conn, smtpcode, smtpc->state); break; case SMTP_COMMAND: result = smtp_state_command_resp(conn, smtpcode, smtpc->state); break; case SMTP_MAIL: result = smtp_state_mail_resp(conn, smtpcode, smtpc->state); break; case SMTP_RCPT: result = smtp_state_rcpt_resp(conn, smtpcode, smtpc->state); break; case SMTP_DATA: result = smtp_state_data_resp(conn, smtpcode, smtpc->state); break; case SMTP_POSTDATA: result = smtp_state_postdata_resp(conn, smtpcode, smtpc->state); break; case SMTP_QUIT: /* fallthrough, just stop! */ default: /* internal error */ state(conn, SMTP_STOP); break; } } while(!result && smtpc->state != SMTP_STOP && Curl_pp_moredata(pp)); return result; } /* Called repeatedly until done from multi.c */ static CURLcode smtp_multi_statemach(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; if((conn->handler->flags & PROTOPT_SSL) && !smtpc->ssldone) { result = Curl_ssl_connect_nonblocking(conn, FIRSTSOCKET, &smtpc->ssldone); if(result || !smtpc->ssldone) return result; } result = Curl_pp_statemach(&smtpc->pp, FALSE); *done = (smtpc->state == SMTP_STOP) ? 
TRUE : FALSE; return result; } static CURLcode smtp_block_statemach(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; while(smtpc->state != SMTP_STOP && !result) result = Curl_pp_statemach(&smtpc->pp, TRUE); return result; } /* Allocate and initialize the SMTP struct for the current Curl_easy if required */ static CURLcode smtp_init(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp; smtp = data->req.protop = calloc(sizeof(struct SMTP), 1); if(!smtp) result = CURLE_OUT_OF_MEMORY; return result; } /* For the SMTP "protocol connect" and "doing" phases only */ static int smtp_getsock(struct connectdata *conn, curl_socket_t *socks, int numsocks) { return Curl_pp_getsock(&conn->proto.smtpc.pp, socks, numsocks); } /*********************************************************************** * * smtp_connect() * * This function should do everything that is to be considered a part of * the connection phase. * * The variable pointed to by 'done' will be TRUE if the protocol-layer * connect phase is done when this function returns, or FALSE if not. 
*/ static CURLcode smtp_connect(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; struct pingpong *pp = &smtpc->pp; *done = FALSE; /* default to not done yet */ /* We always support persistent connections in SMTP */ connkeep(conn, "SMTP default"); /* Set the default response time-out */ pp->response_time = RESP_TIMEOUT; pp->statemach_act = smtp_statemach_act; pp->endofresp = smtp_endofresp; pp->conn = conn; /* Initialize the SASL storage */ Curl_sasl_init(&smtpc->sasl, &saslsmtp); /* Initialise the pingpong layer */ Curl_pp_init(pp); /* Parse the URL options */ result = smtp_parse_url_options(conn); if(result) return result; /* Parse the URL path */ result = smtp_parse_url_path(conn); if(result) return result; /* Start off waiting for the server greeting response */ state(conn, SMTP_SERVERGREET); result = smtp_multi_statemach(conn, done); return result; } /*********************************************************************** * * smtp_done() * * The DONE function. This does what needs to be done after a single DO has * performed. * * Input argument is already checked for validity. */ static CURLcode smtp_done(struct connectdata *conn, CURLcode status, bool premature) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; struct pingpong *pp = &conn->proto.smtpc.pp; char *eob; ssize_t len; ssize_t bytes_written; (void)premature; if(!smtp || !pp->conn) return CURLE_OK; /* Cleanup our per-request based variables */ Curl_safefree(smtp->custom); if(status) { connclose(conn, "SMTP done with bad status"); /* marked for closure */ result = status; /* use the already set error code */ } else if(!data->set.connect_only && data->set.mail_rcpt && (data->set.upload || data->set.mimepost.kind)) { /* Calculate the EOB taking into account any terminating CRLF from the previous line of the email or the CRLF of the DATA command when there is "no mail data". 
RFC-5321, sect. 4.1.1.4. Note: As some SSL backends, such as OpenSSL, will cause Curl_write() to fail when using a different pointer following a previous write, that returned CURLE_AGAIN, we duplicate the EOB now rather than when the bytes written doesn't equal len. */ if(smtp->trailing_crlf || !conn->data->state.infilesize) { eob = strdup(SMTP_EOB + 2); len = SMTP_EOB_LEN - 2; } else { eob = strdup(SMTP_EOB); len = SMTP_EOB_LEN; } if(!eob) return CURLE_OUT_OF_MEMORY; /* Send the end of block data */ result = Curl_write(conn, conn->writesockfd, eob, len, &bytes_written); if(result) { free(eob); return result; } if(bytes_written != len) { /* The whole chunk was not sent so keep it around and adjust the pingpong structure accordingly */ pp->sendthis = eob; pp->sendsize = len; pp->sendleft = len - bytes_written; } else { /* Successfully sent so adjust the response timeout relative to now */ pp->response = Curl_now(); free(eob); } state(conn, SMTP_POSTDATA); /* Run the state-machine TODO: when the multi interface is used, this _really_ should be using the smtp_multi_statemach function but we have no general support for non-blocking DONE operations! */ result = smtp_block_statemach(conn); } /* Clear the transfer mode for the next request */ smtp->transfer = FTPTRANSFER_BODY; return result; } /*********************************************************************** * * smtp_perform() * * This is the actual DO function for SMTP. Transfer a mail, send a command * or get some data according to the options previously setup. 
*/ static CURLcode smtp_perform(struct connectdata *conn, bool *connected, bool *dophase_done) { /* This is SMTP and no proxy */ CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; DEBUGF(infof(conn->data, "DO phase starts\n")); if(data->set.opt_no_body) { /* Requested no body means no transfer */ smtp->transfer = FTPTRANSFER_INFO; } *dophase_done = FALSE; /* not done yet */ /* Store the first recipient (or NULL if not specified) */ smtp->rcpt = data->set.mail_rcpt; /* Initial data character is the first character in line: it is implicitly preceded by a virtual CRLF. */ smtp->trailing_crlf = TRUE; smtp->eob = 2; /* Start the first command in the DO phase */ if((data->set.upload || data->set.mimepost.kind) && data->set.mail_rcpt) /* MAIL transfer */ result = smtp_perform_mail(conn); else /* SMTP based command (VRFY, EXPN, NOOP, RSET or HELP) */ result = smtp_perform_command(conn); if(result) return result; /* Run the state-machine */ result = smtp_multi_statemach(conn, dophase_done); *connected = conn->bits.tcpconnect[FIRSTSOCKET]; if(*dophase_done) DEBUGF(infof(conn->data, "DO phase is complete\n")); return result; } /*********************************************************************** * * smtp_do() * * This function is registered as 'curl_do' function. It decodes the path * parts etc as a wrapper to the actual DO function (smtp_perform). * * The input argument is already checked for validity. */ static CURLcode smtp_do(struct connectdata *conn, bool *done) { CURLcode result = CURLE_OK; *done = FALSE; /* default to false */ /* Parse the custom request */ result = smtp_parse_custom_request(conn); if(result) return result; result = smtp_regular_transfer(conn, done); return result; } /*********************************************************************** * * smtp_disconnect() * * Disconnect from an SMTP server. Cleanup protocol-specific per-connection * resources. BLOCKING. 
*/ static CURLcode smtp_disconnect(struct connectdata *conn, bool dead_connection) { struct smtp_conn *smtpc = &conn->proto.smtpc; /* We cannot send quit unconditionally. If this connection is stale or bad in any way, sending quit and waiting around here will make the disconnect wait in vain and cause more problems than we need to. */ /* The SMTP session may or may not have been allocated/setup at this point! */ if(!dead_connection && smtpc->pp.conn && smtpc->pp.conn->bits.protoconnstart) if(!smtp_perform_quit(conn)) (void)smtp_block_statemach(conn); /* ignore errors on QUIT */ /* Disconnect from the server */ Curl_pp_disconnect(&smtpc->pp); /* Cleanup the SASL module */ Curl_sasl_cleanup(conn, smtpc->sasl.authused); /* Cleanup our connection based variables */ Curl_safefree(smtpc->domain); return CURLE_OK; } /* Call this when the DO phase has completed */ static CURLcode smtp_dophase_done(struct connectdata *conn, bool connected) { struct SMTP *smtp = conn->data->req.protop; (void)connected; if(smtp->transfer != FTPTRANSFER_BODY) /* no data to transfer */ Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL); return CURLE_OK; } /* Called from multi.c while DOing */ static CURLcode smtp_doing(struct connectdata *conn, bool *dophase_done) { CURLcode result = smtp_multi_statemach(conn, dophase_done); if(result) DEBUGF(infof(conn->data, "DO phase failed\n")); else if(*dophase_done) { result = smtp_dophase_done(conn, FALSE /* not connected */); DEBUGF(infof(conn->data, "DO phase is complete\n")); } return result; } /*********************************************************************** * * smtp_regular_transfer() * * The input argument is already checked for validity. * * Performs all commands done before a regular transfer between a local and a * remote host. 
*/ static CURLcode smtp_regular_transfer(struct connectdata *conn, bool *dophase_done) { CURLcode result = CURLE_OK; bool connected = FALSE; struct Curl_easy *data = conn->data; /* Make sure size is unknown at this point */ data->req.size = -1; /* Set the progress data */ Curl_pgrsSetUploadCounter(data, 0); Curl_pgrsSetDownloadCounter(data, 0); Curl_pgrsSetUploadSize(data, -1); Curl_pgrsSetDownloadSize(data, -1); /* Carry out the perform */ result = smtp_perform(conn, &connected, dophase_done); /* Perform post DO phase operations if necessary */ if(!result && *dophase_done) result = smtp_dophase_done(conn, connected); return result; } static CURLcode smtp_setup_connection(struct connectdata *conn) { struct Curl_easy *data = conn->data; CURLcode result; /* Clear the TLS upgraded flag */ conn->tls_upgraded = FALSE; /* Initialise the SMTP layer */ result = smtp_init(conn); if(result) return result; data->state.path++; /* don't include the initial slash */ return CURLE_OK; } /*********************************************************************** * * smtp_parse_url_options() * * Parse the URL login options. */ static CURLcode smtp_parse_url_options(struct connectdata *conn) { CURLcode result = CURLE_OK; struct smtp_conn *smtpc = &conn->proto.smtpc; const char *ptr = conn->options; smtpc->sasl.resetprefs = TRUE; while(!result && ptr && *ptr) { const char *key = ptr; const char *value; while(*ptr && *ptr != '=') ptr++; value = ptr + 1; while(*ptr && *ptr != ';') ptr++; if(strncasecompare(key, "AUTH=", 5)) result = Curl_sasl_parse_url_auth_option(&smtpc->sasl, value, ptr - value); else result = CURLE_URL_MALFORMAT; if(*ptr == ';') ptr++; } return result; } /*********************************************************************** * * smtp_parse_url_path() * * Parse the URL path into separate path components. 
*/ static CURLcode smtp_parse_url_path(struct connectdata *conn) { /* The SMTP struct is already initialised in smtp_connect() */ struct Curl_easy *data = conn->data; struct smtp_conn *smtpc = &conn->proto.smtpc; const char *path = data->state.path; char localhost[HOSTNAME_MAX + 1]; /* Calculate the path if necessary */ if(!*path) { if(!Curl_gethostname(localhost, sizeof(localhost))) path = localhost; else path = "localhost"; } /* URL decode the path and use it as the domain in our EHLO */ return Curl_urldecode(conn->data, path, 0, &smtpc->domain, NULL, TRUE); } /*********************************************************************** * * smtp_parse_custom_request() * * Parse the custom request. */ static CURLcode smtp_parse_custom_request(struct connectdata *conn) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; const char *custom = data->set.str[STRING_CUSTOMREQUEST]; /* URL decode the custom request */ if(custom) result = Curl_urldecode(data, custom, 0, &smtp->custom, NULL, TRUE); return result; } CURLcode Curl_smtp_escape_eob(struct connectdata *conn, const ssize_t nread) { /* When sending a SMTP payload we must detect CRLF. sequences making sure they are sent as CRLF.. instead, as a . on the beginning of a line will be deleted by the server when not part of an EOB terminator and a genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of data by the server */ ssize_t i; ssize_t si; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *scratch = data->state.scratch; char *newscratch = NULL; char *oldscratch = NULL; size_t eob_sent; /* Do we need to allocate a scratch buffer? */ if(!scratch || data->set.crlf) { oldscratch = scratch; scratch = newscratch = malloc(2 * UPLOAD_BUFSIZE); if(!newscratch) { failf(data, "Failed to alloc scratch buffer!"); return CURLE_OUT_OF_MEMORY; } } DEBUGASSERT(UPLOAD_BUFSIZE >= nread); /* Have we already sent part of the EOB? 
*/ eob_sent = smtp->eob; /* This loop can be improved by some kind of Boyer-Moore style of approach but that is saved for later... */ for(i = 0, si = 0; i < nread; i++) { if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) { smtp->eob++; /* Is the EOB potentially the terminating CRLF? */ if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob) smtp->trailing_crlf = TRUE; else smtp->trailing_crlf = FALSE; } else if(smtp->eob) { /* A previous substring matched so output that first */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; /* Then compare the first byte */ if(SMTP_EOB[0] == data->req.upload_fromhere[i]) smtp->eob = 1; else smtp->eob = 0; eob_sent = 0; /* Reset the trailing CRLF flag as there was more data */ smtp->trailing_crlf = FALSE; } /* Do we have a match for CRLF. as per RFC-5321, sect. 4.5.2 */ if(SMTP_EOB_FIND_LEN == smtp->eob) { /* Copy the replacement data to the target buffer */ memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent], SMTP_EOB_REPL_LEN - eob_sent); si += SMTP_EOB_REPL_LEN - eob_sent; smtp->eob = 0; eob_sent = 0; } else if(!smtp->eob) scratch[si++] = data->req.upload_fromhere[i]; } if(smtp->eob - eob_sent) { /* A substring matched before processing ended so output that now */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; } /* Only use the new buffer if we replaced something */ if(si != nread) { /* Upload from the new (replaced) buffer instead */ data->req.upload_fromhere = scratch; /* Save the buffer so it can be freed later */ data->state.scratch = scratch; /* Free the old scratch buffer */ free(oldscratch); /* Set the new amount too */ data->req.upload_present = si; } else free(newscratch); return CURLE_OK; } #endif /* CURL_DISABLE_SMTP */
CURLcode Curl_smtp_escape_eob(struct connectdata *conn, const ssize_t nread) { /* When sending a SMTP payload we must detect CRLF. sequences making sure they are sent as CRLF.. instead, as a . on the beginning of a line will be deleted by the server when not part of an EOB terminator and a genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of data by the server */ ssize_t i; ssize_t si; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *scratch = data->state.scratch; char *newscratch = NULL; char *oldscratch = NULL; size_t eob_sent; /* Do we need to allocate a scratch buffer? */ if(!scratch || data->set.crlf) { oldscratch = scratch; scratch = newscratch = malloc(2 * data->set.buffer_size); if(!newscratch) { failf(data, "Failed to alloc scratch buffer!"); return CURLE_OUT_OF_MEMORY; } } /* Have we already sent part of the EOB? */ eob_sent = smtp->eob; /* This loop can be improved by some kind of Boyer-Moore style of approach but that is saved for later... */ for(i = 0, si = 0; i < nread; i++) { if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) { smtp->eob++; /* Is the EOB potentially the terminating CRLF? */ if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob) smtp->trailing_crlf = TRUE; else smtp->trailing_crlf = FALSE; } else if(smtp->eob) { /* A previous substring matched so output that first */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; /* Then compare the first byte */ if(SMTP_EOB[0] == data->req.upload_fromhere[i]) smtp->eob = 1; else smtp->eob = 0; eob_sent = 0; /* Reset the trailing CRLF flag as there was more data */ smtp->trailing_crlf = FALSE; } /* Do we have a match for CRLF. as per RFC-5321, sect. 
4.5.2 */ if(SMTP_EOB_FIND_LEN == smtp->eob) { /* Copy the replacement data to the target buffer */ memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent], SMTP_EOB_REPL_LEN - eob_sent); si += SMTP_EOB_REPL_LEN - eob_sent; smtp->eob = 0; eob_sent = 0; } else if(!smtp->eob) scratch[si++] = data->req.upload_fromhere[i]; } if(smtp->eob - eob_sent) { /* A substring matched before processing ended so output that now */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; } /* Only use the new buffer if we replaced something */ if(si != nread) { /* Upload from the new (replaced) buffer instead */ data->req.upload_fromhere = scratch; /* Save the buffer so it can be freed later */ data->state.scratch = scratch; /* Free the old scratch buffer */ free(oldscratch); /* Set the new amount too */ data->req.upload_present = si; } else free(newscratch); return CURLE_OK; }
CURLcode Curl_smtp_escape_eob(struct connectdata *conn, const ssize_t nread) { /* When sending a SMTP payload we must detect CRLF. sequences making sure they are sent as CRLF.. instead, as a . on the beginning of a line will be deleted by the server when not part of an EOB terminator and a genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of data by the server */ ssize_t i; ssize_t si; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *scratch = data->state.scratch; char *newscratch = NULL; char *oldscratch = NULL; size_t eob_sent; /* Do we need to allocate a scratch buffer? */ if(!scratch || data->set.crlf) { oldscratch = scratch; scratch = newscratch = malloc(2 * UPLOAD_BUFSIZE); if(!newscratch) { failf(data, "Failed to alloc scratch buffer!"); return CURLE_OUT_OF_MEMORY; } } DEBUGASSERT(UPLOAD_BUFSIZE >= nread); /* Have we already sent part of the EOB? */ eob_sent = smtp->eob; /* This loop can be improved by some kind of Boyer-Moore style of approach but that is saved for later... */ for(i = 0, si = 0; i < nread; i++) { if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) { smtp->eob++; /* Is the EOB potentially the terminating CRLF? */ if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob) smtp->trailing_crlf = TRUE; else smtp->trailing_crlf = FALSE; } else if(smtp->eob) { /* A previous substring matched so output that first */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; /* Then compare the first byte */ if(SMTP_EOB[0] == data->req.upload_fromhere[i]) smtp->eob = 1; else smtp->eob = 0; eob_sent = 0; /* Reset the trailing CRLF flag as there was more data */ smtp->trailing_crlf = FALSE; } /* Do we have a match for CRLF. as per RFC-5321, sect. 
4.5.2 */ if(SMTP_EOB_FIND_LEN == smtp->eob) { /* Copy the replacement data to the target buffer */ memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent], SMTP_EOB_REPL_LEN - eob_sent); si += SMTP_EOB_REPL_LEN - eob_sent; smtp->eob = 0; eob_sent = 0; } else if(!smtp->eob) scratch[si++] = data->req.upload_fromhere[i]; } if(smtp->eob - eob_sent) { /* A substring matched before processing ended so output that now */ memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent); si += smtp->eob - eob_sent; } /* Only use the new buffer if we replaced something */ if(si != nread) { /* Upload from the new (replaced) buffer instead */ data->req.upload_fromhere = scratch; /* Save the buffer so it can be freed later */ data->state.scratch = scratch; /* Free the old scratch buffer */ free(oldscratch); /* Set the new amount too */ data->req.upload_present = si; } else free(newscratch); return CURLE_OK; }
{'added': [(1566, ' scratch = newscratch = malloc(2 * UPLOAD_BUFSIZE);'), (1573, ' DEBUGASSERT(UPLOAD_BUFSIZE >= nread);')], 'deleted': [(1566, ' scratch = newscratch = malloc(2 * data->set.buffer_size);')]}
2
1
981
5,960
61
420
14
https://github.com/curl/curl
CVE-2018-0500
CWE-787
686
marshal.c
C
get_set_object
/* radare - LGPL3 - Copyright 2016-2021 - Matthieu (c0riolis) Tardy - l0stb1t*/ #include <r_io.h> #include <r_bin.h> #include "marshal.h" #include "pyc_magic.h" // avoiding using r2 internals asserts #define if_true_return(cond,ret) if(cond){return(ret);} // TODO: kill globals static ut32 magic_int; static ut32 symbols_ordinal = 0; static RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object() /* interned_table is used to handle TYPE_INTERNED object */ extern RList *interned_table; static pyc_object *get_object(RBuffer *buffer); static pyc_object *copy_object(pyc_object *object); static void free_object(pyc_object *object); static ut8 get_ut8(RBuffer *buffer, bool *error) { ut8 ret = 0; int size = r_buf_read (buffer, &ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut16 get_ut16(RBuffer *buffer, bool *error) { ut16 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static ut32 get_ut32(RBuffer *buffer, bool *error) { ut32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static st32 get_st32(RBuffer *buffer, bool *error) { st32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static st64 get_st64(RBuffer *buffer, bool *error) { st64 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static double get_float64(RBuffer *buffer, bool *error) { double ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut8 *get_bytes(RBuffer *buffer, ut32 size) { ut8 *ret = R_NEWS0 (ut8, size + 1); if (!ret) { return NULL; } if (r_buf_read (buffer, ret, size) < size) { free (ret); return NULL; } return ret; 
} static pyc_object *get_none_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_NONE; ret->data = strdup ("None"); if (!ret->data) { R_FREE (ret); } } return ret; } static pyc_object *get_false_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FALSE; ret->data = strdup ("False"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_true_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_TRUE; ret->data = strdup ("True"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int_object(RBuffer *buffer) { bool error = false; st32 i = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INT; ret->data = r_str_newf ("%d", i); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int64_object(RBuffer *buffer) { bool error = false; st64 i = get_st64 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_INT64; ret->data = r_str_newf ("%"PFMT64d, (st64)i); if (!ret->data) { R_FREE (ret); } } return ret; } /* long is used when the number is > MAX_INT64 */ static pyc_object *get_long_object(RBuffer *buffer) { bool error = false; bool neg = false; ut32 tmp = 0; size_t size; size_t i, j = 0, left = 0; ut16 n; char *hexstr; char digist2hex[] = "0123456789abcdef"; st32 ndigits = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_LONG; if (ndigits < 0) { ndigits = -ndigits; neg = true; } if (ndigits == 0) { ret->data = strdup ("0x0"); } else { if (ndigits > 10) { free (ret); return NULL; } size = ndigits * 15; if (size < 0) { return NULL; } size = (size - 1) / 4 + 1; if (size < 1) { free (ret); return NULL; } size += 3 + (neg? 
1: 0); j = size - 1; hexstr = calloc (size, sizeof (char)); if (!hexstr) { free (ret); return NULL; } for (i = 0; i < ndigits; i++) { n = get_ut16 (buffer, &error); tmp |= n << left; left += 15; while (left >= 4 && j >= 0) { hexstr[--j] = digist2hex[tmp & 0xf]; tmp >>= 4; left -= 4; } } if (tmp) { hexstr[--j] = digist2hex[tmp & 0xf]; } if (j > 0) { hexstr[--j] = 'x'; } if (j > 0) { hexstr[--j] = '0'; } if (neg && j > 0) { hexstr[--j] = '-'; } ret->data = &hexstr[j]; } return ret; } static pyc_object *get_stringref_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = get_st32 (buffer, &error); if (n >= r_list_length (interned_table)) { eprintf ("bad marshal data (string ref out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRINGREF; ret->data = r_list_get_n (interned_table, n); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; ut8 n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ut8 *s = malloc (n + 1); if (!s) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s, n); if (size != n) { R_FREE (s); R_FREE (ret); return NULL; } s[n] = '\0'; ret->type = TYPE_FLOAT; ret->data = s; return ret; } static pyc_object *get_binary_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double f; f = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FLOAT; ret->data = r_str_newf ("%.15g", f); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; st32 n1 = 0; st32 n2 = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; 
} if ((magic_int & 0xffff) <= 62061) { n1 = get_ut8 (buffer, &error); } else { n1 = get_st32 (buffer, &error); } if (error || n1 < 1) { free (ret); return NULL; } ut8 *s1 = malloc (n1 + 1); if (!s1) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s1, n1); if (size != n1) { R_FREE (s1); R_FREE (ret); return NULL; } s1[n1] = '\0'; if ((magic_int & 0xffff) <= 62061) { n2 = get_ut8 (buffer, &error); } else n2 = get_st32 (buffer, &error); if (error) { return NULL; } ut8 *s2 = malloc (n2 + 1); if (!s2) { return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s2, n2); if (size != n2) { R_FREE (s1); R_FREE (s2); R_FREE (ret); return NULL; } s2[n2] = '\0'; ret->type = TYPE_COMPLEX; ret->data = r_str_newf ("%s+%sj", s1, s2); R_FREE (s1); R_FREE (s2); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_binary_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double a, b; //a + bj a = get_float64 (buffer, &error); b = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_BINARY_COMPLEX; ret->data = r_str_newf ("%.15g+%.15gj", a, b); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_string_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRING; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_unicode_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (unicode size out of 
range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); ret->type = TYPE_UNICODE; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_interned_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INTERNED; ret->data = get_bytes (buffer, n); /* add data pointer to interned table */ r_list_append (interned_table, ret->data); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_array_object_generic(RBuffer *buffer, ut32 size) { pyc_object *tmp = NULL; pyc_object *ret = NULL; ut32 i = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { free (ret); return NULL; } for (i = 0; i < size; i++) { tmp = get_object (buffer); if (!tmp) { r_list_free (ret->data); R_FREE (ret); return NULL; } if (!r_list_append (ret->data, tmp)) { free_object (tmp); r_list_free (ret->data); free (ret); return NULL; } } return ret; } /* small TYPE_SMALL_TUPLE doesn't exist in python2 */ /* */ static pyc_object *get_small_tuple_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut8 n = 0; n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_SMALL_TUPLE; return ret; } return NULL; } static pyc_object *get_tuple_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (tuple size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_TUPLE; return ret; } return NULL; } static pyc_object 
*get_list_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (list size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_LIST; return ret; } return NULL; } static pyc_object *get_dict_object(RBuffer *buffer) { pyc_object *key = NULL, *val = NULL; pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { R_FREE (ret); return NULL; } for (;;) { key = get_object (buffer); if (!key) { break; } if (!r_list_append (ret->data, key)) { r_list_free (ret->data); R_FREE (ret); free_object (key); return NULL; } val = get_object (buffer); if (!val) { break; } if (!r_list_append (ret->data, val)) { free_object (val); r_list_free (ret->data); R_FREE (ret); return NULL; } } ret->type = TYPE_DICT; return ret; } static pyc_object *get_set_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (set size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (!ret) { return NULL; } ret->type = TYPE_SET; return ret; } static pyc_object *get_ascii_object_generic(RBuffer *buffer, ut32 size, bool interned) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_ASCII; ret->data = get_bytes (buffer, size); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_ascii_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ascii_interned_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static 
pyc_object *get_short_ascii_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, false); } static pyc_object *get_short_ascii_interned_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); return error? NULL: get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ref_object(RBuffer *buffer) { bool error = false; ut32 index = get_ut32 (buffer, &error); if (error) { return NULL; } if (index >= r_list_length (refs)) { return NULL; } pyc_object *obj = r_list_get_n (refs, index); return obj? copy_object (obj): NULL; } static void free_object(pyc_object *object) { if (!object) { return; } if ((int)object->type == 0) { return; } switch (object->type) { case TYPE_SMALL_TUPLE: case TYPE_TUPLE: r_list_free (object->data); break; case TYPE_STRING: case TYPE_TRUE: case TYPE_FALSE: case TYPE_INT: case TYPE_NONE: case TYPE_NULL: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII: case TYPE_ASCII: case TYPE_SHORT_ASCII_INTERNED: free (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *cobj = object->data; free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (object->data); break; } case TYPE_REF: free_object (object->data); break; case TYPE_SET: case TYPE_FROZENSET: case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_INT64: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Free not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in free_object (%x)\n", object->type); break; } free (object); } static pyc_object 
*copy_object(pyc_object *object) { pyc_object *copy = R_NEW0 (pyc_object); if (!copy || !object) { free (copy); return NULL; } copy->type = object->type; if ((int)object->type == 0) { // do nothing } else switch (object->type) { case TYPE_NULL: break; case TYPE_TUPLE: case TYPE_SMALL_TUPLE: copy->data = r_list_clone (object->data); break; case TYPE_INT: case TYPE_INT64: case TYPE_NONE: case TYPE_TRUE: case TYPE_FALSE: case TYPE_STRING: case TYPE_ASCII: case TYPE_SHORT_ASCII: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII_INTERNED: copy->data = strdup (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *src = object->data; pyc_code_object *dst = R_NEW0 (pyc_code_object); if (!dst) { break; } memcpy (dst, src, sizeof (*dst)); dst->code = copy_object (src->code); dst->consts = copy_object (src->consts); dst->names = copy_object (src->names); dst->varnames = copy_object (src->varnames); dst->freevars = copy_object (src->freevars); dst->cellvars = copy_object (src->cellvars); dst->filename = copy_object (src->filename); dst->name = copy_object (src->name); dst->lnotab = copy_object (src->lnotab); copy->data = dst; break; } case TYPE_REF: copy->data = copy_object (object->data); break; case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_FROZENSET: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_SET: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Copy not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in copy_object (%x)\n", object->type); break; } if (!copy->data) { R_FREE (copy); } return copy; } static pyc_object *get_code_object(RBuffer *buffer) { bool error = false; pyc_object *ret = R_NEW0 (pyc_object); pyc_code_object *cobj = R_NEW0 (pyc_code_object); if (!ret || !cobj) { free (ret); free (cobj); return NULL; } //ret->type = TYPE_CODE_v1; // support start from v1.0 
ret->data = cobj; bool v10_to_12 = magic_int_within (magic_int, 39170, 16679, &error); // 1.0.1 - 1.2 bool v13_to_22 = magic_int_within (magic_int, 11913, 60718, &error); // 1.3b1 - 2.2a1 bool v11_to_14 = magic_int_within (magic_int, 39170, 20117, &error); // 1.0.1 - 1.4 bool v15_to_22 = magic_int_within (magic_int, 20121, 60718, &error); // 1.5a1 - 2.2a1 bool v13_to_20 = magic_int_within (magic_int, 11913, 50824, &error); // 1.3b1 - 2.0b1 //bool v21_to_27 = (!v13_to_20) && magic_int_within (magic_int, 60124, 62212, &error); bool has_posonlyargcount = magic_int_within (magic_int, 3410, 3424, &error); // v3.8.0a4 - latest if (error) { free (ret); free (cobj); return NULL; } if (v13_to_22) { cobj->argcount = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->argcount = 0; } else { cobj->argcount = get_ut32 (buffer, &error); } if (has_posonlyargcount) { cobj->posonlyargcount = get_ut32 (buffer, &error); // Included in argcount } else { cobj->posonlyargcount = 0; // None } if (((3020 < (magic_int & 0xffff)) && ((magic_int & 0xffff) < 20121)) && (!v11_to_14)) { cobj->kwonlyargcount = get_ut32 (buffer, &error); // Not included in argcount } else { cobj->kwonlyargcount = 0; } if (v13_to_22) { cobj->nlocals = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->nlocals = 0; } else { cobj->nlocals = get_ut32 (buffer, &error); } if (v15_to_22) { cobj->stacksize = get_ut16 (buffer, &error); } else if (v11_to_14 || v10_to_12) { cobj->stacksize = 0; } else { cobj->stacksize = get_ut32 (buffer, &error); } if (v13_to_22) { cobj->flags = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->flags = 0; } else { cobj->flags = get_ut32 (buffer, &error); } //to help disassemble the code cobj->start_offset = r_buf_tell (buffer) + 5; // 1 from get_object() and 4 from get_string_object() if (!refs) { return ret; //return for entried part to get the root object of this file } cobj->code = get_object (buffer); cobj->end_offset = r_buf_tell (buffer); cobj->consts = get_object 
(buffer); cobj->names = get_object (buffer); if (v10_to_12) { cobj->varnames = NULL; } else { cobj->varnames = get_object (buffer); } if (!(v10_to_12 || v13_to_20)) { cobj->freevars = get_object (buffer); cobj->cellvars = get_object (buffer); } else { cobj->freevars = NULL; cobj->cellvars = NULL; } cobj->filename = get_object (buffer); cobj->name = get_object (buffer); if (v15_to_22) { cobj->firstlineno = get_ut16 (buffer, &error); } else if (v11_to_14) { cobj->firstlineno = 0; } else { cobj->firstlineno = get_ut32 (buffer, &error); } if (v11_to_14) { cobj->lnotab = NULL; } else { cobj->lnotab = get_object (buffer); } if (error) { free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (cobj); R_FREE (ret); return NULL; } return ret; } ut64 get_code_object_addr(RBuffer *buffer, ut32 magic) { magic_int = magic; pyc_object *co = get_code_object (buffer); ut64 result = 0; if (!co) { return 0; } pyc_code_object *cobj = co->data; result = cobj->start_offset; free_object (co); return result; } static pyc_object *get_object(RBuffer *buffer) { bool error = false; pyc_object *ret = NULL; ut8 code = get_ut8 (buffer, &error); bool flag = (code & FLAG_REF); RListIter *ref_idx = NULL; ut8 type = (code & ~FLAG_REF); if (error) { return NULL; } if (flag) { pyc_object *noneret = get_none_object (); if (noneret) { ref_idx = r_list_append (refs, noneret); } } switch (type) { case TYPE_NULL: free_object (ret); return NULL; case TYPE_TRUE: return get_true_object (); case TYPE_FALSE: free_object (ret); return get_false_object (); case TYPE_NONE: free_object (ret); return get_none_object (); case TYPE_REF: free_object (ret); return get_ref_object (buffer); case TYPE_SMALL_TUPLE: ret = get_small_tuple_object (buffer); break; case TYPE_TUPLE: ret = get_tuple_object (buffer); break; case 
TYPE_STRING: ret = get_string_object (buffer); break; case TYPE_CODE_v0: ret = get_code_object (buffer); if (ret) { ret->type = TYPE_CODE_v0; } break; case TYPE_CODE_v1: ret = get_code_object (buffer); if (ret) { ret->type = TYPE_CODE_v1; } break; case TYPE_INT: ret = get_int_object (buffer); break; case TYPE_ASCII_INTERNED: ret = get_ascii_interned_object (buffer); break; case TYPE_SHORT_ASCII: ret = get_short_ascii_object (buffer); break; case TYPE_ASCII: ret = get_ascii_object (buffer); break; case TYPE_SHORT_ASCII_INTERNED: ret = get_short_ascii_interned_object (buffer); break; case TYPE_INT64: ret = get_int64_object (buffer); break; case TYPE_INTERNED: ret = get_interned_object (buffer); break; case TYPE_STRINGREF: ret = get_stringref_object (buffer); break; case TYPE_FLOAT: ret = get_float_object (buffer); break; case TYPE_BINARY_FLOAT: ret = get_binary_float_object (buffer); break; case TYPE_COMPLEX: ret = get_complex_object (buffer); // behaviour depends on Python version break; case TYPE_BINARY_COMPLEX: ret = get_binary_complex_object (buffer); break; case TYPE_LIST: ret = get_list_object (buffer); break; case TYPE_LONG: ret = get_long_object (buffer); break; case TYPE_UNICODE: ret = get_unicode_object (buffer); break; case TYPE_DICT: ret = get_dict_object (buffer); break; case TYPE_FROZENSET: case TYPE_SET: ret = get_set_object (buffer); break; case TYPE_STOPITER: case TYPE_ELLIPSIS: ret = R_NEW0 (pyc_object); break; case TYPE_UNKNOWN: eprintf ("Get not implemented for type 0x%x\n", type); // r_list_pop (refs); free_object (ret); return NULL; case 0: // nop break; default: eprintf ("Undefined type in get_object (0x%x)\n", type); // r_list_pop (refs); return NULL; } if (ret && flag && ref_idx) { if (ref_idx->data != ret) { free_object (ref_idx->data); } ref_idx->data = copy_object (ret); } if (ret) { return ret; } ret = get_none_object (); if (!ret) { return NULL; } r_list_append (refs, ret); return ret; } static bool extract_sections_symbols(pyc_object 
*obj, RList *sections, RList *symbols, RList *cobjs, char *prefix) { pyc_code_object *cobj = NULL; RBinSection *section = NULL; RBinSymbol *symbol = NULL; RListIter *i = NULL; //each code object is a section if_true_return (!obj || (obj->type != TYPE_CODE_v1 && obj->type != TYPE_CODE_v0), false); cobj = obj->data; if_true_return (!cobj || !cobj->name, false); if_true_return (cobj->name->type != TYPE_ASCII && cobj->name->type != TYPE_STRING && cobj->name->type != TYPE_INTERNED, false); if_true_return (!cobj->name->data, false); if_true_return (!cobj->consts, false); //add the cobj to objs list if (!r_list_append (cobjs, cobj)) { goto fail; } section = R_NEW0 (RBinSection); symbol = R_NEW0 (RBinSymbol); prefix = r_str_newf ("%s%s%s", r_str_get (prefix), prefix? ".": "", (const char *)cobj->name->data); if (!prefix || !section || !symbol) { goto fail; } section->name = strdup (prefix); if (!section->name) { goto fail; } section->paddr = cobj->start_offset; section->vaddr = cobj->start_offset; section->size = cobj->end_offset - cobj->start_offset; section->vsize = cobj->end_offset - cobj->start_offset; if (!r_list_append (sections, section)) { goto fail; } // start building symbol symbol->name = strdup (prefix); //symbol->bind; symbol->type = R_BIN_TYPE_FUNC_STR; symbol->size = cobj->end_offset - cobj->start_offset; symbol->vaddr = cobj->start_offset; symbol->paddr = cobj->start_offset; symbol->ordinal = symbols_ordinal++; if (cobj->consts->type != TYPE_TUPLE && cobj->consts->type != TYPE_SMALL_TUPLE) { goto fail2; } if (!r_list_append (symbols, symbol)) { goto fail2; } r_list_foreach (((RList *)(cobj->consts->data)), i, obj) { extract_sections_symbols (obj, sections, symbols, cobjs, prefix); } free (prefix); return true; fail: free (section); free (prefix); free (symbol); return false; fail2: free (prefix); free (symbol); return false; } bool get_sections_symbols_from_code_objects(RBuffer *buffer, RList *sections, RList *symbols, RList *cobjs, ut32 magic) { bool ret; 
magic_int = magic; refs = r_list_newf (NULL); // (RListFree)free_object); if (!refs) { return false; } ret = extract_sections_symbols (get_object (buffer), sections, symbols, cobjs, NULL); r_list_free (refs); refs = NULL; return ret; }
/* radare - LGPL3 - Copyright 2016-2022 - Matthieu (c0riolis) Tardy - l0stb1t */ #include <r_io.h> #include <r_bin.h> #include "marshal.h" #include "pyc_magic.h" // avoiding using r2 internals asserts #define if_true_return(cond,ret) if(cond){return(ret);} // TODO: kill globals static R_TH_LOCAL ut32 magic_int; static R_TH_LOCAL ut32 symbols_ordinal = 0; static R_TH_LOCAL RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object() /* interned_table is used to handle TYPE_INTERNED object */ extern RList *interned_table; static pyc_object *get_object(RBuffer *buffer); static pyc_object *copy_object(pyc_object *object); static void free_object(pyc_object *object); static ut8 get_ut8(RBuffer *buffer, bool *error) { ut8 ret = 0; int size = r_buf_read (buffer, &ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut16 get_ut16(RBuffer *buffer, bool *error) { ut16 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static ut32 get_ut32(RBuffer *buffer, bool *error) { ut32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static st32 get_st32(RBuffer *buffer, bool *error) { st32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static st64 get_st64(RBuffer *buffer, bool *error) { st64 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static double get_float64(RBuffer *buffer, bool *error) { double ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut8 *get_bytes(RBuffer *buffer, ut32 size) { ut8 *ret = R_NEWS0 (ut8, size + 1); if (!ret) { return NULL; } if (r_buf_read (buffer, ret, size) < size) { free 
(ret); return NULL; } return ret; } static pyc_object *get_none_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_NONE; ret->data = strdup ("None"); if (!ret->data) { R_FREE (ret); } } return ret; } static pyc_object *get_false_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FALSE; ret->data = strdup ("False"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_true_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_TRUE; ret->data = strdup ("True"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int_object(RBuffer *buffer) { bool error = false; st32 i = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INT; ret->data = r_str_newf ("%d", i); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int64_object(RBuffer *buffer) { bool error = false; st64 i = get_st64 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_INT64; ret->data = r_str_newf ("%"PFMT64d, (st64)i); if (!ret->data) { R_FREE (ret); } } return ret; } /* long is used when the number is > MAX_INT64 */ static pyc_object *get_long_object(RBuffer *buffer) { bool error = false; bool neg = false; ut32 tmp = 0; size_t size; size_t i, j = 0, left = 0; ut16 n; char *hexstr; char digist2hex[] = "0123456789abcdef"; st32 ndigits = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_LONG; if (ndigits < 0) { ndigits = -ndigits; neg = true; } if (ndigits == 0) { ret->data = strdup ("0x0"); } else { if (ndigits > 10) { free (ret); return NULL; } size = ndigits * 15; if (size < 0) { return NULL; } size = (size - 1) / 4 + 1; if (size < 1) { free (ret); return NULL; } size += 3 + (neg? 
1: 0); j = size - 1; hexstr = calloc (size, sizeof (char)); if (!hexstr) { free (ret); return NULL; } for (i = 0; i < ndigits; i++) { n = get_ut16 (buffer, &error); tmp |= n << left; left += 15; while (left >= 4 && j >= 0) { hexstr[--j] = digist2hex[tmp & 0xf]; tmp >>= 4; left -= 4; } } if (tmp) { hexstr[--j] = digist2hex[tmp & 0xf]; } if (j > 0) { hexstr[--j] = 'x'; } if (j > 0) { hexstr[--j] = '0'; } if (neg && j > 0) { hexstr[--j] = '-'; } ret->data = &hexstr[j]; } return ret; } static pyc_object *get_stringref_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = get_st32 (buffer, &error); if (n >= r_list_length (interned_table)) { eprintf ("bad marshal data (string ref out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRINGREF; ret->data = r_list_get_n (interned_table, n); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; ut8 n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ut8 *s = malloc (n + 1); if (!s) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s, n); if (size != n) { R_FREE (s); R_FREE (ret); return NULL; } s[n] = '\0'; ret->type = TYPE_FLOAT; ret->data = s; return ret; } static pyc_object *get_binary_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double f; f = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FLOAT; ret->data = r_str_newf ("%.15g", f); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; st32 n1 = 0; st32 n2 = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; 
} if ((magic_int & 0xffff) <= 62061) { n1 = get_ut8 (buffer, &error); } else { n1 = get_st32 (buffer, &error); } if (error || n1 < 1) { free (ret); return NULL; } ut8 *s1 = malloc (n1 + 1); if (!s1) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s1, n1); if (size != n1) { R_FREE (s1); R_FREE (ret); return NULL; } s1[n1] = '\0'; if ((magic_int & 0xffff) <= 62061) { n2 = get_ut8 (buffer, &error); } else n2 = get_st32 (buffer, &error); if (error) { return NULL; } ut8 *s2 = malloc (n2 + 1); if (!s2) { return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s2, n2); if (size != n2) { R_FREE (s1); R_FREE (s2); R_FREE (ret); return NULL; } s2[n2] = '\0'; ret->type = TYPE_COMPLEX; ret->data = r_str_newf ("%s+%sj", s1, s2); R_FREE (s1); R_FREE (s2); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_binary_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double a, b; //a + bj a = get_float64 (buffer, &error); b = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_BINARY_COMPLEX; ret->data = r_str_newf ("%.15g+%.15gj", a, b); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_string_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRING; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_unicode_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (unicode size out of 
range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); ret->type = TYPE_UNICODE; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_interned_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INTERNED; ret->data = get_bytes (buffer, n); /* add data pointer to interned table */ r_list_append (interned_table, ret->data); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_array_object_generic(RBuffer *buffer, ut32 size) { pyc_object *tmp = NULL; pyc_object *ret = NULL; ut32 i = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { free (ret); return NULL; } for (i = 0; i < size; i++) { tmp = get_object (buffer); if (!tmp || !r_list_append (ret->data, tmp)) { free_object (tmp); ((RList*)ret->data)->free = NULL; r_list_free (ret->data); free (ret); return NULL; } } return ret; } /* small TYPE_SMALL_TUPLE doesn't exist in python2 */ static pyc_object *get_small_tuple_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut8 n = 0; n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_SMALL_TUPLE; return ret; } return NULL; } static pyc_object *get_tuple_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (tuple size out of range)\n"); return NULL; } if (error) { return NULL; } pyc_object *ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_TUPLE; } return ret; } static pyc_object *get_list_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = 
false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (list size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_LIST; return ret; } return NULL; } static pyc_object *get_dict_object(RBuffer *buffer) { pyc_object *key = NULL, *val = NULL; pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { R_FREE (ret); return NULL; } for (;;) { key = get_object (buffer); if (!key) { break; } if (!r_list_append (ret->data, key)) { r_list_free (ret->data); R_FREE (ret); free_object (key); return NULL; } val = get_object (buffer); if (!val) { break; } if (!r_list_append (ret->data, val)) { free_object (val); r_list_free (ret->data); R_FREE (ret); return NULL; } } ret->type = TYPE_DICT; return ret; } static pyc_object *get_set_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (set size out of range)\n"); return NULL; } if (error) { return NULL; } pyc_object *ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_SET; } return ret; } static pyc_object *get_ascii_object_generic(RBuffer *buffer, ut32 size, bool interned) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_ASCII; ret->data = get_bytes (buffer, size); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_ascii_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ascii_interned_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static pyc_object *get_short_ascii_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); 
if (error) { return NULL; } return get_ascii_object_generic (buffer, n, false); } static pyc_object *get_short_ascii_interned_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); return error? NULL: get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ref_object(RBuffer *buffer) { bool error = false; ut32 index = get_ut32 (buffer, &error); if (error) { return NULL; } if (index >= r_list_length (refs)) { return NULL; } pyc_object *obj = r_list_get_n (refs, index); return obj? copy_object (obj): NULL; } static void free_object(pyc_object *object) { if (!object) { return; } if ((int)object->type == 0) { return; } switch (object->type) { case TYPE_SMALL_TUPLE: case TYPE_TUPLE: r_list_free (object->data); break; case TYPE_STRING: case TYPE_TRUE: case TYPE_FALSE: case TYPE_INT: case TYPE_NONE: case TYPE_NULL: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII: case TYPE_ASCII: case TYPE_SHORT_ASCII_INTERNED: free (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *cobj = object->data; free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (object->data); break; } case TYPE_REF: free_object (object->data); break; case TYPE_SET: case TYPE_FROZENSET: case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_INT64: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Free not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in free_object (%x)\n", object->type); break; } free (object); } static pyc_object *copy_object(pyc_object *object) { pyc_object *copy = R_NEW0 (pyc_object); if (!copy || !object) { free 
(copy); return NULL; } copy->type = object->type; if ((int)object->type == 0) { // do nothing } else switch (object->type) { case TYPE_NULL: break; case TYPE_TUPLE: case TYPE_SMALL_TUPLE: copy->data = r_list_clone (object->data); break; case TYPE_INT: case TYPE_INT64: case TYPE_NONE: case TYPE_TRUE: case TYPE_FALSE: case TYPE_STRING: case TYPE_ASCII: case TYPE_SHORT_ASCII: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII_INTERNED: copy->data = strdup (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *src = object->data; pyc_code_object *dst = R_NEW0 (pyc_code_object); if (!dst) { break; } memcpy (dst, src, sizeof (*dst)); dst->code = copy_object (src->code); dst->consts = copy_object (src->consts); dst->names = copy_object (src->names); dst->varnames = copy_object (src->varnames); dst->freevars = copy_object (src->freevars); dst->cellvars = copy_object (src->cellvars); dst->filename = copy_object (src->filename); dst->name = copy_object (src->name); dst->lnotab = copy_object (src->lnotab); copy->data = dst; break; } case TYPE_REF: copy->data = copy_object (object->data); break; case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_FROZENSET: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_SET: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Copy not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in copy_object (%x)\n", object->type); break; } if (!copy->data) { R_FREE (copy); } return copy; } static pyc_object *get_code_object(RBuffer *buffer) { bool error = false; pyc_object *ret = R_NEW0 (pyc_object); pyc_code_object *cobj = R_NEW0 (pyc_code_object); if (!ret || !cobj) { free (ret); free (cobj); return NULL; } //ret->type = TYPE_CODE_v1; // support start from v1.0 ret->data = cobj; bool v10_to_12 = magic_int_within (magic_int, 39170, 16679, &error); // 1.0.1 - 1.2 bool 
v13_to_22 = magic_int_within (magic_int, 11913, 60718, &error); // 1.3b1 - 2.2a1 bool v11_to_14 = magic_int_within (magic_int, 39170, 20117, &error); // 1.0.1 - 1.4 bool v15_to_22 = magic_int_within (magic_int, 20121, 60718, &error); // 1.5a1 - 2.2a1 bool v13_to_20 = magic_int_within (magic_int, 11913, 50824, &error); // 1.3b1 - 2.0b1 //bool v21_to_27 = (!v13_to_20) && magic_int_within (magic_int, 60124, 62212, &error); bool has_posonlyargcount = magic_int_within (magic_int, 3410, 3424, &error); // v3.8.0a4 - latest if (error) { free (ret); free (cobj); return NULL; } if (v13_to_22) { cobj->argcount = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->argcount = 0; } else { cobj->argcount = get_ut32 (buffer, &error); } if (has_posonlyargcount) { cobj->posonlyargcount = get_ut32 (buffer, &error); // Included in argcount } else { cobj->posonlyargcount = 0; // None } if (((3020 < (magic_int & 0xffff)) && ((magic_int & 0xffff) < 20121)) && (!v11_to_14)) { cobj->kwonlyargcount = get_ut32 (buffer, &error); // Not included in argcount } else { cobj->kwonlyargcount = 0; } if (v13_to_22) { cobj->nlocals = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->nlocals = 0; } else { cobj->nlocals = get_ut32 (buffer, &error); } if (v15_to_22) { cobj->stacksize = get_ut16 (buffer, &error); } else if (v11_to_14 || v10_to_12) { cobj->stacksize = 0; } else { cobj->stacksize = get_ut32 (buffer, &error); } if (v13_to_22) { cobj->flags = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->flags = 0; } else { cobj->flags = get_ut32 (buffer, &error); } //to help disassemble the code cobj->start_offset = r_buf_tell (buffer) + 5; // 1 from get_object() and 4 from get_string_object() if (!refs) { return ret; //return for entried part to get the root object of this file } cobj->code = get_object (buffer); cobj->end_offset = r_buf_tell (buffer); cobj->consts = get_object (buffer); cobj->names = get_object (buffer); if (v10_to_12) { cobj->varnames = NULL; } else { cobj->varnames 
= get_object (buffer); } if (!(v10_to_12 || v13_to_20)) { cobj->freevars = get_object (buffer); cobj->cellvars = get_object (buffer); } else { cobj->freevars = NULL; cobj->cellvars = NULL; } cobj->filename = get_object (buffer); cobj->name = get_object (buffer); if (v15_to_22) { cobj->firstlineno = get_ut16 (buffer, &error); } else if (v11_to_14) { cobj->firstlineno = 0; } else { cobj->firstlineno = get_ut32 (buffer, &error); } if (v11_to_14) { cobj->lnotab = NULL; } else { cobj->lnotab = get_object (buffer); } if (error) { free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (cobj); R_FREE (ret); return NULL; } return ret; } ut64 get_code_object_addr(RBuffer *buffer, ut32 magic) { magic_int = magic; pyc_object *co = get_code_object (buffer); ut64 result = 0; if (!co) { return 0; } pyc_code_object *cobj = co->data; result = cobj->start_offset; free_object (co); return result; } static pyc_object *get_object(RBuffer *buffer) { bool error = false; pyc_object *ret = NULL; ut8 code = get_ut8 (buffer, &error); bool flag = (code & FLAG_REF); RListIter *ref_idx = NULL; ut8 type = (code & ~FLAG_REF); if (error) { return NULL; } if (flag) { pyc_object *noneret = get_none_object (); if (noneret) { ref_idx = r_list_append (refs, noneret); } } switch (type) { case TYPE_NULL: free_object (ret); return NULL; case TYPE_TRUE: return get_true_object (); case TYPE_FALSE: free_object (ret); return get_false_object (); case TYPE_NONE: free_object (ret); return get_none_object (); case TYPE_REF: free_object (ret); return get_ref_object (buffer); case TYPE_SMALL_TUPLE: ret = get_small_tuple_object (buffer); break; case TYPE_TUPLE: ret = get_tuple_object (buffer); break; case TYPE_STRING: ret = get_string_object (buffer); break; case TYPE_CODE_v0: ret = get_code_object (buffer); if 
(ret) { ret->type = TYPE_CODE_v0; } break; case TYPE_CODE_v1: ret = get_code_object (buffer); if (ret) { ret->type = TYPE_CODE_v1; } break; case TYPE_INT: ret = get_int_object (buffer); break; case TYPE_ASCII_INTERNED: ret = get_ascii_interned_object (buffer); break; case TYPE_SHORT_ASCII: ret = get_short_ascii_object (buffer); break; case TYPE_ASCII: ret = get_ascii_object (buffer); break; case TYPE_SHORT_ASCII_INTERNED: ret = get_short_ascii_interned_object (buffer); break; case TYPE_INT64: ret = get_int64_object (buffer); break; case TYPE_INTERNED: ret = get_interned_object (buffer); break; case TYPE_STRINGREF: ret = get_stringref_object (buffer); break; case TYPE_FLOAT: ret = get_float_object (buffer); break; case TYPE_BINARY_FLOAT: ret = get_binary_float_object (buffer); break; case TYPE_COMPLEX: ret = get_complex_object (buffer); // behaviour depends on Python version break; case TYPE_BINARY_COMPLEX: ret = get_binary_complex_object (buffer); break; case TYPE_LIST: ret = get_list_object (buffer); break; case TYPE_LONG: ret = get_long_object (buffer); break; case TYPE_UNICODE: ret = get_unicode_object (buffer); break; case TYPE_DICT: ret = get_dict_object (buffer); break; case TYPE_FROZENSET: case TYPE_SET: ret = get_set_object (buffer); break; case TYPE_STOPITER: case TYPE_ELLIPSIS: ret = R_NEW0 (pyc_object); break; case TYPE_UNKNOWN: eprintf ("Get not implemented for type 0x%x\n", type); // r_list_pop (refs); free_object (ret); return NULL; case 0: // nop break; default: eprintf ("Undefined type in get_object (0x%x)\n", type); // r_list_pop (refs); return NULL; } if (ret && flag && ref_idx) { if (ref_idx->data != ret) { free_object (ref_idx->data); } ref_idx->data = copy_object (ret); } if (ret) { return ret; } ret = get_none_object (); if (!ret) { return NULL; } r_list_append (refs, ret); return ret; } static bool extract_sections_symbols(pyc_object *obj, RList *sections, RList *symbols, RList *cobjs, char *prefix) { pyc_code_object *cobj = NULL; RBinSection 
*section = NULL; RBinSymbol *symbol = NULL; RListIter *i = NULL; //each code object is a section if_true_return (!obj || (obj->type != TYPE_CODE_v1 && obj->type != TYPE_CODE_v0), false); cobj = obj->data; if_true_return (!cobj || !cobj->name, false); if_true_return (cobj->name->type != TYPE_ASCII && cobj->name->type != TYPE_STRING && cobj->name->type != TYPE_INTERNED, false); if_true_return (!cobj->name->data, false); if_true_return (!cobj->consts, false); //add the cobj to objs list if (!r_list_append (cobjs, cobj)) { goto fail; } section = R_NEW0 (RBinSection); symbol = R_NEW0 (RBinSymbol); prefix = r_str_newf ("%s%s%s", r_str_get (prefix), prefix? ".": "", (const char *)cobj->name->data); if (!prefix || !section || !symbol) { goto fail; } section->name = strdup (prefix); if (!section->name) { goto fail; } section->paddr = cobj->start_offset; section->vaddr = cobj->start_offset; section->size = cobj->end_offset - cobj->start_offset; section->vsize = cobj->end_offset - cobj->start_offset; if (!r_list_append (sections, section)) { goto fail; } // start building symbol symbol->name = strdup (prefix); //symbol->bind; symbol->type = R_BIN_TYPE_FUNC_STR; symbol->size = cobj->end_offset - cobj->start_offset; symbol->vaddr = cobj->start_offset; symbol->paddr = cobj->start_offset; symbol->ordinal = symbols_ordinal++; if (cobj->consts->type != TYPE_TUPLE && cobj->consts->type != TYPE_SMALL_TUPLE) { goto fail2; } if (!r_list_append (symbols, symbol)) { goto fail2; } r_list_foreach (((RList *)(cobj->consts->data)), i, obj) { extract_sections_symbols (obj, sections, symbols, cobjs, prefix); } free (prefix); return true; fail: free (section); free (prefix); free (symbol); return false; fail2: free (prefix); free (symbol); return false; } bool get_sections_symbols_from_code_objects(RBuffer *buffer, RList *sections, RList *symbols, RList *cobjs, ut32 magic) { bool ret; magic_int = magic; refs = r_list_newf (NULL); // (RListFree)free_object); if (!refs) { return false; } ret = 
extract_sections_symbols (get_object (buffer), sections, symbols, cobjs, NULL); r_list_free (refs); refs = NULL; return ret; }
static pyc_object *get_set_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (set size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (!ret) { return NULL; } ret->type = TYPE_SET; return ret; }
static pyc_object *get_set_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (set size out of range)\n"); return NULL; } if (error) { return NULL; } pyc_object *ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_SET; } return ret; }
{'added': [(1, '/* radare - LGPL3 - Copyright 2016-2022 - Matthieu (c0riolis) Tardy - l0stb1t */'), (12, 'static R_TH_LOCAL ut32 magic_int;'), (13, 'static R_TH_LOCAL ut32 symbols_ordinal = 0;'), (14, "static R_TH_LOCAL RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object()"), (503, '\t\tif (!tmp || !r_list_append (ret->data, tmp)) {'), (505, '\t\t\t((RList*)ret->data)->free = NULL;'), (534, '\tut32 n = get_ut32 (buffer, &error);'), (542, '\tpyc_object *ret = get_array_object_generic (buffer, n);'), (546, '\treturn ret;'), (552, '\tut32 n = get_ut32 (buffer, &error);'), (617, '\tpyc_object *ret = get_array_object_generic (buffer, n);'), (618, '\tif (ret) {'), (619, '\t\tret->type = TYPE_SET;')], 'deleted': [(1, '/* radare - LGPL3 - Copyright 2016-2021 - Matthieu (c0riolis) Tardy - l0stb1t*/'), (12, 'static ut32 magic_int;'), (13, 'static ut32 symbols_ordinal = 0;'), (14, "static RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object()"), (503, '\t\tif (!tmp) {'), (504, '\t\t\tr_list_free (ret->data);'), (505, '\t\t\tR_FREE (ret);'), (506, '\t\t\treturn NULL;'), (507, '\t\t}'), (508, '\t\tif (!r_list_append (ret->data, tmp)) {'), (519, '/* */'), (538, '\tpyc_object *ret = NULL;'), (540, '\tut32 n = 0;'), (541, ''), (542, '\tn = get_ut32 (buffer, &error);'), (550, '\tret = get_array_object_generic (buffer, n);'), (553, '\t\treturn ret;'), (555, '\treturn NULL;'), (561, '\tut32 n = 0;'), (562, ''), (563, '\tn = get_ut32 (buffer, &error);'), (619, '\tpyc_object *ret = NULL;'), (629, '\tret = get_array_object_generic (buffer, n);'), (630, '\tif (!ret) {'), (631, '\t\treturn NULL;'), (633, '\tret->type = TYPE_SET;')]}
13
26
1,078
5,896
18
83
4
https://github.com/radareorg/radare2
CVE-2022-0523
CWE-119
2,704
bson.c
C++
bson_append_finish_object
/* bson.c */ /* Copyright 2009, 2010 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #include <time.h> #include <limits.h> #include "bson.h" #include "encoding.h" const int initialBufferSize = 128; /* only need one of these */ static const int zero = 0; /* Custom standard function pointers. */ void *( *bson_malloc_func )( size_t ) = malloc; void *( *bson_realloc_func )( void *, size_t ) = realloc; void ( *bson_free_func )( void * ) = free; #ifdef R_SAFETY_NET bson_printf_func bson_printf; #else bson_printf_func bson_printf = printf; #endif bson_fprintf_func bson_fprintf = fprintf; bson_sprintf_func bson_sprintf = sprintf; static int _bson_errprintf( const char *, ... ); bson_printf_func bson_errprintf = _bson_errprintf; /* ObjectId fuzz functions. 
*/ static int ( *oid_fuzz_func )( void ) = NULL; static int ( *oid_inc_func )( void ) = NULL; /* ---------------------------- READING ------------------------------ */ MONGO_EXPORT bson* bson_create( void ) { return (bson*)bson_malloc(sizeof(bson)); } MONGO_EXPORT void bson_dispose(bson* b) { bson_free(b); } MONGO_EXPORT bson *bson_empty( bson *obj ) { static char *data = "\005\0\0\0\0"; bson_init_data( obj, data ); obj->finished = 1; obj->err = 0; obj->errstr = NULL; obj->stackPos = 0; return obj; } MONGO_EXPORT int bson_copy( bson *out, const bson *in ) { if ( !out || !in ) return BSON_ERROR; if ( !in->finished ) return BSON_ERROR; bson_init_size( out, bson_size( in ) ); memcpy( out->data, in->data, bson_size( in ) ); out->finished = 1; return BSON_OK; } int bson_init_data( bson *b, char *data ) { b->data = data; return BSON_OK; } int bson_init_finished_data( bson *b, char *data ) { bson_init_data( b, data ); b->finished = 1; return BSON_OK; } static void _bson_reset( bson *b ) { b->finished = 0; b->stackPos = 0; b->err = 0; b->errstr = NULL; } MONGO_EXPORT int bson_size( const bson *b ) { int i; if ( ! b || ! 
b->data ) return 0; bson_little_endian32( &i, b->data ); return i; } MONGO_EXPORT int bson_buffer_size( const bson *b ) { return (b->cur - b->data + 1); } MONGO_EXPORT const char *bson_data( const bson *b ) { return (const char *)b->data; } static char hexbyte( char hex ) { if (hex >= '0' && hex <= '9') return (hex - '0'); else if (hex >= 'A' && hex <= 'F') return (hex - 'A' + 10); else if (hex >= 'a' && hex <= 'f') return (hex - 'a' + 10); else return 0x0; } MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ) { int i; for ( i=0; i<12; i++ ) { oid->bytes[i] = ( hexbyte( str[2*i] ) << 4 ) | hexbyte( str[2*i + 1] ); } } MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ) { static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; int i; for ( i=0; i<12; i++ ) { str[2*i] = hex[( oid->bytes[i] & 0xf0 ) >> 4]; str[2*i + 1] = hex[ oid->bytes[i] & 0x0f ]; } str[24] = '\0'; } MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ) { oid_fuzz_func = func; } MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ) { oid_inc_func = func; } MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ) { static int incr = 0; static int fuzz = 0; int i; time_t t = time( NULL ); if( oid_inc_func ) i = oid_inc_func(); else i = incr++; if ( !fuzz ) { if ( oid_fuzz_func ) fuzz = oid_fuzz_func(); else { srand( ( int )t ); fuzz = rand(); } } bson_big_endian32( &oid->ints[0], &t ); oid->ints[1] = fuzz; bson_big_endian32( &oid->ints[2], &i ); } MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ) { time_t out; bson_big_endian32( &out, &oid->ints[0] ); return out; } MONGO_EXPORT void bson_print( const bson *b ) { bson_print_raw( b->data , 0 ); } MONGO_EXPORT void bson_print_raw( const char *data , int depth ) { bson_iterator i; const char *key; int temp; bson_timestamp_t ts; char oidhex[25]; bson scope; bson_iterator_from_buffer( &i, data ); while ( bson_iterator_next( &i ) ) { bson_type t = 
bson_iterator_type( &i ); if ( t == 0 ) break; key = bson_iterator_key( &i ); for ( temp=0; temp<=depth; temp++ ) bson_printf( "\t" ); bson_printf( "%s : %d \t " , key , t ); switch ( t ) { case BSON_DOUBLE: bson_printf( "%f" , bson_iterator_double( &i ) ); break; case BSON_STRING: bson_printf( "%s" , bson_iterator_string( &i ) ); break; case BSON_SYMBOL: bson_printf( "SYMBOL: %s" , bson_iterator_string( &i ) ); break; case BSON_OID: bson_oid_to_string( bson_iterator_oid( &i ), oidhex ); bson_printf( "%s" , oidhex ); break; case BSON_BOOL: bson_printf( "%s" , bson_iterator_bool( &i ) ? "true" : "false" ); break; case BSON_DATE: bson_printf( "%ld" , ( long int )bson_iterator_date( &i ) ); break; case BSON_BINDATA: bson_printf( "BSON_BINDATA" ); break; case BSON_UNDEFINED: bson_printf( "BSON_UNDEFINED" ); break; case BSON_NULL: bson_printf( "BSON_NULL" ); break; case BSON_REGEX: bson_printf( "BSON_REGEX: %s", bson_iterator_regex( &i ) ); break; case BSON_CODE: bson_printf( "BSON_CODE: %s", bson_iterator_code( &i ) ); break; case BSON_CODEWSCOPE: bson_printf( "BSON_CODE_W_SCOPE: %s", bson_iterator_code( &i ) ); /* bson_init( &scope ); */ /* review - stepped on by bson_iterator_code_scope? 
*/ bson_iterator_code_scope( &i, &scope ); bson_printf( "\n\t SCOPE: " ); bson_print( &scope ); /* bson_destroy( &scope ); */ /* review - causes free error */ break; case BSON_INT: bson_printf( "%d" , bson_iterator_int( &i ) ); break; case BSON_LONG: bson_printf( "%lld" , ( uint64_t )bson_iterator_long( &i ) ); break; case BSON_TIMESTAMP: ts = bson_iterator_timestamp( &i ); bson_printf( "i: %d, t: %d", ts.i, ts.t ); break; case BSON_OBJECT: case BSON_ARRAY: bson_printf( "\n" ); bson_print_raw( bson_iterator_value( &i ) , depth + 1 ); break; default: bson_errprintf( "can't print type : %d\n" , t ); } bson_printf( "\n" ); } } /* ---------------------------- ITERATOR ------------------------------ */ MONGO_EXPORT bson_iterator* bson_iterator_create( void ) { return ( bson_iterator* )malloc( sizeof( bson_iterator ) ); } MONGO_EXPORT void bson_iterator_dispose(bson_iterator* i) { free(i); } MONGO_EXPORT void bson_iterator_init( bson_iterator *i, const bson *b ) { i->cur = b->data + 4; i->first = 1; } MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ) { i->cur = buffer + 4; i->first = 1; } MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ) { bson_iterator_init( it, (bson *)obj ); while( bson_iterator_next( it ) ) { if ( strcmp( name, bson_iterator_key( it ) ) == 0 ) break; } return bson_iterator_type( it ); } MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ) { return *( i->cur ); } MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) { int ds; if ( i->first ) { i->first = 0; return ( bson_type )( *i->cur ); } switch ( bson_iterator_type( i ) ) { case BSON_EOO: return BSON_EOO; /* don't advance */ case BSON_UNDEFINED: case BSON_NULL: ds = 0; break; case BSON_BOOL: ds = 1; break; case BSON_INT: ds = 4; break; case BSON_LONG: case BSON_DOUBLE: case BSON_TIMESTAMP: case BSON_DATE: ds = 8; break; case BSON_OID: ds = 12; break; case BSON_STRING: case BSON_SYMBOL: case BSON_CODE: 
ds = 4 + bson_iterator_int_raw( i ); break; case BSON_BINDATA: ds = 5 + bson_iterator_int_raw( i ); break; case BSON_OBJECT: case BSON_ARRAY: case BSON_CODEWSCOPE: ds = bson_iterator_int_raw( i ); break; case BSON_DBREF: ds = 4+12 + bson_iterator_int_raw( i ); break; case BSON_REGEX: { const char *s = bson_iterator_value( i ); const char *p = s; p += strlen( p )+1; p += strlen( p )+1; ds = p-s; break; } default: { char msg[] = "unknown type: 000000000000"; bson_numstr( msg+14, ( unsigned )( i->cur[0] ) ); bson_fatal_msg( 0, msg ); return 0; } } i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds; return ( bson_type )( *i->cur ); } MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ) { return ( bson_type )i->cur[0]; } MONGO_EXPORT const char *bson_iterator_key( const bson_iterator *i ) { return i->cur + 1; } MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ) { const char *t = i->cur + 1; t += strlen( t ) + 1; return t; } /* types */ int bson_iterator_int_raw( const bson_iterator *i ) { int out; bson_little_endian32( &out, bson_iterator_value( i ) ); return out; } double bson_iterator_double_raw( const bson_iterator *i ) { double out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } int64_t bson_iterator_long_raw( const bson_iterator *i ) { int64_t out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ) { return bson_iterator_value( i )[0]; } MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ) { return ( bson_oid_t * )bson_iterator_value( i ); } MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { 
case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ) { bson_timestamp_t ts; bson_little_endian32( &( ts.i ), bson_iterator_value( i ) ); bson_little_endian32( &( ts.t ), bson_iterator_value( i ) + 4 ); return ts; } MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ) { int time; bson_little_endian32( &time, bson_iterator_value( i ) + 4 ); return time; } MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ) { int increment; bson_little_endian32( &increment, bson_iterator_value( i ) ); return increment; } MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_BOOL: return bson_iterator_bool_raw( i ); case BSON_INT: return bson_iterator_int_raw( i ) != 0; case BSON_LONG: return bson_iterator_long_raw( i ) != 0; case BSON_DOUBLE: return bson_iterator_double_raw( i ) != 0; case BSON_EOO: case BSON_NULL: return 0; default: return 1; } } MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_SYMBOL: return bson_iterator_value( i ) + 4; default: return ""; } } int bson_iterator_string_len( const bson_iterator *i ) { return bson_iterator_int_raw( i ); } MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_CODE: return bson_iterator_value( i ) + 4; case BSON_CODEWSCOPE: return bson_iterator_value( i ) + 8; default: return 
NULL; } } MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) { if ( bson_iterator_type( i ) == BSON_CODEWSCOPE ) { int code_len; bson_little_endian32( &code_len, bson_iterator_value( i )+4 ); bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) ); _bson_reset( scope ); scope->finished = 1; } else { bson_empty( scope ); } } MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ) { return bson_iterator_long_raw( i ); } MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ) { return bson_iterator_date( i ) / 1000; } MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? bson_iterator_int_raw( i ) - 4 : bson_iterator_int_raw( i ); } MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ) { return bson_iterator_value( i )[4]; } MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? 
bson_iterator_value( i ) + 9 : bson_iterator_value( i ) + 5; } MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ) { return bson_iterator_value( i ); } MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) { const char *p = bson_iterator_value( i ); return p + strlen( p ) + 1; } MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ) { bson_init_data( sub, ( char * )bson_iterator_value( i ) ); _bson_reset( sub ); sub->finished = 1; } MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ) { bson_iterator_from_buffer( sub, bson_iterator_value( i ) ); } /* ---------------------------- BUILDING ------------------------------ */ static void _bson_init_size( bson *b, int size ) { if( size == 0 ) b->data = NULL; else b->data = ( char * )bson_malloc( size ); b->dataSize = size; b->cur = b->data + 4; _bson_reset( b ); } MONGO_EXPORT void bson_init( bson *b ) { _bson_init_size( b, initialBufferSize ); } void bson_init_size( bson *b, int size ) { _bson_init_size( b, size ); } static void bson_append_byte( bson *b, char c ) { b->cur[0] = c; b->cur++; } static void bson_append( bson *b, const void *data, int len ) { memcpy( b->cur , data , len ); b->cur += len; } static void bson_append32( bson *b, const void *data ) { bson_little_endian32( b->cur, data ); b->cur += 4; } static void bson_append64( bson *b, const void *data ) { bson_little_endian64( b->cur, data ); b->cur += 8; } int bson_ensure_space( bson *b, const int bytesNeeded ) { int pos = b->cur - b->data; char *orig = b->data; int new_size; if ( pos + bytesNeeded <= b->dataSize ) return BSON_OK; new_size = 1.5 * ( b->dataSize + bytesNeeded ); if( new_size < b->dataSize ) { if( ( b->dataSize + bytesNeeded ) < INT_MAX ) new_size = INT_MAX; else { b->err = BSON_SIZE_OVERFLOW; return BSON_ERROR; } } b->data = bson_realloc( b->data, new_size ); if ( !b->data ) bson_fatal_msg( !!b->data, "realloc() failed" ); b->dataSize = 
new_size; b->cur += b->data - orig; return BSON_OK; } MONGO_EXPORT int bson_finish( bson *b ) { int i; if( b->err & BSON_NOT_UTF8 ) return BSON_ERROR; if ( ! b->finished ) { if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b, 0 ); i = b->cur - b->data; bson_little_endian32( b->data, &i ); b->finished = 1; } return BSON_OK; } MONGO_EXPORT void bson_destroy( bson *b ) { if (b) { bson_free( b->data ); b->err = 0; b->data = 0; b->cur = 0; b->finished = 1; } } static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) { const int len = strlen( name ) + 1; if ( b->finished ) { b->err |= BSON_ALREADY_FINISHED; return BSON_ERROR; } if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) { return BSON_ERROR; } if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) { bson_builder_error( b ); return BSON_ERROR; } bson_append_byte( b, ( char )type ); bson_append( b, name, len ); return BSON_OK; } /* ---------------------------- BUILDING TYPES ------------------------------ */ MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ) { if ( bson_append_estart( b, BSON_INT, name, 4 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ) { if ( bson_append_estart( b , BSON_LONG, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ) { if ( bson_append_estart( b, BSON_DOUBLE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &d ); return BSON_OK; } MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t i ) { if ( bson_append_estart( b, BSON_BOOL, name, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , i != 0 ); return BSON_OK; } MONGO_EXPORT int bson_append_null( bson *b, const char *name ) { if ( 
bson_append_estart( b , BSON_NULL, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_UNDEFINED, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } static int bson_append_string_base( bson *b, const char *name, const char *value, int len, bson_type type ) { int sl = len + 1; if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR ) return BSON_ERROR; if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) { return BSON_ERROR; } bson_append32( b , &sl ); bson_append( b , value , sl - 1 ); bson_append( b , "\0" , 1 ); return BSON_OK; } MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_STRING ); } MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_CODE ); } MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) { return bson_append_string_base( b, name, value, len, BSON_STRING ); } MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, int len ) { return bson_append_string_base( b, name, value, len, BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, int len ) { return bson_append_string_base( b, name, value, len, BSON_CODE ); } MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name, const char *code, int len, const bson *scope ) { int sl, size; if ( !scope ) return BSON_ERROR; sl = len + 1; size = 4 + 4 + sl + bson_size( scope ); if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR ) return 
BSON_ERROR; bson_append32( b, &size ); bson_append32( b, &sl ); bson_append( b, code, sl ); bson_append( b, scope->data, bson_size( scope ) ); return BSON_OK; } MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ) { return bson_append_code_w_scope_n( b, name, code, strlen ( code ), scope ); } MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) { if ( type == BSON_BIN_BINARY_OLD ) { int subtwolen = len + 4; if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b, &subtwolen ); bson_append_byte( b, type ); bson_append32( b, &len ); bson_append( b, str, len ); } else { if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b, &len ); bson_append_byte( b, type ); bson_append( b, str, len ); } return BSON_OK; } MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ) { if ( bson_append_estart( b, BSON_OID, name, 12 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , oid , 12 ); return BSON_OK; } MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ) { bson_oid_t oid; bson_oid_gen( &oid ); return bson_append_oid( b, name, &oid ); } MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) { const int plen = strlen( pattern )+1; const int olen = strlen( opts )+1; if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR ) return BSON_ERROR; if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , pattern , plen ); bson_append( b , opts , olen ); return BSON_OK; } MONGO_EXPORT int bson_append_bson( bson *b, const char *name, const bson *bson ) { if ( !bson ) return BSON_ERROR; if ( bson_append_estart( b, BSON_OBJECT, name, bson_size( bson ) ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , bson->data , bson_size( bson 
) ); return BSON_OK; } MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) { bson_iterator next = *elem; int size; bson_iterator_next( &next ); size = next.cur - elem->cur; if ( name_or_null == NULL ) { if( bson_ensure_space( b, size ) == BSON_ERROR ) return BSON_ERROR; bson_append( b, elem->cur, size ); } else { int data_size = size - 2 - strlen( bson_iterator_key( elem ) ); bson_append_estart( b, elem->cur[0], name_or_null, data_size ); bson_append( b, bson_iterator_value( elem ), data_size ); } return BSON_OK; } MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &( ts->i ) ); bson_append32( b , &( ts->t ) ); return BSON_OK; } MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &increment ); bson_append32( b , &time ); return BSON_OK; } MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ) { if ( bson_append_estart( b, BSON_DATE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &millis ); return BSON_OK; } MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ) { return bson_append_date( b, name, ( bson_date_t )secs * 1000 ); } MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_OBJECT, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int bson_append_start_array( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_ARRAY, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int 
bson_append_finish_object( bson *b ) { char *start; int i; if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , 0 ); start = b->data + b->stack[ --b->stackPos ]; i = b->cur - start; bson_little_endian32( start, &i ); return BSON_OK; } MONGO_EXPORT double bson_int64_to_double( int64_t i64 ) { return (double)i64; } MONGO_EXPORT int bson_append_finish_array( bson *b ) { return bson_append_finish_object( b ); } /* Error handling and allocators. */ static bson_err_handler err_handler = NULL; MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ) { bson_err_handler old = err_handler; err_handler = func; return old; } MONGO_EXPORT void bson_free( void *ptr ) { bson_free_func( ptr ); } MONGO_EXPORT void *bson_malloc( int size ) { void *p; p = bson_malloc_func( size ); bson_fatal_msg( !!p, "malloc() failed" ); return p; } void *bson_realloc( void *ptr, int size ) { void *p; p = bson_realloc_func( ptr, size ); bson_fatal_msg( !!p, "realloc() failed" ); return p; } int _bson_errprintf( const char *format, ... ) { va_list ap; int ret = 0; va_start( ap, format ); #ifndef R_SAFETY_NET ret = vfprintf( stderr, format, ap ); #endif va_end( ap ); return ret; } /** * This method is invoked when a non-fatal bson error is encountered. * Calls the error handler if available. * * @param */ void bson_builder_error( bson *b ) { if( err_handler ) err_handler( "BSON error." ); } void bson_fatal( int ok ) { bson_fatal_msg( ok, "" ); } void bson_fatal_msg( int ok , const char *msg ) { if ( ok ) return; if ( err_handler ) { err_handler( msg ); } #ifndef R_SAFETY_NET bson_errprintf( "error: %s\n" , msg ); exit( -5 ); #endif } /* Efficiently copy an integer to a string. 
*/ extern const char bson_numstrs[1000][4]; void bson_numstr( char *str, int i ) { if( i < 1000 ) memcpy( str, bson_numstrs[i], 4 ); else bson_sprintf( str,"%d", i ); } MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[7]; out[1] = in[6]; out[2] = in[5]; out[3] = in[4]; out[4] = in[3]; out[5] = in[2]; out[6] = in[1]; out[7] = in[0]; } MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[3]; out[1] = in[2]; out[2] = in[1]; out[3] = in[0]; }
/* NOTE(review): a second, complete copy of bson.c begins here, concatenated
 * after the first copy above.  The two copies redefine the same symbols
 * (bson_init, bson_append, ...) in one translation unit; this copy carries
 * the int -> size_t length fixes, so presumably only this one should be
 * kept -- TODO confirm which revision is intended and drop the other. */
/* bson.c */

/*    Copyright 2009, 2010 10gen Inc.
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 */

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>

#include "bson.h"
#include "encoding.h"

/* Starting capacity for bson_init's buffer. */
const int initialBufferSize = 128;

/* only need one of these */
static const int zero = 0;

/* Custom standard function pointers. */
void *( *bson_malloc_func )( size_t ) = malloc;
void *( *bson_realloc_func )( void *, size_t ) = realloc;
void ( *bson_free_func )( void * ) = free;
#ifdef R_SAFETY_NET
bson_printf_func bson_printf;
#else
bson_printf_func bson_printf = printf;
#endif
bson_fprintf_func bson_fprintf = fprintf;
bson_sprintf_func bson_sprintf = sprintf;

static int _bson_errprintf( const char *, ... );
bson_printf_func bson_errprintf = _bson_errprintf;

/* ObjectId fuzz functions.
*/ static int ( *oid_fuzz_func )( void ) = NULL; static int ( *oid_inc_func )( void ) = NULL; /* ---------------------------- READING ------------------------------ */ MONGO_EXPORT bson* bson_create( void ) { return (bson*)bson_malloc(sizeof(bson)); } MONGO_EXPORT void bson_dispose(bson* b) { bson_free(b); } MONGO_EXPORT bson *bson_empty( bson *obj ) { static char *data = "\005\0\0\0\0"; bson_init_data( obj, data ); obj->finished = 1; obj->err = 0; obj->errstr = NULL; obj->stackPos = 0; return obj; } MONGO_EXPORT int bson_copy( bson *out, const bson *in ) { if ( !out || !in ) return BSON_ERROR; if ( !in->finished ) return BSON_ERROR; bson_init_size( out, bson_size( in ) ); memcpy( out->data, in->data, bson_size( in ) ); out->finished = 1; return BSON_OK; } int bson_init_data( bson *b, char *data ) { b->data = data; return BSON_OK; } int bson_init_finished_data( bson *b, char *data ) { bson_init_data( b, data ); b->finished = 1; return BSON_OK; } static void _bson_reset( bson *b ) { b->finished = 0; b->stackPos = 0; b->err = 0; b->errstr = NULL; } MONGO_EXPORT int bson_size( const bson *b ) { int i; if ( ! b || ! 
b->data ) return 0; bson_little_endian32( &i, b->data ); return i; } MONGO_EXPORT size_t bson_buffer_size( const bson *b ) { return (b->cur - b->data + 1); } MONGO_EXPORT const char *bson_data( const bson *b ) { return (const char *)b->data; } static char hexbyte( char hex ) { if (hex >= '0' && hex <= '9') return (hex - '0'); else if (hex >= 'A' && hex <= 'F') return (hex - 'A' + 10); else if (hex >= 'a' && hex <= 'f') return (hex - 'a' + 10); else return 0x0; } MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ) { int i; for ( i=0; i<12; i++ ) { oid->bytes[i] = ( hexbyte( str[2*i] ) << 4 ) | hexbyte( str[2*i + 1] ); } } MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ) { static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; int i; for ( i=0; i<12; i++ ) { str[2*i] = hex[( oid->bytes[i] & 0xf0 ) >> 4]; str[2*i + 1] = hex[ oid->bytes[i] & 0x0f ]; } str[24] = '\0'; } MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ) { oid_fuzz_func = func; } MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ) { oid_inc_func = func; } MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ) { static int incr = 0; static int fuzz = 0; int i; time_t t = time( NULL ); if( oid_inc_func ) i = oid_inc_func(); else i = incr++; if ( !fuzz ) { if ( oid_fuzz_func ) fuzz = oid_fuzz_func(); else { srand( ( int )t ); fuzz = rand(); } } bson_big_endian32( &oid->ints[0], &t ); oid->ints[1] = fuzz; bson_big_endian32( &oid->ints[2], &i ); } MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ) { time_t out; bson_big_endian32( &out, &oid->ints[0] ); return out; } MONGO_EXPORT void bson_print( const bson *b ) { bson_print_raw( b->data , 0 ); } MONGO_EXPORT void bson_print_raw( const char *data , int depth ) { bson_iterator i; const char *key; int temp; bson_timestamp_t ts; char oidhex[25]; bson scope; bson_iterator_from_buffer( &i, data ); while ( bson_iterator_next( &i ) ) { bson_type t = 
bson_iterator_type( &i ); if ( t == 0 ) break; key = bson_iterator_key( &i ); for ( temp=0; temp<=depth; temp++ ) bson_printf( "\t" ); bson_printf( "%s : %d \t " , key , t ); switch ( t ) { case BSON_DOUBLE: bson_printf( "%f" , bson_iterator_double( &i ) ); break; case BSON_STRING: bson_printf( "%s" , bson_iterator_string( &i ) ); break; case BSON_SYMBOL: bson_printf( "SYMBOL: %s" , bson_iterator_string( &i ) ); break; case BSON_OID: bson_oid_to_string( bson_iterator_oid( &i ), oidhex ); bson_printf( "%s" , oidhex ); break; case BSON_BOOL: bson_printf( "%s" , bson_iterator_bool( &i ) ? "true" : "false" ); break; case BSON_DATE: bson_printf( "%ld" , ( long int )bson_iterator_date( &i ) ); break; case BSON_BINDATA: bson_printf( "BSON_BINDATA" ); break; case BSON_UNDEFINED: bson_printf( "BSON_UNDEFINED" ); break; case BSON_NULL: bson_printf( "BSON_NULL" ); break; case BSON_REGEX: bson_printf( "BSON_REGEX: %s", bson_iterator_regex( &i ) ); break; case BSON_CODE: bson_printf( "BSON_CODE: %s", bson_iterator_code( &i ) ); break; case BSON_CODEWSCOPE: bson_printf( "BSON_CODE_W_SCOPE: %s", bson_iterator_code( &i ) ); /* bson_init( &scope ); */ /* review - stepped on by bson_iterator_code_scope? 
*/ bson_iterator_code_scope( &i, &scope ); bson_printf( "\n\t SCOPE: " ); bson_print( &scope ); /* bson_destroy( &scope ); */ /* review - causes free error */ break; case BSON_INT: bson_printf( "%d" , bson_iterator_int( &i ) ); break; case BSON_LONG: bson_printf( "%lld" , ( uint64_t )bson_iterator_long( &i ) ); break; case BSON_TIMESTAMP: ts = bson_iterator_timestamp( &i ); bson_printf( "i: %d, t: %d", ts.i, ts.t ); break; case BSON_OBJECT: case BSON_ARRAY: bson_printf( "\n" ); bson_print_raw( bson_iterator_value( &i ) , depth + 1 ); break; default: bson_errprintf( "can't print type : %d\n" , t ); } bson_printf( "\n" ); } } /* ---------------------------- ITERATOR ------------------------------ */ MONGO_EXPORT bson_iterator* bson_iterator_create( void ) { return ( bson_iterator* )malloc( sizeof( bson_iterator ) ); } MONGO_EXPORT void bson_iterator_dispose(bson_iterator* i) { free(i); } MONGO_EXPORT void bson_iterator_init( bson_iterator *i, const bson *b ) { i->cur = b->data + 4; i->first = 1; } MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ) { i->cur = buffer + 4; i->first = 1; } MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ) { bson_iterator_init( it, (bson *)obj ); while( bson_iterator_next( it ) ) { if ( strcmp( name, bson_iterator_key( it ) ) == 0 ) break; } return bson_iterator_type( it ); } MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ) { return *( i->cur ); } MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) { size_t ds; if ( i->first ) { i->first = 0; return ( bson_type )( *i->cur ); } switch ( bson_iterator_type( i ) ) { case BSON_EOO: return BSON_EOO; /* don't advance */ case BSON_UNDEFINED: case BSON_NULL: ds = 0; break; case BSON_BOOL: ds = 1; break; case BSON_INT: ds = 4; break; case BSON_LONG: case BSON_DOUBLE: case BSON_TIMESTAMP: case BSON_DATE: ds = 8; break; case BSON_OID: ds = 12; break; case BSON_STRING: case BSON_SYMBOL: case 
BSON_CODE: ds = 4 + bson_iterator_int_raw( i ); break; case BSON_BINDATA: ds = 5 + bson_iterator_int_raw( i ); break; case BSON_OBJECT: case BSON_ARRAY: case BSON_CODEWSCOPE: ds = bson_iterator_int_raw( i ); break; case BSON_DBREF: ds = 4+12 + bson_iterator_int_raw( i ); break; case BSON_REGEX: { const char *s = bson_iterator_value( i ); const char *p = s; p += strlen( p )+1; p += strlen( p )+1; ds = p-s; break; } default: { char msg[] = "unknown type: 000000000000"; bson_numstr( msg+14, ( unsigned )( i->cur[0] ) ); bson_fatal_msg( 0, msg ); return 0; } } i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds; return ( bson_type )( *i->cur ); } MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ) { return ( bson_type )i->cur[0]; } MONGO_EXPORT const char *bson_iterator_key( const bson_iterator *i ) { return i->cur + 1; } MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ) { const char *t = i->cur + 1; t += strlen( t ) + 1; return t; } /* types */ int bson_iterator_int_raw( const bson_iterator *i ) { int out; bson_little_endian32( &out, bson_iterator_value( i ) ); return out; } double bson_iterator_double_raw( const bson_iterator *i ) { double out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } int64_t bson_iterator_long_raw( const bson_iterator *i ) { int64_t out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ) { return bson_iterator_value( i )[0]; } MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ) { return ( bson_oid_t * )bson_iterator_value( i ); } MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return ( int )bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ) { switch ( 
bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ) { bson_timestamp_t ts; bson_little_endian32( &( ts.i ), bson_iterator_value( i ) ); bson_little_endian32( &( ts.t ), bson_iterator_value( i ) + 4 ); return ts; } MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ) { int time; bson_little_endian32( &time, bson_iterator_value( i ) + 4 ); return time; } MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ) { int increment; bson_little_endian32( &increment, bson_iterator_value( i ) ); return increment; } MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_BOOL: return bson_iterator_bool_raw( i ); case BSON_INT: return bson_iterator_int_raw( i ) != 0; case BSON_LONG: return bson_iterator_long_raw( i ) != 0; case BSON_DOUBLE: return bson_iterator_double_raw( i ) != 0; case BSON_EOO: case BSON_NULL: return 0; default: return 1; } } MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_SYMBOL: return bson_iterator_value( i ) + 4; default: return ""; } } int bson_iterator_string_len( const bson_iterator *i ) { return bson_iterator_int_raw( i ); } MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_CODE: return bson_iterator_value( i ) + 4; case BSON_CODEWSCOPE: return 
bson_iterator_value( i ) + 8; default: return NULL; } } MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) { if ( bson_iterator_type( i ) == BSON_CODEWSCOPE ) { int code_len; bson_little_endian32( &code_len, bson_iterator_value( i )+4 ); bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) ); _bson_reset( scope ); scope->finished = 1; } else { bson_empty( scope ); } } MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ) { return bson_iterator_long_raw( i ); } MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ) { return bson_iterator_date( i ) / 1000; } MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? bson_iterator_int_raw( i ) - 4 : bson_iterator_int_raw( i ); } MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ) { return bson_iterator_value( i )[4]; } MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? 
bson_iterator_value( i ) + 9 : bson_iterator_value( i ) + 5; } MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ) { return bson_iterator_value( i ); } MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) { const char *p = bson_iterator_value( i ); return p + strlen( p ) + 1; } MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ) { bson_init_data( sub, ( char * )bson_iterator_value( i ) ); _bson_reset( sub ); sub->finished = 1; } MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ) { bson_iterator_from_buffer( sub, bson_iterator_value( i ) ); } /* ---------------------------- BUILDING ------------------------------ */ static void _bson_init_size( bson *b, int size ) { if( size == 0 ) b->data = NULL; else b->data = ( char * )bson_malloc( size ); b->dataSize = size; b->cur = b->data + 4; _bson_reset( b ); } MONGO_EXPORT void bson_init( bson *b ) { _bson_init_size( b, initialBufferSize ); } void bson_init_size( bson *b, int size ) { _bson_init_size( b, size ); } static void bson_append_byte( bson *b, char c ) { b->cur[0] = c; b->cur++; } static void bson_append( bson *b, const void *data, size_t len ) { memcpy( b->cur , data , len ); b->cur += len; } static void bson_append32( bson *b, const void *data ) { bson_little_endian32( b->cur, data ); b->cur += 4; } static void bson_append32_as_int( bson *b, int data ) { bson_little_endian32( b->cur, &data ); b->cur += 4; } static void bson_append64( bson *b, const void *data ) { bson_little_endian64( b->cur, data ); b->cur += 8; } int bson_ensure_space( bson *b, const size_t bytesNeeded ) { int pos = b->cur - b->data; char *orig = b->data; int new_size; if ( pos + bytesNeeded <= b->dataSize ) return BSON_OK; new_size = 1.5 * ( b->dataSize + bytesNeeded ); if( new_size < b->dataSize ) { if( ( b->dataSize + bytesNeeded ) < INT_MAX ) new_size = INT_MAX; else { b->err = BSON_SIZE_OVERFLOW; return BSON_ERROR; } } b->data = 
bson_realloc( b->data, new_size ); if ( !b->data ) bson_fatal_msg( !!b->data, "realloc() failed" ); b->dataSize = new_size; b->cur += b->data - orig; return BSON_OK; } MONGO_EXPORT int bson_finish( bson *b ) { int i; if( b->err & BSON_NOT_UTF8 ) return BSON_ERROR; if ( ! b->finished ) { if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b, 0 ); i = ( int )( b->cur - b->data ); bson_little_endian32( b->data, &i ); b->finished = 1; } return BSON_OK; } MONGO_EXPORT void bson_destroy( bson *b ) { if (b) { bson_free( b->data ); b->err = 0; b->data = 0; b->cur = 0; b->finished = 1; } } static int bson_append_estart( bson *b, int type, const char *name, const size_t dataSize ) { const int len = strlen( name ) + 1; if ( b->finished ) { b->err |= BSON_ALREADY_FINISHED; return BSON_ERROR; } if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) { return BSON_ERROR; } if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) { bson_builder_error( b ); return BSON_ERROR; } bson_append_byte( b, ( char )type ); bson_append( b, name, len ); return BSON_OK; } /* ---------------------------- BUILDING TYPES ------------------------------ */ MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ) { if ( bson_append_estart( b, BSON_INT, name, 4 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ) { if ( bson_append_estart( b , BSON_LONG, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ) { if ( bson_append_estart( b, BSON_DOUBLE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &d ); return BSON_OK; } MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t i ) { if ( bson_append_estart( b, BSON_BOOL, name, 1 ) == BSON_ERROR ) return BSON_ERROR; 
bson_append_byte( b , i != 0 ); return BSON_OK; } MONGO_EXPORT int bson_append_null( bson *b, const char *name ) { if ( bson_append_estart( b , BSON_NULL, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_UNDEFINED, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } static int bson_append_string_base( bson *b, const char *name, const char *value, size_t len, bson_type type ) { size_t sl = len + 1; if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR ) return BSON_ERROR; if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) { return BSON_ERROR; } bson_append32_as_int( b , ( int )sl ); bson_append( b , value , sl - 1 ); bson_append( b , "\0" , 1 ); return BSON_OK; } MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_STRING ); } MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_CODE ); } MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, size_t len ) { return bson_append_string_base( b, name, value, len, BSON_STRING ); } MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, size_t len ) { return bson_append_string_base( b, name, value, len, BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, size_t len ) { return bson_append_string_base( b, name, value, len, BSON_CODE ); } MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name, const char *code, size_t len, const bson *scope ) { size_t sl, size; if ( !scope ) 
return BSON_ERROR; sl = len + 1; size = 4 + 4 + sl + bson_size( scope ); if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR ) return BSON_ERROR; bson_append32_as_int( b, ( int )size ); bson_append32( b, &sl ); bson_append( b, code, sl ); bson_append( b, scope->data, bson_size( scope ) ); return BSON_OK; } MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ) { return bson_append_code_w_scope_n( b, name, code, strlen ( code ), scope ); } MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, size_t len ) { if ( type == BSON_BIN_BINARY_OLD ) { int subtwolen = len + 4; if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32_as_int( b, ( int )subtwolen ); bson_append_byte( b, type ); bson_append32_as_int( b, ( int )len ); bson_append( b, str, len ); } else { if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32_as_int( b, ( int )len ); bson_append_byte( b, type ); bson_append( b, str, len ); } return BSON_OK; } MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ) { if ( bson_append_estart( b, BSON_OID, name, 12 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , oid , 12 ); return BSON_OK; } MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ) { bson_oid_t oid; bson_oid_gen( &oid ); return bson_append_oid( b, name, &oid ); } MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) { const size_t plen = strlen( pattern )+1; const size_t olen = strlen( opts )+1; if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR ) return BSON_ERROR; if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , pattern , plen ); bson_append( b , opts , olen ); return BSON_OK; } MONGO_EXPORT int bson_append_bson( bson *b, 
const char *name, const bson *bson ) { if ( !bson ) return BSON_ERROR; if ( bson_append_estart( b, BSON_OBJECT, name, bson_size( bson ) ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , bson->data , bson_size( bson ) ); return BSON_OK; } MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) { bson_iterator next = *elem; size_t size; bson_iterator_next( &next ); size = next.cur - elem->cur; if ( name_or_null == NULL ) { if( bson_ensure_space( b, size ) == BSON_ERROR ) return BSON_ERROR; bson_append( b, elem->cur, size ); } else { size_t data_size = size - 2 - strlen( bson_iterator_key( elem ) ); bson_append_estart( b, elem->cur[0], name_or_null, data_size ); bson_append( b, bson_iterator_value( elem ), data_size ); } return BSON_OK; } MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &( ts->i ) ); bson_append32( b , &( ts->t ) ); return BSON_OK; } MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &increment ); bson_append32( b , &time ); return BSON_OK; } MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ) { if ( bson_append_estart( b, BSON_DATE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &millis ); return BSON_OK; } MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ) { return bson_append_date( b, name, ( bson_date_t )secs * 1000 ); } MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_OBJECT, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int bson_append_start_array( bson *b, const char 
*name ) { if ( bson_append_estart( b, BSON_ARRAY, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int bson_append_finish_object( bson *b ) { char *start; int i; if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , 0 ); start = b->data + b->stack[ --b->stackPos ]; i = ( int )( b->cur - start ); bson_little_endian32( start, &i ); return BSON_OK; } MONGO_EXPORT double bson_int64_to_double( int64_t i64 ) { return (double)i64; } MONGO_EXPORT int bson_append_finish_array( bson *b ) { return bson_append_finish_object( b ); } /* Error handling and allocators. */ static bson_err_handler err_handler = NULL; MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ) { bson_err_handler old = err_handler; err_handler = func; return old; } MONGO_EXPORT void bson_free( void *ptr ) { bson_free_func( ptr ); } MONGO_EXPORT void *bson_malloc( size_t size ) { void *p; p = bson_malloc_func( size ); bson_fatal_msg( !!p, "malloc() failed" ); return p; } void *bson_realloc( void *ptr, size_t size ) { void *p; p = bson_realloc_func( ptr, size ); bson_fatal_msg( !!p, "realloc() failed" ); return p; } int _bson_errprintf( const char *format, ... ) { va_list ap; int ret = 0; va_start( ap, format ); #ifndef R_SAFETY_NET ret = vfprintf( stderr, format, ap ); #endif va_end( ap ); return ret; } /** * This method is invoked when a non-fatal bson error is encountered. * Calls the error handler if available. * * @param */ void bson_builder_error( bson *b ) { if( err_handler ) err_handler( "BSON error." ); } void bson_fatal( int ok ) { bson_fatal_msg( ok, "" ); } void bson_fatal_msg( int ok , const char *msg ) { if ( ok ) return; if ( err_handler ) { err_handler( msg ); } #ifndef R_SAFETY_NET bson_errprintf( "error: %s\n" , msg ); exit( -5 ); #endif } /* Efficiently copy an integer to a string. 
*/ extern const char bson_numstrs[1000][4]; void bson_numstr( char *str, int i ) { if( i < 1000 ) memcpy( str, bson_numstrs[i], 4 ); else bson_sprintf( str,"%d", i ); } MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[7]; out[1] = in[6]; out[2] = in[5]; out[3] = in[4]; out[4] = in[3]; out[5] = in[2]; out[6] = in[1]; out[7] = in[0]; } MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[3]; out[1] = in[2]; out[2] = in[1]; out[3] = in[0]; }
MONGO_EXPORT int bson_append_finish_object( bson *b ) { char *start; int i; if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , 0 ); start = b->data + b->stack[ --b->stackPos ]; i = b->cur - start; bson_little_endian32( start, &i ); return BSON_OK; }
MONGO_EXPORT int bson_append_finish_object( bson *b ) { char *start; int i; if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , 0 ); start = b->data + b->stack[ --b->stackPos ]; i = ( int )( b->cur - start ); bson_little_endian32( start, &i ); return BSON_OK; }
{'added': [(109, 'MONGO_EXPORT size_t bson_buffer_size( const bson *b ) {'), (309, ' size_t ds;'), (423, ' return ( int )bson_iterator_long_raw( i );'), (607, 'static void bson_append( bson *b, const void *data, size_t len ) {'), (617, 'static void bson_append32_as_int( bson *b, int data ) {'), (618, ' bson_little_endian32( b->cur, &data );'), (619, ' b->cur += 4;'), (620, '}'), (621, ''), (627, 'int bson_ensure_space( bson *b, const size_t bytesNeeded ) {'), (665, ' i = ( int )( b->cur - b->data );'), (683, 'static int bson_append_estart( bson *b, int type, const char *name, const size_t dataSize ) {'), (750, ' const char *value, size_t len, bson_type type ) {'), (752, ' size_t sl = len + 1;'), (758, ' bson_append32_as_int( b , ( int )sl );'), (776, 'MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, size_t len ) {'), (780, 'MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, size_t len ) {'), (784, 'MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, size_t len ) {'), (789, ' const char *code, size_t len, const bson *scope ) {'), (791, ' size_t sl, size;'), (797, ' bson_append32_as_int( b, ( int )size );'), (808, 'MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, size_t len ) {'), (813, ' bson_append32_as_int( b, ( int )subtwolen );'), (815, ' bson_append32_as_int( b, ( int )len );'), (821, ' bson_append32_as_int( b, ( int )len );'), (842, ' const size_t plen = strlen( pattern )+1;'), (843, ' const size_t olen = strlen( opts )+1;'), (863, ' size_t size;'), (874, ' size_t data_size = size - 2 - strlen( bson_iterator_key( elem ) );'), (930, ' i = ( int )( b->cur - start );'), (958, 'MONGO_EXPORT void *bson_malloc( size_t size ) {'), (965, 'void *bson_realloc( void *ptr, size_t size ) {')], 'deleted': [(109, 'MONGO_EXPORT int bson_buffer_size( const bson *b ) {'), (309, ' int ds;'), (423, ' return bson_iterator_long_raw( i );'), 
(607, 'static void bson_append( bson *b, const void *data, int len ) {'), (622, 'int bson_ensure_space( bson *b, const int bytesNeeded ) {'), (660, ' i = b->cur - b->data;'), (678, 'static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) {'), (745, ' const char *value, int len, bson_type type ) {'), (747, ' int sl = len + 1;'), (753, ' bson_append32( b , &sl );'), (771, 'MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) {'), (775, 'MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, int len ) {'), (779, 'MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, int len ) {'), (784, ' const char *code, int len, const bson *scope ) {'), (786, ' int sl, size;'), (792, ' bson_append32( b, &size );'), (803, 'MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) {'), (808, ' bson_append32( b, &subtwolen );'), (810, ' bson_append32( b, &len );'), (816, ' bson_append32( b, &len );'), (837, ' const int plen = strlen( pattern )+1;'), (838, ' const int olen = strlen( opts )+1;'), (858, ' int size;'), (869, ' int data_size = size - 2 - strlen( bson_iterator_key( elem ) );'), (925, ' i = b->cur - start;'), (953, 'MONGO_EXPORT void *bson_malloc( int size ) {'), (960, 'void *bson_realloc( void *ptr, int size ) {')]}
32
27
829
5,670
10
71
2
https://github.com/10gen-archive/mongo-c-driver-legacy
CVE-2020-12135
CWE-190
981
gup.c
C
follow_pmd_mask
#include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sched/signal.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "internal.h" struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags) { /* * When core dumping an enormous anonymous area that nobody * has touched so far, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return NULL; } static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { /* No page to get reference */ if (flags & FOLL_GET) return -EFAULT; if (flags & FOLL_TOUCH) { pte_t entry = *pte; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(*pte, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } } /* Proper page table entry exists, but no corresponding struct page */ return -EEXIST; } /* * FOLL_FORCE can write to even unwritable pte's, but only * after we've gone through a COW cycle and they are dirty. 
*/ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { return pte_write(pte) || ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); } static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap) { struct mm_struct *mm = vma->vm_mm; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto retry; } if ((flags & FOLL_NUMA) && pte_protnone(pte)) goto no_page; if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { pte_unmap_unlock(ptep, ptl); return NULL; } page = vm_normal_page(vma, address, pte); if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { /* * Only return device mapping pages in the FOLL_GET case since * they are only valid while holding the pgmap reference. 
*/ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); if (*pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { int ret; ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_SPLIT && PageTransCompound(page)) { int ret; get_page(page); pte_unmap_unlock(ptep, ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return ERR_PTR(ret); goto retry; } if (flags & FOLL_GET) get_page(page); if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Do not mlock pte-mapped THP */ if (PageTransCompound(page)) goto out; /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. 
*/ mlock_vma_page(page); unlock_page(page); } } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags); } static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); /* * The READ_ONCE() will stabilize the pmdval in a register or * on the stack so that it will stop changing under the code. */ pmdval = READ_ONCE(*pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pmd_val(pmdval)))) { page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT); if (page) return page; return no_page_table(vma, flags); } retry: if (!pmd_present(pmdval)) { if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval)); if (is_pmd_migration_entry(pmdval)) pmd_migration_entry_wait(mm, pmd); pmdval = READ_ONCE(*pmd); /* * MADV_DONTNEED may convert the pmd to null because * mmap_sem is held in read mode */ if (pmd_none(pmdval)) return no_page_table(vma, flags); goto retry; } if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if ((flags & FOLL_NUMA) && pmd_protnone(pmdval)) return no_page_table(vma, flags); retry_locked: ptl = pmd_lock(mm, pmd); if (unlikely(pmd_none(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); if 
(likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); pmd_migration_entry_wait(mm, pmd); goto retry_locked; } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { get_page(page); spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page; } static struct page *follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) { pud_t *pud; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pud = pud_offset(p4dp, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pud(mm, address, pud, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pud_val(*pud)))) { page = follow_huge_pd(vma, address, __hugepd(pud_val(*pud)), flags, PUD_SHIFT); if (page) return page; return no_page_table(vma, flags); } if (pud_devmap(*pud)) { ptl = pud_lock(mm, pud); page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (unlikely(pud_bad(*pud))) return no_page_table(vma, flags); return follow_pmd_mask(vma, address, pud, flags, ctx); } static struct page *follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) { p4d_t *p4d; struct page *page; p4d = p4d_offset(pgdp, address); if 
(p4d_none(*p4d)) return no_page_table(vma, flags); BUILD_BUG_ON(p4d_huge(*p4d)); if (unlikely(p4d_bad(*p4d))) return no_page_table(vma, flags); if (is_hugepd(__hugepd(p4d_val(*p4d)))) { page = follow_huge_pd(vma, address, __hugepd(p4d_val(*p4d)), flags, P4D_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_pud_mask(vma, address, p4d, flags, ctx); } /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a * pointer to output page_mask * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches * the device's dev_pagemap metadata to avoid repeating expensive lookups. * * On output, the @ctx->page_mask is set according to the size of the page. * * Return: the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). 
*/ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) { pgd_t *pgd; struct page *page; struct mm_struct *mm = vma->vm_mm; ctx->page_mask = 0; /* make this handle hugepd */ page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { BUG_ON(flags & FOLL_GET); return page; } pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); if (pgd_huge(*pgd)) { page = follow_huge_pgd(mm, address, pgd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pgd_val(*pgd)))) { page = follow_huge_pd(vma, address, __hugepd(pgd_val(*pgd)), flags, PGDIR_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_p4d_mask(vma, address, pgd, flags, ctx); } struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) { struct follow_page_context ctx = { NULL }; struct page *page; page = follow_page_mask(vma, address, foll_flags, &ctx); if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return page; } static int get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret = -EFAULT; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); BUG_ON(pgd_none(*pgd)); p4d = p4d_offset(pgd, address); BUG_ON(p4d_none(*p4d)); pud = pud_offset(p4d, address); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, *pte); if (!*page) { if ((gup_flags & FOLL_DUMP) || 
!is_zero_pfn(pte_pfn(*pte))) goto unmap; *page = pte_page(*pte); /* * This should never happen (a device public page in the gate * area). */ if (is_device_public_page(*page)) goto unmap; } get_page(*page); out: ret = 0; unmap: pte_unmap(pte); return ret; } /* * mmap_sem must be held on entry. If @nonblocking != NULL and * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released. * If it is, *@nonblocking will be set to 0 and -EBUSY returned. */ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) { unsigned int fault_flags = 0; vm_fault_t ret; /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (*flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (*flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); if (err) return err; BUG(); } if (tsk) { if (ret & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; } if (ret & VM_FAULT_RETRY) { if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *nonblocking = 0; return -EBUSY; } /* * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when * necessary, even if maybe_mkwrite decided not to set pte_write. We * can thus safely do subsequent page lookups as if they were reads. * But only do so when looping for pte_write is futile: in some cases * userspace may also be wanting to write to the gotten user page, * which a read fault here might prevent (a readonly page might get * reCOWed by userspace write). 
*/ if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) *flags |= FOLL_COW; return 0; } static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) return -EFAULT; if (write) { if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could * set a breakpoint in a read-only mapping of an * executable, without corrupting the file (yet only * when that file had been opened for writing!). * Anon pages in shared mappings are surprising: now * just reject it. */ if (!is_cow_mapping(vm_flags)) return -EFAULT; } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * Is there actually any vma we can reach here which does not * have VM_MAYREAD set? */ if (!(vm_flags & VM_MAYREAD)) return -EFAULT; } /* * gups are always data accesses, not instruction * fetches, so execute=false here */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return -EFAULT; return 0; } /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @nonblocking: whether waiting for disk IO or mmap_sem contention * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. 
If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held. It may be released. See below. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If @nonblocking != NULL, __get_user_pages will not wait for disk IO * or mmap_sem contention, and if waiting is needed to pin all pages, * *@nonblocking will be set to 0. Further, if @gup_flags does not * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in * this case. * * A caller using such a combination of @nonblocking and @gup_flags * must therefore hold the mmap_sem for reading only, and recognize * when it's been released. Otherwise, it must be held for either * reading or writing and will not be released. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. 
__get_user_pages should be used only if * you need some special @gup_flags. */ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; struct follow_page_context ctx = { NULL }; if (!nr_pages) return 0; VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* * If FOLL_FORCE is set then do not force a full fault as the hinting * fault information is unrelated to the reference behaviour of a task * using the address space */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &pages[i] : NULL); if (ret) goto out; ctx.page_mask = 0; goto next_page; } if (!vma || check_vma_flags(vma, gup_flags)) { ret = -EFAULT; goto out; } if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, gup_flags, nonblocking); continue; } } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; goto out; } cond_resched(); page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page) { ret = faultin_page(tsk, vma, start, &foll_flags, nonblocking); switch (ret) { case 0: goto retry; case -EBUSY: ret = 0; /* FALLTHRU */ case -EFAULT: case -ENOMEM: case -EHWPOISON: goto out; case -ENOENT: goto next_page; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. 
*/ goto next_page; } else if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); flush_dcache_page(page); ctx.page_mask = 0; } next_page: if (vmas) { vmas[i] = vma; ctx.page_mask = 0; } page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); out: if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return i ? i : ret; } static bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) { bool write = !!(fault_flags & FAULT_FLAG_WRITE); bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; if (!(vm_flags & vma->vm_flags)) return false; /* * The architecture might have a hardware protection * mechanism other than read/write that can deny access. * * gup always represents data access, not instruction * fetches, so execute=false here: */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return false; return true; } /* * fixup_user_fault() - manually resolve a user page fault * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @address: user address * @fault_flags:flags to pass down to handle_mm_fault() * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller * does not allow retry * * This is meant to be called in the specific scenario where for locking reasons * we try to access user memory in atomic context (within a pagefault_disable() * section), this returns -EFAULT, and we want to resolve the user fault before * trying again. * * Typically this is meant to be used by the futex code. 
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it has not the
 * same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		/*
		 * handle_mm_fault() dropped mmap_sem before returning
		 * VM_FAULT_RETRY; retake it and retry exactly once
		 * (FAULT_FLAG_TRIED prevents a second retry).
		 */
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	/* Account the fault as major or minor against @tsk, if requested. */
	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

/*
 * Core of the *_locked/unlocked GUP variants: call __get_user_pages() and,
 * whenever a fault had to drop mmap_sem (VM_FAULT_RETRY), retake the lock
 * and resume pinning one page at a time from the faulting offset with
 * FOLL_TRIED set.  If @locked is non-NULL, *locked reports to the caller
 * whether mmap_sem is still held on return.
 */
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			/* A single-page request can only yield 0 or 1. */
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	/* Caller holds mmap_sem; *locked reports whether it is still held. */
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	/* Take mmap_sem here; it is guaranteed released before returning. */
	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages,
				      NULL, &locked,
				      gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must * be called after the page is finished with, and before put_page is called. * * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual * addresses. The pages may be submitted for DMA to devices or accessed via * their kernel linear mapping (via the kmap APIs). Care should be taken to * use the correct cache flushing APIs. 
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	/* FOLL_REMOTE marks the access as coming from another mm's context. */
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

#ifdef CONFIG_FS_DAX
/*
 * This is the same as get_user_pages() in that it assumes we are
 * operating on the current task's mm, but it goes further to validate
 * that the vmas associated with the address range are suitable for
 * longterm elevated page reference counts. For example, filesystem-dax
 * mappings are subject to the lifetime enforced by the filesystem and
 * we need guarantees that longterm users like RDMA and V4L2 only
 * establish mappings that have a kernel enforced revocation mechanism.
 *
 * "longterm" == userspace controlled elevated page count lifetime.
 * Contrast this to iov_iter_get_pages() usages which are transient.
 */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			     unsigned int gup_flags, struct page **pages,
			     struct vm_area_struct **vmas_arg)
{
	struct vm_area_struct **vmas = vmas_arg;
	struct vm_area_struct *vma_prev = NULL;
	long rc, i;

	if (!pages)
		return -EINVAL;

	/*
	 * We need the vmas to validate against fsdax; allocate a scratch
	 * array if the caller did not supply one.
	 */
	if (!vmas) {
		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
			       GFP_KERNEL);
		if (!vmas)
			return -ENOMEM;
	}

	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);

	/*
	 * Scan the pinned range; stop at the first fsdax vma, which cannot
	 * support long-term pins.  vma_prev skips repeated lookups of the
	 * same vma.
	 */
	for (i = 0; i < rc; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			break;
	}

	/*
	 * Either get_user_pages() failed, or the vma validation
	 * succeeded, in either case we don't need to put_page() before
	 * returning.
	 */
	if (i >= rc)
		goto out;

	/* An fsdax vma was found: drop every pin and reject the request. */
	for (i = 0; i < rc; i++)
		put_page(pages[i]);
	rc = -EOPNOTSUPP;
out:
	if (vmas != vmas_arg)
		kfree(vmas);
	return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking:
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	/* Callers must pass a page-aligned range fully inside @vma. */
	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			/*
			 * (Re)take mmap_sem: populate_vma_page_range() may
			 * have dropped it and cleared @locked on the
			 * previous iteration.
			 */
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks.
Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. * * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the fast_gup walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to * free pages containing page tables or TLB flushing requires IPI broadcast. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GENERIC_GUP #ifndef gup_get_pte /* * We assume that the PTE can be read atomically. If this is not the case for * your architecture, please provide the helper. 
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

/*
 * Undo the speculative page references taken since @nr_start: drop each
 * page's reference and rewind *nr.  Used on the device-pagemap error paths.
 */
static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
/*
 * Walk the pte level without taking locks: take a speculative reference on
 * each page, then re-check the pte so that a concurrent unmap/remap is
 * detected and the reference dropped.  Returns 1 if the whole range was
 * pinned, 0 to make the caller fall back to the slow path.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, write))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		/*
		 * NOTE(review): this takes a raw speculative reference with
		 * no guard against page->_refcount overflow; later kernels
		 * use try_get_compound_head() here (see CVE-2019-11487) --
		 * confirm whether this tree carries that fix.
		 */
		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		/* Re-check: if the pte changed under us, drop the ref. */
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);

		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
*/ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); return 0; } SetPageReferenced(page); pages[*nr] = page; get_page(page); (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); if (pgmap) put_dev_pagemap(pgmap); return 1; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } #else static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, 
			struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pmd_access_permitted(orig, write))
		return 0;

	if (pmd_devmap(orig))
		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);

	/* Record every sub-page of the huge mapping in @pages. */
	refs = 0;
	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	/*
	 * Take @refs speculative references on the head page, then re-check
	 * the pmd; if it changed, undo everything.
	 * NOTE(review): like gup_pte_range(), no refcount-overflow guard
	 * here (CVE-2019-11487 class) -- confirm fix status for this tree.
	 */
	head = compound_head(pmd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

/* Same scheme as gup_huge_pmd(), one level up (pud-sized huge pages). */
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, write))
		return 0;

	if (pud_devmap(orig))
		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pud_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

/* Same scheme again for pgd-level huge pages (no devmap at this level). */
static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, write))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));

	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pgd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they * can be serialised against THP migration. */ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architecture have different format for hugetlbfs * pmd format and THP pmd format */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset(&pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (p4d_none(p4d)) return 0; BUILD_BUG_ON(p4d_huge(p4d)); if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if 
(!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_pgd_range(unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, nr)) return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, nr)) return; } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #ifndef gup_fast_permitted /* * Check if it's allowed to use __get_user_pages_fast() for the range, or * we need to fall back to the slow version: */ bool gup_fast_permitted(unsigned long start, int nr_pages, int write) { unsigned long len, end; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; return end >= start; } #endif /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * Note a difference with get_user_pages_fast: this always returns the * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long len, end; unsigned long flags; int nr = 0; start &= PAGE_MASK; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok((void __user *)start, len))) return 0; /* * Disable interrupts. We use the nested form as we can already have * interrupts disabled by get_futex_key. * * With interrupts disabled, we block page table pages from being * freed from under us. 
	 *  See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	if (gup_fast_permitted(start, nr_pages, write)) {
		/* IRQs off: blocks page-table frees and THP-split IPIs. */
		local_irq_save(flags);
		gup_pgd_range(start, end, write, pages, &nr);
		local_irq_restore(flags);
	}

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	unsigned long addr, len, end;
	int nr = 0, ret = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (nr_pages <= 0)
		return 0;

	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	/* Lockless fast path first. */
	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
		gup_pgd_range(addr, end, write, pages, &nr);
		local_irq_enable();
		ret = nr;
	}

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
					      write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			/*
			 * Some pages were already pinned by the fast path:
			 * report those even if the slow path errored out.
			 */
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_GUP */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * State threaded through one follow_page_mask() walk: a cached
 * dev_pagemap reference for ZONE_DEVICE pages (released by the caller)
 * and the page-size mask of the mapping that was found.
 */
struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

/*
 * Common "no page here" result for the follow_page_* walkers: usually
 * NULL (let the caller fault the page in), but see below for the core
 * dump special case.
 */
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

/*
 * Handle a pte that maps a raw pfn with no backing struct page.  Such an
 * entry cannot be "gotten" (FOLL_GET fails), but FOLL_TOUCH young/dirty
 * accounting is still applied.  Returns -EEXIST so the caller can tell
 * "valid entry, no struct page" apart from a genuine fault.
 */
static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		/* Only write the pte back if the touch actually changed it. */
		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

/*
 * Last level of the follow_page_mask() walk: resolve a single pte under
 * the pte lock, honouring the FOLL_* flags (migration wait, devmap,
 * THP split, reference grab, touch/mlock accounting).  Returns the page,
 * NULL to make the caller fault, or an ERR_PTR.
 */
static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;

		/* Hold a reference across the unlock/split/relock dance. */
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		/* try_get_page() guards against refcount overflow. */
		if (unlikely(!try_get_page(page))) {
			page = ERR_PTR(-ENOMEM);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/*
 * PMD level of the walk: handle hugetlb/hugepd, devmap and transparent
 * huge pmds (including FOLL_SPLIT and migration entries), otherwise
 * descend to follow_page_pte().
 */
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	/* Re-check everything under the pmd lock; it may have changed. */
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT) {
		int ret;

		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

/*
 * PUD level of the walk: handle hugetlb/hugepd and devmap puds,
 * otherwise descend to follow_pmd_mask().
 */
static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

/*
 * P4D level of the walk: p4d entries are never huge (BUILD_BUG_ON below),
 * so only hugepd needs special-casing before descending to
 * follow_pud_mask().
 */
static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *	pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

/*
 * Convenience wrapper around follow_page_mask() for callers that do not
 * care about the page-size mask; drops any dev_pagemap reference the walk
 * may have taken.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

/*
 * Resolve an address inside the architecture's "gate" area (e.g. the
 * vsyscall page), which is not covered by an ordinary vma.  On success
 * *vma is set to the gate vma and, if @page is non-NULL, *page holds a
 * reference to the backing page.
 */
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	/* Gate area page tables are set up at boot; they must exist. */
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	if (unlikely(!try_get_page(*page))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	/* Fault accounting is only done when a task was supplied. */
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
* But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

/*
 * Validate that a vma may be gup'ed with the given FOLL_* flags.
 * Returns 0 if access is permitted, -EFAULT otherwise.
 */
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags)) {
				ret = -EFAULT;
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				/* hugetlb has its own gup path. */
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				/* FALLTHRU */
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		/*
		 * For a huge mapping, advance past all small pages the
		 * returned compound page covers in one step.
		 */
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

/*
 * Check whether a fault with @fault_flags is allowed on @vma: basic
 * read/write permission plus any architecture-specific protection
 * (e.g. protection keys).
 */
static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
* @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it has not the
 * same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	/* Retry is only possible when the caller can observe the unlock. */
	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		/* The fault dropped mmap_sem; re-take it and try once more. */
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

/*
 * Core of the *_locked/*_unlocked gup variants: drive __get_user_pages()
 * and transparently re-take mmap_sem and retry whenever a fault returned
 * VM_FAULT_RETRY, so callers get as many pages as possible in one call.
 * *@locked tracks whether mmap_sem is still held on return.
 */
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages,
				      NULL, &locked,
				      gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

#ifdef CONFIG_FS_DAX
/*
 * This is the same as get_user_pages() in that it assumes we are
 * operating on the current task's mm, but it goes further to validate
 * that the vmas associated with the address range are suitable for
 * longterm elevated page reference counts. For example, filesystem-dax
 * mappings are subject to the lifetime enforced by the filesystem and
 * we need guarantees that longterm users like RDMA and V4L2 only
 * establish mappings that have a kernel enforced revocation mechanism.
 *
 * "longterm" == userspace controlled elevated page count lifetime.
 * Contrast this to iov_iter_get_pages() usages which are transient.
 */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			     unsigned int gup_flags, struct page **pages,
			     struct vm_area_struct **vmas_arg)
{
	struct vm_area_struct **vmas = vmas_arg;
	struct vm_area_struct *vma_prev = NULL;
	long rc, i;

	if (!pages)
		return -EINVAL;

	/* A vmas array is required for the fsdax check; allocate if needed. */
	if (!vmas) {
		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
			       GFP_KERNEL);
		if (!vmas)
			return -ENOMEM;
	}

	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);

	for (i = 0; i < rc; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			break;
	}

	/*
	 * Either get_user_pages() failed, or the vma validation
	 * succeeded, in either case we don't need to put_page() before
	 * returning.
	 */
	if (i >= rc)
		goto out;

	/* An fsdax vma was found: drop every pinned page and refuse. */
	for (i = 0; i < rc; i++)
		put_page(pages[i]);
	rc = -EOPNOTSUPP;
out:
	if (vmas != vmas_arg)
		kfree(vmas);
	return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
* @vma: target vma * @start: start address * @end: end address * @nonblocking: * * This takes care of mlocking the pages too if VM_LOCKED is set. * * return 0 on success, negative error code on error. * * vma->vm_mm->mmap_sem must be held. * * If @nonblocking is NULL, it may be held for read or write and will * be unperturbed. * * If @nonblocking is non-NULL, it must held for read only and may be * released. If it's released, *@nonblocking will be set to 0. */ long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; if (vma->vm_flags & VM_LOCKONFAULT) gup_flags &= ~FOLL_POPULATE; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. VMAs must be already marked with the desired vm_flags, and * mmap_sem must not be held. 
*/ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save diskspace. * * Called without mmap_sem, but after all other threads have been killed. 
*/ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct vm_area_struct *vma; struct page *page; if (__get_user_pages(current, current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; } #endif /* CONFIG_ELF_CORE */ /* * Generic Fast GUP * * get_user_pages_fast attempts to pin user pages by walking the page * tables directly and avoids taking locks. Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. * * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the fast_gup walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to * free pages containing page tables or TLB flushing requires IPI broadcast. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GENERIC_GUP #ifndef gup_get_pte /* * We assume that the PTE can be read atomically. If this is not the case for * your architecture, please provide the helper. 
*/ static inline pte_t gup_get_pte(pte_t *ptep) { return READ_ONCE(*ptep); } #endif static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) { while ((*nr) - nr_start) { struct page *page = pages[--(*nr)]; ClearPageReferenced(page); put_page(page); } } /* * Return the compund head page with ref appropriately incremented, * or NULL if that failed. */ static inline struct page *try_get_compound_head(struct page *page, int refs) { struct page *head = compound_head(page); if (WARN_ON_ONCE(page_ref_count(head) < 0)) return NULL; if (unlikely(!page_cache_add_speculative(head, refs))) return NULL; return head; } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *head, *page; /* * Similar to the PMD case below, NUMA hinting must take slow * path using the pte_protnone check. */ if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, write)) goto pte_unmap; if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); goto pte_unmap; } } else if (pte_special(pte)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); head = try_get_compound_head(page, 1); if (!head) goto pte_unmap; if (unlikely(pte_val(pte) != pte_val(*ptep))) { put_page(head); goto pte_unmap; } VM_BUG_ON_PAGE(compound_head(page) != head, page); SetPageReferenced(page); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: if (pgmap) put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. 
* * For a futex to be placed on a THP tail page, get_futex_key requires a * __get_user_pages_fast implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. */ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); return 0; } SetPageReferenced(page); pages[*nr] = page; get_page(page); (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); if (pgmap) put_dev_pagemap(pgmap); return 1; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } #else static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int __gup_device_huge_pud(pud_t pud, pud_t 
*pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pmd_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pud_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { int refs; struct page *head, *page; if (!pgd_access_permitted(orig, write)) return 0; BUILD_BUG_ON(pgd_devmap(orig)); refs = 0; page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pgd_page(orig), refs); if (!head) { *nr -= refs; return 
0; } if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they * can be serialised against THP migration. */ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architecture have different format for hugetlbfs * pmd format and THP pmd format */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset(&pgd, addr); do { p4d_t p4d = 
READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (p4d_none(p4d)) return 0; BUILD_BUG_ON(p4d_huge(p4d)); if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_pgd_range(unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, nr)) return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, nr)) return; } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #ifndef gup_fast_permitted /* * Check if it's allowed to use __get_user_pages_fast() for the range, or * we need to fall back to the slow version: */ bool gup_fast_permitted(unsigned long start, int nr_pages, int write) { unsigned long len, end; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; return end >= start; } #endif /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * Note a difference with get_user_pages_fast: this always returns the * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long len, end; unsigned long flags; int nr = 0; start &= PAGE_MASK; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok((void __user *)start, len))) return 0; /* * Disable interrupts. We use the nested form as we can already have * interrupts disabled by get_futex_key. 
* * With interrupts disabled, we block page table pages from being * freed from under us. See struct mmu_table_batch comments in * include/asm-generic/tlb.h for more details. * * We do not adopt an rcu_read_lock(.) here as we also want to * block IPIs that come from THPs splitting. */ if (gup_fast_permitted(start, nr_pages, write)) { local_irq_save(flags); gup_pgd_range(start, end, write, pages, &nr); local_irq_restore(flags); } return nr; } /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_sem. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. */ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long addr, len, end; int nr = 0, ret = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (nr_pages <= 0) return 0; if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; if (gup_fast_permitted(start, nr_pages, write)) { local_irq_disable(); gup_pgd_range(addr, end, write, pages, &nr); local_irq_enable(); ret = nr; } if (nr < nr_pages) { /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; ret = get_user_pages_unlocked(start, nr_pages - nr, pages, write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { if (ret < 0) ret = nr; else ret += nr; } } return ret; } #endif /* CONFIG_HAVE_GENERIC_GUP */
static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); /* * The READ_ONCE() will stabilize the pmdval in a register or * on the stack so that it will stop changing under the code. */ pmdval = READ_ONCE(*pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pmd_val(pmdval)))) { page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT); if (page) return page; return no_page_table(vma, flags); } retry: if (!pmd_present(pmdval)) { if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval)); if (is_pmd_migration_entry(pmdval)) pmd_migration_entry_wait(mm, pmd); pmdval = READ_ONCE(*pmd); /* * MADV_DONTNEED may convert the pmd to null because * mmap_sem is held in read mode */ if (pmd_none(pmdval)) return no_page_table(vma, flags); goto retry; } if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if ((flags & FOLL_NUMA) && pmd_protnone(pmdval)) return no_page_table(vma, flags); retry_locked: ptl = pmd_lock(mm, pmd); if (unlikely(pmd_none(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); pmd_migration_entry_wait(mm, pmd); goto retry_locked; } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return 
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { get_page(page); spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page; }
static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); /* * The READ_ONCE() will stabilize the pmdval in a register or * on the stack so that it will stop changing under the code. */ pmdval = READ_ONCE(*pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pmd_val(pmdval)))) { page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT); if (page) return page; return no_page_table(vma, flags); } retry: if (!pmd_present(pmdval)) { if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval)); if (is_pmd_migration_entry(pmdval)) pmd_migration_entry_wait(mm, pmd); pmdval = READ_ONCE(*pmd); /* * MADV_DONTNEED may convert the pmd to null because * mmap_sem is held in read mode */ if (pmd_none(pmdval)) return no_page_table(vma, flags); goto retry; } if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if ((flags & FOLL_NUMA) && pmd_protnone(pmdval)) return no_page_table(vma, flags); retry_locked: ptl = pmd_lock(mm, pmd); if (unlikely(pmd_none(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); pmd_migration_entry_wait(mm, pmd); goto retry_locked; } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return 
follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { if (unlikely(!try_get_page(page))) { spin_unlock(ptl); return ERR_PTR(-ENOMEM); } spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page; }
{'added': [(160, '\tif (flags & FOLL_GET) {'), (161, '\t\tif (unlikely(!try_get_page(page))) {'), (162, '\t\t\tpage = ERR_PTR(-ENOMEM);'), (163, '\t\t\tgoto out;'), (164, '\t\t}'), (165, '\t}'), (302, '\t\t\tif (unlikely(!try_get_page(page))) {'), (303, '\t\t\t\tspin_unlock(ptl);'), (304, '\t\t\t\treturn ERR_PTR(-ENOMEM);'), (305, '\t\t\t}'), (507, '\tif (unlikely(!try_get_page(*page))) {'), (508, '\t\tret = -ENOMEM;'), (509, '\t\tgoto unmap;'), (510, '\t}'), (1406, '/*'), (1407, ' * Return the compund head page with ref appropriately incremented,'), (1408, ' * or NULL if that failed.'), (1409, ' */'), (1410, 'static inline struct page *try_get_compound_head(struct page *page, int refs)'), (1411, '{'), (1412, '\tstruct page *head = compound_head(page);'), (1413, '\tif (WARN_ON_ONCE(page_ref_count(head) < 0))'), (1414, '\t\treturn NULL;'), (1415, '\tif (unlikely(!page_cache_add_speculative(head, refs)))'), (1416, '\t\treturn NULL;'), (1417, '\treturn head;'), (1418, '}'), (1419, ''), (1455, '\t\thead = try_get_compound_head(page, 1);'), (1456, '\t\tif (!head)'), (1595, '\thead = try_get_compound_head(pmd_page(orig), refs);'), (1596, '\tif (!head) {'), (1633, '\thead = try_get_compound_head(pud_page(orig), refs);'), (1634, '\tif (!head) {'), (1670, '\thead = try_get_compound_head(pgd_page(orig), refs);'), (1671, '\tif (!head) {')], 'deleted': [(160, '\tif (flags & FOLL_GET)'), (161, '\t\tget_page(page);'), (298, '\t\t\tget_page(page);'), (500, '\tget_page(*page);'), (1430, '\t\thead = compound_head(page);'), (1432, '\t\tif (!page_cache_get_speculative(head))'), (1571, '\thead = compound_head(pmd_page(orig));'), (1572, '\tif (!page_cache_add_speculative(head, refs)) {'), (1609, '\thead = compound_head(pud_page(orig));'), (1610, '\tif (!page_cache_add_speculative(head, refs)) {'), (1646, '\thead = compound_head(pgd_page(orig));'), (1647, '\tif (!page_cache_add_speculative(head, refs)) {')]}
36
12
1,194
7,593
95
642
26
https://github.com/torvalds/linux
CVE-2019-11487
CWE-416
2,990
nodemanager.c
C
o2nm_node_local_store
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. 
Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) 
config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to 
connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, 
ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. 
*/ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct 
o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? 
container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops 
= &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. 
Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; static inline void o2nm_lock_subsystem(void); static inline void o2nm_unlock_subsystem(void); struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; 
read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ if (node->nd_item.ci_parent) return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); else return NULL; } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once 
we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret 
!= 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. 
*/ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct 
o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? 
container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops 
= &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; static inline void o2nm_lock_subsystem(void) { mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); } static inline void o2nm_unlock_subsystem(void) { mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); } int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? 
*/ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; }
static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; }
{'added': [(43, 'static inline void o2nm_lock_subsystem(void);'), (44, 'static inline void o2nm_unlock_subsystem(void);'), (45, ''), (187, '\tif (node->nd_item.ci_parent)'), (188, '\t\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (189, '\telse'), (190, '\t\treturn NULL;'), (203, '\tstruct o2nm_cluster *cluster;'), (223, '\to2nm_lock_subsystem();'), (224, '\tcluster = to_o2nm_cluster_from_node(node);'), (225, '\tif (!cluster) {'), (226, '\t\to2nm_unlock_subsystem();'), (227, '\t\treturn -EINVAL;'), (228, '\t}'), (229, ''), (242, '\to2nm_unlock_subsystem();'), (243, ''), (287, '\tstruct o2nm_cluster *cluster;'), (304, '\to2nm_lock_subsystem();'), (305, '\tcluster = to_o2nm_cluster_from_node(node);'), (306, '\tif (!cluster) {'), (307, '\t\to2nm_unlock_subsystem();'), (308, '\t\treturn -EINVAL;'), (309, '\t}'), (310, ''), (323, '\to2nm_unlock_subsystem();'), (324, ''), (342, '\tstruct o2nm_cluster *cluster;'), (360, '\to2nm_lock_subsystem();'), (361, '\tcluster = to_o2nm_cluster_from_node(node);'), (362, '\tif (!cluster) {'), (363, '\t\tret = -EINVAL;'), (364, '\t\tgoto out;'), (365, '\t}'), (366, ''), (370, '\t cluster->cl_local_node != node->nd_num) {'), (371, '\t\tret = -EBUSY;'), (372, '\t\tgoto out;'), (373, '\t}'), (379, '\t\t\tgoto out;'), (394, '\tret = count;'), (395, ''), (396, 'out:'), (397, '\to2nm_unlock_subsystem();'), (398, '\treturn ret;'), (778, 'static inline void o2nm_lock_subsystem(void)'), (779, '{'), (780, '\tmutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (781, '}'), (782, ''), (783, 'static inline void o2nm_unlock_subsystem(void)'), (784, '{'), (785, '\tmutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);'), (786, '}'), (787, '')], 'deleted': [(184, '\treturn to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);'), (197, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (272, '\tstruct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);'), (318, '\tstruct o2nm_cluster *cluster = 
to_o2nm_cluster_from_node(node);'), (339, '\t cluster->cl_local_node != node->nd_num)'), (340, '\t\treturn -EBUSY;'), (346, '\t\t\treturn ret;'), (361, '\treturn count;')]}
55
8
669
3,744
36
240
17
https://github.com/torvalds/linux
CVE-2017-18216
CWE-476
1,553
update.c
C
update_read_synchronize
/** * FreeRDP: A Remote Desktop Protocol Implementation * Update Data PDUs * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/synch.h> #include <winpr/thread.h> #include <winpr/collections.h> #include "update.h" #include "surface.h" #include "message.h" #include "info.h" #include "window.h" #include <freerdp/log.h> #include <freerdp/peer.h> #include <freerdp/codec/bitmap.h> #include "../cache/pointer.h" #include "../cache/palette.h" #include "../cache/bitmap.h" #define TAG FREERDP_TAG("core.update") static const char* const UPDATE_TYPE_STRINGS[] = { "Orders", "Bitmap", "Palette", "Synchronize" }; static const char* update_type_to_string(UINT16 updateType) { if (updateType >= ARRAYSIZE(UPDATE_TYPE_STRINGS)) return "UNKNOWN"; return UPDATE_TYPE_STRINGS[updateType]; } static BOOL update_recv_orders(rdpUpdate* update, wStream* s) { UINT16 numberOrders; if (Stream_GetRemainingLength(s) < 6) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 6"); return FALSE; } Stream_Seek_UINT16(s); /* pad2OctetsA (2 bytes) */ Stream_Read_UINT16(s, numberOrders); /* numberOrders (2 bytes) */ Stream_Seek_UINT16(s); /* pad2OctetsB (2 bytes) */ while (numberOrders > 0) { if (!update_recv_order(update, s)) { WLog_ERR(TAG, 
"update_recv_order() failed"); return FALSE; } numberOrders--; } return TRUE; } static BOOL update_read_bitmap_data(rdpUpdate* update, wStream* s, BITMAP_DATA* bitmapData) { WINPR_UNUSED(update); if (Stream_GetRemainingLength(s) < 18) return FALSE; Stream_Read_UINT16(s, bitmapData->destLeft); Stream_Read_UINT16(s, bitmapData->destTop); Stream_Read_UINT16(s, bitmapData->destRight); Stream_Read_UINT16(s, bitmapData->destBottom); Stream_Read_UINT16(s, bitmapData->width); Stream_Read_UINT16(s, bitmapData->height); Stream_Read_UINT16(s, bitmapData->bitsPerPixel); Stream_Read_UINT16(s, bitmapData->flags); Stream_Read_UINT16(s, bitmapData->bitmapLength); if (bitmapData->flags & BITMAP_COMPRESSION) { if (!(bitmapData->flags & NO_BITMAP_COMPRESSION_HDR)) { Stream_Read_UINT16(s, bitmapData->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ bitmapData->bitmapLength = bitmapData->cbCompMainBodySize; } bitmapData->compressed = TRUE; } else bitmapData->compressed = FALSE; if (Stream_GetRemainingLength(s) < bitmapData->bitmapLength) return FALSE; if (bitmapData->bitmapLength > 0) { bitmapData->bitmapDataStream = malloc(bitmapData->bitmapLength); if (!bitmapData->bitmapDataStream) return FALSE; memcpy(bitmapData->bitmapDataStream, Stream_Pointer(s), bitmapData->bitmapLength); Stream_Seek(s, bitmapData->bitmapLength); } return TRUE; } static BOOL update_write_bitmap_data(rdpUpdate* update, wStream* s, BITMAP_DATA* bitmapData) { if (!Stream_EnsureRemainingCapacity(s, 64 + bitmapData->bitmapLength)) return FALSE; if (update->autoCalculateBitmapData) { bitmapData->flags = 0; bitmapData->cbCompFirstRowSize = 0; if (bitmapData->compressed) bitmapData->flags |= BITMAP_COMPRESSION; if 
(update->context->settings->NoBitmapCompressionHeader) { bitmapData->flags |= NO_BITMAP_COMPRESSION_HDR; bitmapData->cbCompMainBodySize = bitmapData->bitmapLength; } } Stream_Write_UINT16(s, bitmapData->destLeft); Stream_Write_UINT16(s, bitmapData->destTop); Stream_Write_UINT16(s, bitmapData->destRight); Stream_Write_UINT16(s, bitmapData->destBottom); Stream_Write_UINT16(s, bitmapData->width); Stream_Write_UINT16(s, bitmapData->height); Stream_Write_UINT16(s, bitmapData->bitsPerPixel); Stream_Write_UINT16(s, bitmapData->flags); Stream_Write_UINT16(s, bitmapData->bitmapLength); if (bitmapData->flags & BITMAP_COMPRESSION) { if (!(bitmapData->flags & NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16(s, bitmapData->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ } Stream_Write(s, bitmapData->bitmapDataStream, bitmapData->bitmapLength); } else { Stream_Write(s, bitmapData->bitmapDataStream, bitmapData->bitmapLength); } return TRUE; } BITMAP_UPDATE* update_read_bitmap_update(rdpUpdate* update, wStream* s) { UINT32 i; BITMAP_UPDATE* bitmapUpdate = calloc(1, sizeof(BITMAP_UPDATE)); if (!bitmapUpdate) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */ WLog_Print(update->log, WLOG_TRACE, "BitmapUpdate: %" PRIu32 "", bitmapUpdate->number); if (bitmapUpdate->number > bitmapUpdate->count) { UINT32 count = bitmapUpdate->number * 2; BITMAP_DATA* newdata = (BITMAP_DATA*)realloc(bitmapUpdate->rectangles, sizeof(BITMAP_DATA) * count); if (!newdata) goto fail; bitmapUpdate->rectangles = newdata; ZeroMemory(&bitmapUpdate->rectangles[bitmapUpdate->count], sizeof(BITMAP_DATA) * (count - bitmapUpdate->count)); bitmapUpdate->count = count; } 
/* rectangles */ for (i = 0; i < bitmapUpdate->number; i++) { if (!update_read_bitmap_data(update, s, &bitmapUpdate->rectangles[i])) goto fail; } return bitmapUpdate; fail: free_bitmap_update(update->context, bitmapUpdate); return NULL; } static BOOL update_write_bitmap_update(rdpUpdate* update, wStream* s, const BITMAP_UPDATE* bitmapUpdate) { int i; if (!Stream_EnsureRemainingCapacity(s, 32)) return FALSE; Stream_Write_UINT16(s, UPDATE_TYPE_BITMAP); /* updateType */ Stream_Write_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */ /* rectangles */ for (i = 0; i < (int)bitmapUpdate->number; i++) { if (!update_write_bitmap_data(update, s, &bitmapUpdate->rectangles[i])) return FALSE; } return TRUE; } PALETTE_UPDATE* update_read_palette(rdpUpdate* update, wStream* s) { int i; PALETTE_ENTRY* entry; PALETTE_UPDATE* palette_update = calloc(1, sizeof(PALETTE_UPDATE)); if (!palette_update) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ Stream_Read_UINT32(s, palette_update->number); /* numberColors (4 bytes), must be set to 256 */ if (palette_update->number > 256) palette_update->number = 256; if (Stream_GetRemainingLength(s) < palette_update->number * 3) goto fail; /* paletteEntries */ for (i = 0; i < (int)palette_update->number; i++) { entry = &palette_update->entries[i]; Stream_Read_UINT8(s, entry->red); Stream_Read_UINT8(s, entry->green); Stream_Read_UINT8(s, entry->blue); } return palette_update; fail: free_palette_update(update->context, palette_update); return NULL; } static void update_read_synchronize(rdpUpdate* update, wStream* s) { WINPR_UNUSED(update); Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ /** * The Synchronize Update is an artifact from the * T.128 protocol and should be ignored. 
*/ } static BOOL update_read_play_sound(wStream* s, PLAY_SOUND_UPDATE* play_sound) { if (Stream_GetRemainingLength(s) < 8) return FALSE; Stream_Read_UINT32(s, play_sound->duration); /* duration (4 bytes) */ Stream_Read_UINT32(s, play_sound->frequency); /* frequency (4 bytes) */ return TRUE; } BOOL update_recv_play_sound(rdpUpdate* update, wStream* s) { PLAY_SOUND_UPDATE play_sound; if (!update_read_play_sound(s, &play_sound)) return FALSE; return IFCALLRESULT(FALSE, update->PlaySound, update->context, &play_sound); } POINTER_POSITION_UPDATE* update_read_pointer_position(rdpUpdate* update, wStream* s) { POINTER_POSITION_UPDATE* pointer_position = calloc(1, sizeof(POINTER_POSITION_UPDATE)); if (!pointer_position) goto fail; if (Stream_GetRemainingLength(s) < 4) goto fail; Stream_Read_UINT16(s, pointer_position->xPos); /* xPos (2 bytes) */ Stream_Read_UINT16(s, pointer_position->yPos); /* yPos (2 bytes) */ return pointer_position; fail: free_pointer_position_update(update->context, pointer_position); return NULL; } POINTER_SYSTEM_UPDATE* update_read_pointer_system(rdpUpdate* update, wStream* s) { POINTER_SYSTEM_UPDATE* pointer_system = calloc(1, sizeof(POINTER_SYSTEM_UPDATE)); if (!pointer_system) goto fail; if (Stream_GetRemainingLength(s) < 4) goto fail; Stream_Read_UINT32(s, pointer_system->type); /* systemPointerType (4 bytes) */ return pointer_system; fail: free_pointer_system_update(update->context, pointer_system); return NULL; } static BOOL _update_read_pointer_color(wStream* s, POINTER_COLOR_UPDATE* pointer_color, BYTE xorBpp) { BYTE* newMask; UINT32 scanlineSize; if (!pointer_color) goto fail; if (Stream_GetRemainingLength(s) < 14) goto fail; Stream_Read_UINT16(s, pointer_color->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, pointer_color->xPos); /* xPos (2 bytes) */ Stream_Read_UINT16(s, pointer_color->yPos); /* yPos (2 bytes) */ /** * As stated in 2.2.9.1.1.4.4 Color Pointer Update: * The maximum allowed pointer width/height is 96 pixels if 
the client indicated support * for large pointers by setting the LARGE_POINTER_FLAG (0x00000001) in the Large * Pointer Capability Set (section 2.2.7.2.7). If the LARGE_POINTER_FLAG was not * set, the maximum allowed pointer width/height is 32 pixels. * * So we check for a maximum of 96 for CVE-2014-0250. */ Stream_Read_UINT16(s, pointer_color->width); /* width (2 bytes) */ Stream_Read_UINT16(s, pointer_color->height); /* height (2 bytes) */ if ((pointer_color->width > 96) || (pointer_color->height > 96)) goto fail; Stream_Read_UINT16(s, pointer_color->lengthAndMask); /* lengthAndMask (2 bytes) */ Stream_Read_UINT16(s, pointer_color->lengthXorMask); /* lengthXorMask (2 bytes) */ /** * There does not seem to be any documentation on why * xPos / yPos can be larger than width / height * so it is missing in documentation or a bug in implementation * 2.2.9.1.1.4.4 Color Pointer Update (TS_COLORPOINTERATTRIBUTE) */ if (pointer_color->xPos >= pointer_color->width) pointer_color->xPos = 0; if (pointer_color->yPos >= pointer_color->height) pointer_color->yPos = 0; if (pointer_color->lengthXorMask > 0) { /** * Spec states that: * * xorMaskData (variable): A variable-length array of bytes. Contains the 24-bpp, bottom-up * XOR mask scan-line data. The XOR mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 3x3 pixel cursor is being sent, then each scan-line will * consume 10 bytes (3 pixels per scan-line multiplied by 3 bytes per pixel, rounded up to * the next even number of bytes). * * In fact instead of 24-bpp, the bpp parameter is given by the containing packet. 
*/ if (Stream_GetRemainingLength(s) < pointer_color->lengthXorMask) goto fail; scanlineSize = (7 + xorBpp * pointer_color->width) / 8; scanlineSize = ((scanlineSize + 1) / 2) * 2; if (scanlineSize * pointer_color->height != pointer_color->lengthXorMask) { WLog_ERR(TAG, "invalid lengthXorMask: width=%" PRIu32 " height=%" PRIu32 ", %" PRIu32 " instead of %" PRIu32 "", pointer_color->width, pointer_color->height, pointer_color->lengthXorMask, scanlineSize * pointer_color->height); goto fail; } newMask = realloc(pointer_color->xorMaskData, pointer_color->lengthXorMask); if (!newMask) goto fail; pointer_color->xorMaskData = newMask; Stream_Read(s, pointer_color->xorMaskData, pointer_color->lengthXorMask); } if (pointer_color->lengthAndMask > 0) { /** * andMaskData (variable): A variable-length array of bytes. Contains the 1-bpp, bottom-up * AND mask scan-line data. The AND mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 7x7 pixel cursor is being sent, then each scan-line will * consume 2 bytes (7 pixels per scan-line multiplied by 1 bpp, rounded up to the next even * number of bytes). 
*/ if (Stream_GetRemainingLength(s) < pointer_color->lengthAndMask) goto fail; scanlineSize = ((7 + pointer_color->width) / 8); scanlineSize = ((1 + scanlineSize) / 2) * 2; if (scanlineSize * pointer_color->height != pointer_color->lengthAndMask) { WLog_ERR(TAG, "invalid lengthAndMask: %" PRIu32 " instead of %" PRIu32 "", pointer_color->lengthAndMask, scanlineSize * pointer_color->height); goto fail; } newMask = realloc(pointer_color->andMaskData, pointer_color->lengthAndMask); if (!newMask) goto fail; pointer_color->andMaskData = newMask; Stream_Read(s, pointer_color->andMaskData, pointer_color->lengthAndMask); } if (Stream_GetRemainingLength(s) > 0) Stream_Seek_UINT8(s); /* pad (1 byte) */ return TRUE; fail: return FALSE; } POINTER_COLOR_UPDATE* update_read_pointer_color(rdpUpdate* update, wStream* s, BYTE xorBpp) { POINTER_COLOR_UPDATE* pointer_color = calloc(1, sizeof(POINTER_COLOR_UPDATE)); if (!pointer_color) goto fail; if (!_update_read_pointer_color(s, pointer_color, xorBpp)) goto fail; return pointer_color; fail: free_pointer_color_update(update->context, pointer_color); return NULL; } static BOOL _update_read_pointer_large(wStream* s, POINTER_LARGE_UPDATE* pointer) { BYTE* newMask; UINT32 scanlineSize; if (!pointer) goto fail; if (Stream_GetRemainingLength(s) < 14) goto fail; Stream_Read_UINT16(s, pointer->xorBpp); Stream_Read_UINT16(s, pointer->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, pointer->hotSpotX); /* xPos (2 bytes) */ Stream_Read_UINT16(s, pointer->hotSpotY); /* yPos (2 bytes) */ Stream_Read_UINT16(s, pointer->width); /* width (2 bytes) */ Stream_Read_UINT16(s, pointer->height); /* height (2 bytes) */ if ((pointer->width > 384) || (pointer->height > 384)) goto fail; Stream_Read_UINT16(s, pointer->lengthAndMask); /* lengthAndMask (2 bytes) */ Stream_Read_UINT16(s, pointer->lengthXorMask); /* lengthXorMask (2 bytes) */ if (pointer->hotSpotX >= pointer->width) pointer->hotSpotX = 0; if (pointer->hotSpotY >= pointer->height) 
pointer->hotSpotY = 0; if (pointer->lengthXorMask > 0) { /** * Spec states that: * * xorMaskData (variable): A variable-length array of bytes. Contains the 24-bpp, bottom-up * XOR mask scan-line data. The XOR mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 3x3 pixel cursor is being sent, then each scan-line will * consume 10 bytes (3 pixels per scan-line multiplied by 3 bytes per pixel, rounded up to * the next even number of bytes). * * In fact instead of 24-bpp, the bpp parameter is given by the containing packet. */ if (Stream_GetRemainingLength(s) < pointer->lengthXorMask) goto fail; scanlineSize = (7 + pointer->xorBpp * pointer->width) / 8; scanlineSize = ((scanlineSize + 1) / 2) * 2; if (scanlineSize * pointer->height != pointer->lengthXorMask) { WLog_ERR(TAG, "invalid lengthXorMask: width=%" PRIu32 " height=%" PRIu32 ", %" PRIu32 " instead of %" PRIu32 "", pointer->width, pointer->height, pointer->lengthXorMask, scanlineSize * pointer->height); goto fail; } newMask = realloc(pointer->xorMaskData, pointer->lengthXorMask); if (!newMask) goto fail; pointer->xorMaskData = newMask; Stream_Read(s, pointer->xorMaskData, pointer->lengthXorMask); } if (pointer->lengthAndMask > 0) { /** * andMaskData (variable): A variable-length array of bytes. Contains the 1-bpp, bottom-up * AND mask scan-line data. The AND mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 7x7 pixel cursor is being sent, then each scan-line will * consume 2 bytes (7 pixels per scan-line multiplied by 1 bpp, rounded up to the next even * number of bytes). 
*/ if (Stream_GetRemainingLength(s) < pointer->lengthAndMask) goto fail; scanlineSize = ((7 + pointer->width) / 8); scanlineSize = ((1 + scanlineSize) / 2) * 2; if (scanlineSize * pointer->height != pointer->lengthAndMask) { WLog_ERR(TAG, "invalid lengthAndMask: %" PRIu32 " instead of %" PRIu32 "", pointer->lengthAndMask, scanlineSize * pointer->height); goto fail; } newMask = realloc(pointer->andMaskData, pointer->lengthAndMask); if (!newMask) goto fail; pointer->andMaskData = newMask; Stream_Read(s, pointer->andMaskData, pointer->lengthAndMask); } if (Stream_GetRemainingLength(s) > 0) Stream_Seek_UINT8(s); /* pad (1 byte) */ return TRUE; fail: return FALSE; } POINTER_LARGE_UPDATE* update_read_pointer_large(rdpUpdate* update, wStream* s) { POINTER_LARGE_UPDATE* pointer = calloc(1, sizeof(POINTER_LARGE_UPDATE)); if (!pointer) goto fail; if (!_update_read_pointer_large(s, pointer)) goto fail; return pointer; fail: free_pointer_large_update(update->context, pointer); return NULL; } POINTER_NEW_UPDATE* update_read_pointer_new(rdpUpdate* update, wStream* s) { POINTER_NEW_UPDATE* pointer_new = calloc(1, sizeof(POINTER_NEW_UPDATE)); if (!pointer_new) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, pointer_new->xorBpp); /* xorBpp (2 bytes) */ if ((pointer_new->xorBpp < 1) || (pointer_new->xorBpp > 32)) { WLog_ERR(TAG, "invalid xorBpp %" PRIu32 "", pointer_new->xorBpp); goto fail; } if (!_update_read_pointer_color(s, &pointer_new->colorPtrAttr, pointer_new->xorBpp)) /* colorPtrAttr */ goto fail; return pointer_new; fail: free_pointer_new_update(update->context, pointer_new); return NULL; } POINTER_CACHED_UPDATE* update_read_pointer_cached(rdpUpdate* update, wStream* s) { POINTER_CACHED_UPDATE* pointer = calloc(1, sizeof(POINTER_CACHED_UPDATE)); if (!pointer) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, pointer->cacheIndex); /* cacheIndex (2 bytes) */ return pointer; fail: 
free_pointer_cached_update(update->context, pointer); return NULL; } BOOL update_recv_pointer(rdpUpdate* update, wStream* s) { BOOL rc = FALSE; UINT16 messageType; rdpContext* context = update->context; rdpPointerUpdate* pointer = update->pointer; if (Stream_GetRemainingLength(s) < 2 + 2) return FALSE; Stream_Read_UINT16(s, messageType); /* messageType (2 bytes) */ Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ switch (messageType) { case PTR_MSG_TYPE_POSITION: { POINTER_POSITION_UPDATE* pointer_position = update_read_pointer_position(update, s); if (pointer_position) { rc = IFCALLRESULT(FALSE, pointer->PointerPosition, context, pointer_position); free_pointer_position_update(context, pointer_position); } } break; case PTR_MSG_TYPE_SYSTEM: { POINTER_SYSTEM_UPDATE* pointer_system = update_read_pointer_system(update, s); if (pointer_system) { rc = IFCALLRESULT(FALSE, pointer->PointerSystem, context, pointer_system); free_pointer_system_update(context, pointer_system); } } break; case PTR_MSG_TYPE_COLOR: { POINTER_COLOR_UPDATE* pointer_color = update_read_pointer_color(update, s, 24); if (pointer_color) { rc = IFCALLRESULT(FALSE, pointer->PointerColor, context, pointer_color); free_pointer_color_update(context, pointer_color); } } break; case PTR_MSG_TYPE_POINTER_LARGE: { POINTER_LARGE_UPDATE* pointer_large = update_read_pointer_large(update, s); if (pointer_large) { rc = IFCALLRESULT(FALSE, pointer->PointerLarge, context, pointer_large); free_pointer_large_update(context, pointer_large); } } break; case PTR_MSG_TYPE_POINTER: { POINTER_NEW_UPDATE* pointer_new = update_read_pointer_new(update, s); if (pointer_new) { rc = IFCALLRESULT(FALSE, pointer->PointerNew, context, pointer_new); free_pointer_new_update(context, pointer_new); } } break; case PTR_MSG_TYPE_CACHED: { POINTER_CACHED_UPDATE* pointer_cached = update_read_pointer_cached(update, s); if (pointer_cached) { rc = IFCALLRESULT(FALSE, pointer->PointerCached, context, pointer_cached); 
free_pointer_cached_update(context, pointer_cached); } } break; default: break; } return rc; } BOOL update_recv(rdpUpdate* update, wStream* s) { BOOL rc = FALSE; UINT16 updateType; rdpContext* context = update->context; if (Stream_GetRemainingLength(s) < 2) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 2"); return FALSE; } Stream_Read_UINT16(s, updateType); /* updateType (2 bytes) */ WLog_Print(update->log, WLOG_TRACE, "%s Update Data PDU", UPDATE_TYPE_STRINGS[updateType]); if (!update_begin_paint(update)) goto fail; switch (updateType) { case UPDATE_TYPE_ORDERS: rc = update_recv_orders(update, s); break; case UPDATE_TYPE_BITMAP: { BITMAP_UPDATE* bitmap_update = update_read_bitmap_update(update, s); if (!bitmap_update) { WLog_ERR(TAG, "UPDATE_TYPE_BITMAP - update_read_bitmap_update() failed"); goto fail; } rc = IFCALLRESULT(FALSE, update->BitmapUpdate, context, bitmap_update); free_bitmap_update(update->context, bitmap_update); } break; case UPDATE_TYPE_PALETTE: { PALETTE_UPDATE* palette_update = update_read_palette(update, s); if (!palette_update) { WLog_ERR(TAG, "UPDATE_TYPE_PALETTE - update_read_palette() failed"); goto fail; } rc = IFCALLRESULT(FALSE, update->Palette, context, palette_update); free_palette_update(context, palette_update); } break; case UPDATE_TYPE_SYNCHRONIZE: update_read_synchronize(update, s); rc = IFCALLRESULT(TRUE, update->Synchronize, context); break; default: break; } fail: if (!update_end_paint(update)) rc = FALSE; if (!rc) { WLog_ERR(TAG, "UPDATE_TYPE %s [%" PRIu16 "] failed", update_type_to_string(updateType), updateType); return FALSE; } return TRUE; } void update_reset_state(rdpUpdate* update) { rdpPrimaryUpdate* primary = update->primary; rdpAltSecUpdate* altsec = update->altsec; if (primary->fast_glyph.glyphData.aj) { free(primary->fast_glyph.glyphData.aj); primary->fast_glyph.glyphData.aj = NULL; } ZeroMemory(&primary->order_info, sizeof(ORDER_INFO)); ZeroMemory(&primary->dstblt, sizeof(DSTBLT_ORDER)); 
ZeroMemory(&primary->patblt, sizeof(PATBLT_ORDER)); ZeroMemory(&primary->scrblt, sizeof(SCRBLT_ORDER)); ZeroMemory(&primary->opaque_rect, sizeof(OPAQUE_RECT_ORDER)); ZeroMemory(&primary->draw_nine_grid, sizeof(DRAW_NINE_GRID_ORDER)); ZeroMemory(&primary->multi_dstblt, sizeof(MULTI_DSTBLT_ORDER)); ZeroMemory(&primary->multi_patblt, sizeof(MULTI_PATBLT_ORDER)); ZeroMemory(&primary->multi_scrblt, sizeof(MULTI_SCRBLT_ORDER)); ZeroMemory(&primary->multi_opaque_rect, sizeof(MULTI_OPAQUE_RECT_ORDER)); ZeroMemory(&primary->multi_draw_nine_grid, sizeof(MULTI_DRAW_NINE_GRID_ORDER)); ZeroMemory(&primary->line_to, sizeof(LINE_TO_ORDER)); ZeroMemory(&primary->polyline, sizeof(POLYLINE_ORDER)); ZeroMemory(&primary->memblt, sizeof(MEMBLT_ORDER)); ZeroMemory(&primary->mem3blt, sizeof(MEM3BLT_ORDER)); ZeroMemory(&primary->save_bitmap, sizeof(SAVE_BITMAP_ORDER)); ZeroMemory(&primary->glyph_index, sizeof(GLYPH_INDEX_ORDER)); ZeroMemory(&primary->fast_index, sizeof(FAST_INDEX_ORDER)); ZeroMemory(&primary->fast_glyph, sizeof(FAST_GLYPH_ORDER)); ZeroMemory(&primary->polygon_sc, sizeof(POLYGON_SC_ORDER)); ZeroMemory(&primary->polygon_cb, sizeof(POLYGON_CB_ORDER)); ZeroMemory(&primary->ellipse_sc, sizeof(ELLIPSE_SC_ORDER)); ZeroMemory(&primary->ellipse_cb, sizeof(ELLIPSE_CB_ORDER)); primary->order_info.orderType = ORDER_TYPE_PATBLT; if (!update->initialState) { altsec->switch_surface.bitmapId = SCREEN_BITMAP_SURFACE; IFCALL(altsec->SwitchSurface, update->context, &(altsec->switch_surface)); } } BOOL update_post_connect(rdpUpdate* update) { update->asynchronous = update->context->settings->AsyncUpdate; if (update->asynchronous) if (!(update->proxy = update_message_proxy_new(update))) return FALSE; update->altsec->switch_surface.bitmapId = SCREEN_BITMAP_SURFACE; IFCALL(update->altsec->SwitchSurface, update->context, &(update->altsec->switch_surface)); update->initialState = FALSE; return TRUE; } void update_post_disconnect(rdpUpdate* update) { update->asynchronous = 
update->context->settings->AsyncUpdate; if (update->asynchronous) update_message_proxy_free(update->proxy); update->initialState = TRUE; } static BOOL _update_begin_paint(rdpContext* context) { wStream* s; rdpUpdate* update = context->update; if (update->us) { if (!update_end_paint(update)) return FALSE; } s = fastpath_update_pdu_init_new(context->rdp->fastpath); if (!s) return FALSE; Stream_SealLength(s); Stream_Seek(s, 2); /* numberOrders (2 bytes) */ update->combineUpdates = TRUE; update->numberOrders = 0; update->us = s; return TRUE; } static BOOL _update_end_paint(rdpContext* context) { wStream* s; int headerLength; rdpUpdate* update = context->update; if (!update->us) return FALSE; s = update->us; headerLength = Stream_Length(s); Stream_SealLength(s); Stream_SetPosition(s, headerLength); Stream_Write_UINT16(s, update->numberOrders); /* numberOrders (2 bytes) */ Stream_SetPosition(s, Stream_Length(s)); if (update->numberOrders > 0) { WLog_DBG(TAG, "sending %" PRIu16 " orders", update->numberOrders); fastpath_send_update_pdu(context->rdp->fastpath, FASTPATH_UPDATETYPE_ORDERS, s, FALSE); } update->combineUpdates = FALSE; update->numberOrders = 0; update->us = NULL; Stream_Free(s, TRUE); return TRUE; } static void update_flush(rdpContext* context) { rdpUpdate* update = context->update; if (update->numberOrders > 0) { update_end_paint(update); update_begin_paint(update); } } static void update_force_flush(rdpContext* context) { update_flush(context); } static BOOL update_check_flush(rdpContext* context, int size) { wStream* s; rdpUpdate* update = context->update; s = update->us; if (!update->us) { update_begin_paint(update); return FALSE; } if (Stream_GetPosition(s) + size + 64 >= 0x3FFF) { update_flush(context); return TRUE; } return FALSE; } static BOOL update_set_bounds(rdpContext* context, const rdpBounds* bounds) { rdpUpdate* update = context->update; CopyMemory(&update->previousBounds, &update->currentBounds, sizeof(rdpBounds)); if (!bounds) 
ZeroMemory(&update->currentBounds, sizeof(rdpBounds)); else CopyMemory(&update->currentBounds, bounds, sizeof(rdpBounds)); return TRUE; } static BOOL update_bounds_is_null(rdpBounds* bounds) { if ((bounds->left == 0) && (bounds->top == 0) && (bounds->right == 0) && (bounds->bottom == 0)) return TRUE; return FALSE; } static BOOL update_bounds_equals(rdpBounds* bounds1, rdpBounds* bounds2) { if ((bounds1->left == bounds2->left) && (bounds1->top == bounds2->top) && (bounds1->right == bounds2->right) && (bounds1->bottom == bounds2->bottom)) return TRUE; return FALSE; } static int update_prepare_bounds(rdpContext* context, ORDER_INFO* orderInfo) { int length = 0; rdpUpdate* update = context->update; orderInfo->boundsFlags = 0; if (update_bounds_is_null(&update->currentBounds)) return 0; orderInfo->controlFlags |= ORDER_BOUNDS; if (update_bounds_equals(&update->previousBounds, &update->currentBounds)) { orderInfo->controlFlags |= ORDER_ZERO_BOUNDS_DELTAS; return 0; } else { length += 1; if (update->previousBounds.left != update->currentBounds.left) { orderInfo->bounds.left = update->currentBounds.left; orderInfo->boundsFlags |= BOUND_LEFT; length += 2; } if (update->previousBounds.top != update->currentBounds.top) { orderInfo->bounds.top = update->currentBounds.top; orderInfo->boundsFlags |= BOUND_TOP; length += 2; } if (update->previousBounds.right != update->currentBounds.right) { orderInfo->bounds.right = update->currentBounds.right; orderInfo->boundsFlags |= BOUND_RIGHT; length += 2; } if (update->previousBounds.bottom != update->currentBounds.bottom) { orderInfo->bounds.bottom = update->currentBounds.bottom; orderInfo->boundsFlags |= BOUND_BOTTOM; length += 2; } } return length; } static int update_prepare_order_info(rdpContext* context, ORDER_INFO* orderInfo, UINT32 orderType) { int length = 1; orderInfo->fieldFlags = 0; orderInfo->orderType = orderType; orderInfo->controlFlags = ORDER_STANDARD; orderInfo->controlFlags |= ORDER_TYPE_CHANGE; length += 1; length += 
PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderInfo->orderType];
	length += update_prepare_bounds(context, orderInfo);
	return length;
}

/* Retroactively write the primary-order header at 'offset' (reserved before
 * the order body was written), then restore the stream position. */
static int update_write_order_info(rdpContext* context, wStream* s, ORDER_INFO* orderInfo,
                                   size_t offset)
{
	size_t position;
	WINPR_UNUSED(context);
	position = Stream_GetPosition(s);
	Stream_SetPosition(s, offset);
	Stream_Write_UINT8(s, orderInfo->controlFlags); /* controlFlags (1 byte) */

	if (orderInfo->controlFlags & ORDER_TYPE_CHANGE)
		Stream_Write_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */

	update_write_field_flags(s, orderInfo->fieldFlags, orderInfo->controlFlags,
	                         PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderInfo->orderType]);
	update_write_bounds(s, orderInfo);
	Stream_SetPosition(s, position);
	return 0;
}

/* Serialize a Refresh Rect PDU body: count header plus one RECTANGLE_16 per
 * area. */
static void update_write_refresh_rect(wStream* s, BYTE count, const RECTANGLE_16* areas)
{
	int i;
	Stream_Write_UINT8(s, count); /* numberOfAreas (1 byte) */
	Stream_Seek(s, 3);            /* pad3Octets (3 bytes) */

	for (i = 0; i < count; i++)
	{
		Stream_Write_UINT16(s, areas[i].left);   /* left (2 bytes) */
		Stream_Write_UINT16(s, areas[i].top);    /* top (2 bytes) */
		Stream_Write_UINT16(s, areas[i].right);  /* right (2 bytes) */
		Stream_Write_UINT16(s, areas[i].bottom); /* bottom (2 bytes) */
	}
}

/* Send a Refresh Rect data PDU, a no-op (TRUE) when the peer did not
 * advertise RefreshRect support. */
static BOOL update_send_refresh_rect(rdpContext* context, BYTE count, const RECTANGLE_16* areas)
{
	rdpRdp* rdp = context->rdp;

	if (rdp->settings->RefreshRect)
	{
		wStream* s = rdp_data_pdu_init(rdp);

		if (!s)
			return FALSE;

		update_write_refresh_rect(s, count, areas);
		return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_REFRESH_RECT, rdp->mcs->userId);
	}

	return TRUE;
}

/* Serialize a Suppress Output PDU body; the rectangle is only present when
 * display updates are allowed. */
static void update_write_suppress_output(wStream* s, BYTE allow, const RECTANGLE_16* area)
{
	Stream_Write_UINT8(s, allow); /* allowDisplayUpdates (1 byte) */
	/* Use zeros for padding (like mstsc) for compatibility with legacy servers */
	Stream_Zero(s, 3); /* pad3Octets (3 bytes) */

	if (allow > 0)
	{
		Stream_Write_UINT16(s, area->left);   /* left (2 bytes) */
		Stream_Write_UINT16(s, area->top);    /* top (2 bytes) */
		Stream_Write_UINT16(s, area->right);  /* right (2 bytes) */
		Stream_Write_UINT16(s, area->bottom); /* bottom (2 bytes) */
	}
}

/* Send a Suppress Output data PDU, a no-op (TRUE) when the peer did not
 * advertise SuppressOutput support. */
static BOOL update_send_suppress_output(rdpContext* context, BYTE allow, const RECTANGLE_16* area)
{
	rdpRdp* rdp = context->rdp;

	if (rdp->settings->SuppressOutput)
	{
		wStream* s = rdp_data_pdu_init(rdp);

		if (!s)
			return FALSE;

		update_write_suppress_output(s, allow, area);
		return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SUPPRESS_OUTPUT, rdp->mcs->userId);
	}

	return TRUE;
}

/* Copy an already-serialized surface command into a fresh fast-path PDU and
 * transmit it as FASTPATH_UPDATETYPE_SURFCMDS. */
static BOOL update_send_surface_command(rdpContext* context, wStream* s)
{
	wStream* update;
	rdpRdp* rdp = context->rdp;
	BOOL ret;
	update = fastpath_update_pdu_init(rdp->fastpath);

	if (!update)
		return FALSE;

	if (!Stream_EnsureRemainingCapacity(update, Stream_GetPosition(s)))
	{
		ret = FALSE;
		goto out;
	}

	Stream_Write(update, Stream_Buffer(s), Stream_GetPosition(s));
	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, update, FALSE);
out:
	Stream_Release(update);
	return ret;
}

/* Flush pending orders, then send a Surface Bits command as its own
 * fast-path surface-commands PDU. */
static BOOL update_send_surface_bits(rdpContext* context,
                                     const SURFACE_BITS_COMMAND* surfaceBitsCommand)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (!update_write_surfcmd_surface_bits(s, surfaceBitsCommand))
		goto out_fail;

	if (!fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s,
	                              surfaceBitsCommand->skipCompression))
		goto out_fail;

	update_force_flush(context);
	ret = TRUE;
out_fail:
	Stream_Release(s);
	return ret;
}

/* Flush pending orders, then send a surface frame marker (begin/end) as a
 * fast-path surface-commands PDU. */
static BOOL update_send_surface_frame_marker(rdpContext* context,
                                             const SURFACE_FRAME_MARKER* surfaceFrameMarker)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (!update_write_surfcmd_frame_marker(s, surfaceFrameMarker->frameAction,
	                                       surfaceFrameMarker->frameId) ||
	    !fastpath_send_update_pdu(rdp->fastpath,
FASTPATH_UPDATETYPE_SURFCMDS, s, FALSE))
		goto out_fail;

	update_force_flush(context);
	ret = TRUE;
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send surface bits optionally bracketed by BEGIN/END frame markers in a
 * single fast-path surface-commands PDU. */
static BOOL update_send_surface_frame_bits(rdpContext* context, const SURFACE_BITS_COMMAND* cmd,
                                           BOOL first, BOOL last, UINT32 frameId)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (first)
	{
		if (!update_write_surfcmd_frame_marker(s, SURFACECMD_FRAMEACTION_BEGIN, frameId))
			goto out_fail;
	}

	if (!update_write_surfcmd_surface_bits(s, cmd))
		goto out_fail;

	if (last)
	{
		if (!update_write_surfcmd_frame_marker(s, SURFACECMD_FRAMEACTION_END, frameId))
			goto out_fail;
	}

	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s,
	                               cmd->skipCompression);
	update_force_flush(context);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Acknowledge a frame; a no-op (TRUE) unless the peer advertised the Frame
 * Acknowledge capability set. */
static BOOL update_send_frame_acknowledge(rdpContext* context, UINT32 frameId)
{
	rdpRdp* rdp = context->rdp;

	if (rdp->settings->ReceivedCapabilities[CAPSET_TYPE_FRAME_ACKNOWLEDGE])
	{
		wStream* s = rdp_data_pdu_init(rdp);

		if (!s)
			return FALSE;

		Stream_Write_UINT32(s, frameId);
		return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_FRAME_ACKNOWLEDGE, rdp->mcs->userId);
	}

	return TRUE;
}

/* Send a fast-path synchronize update (2 bytes of padding only). */
static BOOL update_send_synchronize(rdpContext* context)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	Stream_Zero(s, 2); /* pad2Octets (2 bytes) */
	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SYNCHRONIZE, s, FALSE);
	Stream_Release(s);
	return ret;
}

/* Desktop resize is signalled by re-running server activation. */
static BOOL update_send_desktop_resize(rdpContext* context)
{
	return rdp_server_reactivate(context->rdp);
}

/* Flush pending orders, then send a fast-path bitmap update. */
static BOOL update_send_bitmap_update(rdpContext* context, const BITMAP_UPDATE* bitmapUpdate)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	rdpUpdate* update = context->update;
	BOOL ret = TRUE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return
FALSE;

	if (!update_write_bitmap_update(update, s, bitmapUpdate) ||
	    !fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_BITMAP, s,
	                              bitmapUpdate->skipCompression))
	{
		ret = FALSE;
		goto out_fail;
	}

	update_force_flush(context);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send a Play Sound data PDU; a no-op (TRUE) unless the peer advertised the
 * Sound capability set. */
static BOOL update_send_play_sound(rdpContext* context, const PLAY_SOUND_UPDATE* play_sound)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;

	if (!rdp->settings->ReceivedCapabilities[CAPSET_TYPE_SOUND])
	{
		return TRUE;
	}

	s = rdp_data_pdu_init(rdp);

	if (!s)
		return FALSE;

	Stream_Write_UINT32(s, play_sound->duration);
	Stream_Write_UINT32(s, play_sound->frequency);
	return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_PLAY_SOUND, rdp->mcs->userId);
}

/**
 * Primary Drawing Orders
 */

/* Queue a DstBlt primary order into the combined orders PDU: reserve the
 * header, write the body, then backfill the header at 'offset'. */
static BOOL update_send_dstblt(rdpContext* context, const DSTBLT_ORDER* dstblt)
{
	wStream* s;
	UINT32 offset;
	UINT32 headerLength;
	ORDER_INFO orderInfo;
	int inf;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_DSTBLT);
	inf = update_approximate_dstblt_order(&orderInfo, dstblt);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_dstblt_order(s, &orderInfo, dstblt))
		return FALSE;

	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a PatBlt primary order into the combined orders PDU. */
static BOOL update_send_patblt(rdpContext* context, PATBLT_ORDER* patblt)
{
	wStream* s;
	size_t offset;
	int headerLength;
	ORDER_INFO orderInfo;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_PATBLT);
	update_check_flush(context, headerLength + update_approximate_patblt_order(&orderInfo, patblt));
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
update_write_patblt_order(s, &orderInfo, patblt); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_scrblt(rdpContext* context, const SCRBLT_ORDER* scrblt) { wStream* s; UINT32 offset; UINT32 headerLength; ORDER_INFO orderInfo; int inf; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_SCRBLT); inf = update_approximate_scrblt_order(&orderInfo, scrblt); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return TRUE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_scrblt_order(s, &orderInfo, scrblt); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_opaque_rect(rdpContext* context, const OPAQUE_RECT_ORDER* opaque_rect) { wStream* s; size_t offset; int headerLength; ORDER_INFO orderInfo; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_OPAQUE_RECT); update_check_flush(context, headerLength + update_approximate_opaque_rect_order(&orderInfo, opaque_rect)); s = update->us; if (!s) return FALSE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_opaque_rect_order(s, &orderInfo, opaque_rect); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_line_to(rdpContext* context, const LINE_TO_ORDER* line_to) { wStream* s; int offset; int headerLength; ORDER_INFO orderInfo; int inf; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_LINE_TO); inf = update_approximate_line_to_order(&orderInfo, line_to); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; offset 
= Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_line_to_order(s, &orderInfo, line_to);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a MemBlt primary order into the combined orders PDU. */
static BOOL update_send_memblt(rdpContext* context, MEMBLT_ORDER* memblt)
{
	wStream* s;
	size_t offset;
	int headerLength;
	ORDER_INFO orderInfo;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_MEMBLT);
	update_check_flush(context, headerLength + update_approximate_memblt_order(&orderInfo, memblt));
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_memblt_order(s, &orderInfo, memblt);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Glyph Index primary order into the combined orders PDU. */
static BOOL update_send_glyph_index(rdpContext* context, GLYPH_INDEX_ORDER* glyph_index)
{
	wStream* s;
	size_t offset;
	int headerLength;
	int inf;
	ORDER_INFO orderInfo;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_GLYPH_INDEX);
	inf = update_approximate_glyph_index_order(&orderInfo, glyph_index);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_glyph_index_order(s, &orderInfo, glyph_index);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/*
 * Secondary Drawing Orders
 */

/* Queue a Cache Bitmap (rev1) secondary order; the 6-byte secondary header is
 * reserved first and backfilled once the body size is known. */
static BOOL update_send_cache_bitmap(rdpContext* context, const CACHE_BITMAP_ORDER* cache_bitmap)
{
	wStream* s;
	size_t bm, em;
	BYTE orderType;
	int headerLength;
	int inf;
	UINT16 extraFlags;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	extraFlags = 0;
	headerLength = 6;
	orderType
= cache_bitmap->compressed ? ORDER_TYPE_CACHE_BITMAP_COMPRESSED : ORDER_TYPE_BITMAP_UNCOMPRESSED;
	inf = update_approximate_cache_bitmap_order(cache_bitmap, cache_bitmap->compressed, &extraFlags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_bitmap_order(s, cache_bitmap, cache_bitmap->compressed, &extraFlags))
		return FALSE;

	em = Stream_GetPosition(s);
	/* orderLength excludes the 13-byte fixed portion per the secondary order
	 * header convention. */
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, extraFlags);                      /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, orderType);                        /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Bitmap V2 secondary order (header reserved and backfilled as
 * in update_send_cache_bitmap). */
static BOOL update_send_cache_bitmap_v2(rdpContext* context, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2)
{
	wStream* s;
	size_t bm, em;
	BYTE orderType;
	int headerLength;
	UINT16 extraFlags;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	extraFlags = 0;
	headerLength = 6;
	orderType = cache_bitmap_v2->compressed ?
ORDER_TYPE_BITMAP_COMPRESSED_V2 : ORDER_TYPE_BITMAP_UNCOMPRESSED_V2;

	if (context->settings->NoBitmapCompressionHeader)
		cache_bitmap_v2->flags |= CBR2_NO_BITMAP_COMPRESSION_HDR;

	update_check_flush(context, headerLength + update_approximate_cache_bitmap_v2_order(
	                                               cache_bitmap_v2, cache_bitmap_v2->compressed, &extraFlags));
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_bitmap_v2_order(s, cache_bitmap_v2, cache_bitmap_v2->compressed,
	                                        &extraFlags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, extraFlags);                      /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, orderType);                        /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Bitmap V3 secondary order (always compressed order type). */
static BOOL update_send_cache_bitmap_v3(rdpContext* context, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3)
{
	wStream* s;
	size_t bm, em;
	BYTE orderType;
	int headerLength;
	UINT16 extraFlags;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	extraFlags = 0;
	headerLength = 6;
	orderType = ORDER_TYPE_BITMAP_COMPRESSED_V3;
	update_check_flush(context, headerLength + update_approximate_cache_bitmap_v3_order(
	                                               cache_bitmap_v3, &extraFlags));
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_bitmap_v3_order(s, cache_bitmap_v3, &extraFlags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, extraFlags);                      /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, orderType);                        /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Color Table secondary order. */
static BOOL update_send_cache_color_table(rdpContext* context,
                                          const CACHE_COLOR_TABLE_ORDER* cache_color_table)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_color_table_order(cache_color_table, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_color_table_order(s, cache_color_table, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags);                           /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_COLOR_TABLE);     /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Glyph (rev1) secondary order. */
static BOOL update_send_cache_glyph(rdpContext* context, const CACHE_GLYPH_ORDER* cache_glyph)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_glyph_order(cache_glyph, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_glyph_order(s, cache_glyph, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
Stream_Write_UINT16(s, orderLength);            /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags);                  /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_GLYPH);  /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Glyph V2 secondary order.  The order type written is still
 * ORDER_TYPE_CACHE_GLYPH — presumably the REV2 form is signalled through the
 * extraFlags produced by update_approximate_cache_glyph_v2_order, as in
 * MS-RDPEGDI; TODO confirm. */
static BOOL update_send_cache_glyph_v2(rdpContext* context,
                                       const CACHE_GLYPH_V2_ORDER* cache_glyph_v2)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_glyph_v2_order(s, cache_glyph_v2, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags);                           /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_GLYPH);           /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Brush secondary order. */
static BOOL update_send_cache_brush(rdpContext* context, const CACHE_BRUSH_ORDER* cache_brush)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_brush_order(cache_brush, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_brush_order(s, cache_brush, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags);                           /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_BRUSH);           /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/**
 * Alternate Secondary Drawing Orders
 */

/* Queue a Create Offscreen Bitmap alternate secondary order; only a 1-byte
 * control-flags header is reserved and backfilled. */
static BOOL update_send_create_offscreen_bitmap_order(
    rdpContext* context, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap)
{
	wStream* s;
	size_t bm, em, inf;
	BYTE orderType;
	BYTE controlFlags;
	int headerLength;
	rdpUpdate* update = context->update;
	headerLength = 1;
	orderType = ORDER_TYPE_CREATE_OFFSCREEN_BITMAP;
	controlFlags = ORDER_SECONDARY | (orderType << 2);
	inf = update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_create_offscreen_bitmap_order(s, create_offscreen_bitmap))
		return FALSE;

	em = Stream_GetPosition(s);
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, controlFlags); /* controlFlags (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Switch Surface alternate secondary order. */
static BOOL update_send_switch_surface_order(rdpContext* context,
                                             const SWITCH_SURFACE_ORDER* switch_surface)
{
	wStream* s;
	size_t bm, em, inf;
	BYTE orderType;
	BYTE controlFlags;
	int headerLength;
	rdpUpdate* update;

	if (!context || !switch_surface || !context->update)
		return FALSE;

	update = context->update;
	headerLength = 1;
	orderType = ORDER_TYPE_SWITCH_SURFACE;
	controlFlags = ORDER_SECONDARY | (orderType << 2);
	inf = update_approximate_switch_surface_order(switch_surface);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return
FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_switch_surface_order(s, switch_surface))
		return FALSE;

	em = Stream_GetPosition(s);
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, controlFlags); /* controlFlags (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Send a fast-path system pointer update (hidden vs. default cursor). */
static BOOL update_send_pointer_system(rdpContext* context,
                                       const POINTER_SYSTEM_UPDATE* pointer_system)
{
	wStream* s;
	BYTE updateCode;
	rdpRdp* rdp = context->rdp;
	BOOL ret;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (pointer_system->type == SYSPTR_NULL)
		updateCode = FASTPATH_UPDATETYPE_PTR_NULL;
	else
		updateCode = FASTPATH_UPDATETYPE_PTR_DEFAULT;

	ret = fastpath_send_update_pdu(rdp->fastpath, updateCode, s, FALSE);
	Stream_Release(s);
	return ret;
}

/* Send a fast-path pointer position update. */
static BOOL update_send_pointer_position(rdpContext* context,
                                         const POINTER_POSITION_UPDATE* pointerPosition)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (!Stream_EnsureRemainingCapacity(s, 16))
		goto out_fail;

	Stream_Write_UINT16(s, pointerPosition->xPos); /* xPos (2 bytes) */
	Stream_Write_UINT16(s, pointerPosition->yPos); /* yPos (2 bytes) */
	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_PTR_POSITION, s, FALSE);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Serialize a TS_COLORPOINTERATTRIBUTE: fixed header, xorMaskData, then
 * andMaskData, then one pad byte. */
static BOOL update_write_pointer_color(wStream* s, const POINTER_COLOR_UPDATE* pointer_color)
{
	if (!Stream_EnsureRemainingCapacity(s, 32 + pointer_color->lengthAndMask +
	                                           pointer_color->lengthXorMask))
		return FALSE;

	Stream_Write_UINT16(s, pointer_color->cacheIndex);
	Stream_Write_UINT16(s, pointer_color->xPos);
	Stream_Write_UINT16(s, pointer_color->yPos);
	Stream_Write_UINT16(s, pointer_color->width);
	Stream_Write_UINT16(s, pointer_color->height);
	Stream_Write_UINT16(s, pointer_color->lengthAndMask);
	Stream_Write_UINT16(s, pointer_color->lengthXorMask);

	if (pointer_color->lengthXorMask > 0)
		Stream_Write(s, pointer_color->xorMaskData,
pointer_color->lengthXorMask);

	if (pointer_color->lengthAndMask > 0)
		Stream_Write(s, pointer_color->andMaskData, pointer_color->lengthAndMask);

	Stream_Write_UINT8(s, 0); /* pad (1 byte) */
	return TRUE;
}

/* Send a fast-path color pointer update. */
static BOOL update_send_pointer_color(rdpContext* context,
                                      const POINTER_COLOR_UPDATE* pointer_color)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (!update_write_pointer_color(s, pointer_color))
		goto out_fail;

	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_COLOR, s, FALSE);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Serialize a large pointer attribute (32-bit mask lengths, xorBpp first). */
static BOOL update_write_pointer_large(wStream* s, const POINTER_LARGE_UPDATE* pointer)
{
	if (!Stream_EnsureRemainingCapacity(s, 32 + pointer->lengthAndMask + pointer->lengthXorMask))
		return FALSE;

	Stream_Write_UINT16(s, pointer->xorBpp);
	Stream_Write_UINT16(s, pointer->cacheIndex);
	Stream_Write_UINT16(s, pointer->hotSpotX);
	Stream_Write_UINT16(s, pointer->hotSpotY);
	Stream_Write_UINT16(s, pointer->width);
	Stream_Write_UINT16(s, pointer->height);
	Stream_Write_UINT32(s, pointer->lengthAndMask);
	Stream_Write_UINT32(s, pointer->lengthXorMask);
	Stream_Write(s, pointer->xorMaskData, pointer->lengthXorMask);
	Stream_Write(s, pointer->andMaskData, pointer->lengthAndMask);
	Stream_Write_UINT8(s, 0); /* pad (1 byte) */
	return TRUE;
}

/* Send a fast-path large pointer update. */
static BOOL update_send_pointer_large(rdpContext* context, const POINTER_LARGE_UPDATE* pointer)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (!update_write_pointer_large(s, pointer))
		goto out_fail;

	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_LARGE_POINTER, s, FALSE);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send a fast-path new pointer update (xorBpp followed by the color pointer
 * attribute). */
static BOOL update_send_pointer_new(rdpContext* context, const POINTER_NEW_UPDATE* pointer_new)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;
if (!Stream_EnsureRemainingCapacity(s, 16))
		goto out_fail;

	Stream_Write_UINT16(s, pointer_new->xorBpp); /* xorBpp (2 bytes) */
	/* NOTE(review): the result of update_write_pointer_color is not checked
	 * here, unlike in update_send_pointer_color — confirm. */
	update_write_pointer_color(s, &pointer_new->colorPtrAttr);
	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_POINTER, s, FALSE);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send a fast-path cached pointer update (cache index only). */
static BOOL update_send_pointer_cached(rdpContext* context,
                                       const POINTER_CACHED_UPDATE* pointer_cached)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	Stream_Write_UINT16(s, pointer_cached->cacheIndex); /* cacheIndex (2 bytes) */
	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_CACHED, s, FALSE);
	Stream_Release(s);
	return ret;
}

/* Parse a client Refresh Rect PDU: validate the declared area count against
 * the remaining stream length before reading, then invoke the RefreshRect
 * callback when the capability is enabled. */
BOOL update_read_refresh_rect(rdpUpdate* update, wStream* s)
{
	int index;
	BYTE numberOfAreas;
	RECTANGLE_16* areas;

	if (Stream_GetRemainingLength(s) < 4)
		return FALSE;

	Stream_Read_UINT8(s, numberOfAreas);
	Stream_Seek(s, 3); /* pad3Octects */

	if (Stream_GetRemainingLength(s) < ((size_t)numberOfAreas * 4 * 2))
		return FALSE;

	areas = (RECTANGLE_16*)calloc(numberOfAreas, sizeof(RECTANGLE_16));

	if (!areas)
		return FALSE;

	for (index = 0; index < numberOfAreas; index++)
	{
		Stream_Read_UINT16(s, areas[index].left);
		Stream_Read_UINT16(s, areas[index].top);
		Stream_Read_UINT16(s, areas[index].right);
		Stream_Read_UINT16(s, areas[index].bottom);
	}

	if (update->context->settings->RefreshRect)
		IFCALL(update->RefreshRect, update->context, numberOfAreas, areas);
	else
		WLog_Print(update->log, WLOG_WARN, "ignoring refresh rect request from client");

	free(areas);
	return TRUE;
}

/* Parse a client Suppress Output PDU; the rectangle is only present when
 * display updates are allowed. */
BOOL update_read_suppress_output(rdpUpdate* update, wStream* s)
{
	RECTANGLE_16* prect = NULL;
	RECTANGLE_16 rect = { 0 };
	BYTE allowDisplayUpdates;

	if (Stream_GetRemainingLength(s) < 4)
		return FALSE;

	Stream_Read_UINT8(s, allowDisplayUpdates);
	Stream_Seek(s, 3); /* pad3Octects */

	if (allowDisplayUpdates > 0)
	{
		if (Stream_GetRemainingLength(s) < sizeof(RECTANGLE_16))
			return FALSE;
Stream_Read_UINT16(s, rect.left);
		Stream_Read_UINT16(s, rect.top);
		Stream_Read_UINT16(s, rect.right);
		Stream_Read_UINT16(s, rect.bottom);
		prect = &rect;
	}

	if (update->context->settings->SuppressOutput)
		IFCALL(update->SuppressOutput, update->context, allowDisplayUpdates, prect);
	else
		WLog_Print(update->log, WLOG_WARN, "ignoring suppress output request from client");

	return TRUE;
}

/* Send a Set Keyboard Indicators data PDU (LED state). */
static BOOL update_send_set_keyboard_indicators(rdpContext* context, UINT16 led_flags)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	s = rdp_data_pdu_init(rdp);

	if (!s)
		return FALSE;

	Stream_Write_UINT16(s, 0); /* unitId should be 0 according to MS-RDPBCGR 2.2.8.2.1.1 */
	Stream_Write_UINT16(s, led_flags); /* ledFlags (2 bytes) */
	return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SET_KEYBOARD_INDICATORS, rdp->mcs->userId);
}

/* Send a Set Keyboard IME Status data PDU. */
static BOOL update_send_set_keyboard_ime_status(rdpContext* context, UINT16 imeId, UINT32 imeState,
                                                UINT32 imeConvMode)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	s = rdp_data_pdu_init(rdp);

	if (!s)
		return FALSE;

	/* unitId should be 0 according to MS-RDPBCGR 2.2.8.2.2.1 */
	Stream_Write_UINT16(s, imeId);
	Stream_Write_UINT32(s, imeState);
	Stream_Write_UINT32(s, imeConvMode);
	return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SET_KEYBOARD_IME_STATUS, rdp->mcs->userId);
}

/* Compute the serialized size of a new/existing window order from the fields
 * selected in orderInfo->fieldFlags (must mirror
 * update_send_new_or_existing_window exactly). */
static UINT16 update_calculate_new_or_existing_window(const WINDOW_ORDER_INFO* orderInfo,
                                                      const WINDOW_STATE_ORDER* stateOrder)
{
	UINT16 orderSize = 11;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OWNER) != 0)
		orderSize += 4;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_STYLE) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_SHOW) != 0)
		orderSize += 1;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TITLE) != 0)
		orderSize += 2 + stateOrder->titleInfo.length;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_OFFSET) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_SIZE) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags &
WINDOW_ORDER_FIELD_RESIZE_MARGIN_X) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_Y) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RP_CONTENT) != 0)
		orderSize += 1;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ROOT_PARENT) != 0)
		orderSize += 4;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_OFFSET) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_CLIENT_DELTA) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_SIZE) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_RECTS) != 0)
		orderSize += 2 + stateOrder->numWindowRects * sizeof(RECTANGLE_16);

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VIS_OFFSET) != 0)
		orderSize += 8;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VISIBILITY) != 0)
		orderSize += 2 + stateOrder->numVisibilityRects * sizeof(RECTANGLE_16);

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OVERLAY_DESCRIPTION) != 0)
		orderSize += 2 + stateOrder->OverlayDescription.length;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TASKBAR_BUTTON) != 0)
		orderSize += 1;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ENFORCE_SERVER_ZORDER) != 0)
		orderSize += 1;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_STATE) != 0)
		orderSize += 1;

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_EDGE) != 0)
		orderSize += 1;

	return orderSize;
}

/* Queue a windowing alternate secondary order describing a new or updated
 * window; the field order must match
 * update_calculate_new_or_existing_window. */
static BOOL update_send_new_or_existing_window(rdpContext* context,
                                               const WINDOW_ORDER_INFO* orderInfo,
                                               const WINDOW_STATE_ORDER* stateOrder)
{
	wStream* s;
	rdpUpdate* update = context->update;
	BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2);
	UINT16 orderSize = update_calculate_new_or_existing_window(orderInfo, stateOrder);
	update_check_flush(context, orderSize);
	s = update->us;

	if (!s)
		return FALSE;

	if (!Stream_EnsureRemainingCapacity(s, orderSize))
		return FALSE;

	Stream_Write_UINT8(s, controlFlags);           /* Header (1 byte) */
	Stream_Write_UINT16(s, orderSize);             /* OrderSize (2 bytes) */
	Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */
	Stream_Write_UINT32(s, orderInfo->windowId);   /* WindowID (4 bytes) */

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OWNER) != 0)
		Stream_Write_UINT32(s, stateOrder->ownerWindowId);

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_STYLE) != 0)
	{
		Stream_Write_UINT32(s, stateOrder->style);
		Stream_Write_UINT32(s, stateOrder->extendedStyle);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_SHOW) != 0)
	{
		Stream_Write_UINT8(s, stateOrder->showState);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TITLE) != 0)
	{
		Stream_Write_UINT16(s, stateOrder->titleInfo.length);
		Stream_Write(s, stateOrder->titleInfo.string, stateOrder->titleInfo.length);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_OFFSET) != 0)
	{
		Stream_Write_INT32(s, stateOrder->clientOffsetX);
		Stream_Write_INT32(s, stateOrder->clientOffsetY);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_SIZE) != 0)
	{
		Stream_Write_UINT32(s, stateOrder->clientAreaWidth);
		Stream_Write_UINT32(s, stateOrder->clientAreaHeight);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_X) != 0)
	{
		Stream_Write_UINT32(s, stateOrder->resizeMarginLeft);
		Stream_Write_UINT32(s, stateOrder->resizeMarginRight);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_Y) != 0)
	{
		Stream_Write_UINT32(s, stateOrder->resizeMarginTop);
		Stream_Write_UINT32(s, stateOrder->resizeMarginBottom);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RP_CONTENT) != 0)
	{
		Stream_Write_UINT8(s, stateOrder->RPContent);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ROOT_PARENT) != 0)
	{
		Stream_Write_UINT32(s, stateOrder->rootParentHandle);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_OFFSET) != 0)
	{
		Stream_Write_INT32(s, stateOrder->windowOffsetX);
		Stream_Write_INT32(s, stateOrder->windowOffsetY);
	}

	if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_CLIENT_DELTA) != 0)
	{
		Stream_Write_INT32(s,
stateOrder->windowClientDeltaX); Stream_Write_INT32(s, stateOrder->windowClientDeltaY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_SIZE) != 0) { Stream_Write_UINT32(s, stateOrder->windowWidth); Stream_Write_UINT32(s, stateOrder->windowHeight); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_RECTS) != 0) { Stream_Write_UINT16(s, stateOrder->numWindowRects); Stream_Write(s, stateOrder->windowRects, stateOrder->numWindowRects * sizeof(RECTANGLE_16)); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VIS_OFFSET) != 0) { Stream_Write_UINT32(s, stateOrder->visibleOffsetX); Stream_Write_UINT32(s, stateOrder->visibleOffsetY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VISIBILITY) != 0) { Stream_Write_UINT16(s, stateOrder->numVisibilityRects); Stream_Write(s, stateOrder->visibilityRects, stateOrder->numVisibilityRects * sizeof(RECTANGLE_16)); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OVERLAY_DESCRIPTION) != 0) { Stream_Write_UINT16(s, stateOrder->OverlayDescription.length); Stream_Write(s, stateOrder->OverlayDescription.string, stateOrder->OverlayDescription.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TASKBAR_BUTTON) != 0) { Stream_Write_UINT8(s, stateOrder->TaskbarButton); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ENFORCE_SERVER_ZORDER) != 0) { Stream_Write_UINT8(s, stateOrder->EnforceServerZOrder); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_STATE) != 0) { Stream_Write_UINT8(s, stateOrder->AppBarState); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_EDGE) != 0) { Stream_Write_UINT8(s, stateOrder->AppBarEdge); } update->numberOrders++; return TRUE; } static BOOL update_send_window_create(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { return update_send_new_or_existing_window(context, orderInfo, stateOrder); } static BOOL update_send_window_update(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { 
return update_send_new_or_existing_window(context, orderInfo, stateOrder); } static UINT16 update_calculate_window_icon_order(const WINDOW_ORDER_INFO* orderInfo, const WINDOW_ICON_ORDER* iconOrder) { UINT16 orderSize = 23; ICON_INFO* iconInfo = iconOrder->iconInfo; orderSize += iconInfo->cbBitsColor + iconInfo->cbBitsMask; if (iconInfo->bpp <= 8) orderSize += 2 + iconInfo->cbColorTable; return orderSize; } static BOOL update_send_window_icon(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_ICON_ORDER* iconOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); ICON_INFO* iconInfo = iconOrder->iconInfo; UINT16 orderSize = update_calculate_window_icon_order(orderInfo, iconOrder); update_check_flush(context, orderSize); s = update->us; if (!s || !iconInfo) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ /* Write body */ Stream_Write_UINT16(s, iconInfo->cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, iconInfo->cacheId); /* CacheId (1 byte) */ Stream_Write_UINT8(s, iconInfo->bpp); /* Bpp (1 byte) */ Stream_Write_UINT16(s, iconInfo->width); /* Width (2 bytes) */ Stream_Write_UINT16(s, iconInfo->height); /* Height (2 bytes) */ if (iconInfo->bpp <= 8) { Stream_Write_UINT16(s, iconInfo->cbColorTable); /* CbColorTable (2 bytes) */ } Stream_Write_UINT16(s, iconInfo->cbBitsMask); /* CbBitsMask (2 bytes) */ Stream_Write_UINT16(s, iconInfo->cbBitsColor); /* CbBitsColor (2 bytes) */ Stream_Write(s, iconInfo->bitsMask, iconInfo->cbBitsMask); /* BitsMask (variable) */ if (iconInfo->bpp <= 8) { Stream_Write(s, iconInfo->colorTable, iconInfo->cbColorTable); /* ColorTable 
(variable) */
	}

	Stream_Write(s, iconInfo->bitsColor, iconInfo->cbBitsColor); /* BitsColor (variable) */
	update->numberOrders++;
	return TRUE;
}

/* Serialize a cached-icon window order (fixed 14-byte payload):
 * 11-byte order header followed by CacheEntry and CacheId. */
static BOOL update_send_window_cached_icon(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo,
                                           const WINDOW_CACHED_ICON_ORDER* cachedIconOrder)
{
	wStream* s;
	rdpUpdate* update = context->update;
	BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2);
	UINT16 orderSize = 14; /* 1 + 2 + 4 + 4 header, 2 + 1 body */
	CACHED_ICON_INFO cachedIcon = cachedIconOrder->cachedIcon;
	update_check_flush(context, orderSize);
	s = update->us;

	if (!s)
		return FALSE;

	if (!Stream_EnsureRemainingCapacity(s, orderSize))
		return FALSE;

	/* Write Hdr */
	Stream_Write_UINT8(s, controlFlags);           /* Header (1 byte) */
	Stream_Write_UINT16(s, orderSize);             /* OrderSize (2 bytes) */
	Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */
	Stream_Write_UINT32(s, orderInfo->windowId);   /* WindowID (4 bytes) */
	/* Write body */
	Stream_Write_UINT16(s, cachedIcon.cacheEntry); /* CacheEntry (2 bytes) */
	Stream_Write_UINT8(s, cachedIcon.cacheId);     /* CacheId (1 byte) */
	update->numberOrders++;
	return TRUE;
}

/* Serialize a window deletion order — header only (11 bytes), the
 * fieldFlags are expected to carry the delete indication. */
static BOOL update_send_window_delete(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo)
{
	wStream* s;
	rdpUpdate* update = context->update;
	BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2);
	UINT16 orderSize = 11;
	update_check_flush(context, orderSize);
	s = update->us;

	if (!s)
		return FALSE;

	if (!Stream_EnsureRemainingCapacity(s, orderSize))
		return FALSE;

	/* Write Hdr */
	Stream_Write_UINT8(s, controlFlags);           /* Header (1 byte) */
	Stream_Write_UINT16(s, orderSize);             /* OrderSize (2 bytes) */
	Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */
	Stream_Write_UINT32(s, orderInfo->windowId);   /* WindowID (4 bytes) */
	update->numberOrders++;
	return TRUE;
}

/* Compute the wire size of a new/existing notification icon order:
 * 15 fixed bytes plus the optional fields selected by fieldFlags. */
static UINT16 update_calculate_new_or_existing_notification_icons_order(
    const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder)
{
	UINT16 orderSize = 15;
if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_VERSION) != 0) orderSize += 4; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_TIP) != 0) { orderSize += 2 + iconStateOrder->toolTip.length; } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_INFO_TIP) != 0) { NOTIFY_ICON_INFOTIP infoTip = iconStateOrder->infoTip; orderSize += 12 + infoTip.text.length + infoTip.title.length; } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_STATE) != 0) { orderSize += 4; } if ((orderInfo->fieldFlags & WINDOW_ORDER_ICON) != 0) { ICON_INFO iconInfo = iconStateOrder->icon; orderSize += 12; if (iconInfo.bpp <= 8) orderSize += 2 + iconInfo.cbColorTable; orderSize += iconInfo.cbBitsMask + iconInfo.cbBitsColor; } else if ((orderInfo->fieldFlags & WINDOW_ORDER_CACHED_ICON) != 0) { orderSize += 3; } return orderSize; } static BOOL update_send_new_or_existing_notification_icons(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); BOOL versionFieldPresent = FALSE; UINT16 orderSize = update_calculate_new_or_existing_notification_icons_order(orderInfo, iconStateOrder); update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_INT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ Stream_Write_UINT32(s, orderInfo->notifyIconId); /* NotifyIconId (4 bytes) */ /* Write body */ if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_VERSION) != 0) { versionFieldPresent = TRUE; Stream_Write_UINT32(s, iconStateOrder->version); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_TIP) != 0) { 
Stream_Write_UINT16(s, iconStateOrder->toolTip.length); Stream_Write(s, iconStateOrder->toolTip.string, iconStateOrder->toolTip.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_INFO_TIP) != 0) { NOTIFY_ICON_INFOTIP infoTip = iconStateOrder->infoTip; /* info tip should not be sent when version is 0 */ if (versionFieldPresent && iconStateOrder->version == 0) return FALSE; Stream_Write_UINT32(s, infoTip.timeout); /* Timeout (4 bytes) */ Stream_Write_UINT32(s, infoTip.flags); /* InfoFlags (4 bytes) */ Stream_Write_UINT16(s, infoTip.text.length); /* InfoTipText (variable) */ Stream_Write(s, infoTip.text.string, infoTip.text.length); Stream_Write_UINT16(s, infoTip.title.length); /* Title (variable) */ Stream_Write(s, infoTip.title.string, infoTip.title.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_STATE) != 0) { /* notify state should not be sent when version is 0 */ if (versionFieldPresent && iconStateOrder->version == 0) return FALSE; Stream_Write_UINT32(s, iconStateOrder->state); } if ((orderInfo->fieldFlags & WINDOW_ORDER_ICON) != 0) { ICON_INFO iconInfo = iconStateOrder->icon; Stream_Write_UINT16(s, iconInfo.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, iconInfo.cacheId); /* CacheId (1 byte) */ Stream_Write_UINT8(s, iconInfo.bpp); /* Bpp (1 byte) */ Stream_Write_UINT16(s, iconInfo.width); /* Width (2 bytes) */ Stream_Write_UINT16(s, iconInfo.height); /* Height (2 bytes) */ if (iconInfo.bpp <= 8) { Stream_Write_UINT16(s, iconInfo.cbColorTable); /* CbColorTable (2 bytes) */ } Stream_Write_UINT16(s, iconInfo.cbBitsMask); /* CbBitsMask (2 bytes) */ Stream_Write_UINT16(s, iconInfo.cbBitsColor); /* CbBitsColor (2 bytes) */ Stream_Write(s, iconInfo.bitsMask, iconInfo.cbBitsMask); /* BitsMask (variable) */ orderSize += iconInfo.cbBitsMask; if (iconInfo.bpp <= 8) { Stream_Write(s, iconInfo.colorTable, iconInfo.cbColorTable); /* ColorTable (variable) */ } Stream_Write(s, iconInfo.bitsColor, iconInfo.cbBitsColor); /* 
BitsColor (variable) */ } else if ((orderInfo->fieldFlags & WINDOW_ORDER_CACHED_ICON) != 0) { CACHED_ICON_INFO cachedIcon = iconStateOrder->cachedIcon; Stream_Write_UINT16(s, cachedIcon.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, cachedIcon.cacheId); /* CacheId (1 byte) */ } update->numberOrders++; return TRUE; } static BOOL update_send_notify_icon_create(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { return update_send_new_or_existing_notification_icons(context, orderInfo, iconStateOrder); } static BOOL update_send_notify_icon_update(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { return update_send_new_or_existing_notification_icons(context, orderInfo, iconStateOrder); } static BOOL update_send_notify_icon_delete(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 15; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ Stream_Write_UINT32(s, orderInfo->notifyIconId); /* NotifyIconId (4 bytes) */ update->numberOrders++; return TRUE; } static UINT16 update_calculate_monitored_desktop(const WINDOW_ORDER_INFO* orderInfo, const MONITORED_DESKTOP_ORDER* monitoredDesktop) { UINT16 orderSize = 7; if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ACTIVE_WND) { orderSize += 4; } if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ZORDER) { orderSize += 1 + (4 * monitoredDesktop->numWindowIds); } return orderSize; } static BOOL update_send_monitored_desktop(rdpContext* context, const 
WINDOW_ORDER_INFO* orderInfo, const MONITORED_DESKTOP_ORDER* monitoredDesktop) { UINT32 i; wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = update_calculate_monitored_desktop(orderInfo, monitoredDesktop); update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ACTIVE_WND) { Stream_Write_UINT32(s, monitoredDesktop->activeWindowId); /* activeWindowId (4 bytes) */ } if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ZORDER) { Stream_Write_UINT8(s, monitoredDesktop->numWindowIds); /* numWindowIds (1 byte) */ /* windowIds */ for (i = 0; i < monitoredDesktop->numWindowIds; i++) { Stream_Write_UINT32(s, monitoredDesktop->windowIds[i]); } } update->numberOrders++; return TRUE; } static BOOL update_send_non_monitored_desktop(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 7; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ update->numberOrders++; return TRUE; } void update_register_server_callbacks(rdpUpdate* update) { update->BeginPaint = _update_begin_paint; update->EndPaint = _update_end_paint; update->SetBounds = update_set_bounds; update->Synchronize = update_send_synchronize; update->DesktopResize = update_send_desktop_resize; update->BitmapUpdate = update_send_bitmap_update; update->SurfaceBits = update_send_surface_bits; 
update->SurfaceFrameMarker = update_send_surface_frame_marker;
	update->SurfaceCommand = update_send_surface_command;
	update->SurfaceFrameBits = update_send_surface_frame_bits;
	update->PlaySound = update_send_play_sound;
	update->SetKeyboardIndicators = update_send_set_keyboard_indicators;
	update->SetKeyboardImeStatus = update_send_set_keyboard_ime_status;
	update->SaveSessionInfo = rdp_send_save_session_info;
	update->ServerStatusInfo = rdp_send_server_status_info;
	/* primary drawing orders */
	update->primary->DstBlt = update_send_dstblt;
	update->primary->PatBlt = update_send_patblt;
	update->primary->ScrBlt = update_send_scrblt;
	update->primary->OpaqueRect = update_send_opaque_rect;
	update->primary->LineTo = update_send_line_to;
	update->primary->MemBlt = update_send_memblt;
	update->primary->GlyphIndex = update_send_glyph_index;
	/* secondary (cache) orders */
	update->secondary->CacheBitmap = update_send_cache_bitmap;
	update->secondary->CacheBitmapV2 = update_send_cache_bitmap_v2;
	update->secondary->CacheBitmapV3 = update_send_cache_bitmap_v3;
	update->secondary->CacheColorTable = update_send_cache_color_table;
	update->secondary->CacheGlyph = update_send_cache_glyph;
	update->secondary->CacheGlyphV2 = update_send_cache_glyph_v2;
	update->secondary->CacheBrush = update_send_cache_brush;
	/* alternate secondary orders */
	update->altsec->CreateOffscreenBitmap = update_send_create_offscreen_bitmap_order;
	update->altsec->SwitchSurface = update_send_switch_surface_order;
	/* pointer updates */
	update->pointer->PointerSystem = update_send_pointer_system;
	update->pointer->PointerPosition = update_send_pointer_position;
	update->pointer->PointerColor = update_send_pointer_color;
	update->pointer->PointerLarge = update_send_pointer_large;
	update->pointer->PointerNew = update_send_pointer_new;
	update->pointer->PointerCached = update_send_pointer_cached;
	/* window (RAIL) orders */
	update->window->WindowCreate = update_send_window_create;
	update->window->WindowUpdate = update_send_window_update;
	update->window->WindowIcon = update_send_window_icon;
	update->window->WindowCachedIcon = update_send_window_cached_icon;
update->window->WindowDelete = update_send_window_delete; update->window->NotifyIconCreate = update_send_notify_icon_create; update->window->NotifyIconUpdate = update_send_notify_icon_update; update->window->NotifyIconDelete = update_send_notify_icon_delete; update->window->MonitoredDesktop = update_send_monitored_desktop; update->window->NonMonitoredDesktop = update_send_non_monitored_desktop; } void update_register_client_callbacks(rdpUpdate* update) { update->RefreshRect = update_send_refresh_rect; update->SuppressOutput = update_send_suppress_output; update->SurfaceFrameAcknowledge = update_send_frame_acknowledge; } int update_process_messages(rdpUpdate* update) { return update_message_queue_process_pending_messages(update); } static void update_free_queued_message(void* obj) { wMessage* msg = (wMessage*)obj; update_message_queue_free_message(msg); } void update_free_window_state(WINDOW_STATE_ORDER* window_state) { if (!window_state) return; free(window_state->OverlayDescription.string); free(window_state->titleInfo.string); free(window_state->windowRects); free(window_state->visibilityRects); memset(window_state, 0, sizeof(WINDOW_STATE_ORDER)); } rdpUpdate* update_new(rdpRdp* rdp) { const wObject cb = { NULL, NULL, NULL, update_free_queued_message, NULL }; rdpUpdate* update; OFFSCREEN_DELETE_LIST* deleteList; WINPR_UNUSED(rdp); update = (rdpUpdate*)calloc(1, sizeof(rdpUpdate)); if (!update) return NULL; update->log = WLog_Get("com.freerdp.core.update"); InitializeCriticalSection(&(update->mux)); update->pointer = (rdpPointerUpdate*)calloc(1, sizeof(rdpPointerUpdate)); if (!update->pointer) goto fail; update->primary = (rdpPrimaryUpdate*)calloc(1, sizeof(rdpPrimaryUpdate)); if (!update->primary) goto fail; update->secondary = (rdpSecondaryUpdate*)calloc(1, sizeof(rdpSecondaryUpdate)); if (!update->secondary) goto fail; update->altsec = (rdpAltSecUpdate*)calloc(1, sizeof(rdpAltSecUpdate)); if (!update->altsec) goto fail; update->window = 
(rdpWindowUpdate*)calloc(1, sizeof(rdpWindowUpdate)); if (!update->window) goto fail; deleteList = &(update->altsec->create_offscreen_bitmap.deleteList); deleteList->sIndices = 64; deleteList->indices = calloc(deleteList->sIndices, 2); if (!deleteList->indices) goto fail; deleteList->cIndices = 0; update->SuppressOutput = update_send_suppress_output; update->initialState = TRUE; update->autoCalculateBitmapData = TRUE; update->queue = MessageQueue_New(&cb); if (!update->queue) goto fail; return update; fail: update_free(update); return NULL; } void update_free(rdpUpdate* update) { if (update != NULL) { OFFSCREEN_DELETE_LIST* deleteList = &(update->altsec->create_offscreen_bitmap.deleteList); if (deleteList) free(deleteList->indices); free(update->pointer); if (update->primary) { free(update->primary->polyline.points); free(update->primary->polygon_sc.points); free(update->primary->fast_glyph.glyphData.aj); free(update->primary); } free(update->secondary); free(update->altsec); if (update->window) { free(update->window); } MessageQueue_Free(update->queue); DeleteCriticalSection(&update->mux); free(update); } } BOOL update_begin_paint(rdpUpdate* update) { if (!update) return FALSE; EnterCriticalSection(&update->mux); if (!update->BeginPaint) return TRUE; return update->BeginPaint(update->context); } BOOL update_end_paint(rdpUpdate* update) { BOOL rc = FALSE; if (!update) return FALSE; if (update->EndPaint) rc = update->EndPaint(update->context); LeaveCriticalSection(&update->mux); return rc; }
/** * FreeRDP: A Remote Desktop Protocol Implementation * Update Data PDUs * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/synch.h> #include <winpr/thread.h> #include <winpr/collections.h> #include "update.h" #include "surface.h" #include "message.h" #include "info.h" #include "window.h" #include <freerdp/log.h> #include <freerdp/peer.h> #include <freerdp/codec/bitmap.h> #include "../cache/pointer.h" #include "../cache/palette.h" #include "../cache/bitmap.h" #define TAG FREERDP_TAG("core.update") static const char* const UPDATE_TYPE_STRINGS[] = { "Orders", "Bitmap", "Palette", "Synchronize" }; static const char* update_type_to_string(UINT16 updateType) { if (updateType >= ARRAYSIZE(UPDATE_TYPE_STRINGS)) return "UNKNOWN"; return UPDATE_TYPE_STRINGS[updateType]; } static BOOL update_recv_orders(rdpUpdate* update, wStream* s) { UINT16 numberOrders; if (Stream_GetRemainingLength(s) < 6) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 6"); return FALSE; } Stream_Seek_UINT16(s); /* pad2OctetsA (2 bytes) */ Stream_Read_UINT16(s, numberOrders); /* numberOrders (2 bytes) */ Stream_Seek_UINT16(s); /* pad2OctetsB (2 bytes) */ while (numberOrders > 0) { if (!update_recv_order(update, s)) { WLog_ERR(TAG, 
"update_recv_order() failed"); return FALSE; } numberOrders--; } return TRUE; } static BOOL update_read_bitmap_data(rdpUpdate* update, wStream* s, BITMAP_DATA* bitmapData) { WINPR_UNUSED(update); if (Stream_GetRemainingLength(s) < 18) return FALSE; Stream_Read_UINT16(s, bitmapData->destLeft); Stream_Read_UINT16(s, bitmapData->destTop); Stream_Read_UINT16(s, bitmapData->destRight); Stream_Read_UINT16(s, bitmapData->destBottom); Stream_Read_UINT16(s, bitmapData->width); Stream_Read_UINT16(s, bitmapData->height); Stream_Read_UINT16(s, bitmapData->bitsPerPixel); Stream_Read_UINT16(s, bitmapData->flags); Stream_Read_UINT16(s, bitmapData->bitmapLength); if (bitmapData->flags & BITMAP_COMPRESSION) { if (!(bitmapData->flags & NO_BITMAP_COMPRESSION_HDR)) { Stream_Read_UINT16(s, bitmapData->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ bitmapData->bitmapLength = bitmapData->cbCompMainBodySize; } bitmapData->compressed = TRUE; } else bitmapData->compressed = FALSE; if (Stream_GetRemainingLength(s) < bitmapData->bitmapLength) return FALSE; if (bitmapData->bitmapLength > 0) { bitmapData->bitmapDataStream = malloc(bitmapData->bitmapLength); if (!bitmapData->bitmapDataStream) return FALSE; memcpy(bitmapData->bitmapDataStream, Stream_Pointer(s), bitmapData->bitmapLength); Stream_Seek(s, bitmapData->bitmapLength); } return TRUE; } static BOOL update_write_bitmap_data(rdpUpdate* update, wStream* s, BITMAP_DATA* bitmapData) { if (!Stream_EnsureRemainingCapacity(s, 64 + bitmapData->bitmapLength)) return FALSE; if (update->autoCalculateBitmapData) { bitmapData->flags = 0; bitmapData->cbCompFirstRowSize = 0; if (bitmapData->compressed) bitmapData->flags |= BITMAP_COMPRESSION; if 
(update->context->settings->NoBitmapCompressionHeader) { bitmapData->flags |= NO_BITMAP_COMPRESSION_HDR; bitmapData->cbCompMainBodySize = bitmapData->bitmapLength; } } Stream_Write_UINT16(s, bitmapData->destLeft); Stream_Write_UINT16(s, bitmapData->destTop); Stream_Write_UINT16(s, bitmapData->destRight); Stream_Write_UINT16(s, bitmapData->destBottom); Stream_Write_UINT16(s, bitmapData->width); Stream_Write_UINT16(s, bitmapData->height); Stream_Write_UINT16(s, bitmapData->bitsPerPixel); Stream_Write_UINT16(s, bitmapData->flags); Stream_Write_UINT16(s, bitmapData->bitmapLength); if (bitmapData->flags & BITMAP_COMPRESSION) { if (!(bitmapData->flags & NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16(s, bitmapData->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ } Stream_Write(s, bitmapData->bitmapDataStream, bitmapData->bitmapLength); } else { Stream_Write(s, bitmapData->bitmapDataStream, bitmapData->bitmapLength); } return TRUE; } BITMAP_UPDATE* update_read_bitmap_update(rdpUpdate* update, wStream* s) { UINT32 i; BITMAP_UPDATE* bitmapUpdate = calloc(1, sizeof(BITMAP_UPDATE)); if (!bitmapUpdate) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */ WLog_Print(update->log, WLOG_TRACE, "BitmapUpdate: %" PRIu32 "", bitmapUpdate->number); if (bitmapUpdate->number > bitmapUpdate->count) { UINT32 count = bitmapUpdate->number * 2; BITMAP_DATA* newdata = (BITMAP_DATA*)realloc(bitmapUpdate->rectangles, sizeof(BITMAP_DATA) * count); if (!newdata) goto fail; bitmapUpdate->rectangles = newdata; ZeroMemory(&bitmapUpdate->rectangles[bitmapUpdate->count], sizeof(BITMAP_DATA) * (count - bitmapUpdate->count)); bitmapUpdate->count = count; } 
/* rectangles */ for (i = 0; i < bitmapUpdate->number; i++) { if (!update_read_bitmap_data(update, s, &bitmapUpdate->rectangles[i])) goto fail; } return bitmapUpdate; fail: free_bitmap_update(update->context, bitmapUpdate); return NULL; } static BOOL update_write_bitmap_update(rdpUpdate* update, wStream* s, const BITMAP_UPDATE* bitmapUpdate) { int i; if (!Stream_EnsureRemainingCapacity(s, 32)) return FALSE; Stream_Write_UINT16(s, UPDATE_TYPE_BITMAP); /* updateType */ Stream_Write_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */ /* rectangles */ for (i = 0; i < (int)bitmapUpdate->number; i++) { if (!update_write_bitmap_data(update, s, &bitmapUpdate->rectangles[i])) return FALSE; } return TRUE; } PALETTE_UPDATE* update_read_palette(rdpUpdate* update, wStream* s) { int i; PALETTE_ENTRY* entry; PALETTE_UPDATE* palette_update = calloc(1, sizeof(PALETTE_UPDATE)); if (!palette_update) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ Stream_Read_UINT32(s, palette_update->number); /* numberColors (4 bytes), must be set to 256 */ if (palette_update->number > 256) palette_update->number = 256; if (Stream_GetRemainingLength(s) < palette_update->number * 3) goto fail; /* paletteEntries */ for (i = 0; i < (int)palette_update->number; i++) { entry = &palette_update->entries[i]; Stream_Read_UINT8(s, entry->red); Stream_Read_UINT8(s, entry->green); Stream_Read_UINT8(s, entry->blue); } return palette_update; fail: free_palette_update(update->context, palette_update); return NULL; } static BOOL update_read_synchronize(rdpUpdate* update, wStream* s) { WINPR_UNUSED(update); return Stream_SafeSeek(s, 2); /* pad2Octets (2 bytes) */ /** * The Synchronize Update is an artifact from the * T.128 protocol and should be ignored. 
*/
}

/* Parse a Play Sound PDU body: duration and frequency (8 bytes total). */
static BOOL update_read_play_sound(wStream* s, PLAY_SOUND_UPDATE* play_sound)
{
	if (Stream_GetRemainingLength(s) < 8)
		return FALSE;

	Stream_Read_UINT32(s, play_sound->duration);  /* duration (4 bytes) */
	Stream_Read_UINT32(s, play_sound->frequency); /* frequency (4 bytes) */
	return TRUE;
}

/* Receive a Play Sound PDU and dispatch it to the registered callback. */
BOOL update_recv_play_sound(rdpUpdate* update, wStream* s)
{
	PLAY_SOUND_UPDATE play_sound;

	if (!update_read_play_sound(s, &play_sound))
		return FALSE;

	return IFCALLRESULT(FALSE, update->PlaySound, update->context, &play_sound);
}

/* Parse a pointer position update. Returns a heap allocation owned by the
 * caller (released via free_pointer_position_update), or NULL on error. */
POINTER_POSITION_UPDATE* update_read_pointer_position(rdpUpdate* update, wStream* s)
{
	POINTER_POSITION_UPDATE* pointer_position = calloc(1, sizeof(POINTER_POSITION_UPDATE));

	if (!pointer_position)
		goto fail;

	if (Stream_GetRemainingLength(s) < 4)
		goto fail;

	Stream_Read_UINT16(s, pointer_position->xPos); /* xPos (2 bytes) */
	Stream_Read_UINT16(s, pointer_position->yPos); /* yPos (2 bytes) */
	return pointer_position;
fail:
	free_pointer_position_update(update->context, pointer_position);
	return NULL;
}

/* Parse a system pointer update. Caller owns the returned allocation. */
POINTER_SYSTEM_UPDATE* update_read_pointer_system(rdpUpdate* update, wStream* s)
{
	POINTER_SYSTEM_UPDATE* pointer_system = calloc(1, sizeof(POINTER_SYSTEM_UPDATE));

	if (!pointer_system)
		goto fail;

	if (Stream_GetRemainingLength(s) < 4)
		goto fail;

	Stream_Read_UINT32(s, pointer_system->type); /* systemPointerType (4 bytes) */
	return pointer_system;
fail:
	free_pointer_system_update(update->context, pointer_system);
	return NULL;
}

/* Parse a color pointer attribute into pointer_color; xorBpp is the XOR
 * mask bit depth supplied by the containing packet. Returns FALSE on
 * malformed or truncated input (wire data is untrusted here). */
static BOOL _update_read_pointer_color(wStream* s, POINTER_COLOR_UPDATE* pointer_color,
                                       BYTE xorBpp)
{
	BYTE* newMask;
	UINT32 scanlineSize;

	if (!pointer_color)
		goto fail;

	if (Stream_GetRemainingLength(s) < 14)
		goto fail;

	Stream_Read_UINT16(s, pointer_color->cacheIndex); /* cacheIndex (2 bytes) */
	Stream_Read_UINT16(s, pointer_color->xPos);       /* xPos (2 bytes) */
	Stream_Read_UINT16(s, pointer_color->yPos);       /* yPos (2 bytes) */
	/**
	 * As stated in 2.2.9.1.1.4.4 Color Pointer Update:
	 * The maximum allowed pointer width/height is 96 pixels if
the client indicated support * for large pointers by setting the LARGE_POINTER_FLAG (0x00000001) in the Large * Pointer Capability Set (section 2.2.7.2.7). If the LARGE_POINTER_FLAG was not * set, the maximum allowed pointer width/height is 32 pixels. * * So we check for a maximum of 96 for CVE-2014-0250. */ Stream_Read_UINT16(s, pointer_color->width); /* width (2 bytes) */ Stream_Read_UINT16(s, pointer_color->height); /* height (2 bytes) */ if ((pointer_color->width > 96) || (pointer_color->height > 96)) goto fail; Stream_Read_UINT16(s, pointer_color->lengthAndMask); /* lengthAndMask (2 bytes) */ Stream_Read_UINT16(s, pointer_color->lengthXorMask); /* lengthXorMask (2 bytes) */ /** * There does not seem to be any documentation on why * xPos / yPos can be larger than width / height * so it is missing in documentation or a bug in implementation * 2.2.9.1.1.4.4 Color Pointer Update (TS_COLORPOINTERATTRIBUTE) */ if (pointer_color->xPos >= pointer_color->width) pointer_color->xPos = 0; if (pointer_color->yPos >= pointer_color->height) pointer_color->yPos = 0; if (pointer_color->lengthXorMask > 0) { /** * Spec states that: * * xorMaskData (variable): A variable-length array of bytes. Contains the 24-bpp, bottom-up * XOR mask scan-line data. The XOR mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 3x3 pixel cursor is being sent, then each scan-line will * consume 10 bytes (3 pixels per scan-line multiplied by 3 bytes per pixel, rounded up to * the next even number of bytes). * * In fact instead of 24-bpp, the bpp parameter is given by the containing packet. 
*/ if (Stream_GetRemainingLength(s) < pointer_color->lengthXorMask) goto fail; scanlineSize = (7 + xorBpp * pointer_color->width) / 8; scanlineSize = ((scanlineSize + 1) / 2) * 2; if (scanlineSize * pointer_color->height != pointer_color->lengthXorMask) { WLog_ERR(TAG, "invalid lengthXorMask: width=%" PRIu32 " height=%" PRIu32 ", %" PRIu32 " instead of %" PRIu32 "", pointer_color->width, pointer_color->height, pointer_color->lengthXorMask, scanlineSize * pointer_color->height); goto fail; } newMask = realloc(pointer_color->xorMaskData, pointer_color->lengthXorMask); if (!newMask) goto fail; pointer_color->xorMaskData = newMask; Stream_Read(s, pointer_color->xorMaskData, pointer_color->lengthXorMask); } if (pointer_color->lengthAndMask > 0) { /** * andMaskData (variable): A variable-length array of bytes. Contains the 1-bpp, bottom-up * AND mask scan-line data. The AND mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 7x7 pixel cursor is being sent, then each scan-line will * consume 2 bytes (7 pixels per scan-line multiplied by 1 bpp, rounded up to the next even * number of bytes). 
*/ if (Stream_GetRemainingLength(s) < pointer_color->lengthAndMask) goto fail; scanlineSize = ((7 + pointer_color->width) / 8); scanlineSize = ((1 + scanlineSize) / 2) * 2; if (scanlineSize * pointer_color->height != pointer_color->lengthAndMask) { WLog_ERR(TAG, "invalid lengthAndMask: %" PRIu32 " instead of %" PRIu32 "", pointer_color->lengthAndMask, scanlineSize * pointer_color->height); goto fail; } newMask = realloc(pointer_color->andMaskData, pointer_color->lengthAndMask); if (!newMask) goto fail; pointer_color->andMaskData = newMask; Stream_Read(s, pointer_color->andMaskData, pointer_color->lengthAndMask); } if (Stream_GetRemainingLength(s) > 0) Stream_Seek_UINT8(s); /* pad (1 byte) */ return TRUE; fail: return FALSE; } POINTER_COLOR_UPDATE* update_read_pointer_color(rdpUpdate* update, wStream* s, BYTE xorBpp) { POINTER_COLOR_UPDATE* pointer_color = calloc(1, sizeof(POINTER_COLOR_UPDATE)); if (!pointer_color) goto fail; if (!_update_read_pointer_color(s, pointer_color, xorBpp)) goto fail; return pointer_color; fail: free_pointer_color_update(update->context, pointer_color); return NULL; } static BOOL _update_read_pointer_large(wStream* s, POINTER_LARGE_UPDATE* pointer) { BYTE* newMask; UINT32 scanlineSize; if (!pointer) goto fail; if (Stream_GetRemainingLength(s) < 14) goto fail; Stream_Read_UINT16(s, pointer->xorBpp); Stream_Read_UINT16(s, pointer->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, pointer->hotSpotX); /* xPos (2 bytes) */ Stream_Read_UINT16(s, pointer->hotSpotY); /* yPos (2 bytes) */ Stream_Read_UINT16(s, pointer->width); /* width (2 bytes) */ Stream_Read_UINT16(s, pointer->height); /* height (2 bytes) */ if ((pointer->width > 384) || (pointer->height > 384)) goto fail; Stream_Read_UINT16(s, pointer->lengthAndMask); /* lengthAndMask (2 bytes) */ Stream_Read_UINT16(s, pointer->lengthXorMask); /* lengthXorMask (2 bytes) */ if (pointer->hotSpotX >= pointer->width) pointer->hotSpotX = 0; if (pointer->hotSpotY >= pointer->height) 
		pointer->hotSpotY = 0;

	if (pointer->lengthXorMask > 0)
	{
		/**
		 * Spec states that:
		 *
		 * xorMaskData (variable): A variable-length array of bytes. Contains the 24-bpp, bottom-up
		 * XOR mask scan-line data. The XOR mask is padded to a 2-byte boundary for each encoded
		 * scan-line. For example, if a 3x3 pixel cursor is being sent, then each scan-line will
		 * consume 10 bytes (3 pixels per scan-line multiplied by 3 bytes per pixel, rounded up to
		 * the next even number of bytes).
		 *
		 * In fact instead of 24-bpp, the bpp parameter is given by the containing packet.
		 */
		if (Stream_GetRemainingLength(s) < pointer->lengthXorMask)
			goto fail;

		/* xorBpp bits per pixel, rounded up to bytes, padded to 2-byte boundary */
		scanlineSize = (7 + pointer->xorBpp * pointer->width) / 8;
		scanlineSize = ((scanlineSize + 1) / 2) * 2;

		/* the wire length must agree with the geometry */
		if (scanlineSize * pointer->height != pointer->lengthXorMask)
		{
			WLog_ERR(TAG,
			         "invalid lengthXorMask: width=%" PRIu32 " height=%" PRIu32 ", %" PRIu32
			         " instead of %" PRIu32 "",
			         pointer->width, pointer->height, pointer->lengthXorMask,
			         scanlineSize * pointer->height);
			goto fail;
		}

		/* realloc via temporary so the old buffer is not leaked on failure */
		newMask = realloc(pointer->xorMaskData, pointer->lengthXorMask);

		if (!newMask)
			goto fail;

		pointer->xorMaskData = newMask;
		Stream_Read(s, pointer->xorMaskData, pointer->lengthXorMask);
	}

	if (pointer->lengthAndMask > 0)
	{
		/**
		 * andMaskData (variable): A variable-length array of bytes. Contains the 1-bpp, bottom-up
		 * AND mask scan-line data. The AND mask is padded to a 2-byte boundary for each encoded
		 * scan-line. For example, if a 7x7 pixel cursor is being sent, then each scan-line will
		 * consume 2 bytes (7 pixels per scan-line multiplied by 1 bpp, rounded up to the next even
		 * number of bytes).
		 */
		if (Stream_GetRemainingLength(s) < pointer->lengthAndMask)
			goto fail;

		/* 1 bpp, rounded up to bytes, padded to a 2-byte boundary */
		scanlineSize = ((7 + pointer->width) / 8);
		scanlineSize = ((1 + scanlineSize) / 2) * 2;

		if (scanlineSize * pointer->height != pointer->lengthAndMask)
		{
			WLog_ERR(TAG, "invalid lengthAndMask: %" PRIu32 " instead of %" PRIu32 "",
			         pointer->lengthAndMask, scanlineSize * pointer->height);
			goto fail;
		}

		newMask = realloc(pointer->andMaskData, pointer->lengthAndMask);

		if (!newMask)
			goto fail;

		pointer->andMaskData = newMask;
		Stream_Read(s, pointer->andMaskData, pointer->lengthAndMask);
	}

	if (Stream_GetRemainingLength(s) > 0)
		Stream_Seek_UINT8(s); /* pad (1 byte) */

	return TRUE;
fail:
	return FALSE;
}

/* Allocate a POINTER_LARGE_UPDATE and parse it; frees partial state on error. */
POINTER_LARGE_UPDATE* update_read_pointer_large(rdpUpdate* update, wStream* s)
{
	POINTER_LARGE_UPDATE* pointer = calloc(1, sizeof(POINTER_LARGE_UPDATE));

	if (!pointer)
		goto fail;

	if (!_update_read_pointer_large(s, pointer))
		goto fail;

	return pointer;
fail:
	free_pointer_large_update(update->context, pointer);
	return NULL;
}

/* Parse a New Pointer update: a 2-byte xorBpp followed by an embedded
 * color pointer attribute that uses that bpp for its XOR mask. */
POINTER_NEW_UPDATE* update_read_pointer_new(rdpUpdate* update, wStream* s)
{
	POINTER_NEW_UPDATE* pointer_new = calloc(1, sizeof(POINTER_NEW_UPDATE));

	if (!pointer_new)
		goto fail;

	if (Stream_GetRemainingLength(s) < 2)
		goto fail;

	Stream_Read_UINT16(s, pointer_new->xorBpp); /* xorBpp (2 bytes) */

	/* only 1..32 bits per pixel are accepted */
	if ((pointer_new->xorBpp < 1) || (pointer_new->xorBpp > 32))
	{
		WLog_ERR(TAG, "invalid xorBpp %" PRIu32 "", pointer_new->xorBpp);
		goto fail;
	}

	if (!_update_read_pointer_color(s, &pointer_new->colorPtrAttr,
	                                pointer_new->xorBpp)) /* colorPtrAttr */
		goto fail;

	return pointer_new;
fail:
	free_pointer_new_update(update->context, pointer_new);
	return NULL;
}

/* Parse a Cached Pointer update (just a 2-byte cache index). */
POINTER_CACHED_UPDATE* update_read_pointer_cached(rdpUpdate* update, wStream* s)
{
	POINTER_CACHED_UPDATE* pointer = calloc(1, sizeof(POINTER_CACHED_UPDATE));

	if (!pointer)
		goto fail;

	if (Stream_GetRemainingLength(s) < 2)
		goto fail;

	Stream_Read_UINT16(s, pointer->cacheIndex); /* cacheIndex (2 bytes) */
	return pointer;
fail:
	free_pointer_cached_update(update->context, pointer);
	return NULL;
}

/* Dispatch a pointer update PDU: read the messageType, parse the matching
 * sub-update, invoke the registered callback and free the parsed structure.
 * Returns the callback's result, FALSE on parse failure or unknown type. */
BOOL update_recv_pointer(rdpUpdate* update, wStream* s)
{
	BOOL rc = FALSE;
	UINT16 messageType;
	rdpContext* context = update->context;
	rdpPointerUpdate* pointer = update->pointer;

	/* messageType + pad2Octets */
	if (Stream_GetRemainingLength(s) < 2 + 2)
		return FALSE;

	Stream_Read_UINT16(s, messageType); /* messageType (2 bytes) */
	Stream_Seek_UINT16(s);              /* pad2Octets (2 bytes) */

	switch (messageType)
	{
		case PTR_MSG_TYPE_POSITION:
		{
			POINTER_POSITION_UPDATE* pointer_position = update_read_pointer_position(update, s);

			if (pointer_position)
			{
				rc = IFCALLRESULT(FALSE, pointer->PointerPosition, context, pointer_position);
				free_pointer_position_update(context, pointer_position);
			}
		}
		break;

		case PTR_MSG_TYPE_SYSTEM:
		{
			POINTER_SYSTEM_UPDATE* pointer_system = update_read_pointer_system(update, s);

			if (pointer_system)
			{
				rc = IFCALLRESULT(FALSE, pointer->PointerSystem, context, pointer_system);
				free_pointer_system_update(context, pointer_system);
			}
		}
		break;

		case PTR_MSG_TYPE_COLOR:
		{
			/* legacy color pointers are always 24bpp */
			POINTER_COLOR_UPDATE* pointer_color = update_read_pointer_color(update, s, 24);

			if (pointer_color)
			{
				rc = IFCALLRESULT(FALSE, pointer->PointerColor, context, pointer_color);
				free_pointer_color_update(context, pointer_color);
			}
		}
		break;

		case PTR_MSG_TYPE_POINTER_LARGE:
		{
			POINTER_LARGE_UPDATE* pointer_large = update_read_pointer_large(update, s);

			if (pointer_large)
			{
				rc = IFCALLRESULT(FALSE, pointer->PointerLarge, context, pointer_large);
				free_pointer_large_update(context, pointer_large);
			}
		}
		break;

		case PTR_MSG_TYPE_POINTER:
		{
			POINTER_NEW_UPDATE* pointer_new = update_read_pointer_new(update, s);

			if (pointer_new)
			{
				rc = IFCALLRESULT(FALSE, pointer->PointerNew, context, pointer_new);
				free_pointer_new_update(context, pointer_new);
			}
		}
		break;

		case PTR_MSG_TYPE_CACHED:
		{
			POINTER_CACHED_UPDATE* pointer_cached = update_read_pointer_cached(update, s);

			if (pointer_cached)
			{
				rc = IFCALLRESULT(FALSE, pointer->PointerCached, context, pointer_cached);
				free_pointer_cached_update(context, pointer_cached);
			}
		}
		break;

		default:
			break;
	}

	return rc;
}

/* Dispatch a slow-path update data PDU (orders/bitmap/palette/synchronize),
 * bracketed by begin/end paint. Returns FALSE (and logs) on any failure. */
BOOL update_recv(rdpUpdate* update, wStream* s)
{
	BOOL rc = FALSE;
	UINT16 updateType;
	rdpContext* context = update->context;

	if (Stream_GetRemainingLength(s) < 2)
	{
		WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 2");
		return FALSE;
	}

	Stream_Read_UINT16(s, updateType); /* updateType (2 bytes) */
	WLog_Print(update->log, WLOG_TRACE, "%s Update Data PDU", UPDATE_TYPE_STRINGS[updateType]);

	if (!update_begin_paint(update))
		goto fail;

	switch (updateType)
	{
		case UPDATE_TYPE_ORDERS:
			rc = update_recv_orders(update, s);
			break;

		case UPDATE_TYPE_BITMAP:
		{
			BITMAP_UPDATE* bitmap_update = update_read_bitmap_update(update, s);

			if (!bitmap_update)
			{
				WLog_ERR(TAG, "UPDATE_TYPE_BITMAP - update_read_bitmap_update() failed");
				goto fail;
			}

			rc = IFCALLRESULT(FALSE, update->BitmapUpdate, context, bitmap_update);
			free_bitmap_update(update->context, bitmap_update);
		}
		break;

		case UPDATE_TYPE_PALETTE:
		{
			PALETTE_UPDATE* palette_update = update_read_palette(update, s);

			if (!palette_update)
			{
				WLog_ERR(TAG, "UPDATE_TYPE_PALETTE - update_read_palette() failed");
				goto fail;
			}

			rc = IFCALLRESULT(FALSE, update->Palette, context, palette_update);
			free_palette_update(context, palette_update);
		}
		break;

		case UPDATE_TYPE_SYNCHRONIZE:
			if (!update_read_synchronize(update, s))
				goto fail;
			/* default TRUE: a missing Synchronize callback is not an error */
			rc = IFCALLRESULT(TRUE, update->Synchronize, context);
			break;

		default:
			break;
	}

fail:

	/* end paint runs on both success and failure paths */
	if (!update_end_paint(update))
		rc = FALSE;

	if (!rc)
	{
		WLog_ERR(TAG, "UPDATE_TYPE %s [%" PRIu16 "] failed", update_type_to_string(updateType),
		         updateType);
		return FALSE;
	}

	return TRUE;
}

/* Reset all cached primary-order state to its initial (zeroed) form; also
 * re-selects the screen surface unless we are still in the initial state. */
void update_reset_state(rdpUpdate* update)
{
	rdpPrimaryUpdate* primary = update->primary;
	rdpAltSecUpdate* altsec = update->altsec;

	/* the fast-glyph order owns a heap buffer; release it before zeroing */
	if (primary->fast_glyph.glyphData.aj)
	{
		free(primary->fast_glyph.glyphData.aj);
		primary->fast_glyph.glyphData.aj = NULL;
	}

	ZeroMemory(&primary->order_info, sizeof(ORDER_INFO));
	ZeroMemory(&primary->dstblt, sizeof(DSTBLT_ORDER));
	ZeroMemory(&primary->patblt, sizeof(PATBLT_ORDER));
	ZeroMemory(&primary->scrblt, sizeof(SCRBLT_ORDER));
	ZeroMemory(&primary->opaque_rect, sizeof(OPAQUE_RECT_ORDER));
	ZeroMemory(&primary->draw_nine_grid, sizeof(DRAW_NINE_GRID_ORDER));
	ZeroMemory(&primary->multi_dstblt, sizeof(MULTI_DSTBLT_ORDER));
	ZeroMemory(&primary->multi_patblt, sizeof(MULTI_PATBLT_ORDER));
	ZeroMemory(&primary->multi_scrblt, sizeof(MULTI_SCRBLT_ORDER));
	ZeroMemory(&primary->multi_opaque_rect, sizeof(MULTI_OPAQUE_RECT_ORDER));
	ZeroMemory(&primary->multi_draw_nine_grid, sizeof(MULTI_DRAW_NINE_GRID_ORDER));
	ZeroMemory(&primary->line_to, sizeof(LINE_TO_ORDER));
	ZeroMemory(&primary->polyline, sizeof(POLYLINE_ORDER));
	ZeroMemory(&primary->memblt, sizeof(MEMBLT_ORDER));
	ZeroMemory(&primary->mem3blt, sizeof(MEM3BLT_ORDER));
	ZeroMemory(&primary->save_bitmap, sizeof(SAVE_BITMAP_ORDER));
	ZeroMemory(&primary->glyph_index, sizeof(GLYPH_INDEX_ORDER));
	ZeroMemory(&primary->fast_index, sizeof(FAST_INDEX_ORDER));
	ZeroMemory(&primary->fast_glyph, sizeof(FAST_GLYPH_ORDER));
	ZeroMemory(&primary->polygon_sc, sizeof(POLYGON_SC_ORDER));
	ZeroMemory(&primary->polygon_cb, sizeof(POLYGON_CB_ORDER));
	ZeroMemory(&primary->ellipse_sc, sizeof(ELLIPSE_SC_ORDER));
	ZeroMemory(&primary->ellipse_cb, sizeof(ELLIPSE_CB_ORDER));
	primary->order_info.orderType = ORDER_TYPE_PATBLT;

	if (!update->initialState)
	{
		/* after a reset, drawing targets the primary screen surface again */
		altsec->switch_surface.bitmapId = SCREEN_BITMAP_SURFACE;
		IFCALL(altsec->SwitchSurface, update->context, &(altsec->switch_surface));
	}
}

/* Post-connect setup: optionally start the async update proxy and select
 * the screen surface. Returns FALSE if the proxy cannot be created. */
BOOL update_post_connect(rdpUpdate* update)
{
	update->asynchronous = update->context->settings->AsyncUpdate;

	if (update->asynchronous)
		if (!(update->proxy = update_message_proxy_new(update)))
			return FALSE;

	update->altsec->switch_surface.bitmapId = SCREEN_BITMAP_SURFACE;
	IFCALL(update->altsec->SwitchSurface, update->context, &(update->altsec->switch_surface));
	update->initialState = FALSE;
	return TRUE;
}

/* Post-disconnect teardown: free the async proxy (if one was created) and
 * return to the initial state. */
void update_post_disconnect(rdpUpdate* update)
{
	update->asynchronous = update->context->settings->AsyncUpdate;

	if (update->asynchronous)
		update_message_proxy_free(update->proxy);

	update->initialState = TRUE;
}

/* Start a combined-orders batch: finish any in-flight batch, then allocate
 * a fresh fast-path PDU stream with 2 bytes reserved for numberOrders. */
static BOOL _update_begin_paint(rdpContext* context)
{
	wStream* s;
	rdpUpdate* update = context->update;

	if (update->us)
	{
		if (!update_end_paint(update))
			return FALSE;
	}

	s = fastpath_update_pdu_init_new(context->rdp->fastpath);

	if (!s)
		return FALSE;

	Stream_SealLength(s);
	Stream_Seek(s, 2); /* numberOrders (2 bytes) */
	update->combineUpdates = TRUE;
	update->numberOrders = 0;
	update->us = s;
	return TRUE;
}

/* Finish the current batch: back-patch numberOrders into the reserved slot
 * and, if any orders were queued, send the PDU; always releases the stream. */
static BOOL _update_end_paint(rdpContext* context)
{
	wStream* s;
	int headerLength;
	rdpUpdate* update = context->update;

	if (!update->us)
		return FALSE;

	s = update->us;
	headerLength = Stream_Length(s);
	Stream_SealLength(s);
	Stream_SetPosition(s, headerLength);
	Stream_Write_UINT16(s, update->numberOrders); /* numberOrders (2 bytes) */
	Stream_SetPosition(s, Stream_Length(s));

	if (update->numberOrders > 0)
	{
		WLog_DBG(TAG, "sending %" PRIu16 " orders", update->numberOrders);
		fastpath_send_update_pdu(context->rdp->fastpath, FASTPATH_UPDATETYPE_ORDERS, s, FALSE);
	}

	update->combineUpdates = FALSE;
	update->numberOrders = 0;
	update->us = NULL;
	Stream_Free(s, TRUE);
	return TRUE;
}

/* Flush pending orders by closing the current batch and opening a new one. */
static void update_flush(rdpContext* context)
{
	rdpUpdate* update = context->update;

	if (update->numberOrders > 0)
	{
		update_end_paint(update);
		update_begin_paint(update);
	}
}

static void update_force_flush(rdpContext* context)
{
	update_flush(context);
}

/* Ensure the batch stream can hold `size` more bytes: starts a batch when
 * none is active, flushes when adding `size` (+64 slack) would exceed the
 * 0x3FFF fast-path PDU limit. Returns TRUE if a flush happened. */
static BOOL update_check_flush(rdpContext* context, int size)
{
	wStream* s;
	rdpUpdate* update = context->update;
	s = update->us;

	if (!update->us)
	{
		update_begin_paint(update);
		return FALSE;
	}

	if (Stream_GetPosition(s) + size + 64 >= 0x3FFF)
	{
		update_flush(context);
		return TRUE;
	}

	return FALSE;
}

/* Record the new clipping bounds, remembering the previous ones so that
 * delta-encoded bounds can be emitted; NULL means "no bounds" (zeroed). */
static BOOL update_set_bounds(rdpContext* context, const rdpBounds* bounds)
{
	rdpUpdate* update = context->update;
	CopyMemory(&update->previousBounds, &update->currentBounds, sizeof(rdpBounds));

	if (!bounds)
		ZeroMemory(&update->currentBounds, sizeof(rdpBounds));
	else
		CopyMemory(&update->currentBounds, bounds, sizeof(rdpBounds));

	return TRUE;
}

/* TRUE when all four edges of the bounds rectangle are zero. */
static BOOL update_bounds_is_null(rdpBounds* bounds)
{
	if ((bounds->left == 0) && (bounds->top == 0) && (bounds->right == 0) &&
	    (bounds->bottom == 0))
		return TRUE;

	return FALSE;
}

/* TRUE when both bounds rectangles have identical edges. */
static BOOL update_bounds_equals(rdpBounds* bounds1, rdpBounds* bounds2)
{
	if ((bounds1->left == bounds2->left) && (bounds1->top == bounds2->top) &&
	    (bounds1->right == bounds2->right) && (bounds1->bottom == bounds2->bottom))
		return TRUE;

	return FALSE;
}

/* Compute bounds-related control/bounds flags for an order and return the
 * number of extra bytes the encoded bounds will occupy (0 when bounds are
 * absent or unchanged since the previous order). */
static int update_prepare_bounds(rdpContext* context, ORDER_INFO* orderInfo)
{
	int length = 0;
	rdpUpdate* update = context->update;
	orderInfo->boundsFlags = 0;

	if (update_bounds_is_null(&update->currentBounds))
		return 0;

	orderInfo->controlFlags |= ORDER_BOUNDS;

	if (update_bounds_equals(&update->previousBounds, &update->currentBounds))
	{
		/* same bounds as last order: signal via flag, no bytes on the wire */
		orderInfo->controlFlags |= ORDER_ZERO_BOUNDS_DELTAS;
		return 0;
	}
	else
	{
		length += 1; /* boundsFlags byte */

		/* each changed edge costs 2 bytes and sets its flag */
		if (update->previousBounds.left != update->currentBounds.left)
		{
			orderInfo->bounds.left = update->currentBounds.left;
			orderInfo->boundsFlags |= BOUND_LEFT;
			length += 2;
		}

		if (update->previousBounds.top != update->currentBounds.top)
		{
			orderInfo->bounds.top = update->currentBounds.top;
			orderInfo->boundsFlags |= BOUND_TOP;
			length += 2;
		}

		if (update->previousBounds.right != update->currentBounds.right)
		{
			orderInfo->bounds.right = update->currentBounds.right;
			orderInfo->boundsFlags |= BOUND_RIGHT;
			length += 2;
		}

		if (update->previousBounds.bottom != update->currentBounds.bottom)
		{
			orderInfo->bounds.bottom = update->currentBounds.bottom;
			orderInfo->boundsFlags |= BOUND_BOTTOM;
			length += 2;
		}
	}

	return length;
}

/* Initialize the order header for a primary drawing order and return the
 * worst-case header length (control + type + field flags + bounds). */
static int update_prepare_order_info(rdpContext* context, ORDER_INFO* orderInfo,
                                     UINT32 orderType)
{
	int length = 1; /* controlFlags byte */
	orderInfo->fieldFlags = 0;
	orderInfo->orderType = orderType;
	orderInfo->controlFlags = ORDER_STANDARD;
	orderInfo->controlFlags |= ORDER_TYPE_CHANGE;
	length += 1; /* orderType byte */
	length += PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderInfo->orderType];
	length += update_prepare_bounds(context, orderInfo);
	return length;
}

/* Back-patch the order header at `offset` (control flags, order type, field
 * flags, bounds) after the order body has been written; restores position. */
static int update_write_order_info(rdpContext* context, wStream* s, ORDER_INFO* orderInfo,
                                   size_t offset)
{
	size_t position;
	WINPR_UNUSED(context);
	position = Stream_GetPosition(s);
	Stream_SetPosition(s, offset);
	Stream_Write_UINT8(s, orderInfo->controlFlags); /* controlFlags (1 byte) */

	if (orderInfo->controlFlags & ORDER_TYPE_CHANGE)
		Stream_Write_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */

	update_write_field_flags(s, orderInfo->fieldFlags, orderInfo->controlFlags,
	                         PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderInfo->orderType]);
	update_write_bounds(s, orderInfo);
	Stream_SetPosition(s, position);
	return 0;
}

/* Serialize a Refresh Rect PDU body: count, padding, then the rectangles. */
static void update_write_refresh_rect(wStream* s, BYTE count, const RECTANGLE_16* areas)
{
	int i;
	Stream_Write_UINT8(s, count); /* numberOfAreas (1 byte) */
	Stream_Seek(s, 3);            /* pad3Octets (3 bytes) */

	for (i = 0; i < count; i++)
	{
		Stream_Write_UINT16(s, areas[i].left);   /* left (2 bytes) */
		Stream_Write_UINT16(s, areas[i].top);    /* top (2 bytes) */
		Stream_Write_UINT16(s, areas[i].right);  /* right (2 bytes) */
		Stream_Write_UINT16(s, areas[i].bottom); /* bottom (2 bytes) */
	}
}

/* Send a Refresh Rect PDU if the peer advertised support; no-op otherwise. */
static BOOL update_send_refresh_rect(rdpContext* context, BYTE count, const RECTANGLE_16* areas)
{
	rdpRdp* rdp = context->rdp;

	if (rdp->settings->RefreshRect)
	{
		wStream* s = rdp_data_pdu_init(rdp);

		if (!s)
			return FALSE;

		update_write_refresh_rect(s, count, areas);
		return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_REFRESH_RECT, rdp->mcs->userId);
	}

	return TRUE;
}

/* Serialize a Suppress Output PDU body; the rectangle is present only when
 * display updates are being re-allowed (allow > 0). */
static void update_write_suppress_output(wStream* s, BYTE allow, const RECTANGLE_16* area)
{
	Stream_Write_UINT8(s, allow); /* allowDisplayUpdates (1 byte) */
	/* Use zeros for padding (like mstsc) for compatibility with legacy servers */
	Stream_Zero(s, 3); /* pad3Octets (3 bytes) */

	if (allow > 0)
	{
		Stream_Write_UINT16(s, area->left); /* left (2 bytes) */
		Stream_Write_UINT16(s, area->top);  /* top (2 bytes)
		                                     */
		Stream_Write_UINT16(s, area->right);  /* right (2 bytes) */
		Stream_Write_UINT16(s, area->bottom); /* bottom (2 bytes) */
	}
}

/* Send a Suppress Output PDU if the peer advertised support; no-op otherwise. */
static BOOL update_send_suppress_output(rdpContext* context, BYTE allow, const RECTANGLE_16* area)
{
	rdpRdp* rdp = context->rdp;

	if (rdp->settings->SuppressOutput)
	{
		wStream* s = rdp_data_pdu_init(rdp);

		if (!s)
			return FALSE;

		update_write_suppress_output(s, allow, area);
		return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SUPPRESS_OUTPUT, rdp->mcs->userId);
	}

	return TRUE;
}

/* Copy a pre-built surface command stream into a fast-path SURFCMDS PDU
 * and send it; always releases the PDU stream. */
static BOOL update_send_surface_command(rdpContext* context, wStream* s)
{
	wStream* update;
	rdpRdp* rdp = context->rdp;
	BOOL ret;
	update = fastpath_update_pdu_init(rdp->fastpath);

	if (!update)
		return FALSE;

	if (!Stream_EnsureRemainingCapacity(update, Stream_GetPosition(s)))
	{
		ret = FALSE;
		goto out;
	}

	Stream_Write(update, Stream_Buffer(s), Stream_GetPosition(s));
	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, update, FALSE);
out:
	Stream_Release(update);
	return ret;
}

/* Send a Surface Bits command as a fast-path SURFCMDS PDU, flushing any
 * batched orders before and after so ordering with the batch is preserved. */
static BOOL update_send_surface_bits(rdpContext* context,
                                     const SURFACE_BITS_COMMAND* surfaceBitsCommand)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (!update_write_surfcmd_surface_bits(s, surfaceBitsCommand))
		goto out_fail;

	if (!fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s,
	                              surfaceBitsCommand->skipCompression))
		goto out_fail;

	update_force_flush(context);
	ret = TRUE;
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send a Surface Frame Marker (begin/end) as a fast-path SURFCMDS PDU. */
static BOOL update_send_surface_frame_marker(rdpContext* context,
                                             const SURFACE_FRAME_MARKER* surfaceFrameMarker)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (!update_write_surfcmd_frame_marker(s, surfaceFrameMarker->frameAction,
	                                       surfaceFrameMarker->frameId) ||
	    !fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s, FALSE))
		goto out_fail;

	update_force_flush(context);
	ret = TRUE;
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send surface bits optionally wrapped in BEGIN/END frame markers within a
 * single fast-path SURFCMDS PDU (first/last select the markers). */
static BOOL update_send_surface_frame_bits(rdpContext* context, const SURFACE_BITS_COMMAND* cmd,
                                           BOOL first, BOOL last, UINT32 frameId)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret = FALSE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	if (first)
	{
		if (!update_write_surfcmd_frame_marker(s, SURFACECMD_FRAMEACTION_BEGIN, frameId))
			goto out_fail;
	}

	if (!update_write_surfcmd_surface_bits(s, cmd))
		goto out_fail;

	if (last)
	{
		if (!update_write_surfcmd_frame_marker(s, SURFACECMD_FRAMEACTION_END, frameId))
			goto out_fail;
	}

	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s,
	                               cmd->skipCompression);
	update_force_flush(context);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send a Frame Acknowledge PDU if the peer advertised the capability. */
static BOOL update_send_frame_acknowledge(rdpContext* context, UINT32 frameId)
{
	rdpRdp* rdp = context->rdp;

	if (rdp->settings->ReceivedCapabilities[CAPSET_TYPE_FRAME_ACKNOWLEDGE])
	{
		wStream* s = rdp_data_pdu_init(rdp);

		if (!s)
			return FALSE;

		Stream_Write_UINT32(s, frameId);
		return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_FRAME_ACKNOWLEDGE, rdp->mcs->userId);
	}

	return TRUE;
}

/* Send a fast-path Synchronize update (2 padding bytes only). */
static BOOL update_send_synchronize(rdpContext* context)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	BOOL ret;
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return FALSE;

	Stream_Zero(s, 2); /* pad2Octets (2 bytes) */
	ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SYNCHRONIZE, s, FALSE);
	Stream_Release(s);
	return ret;
}

/* Desktop resize is signalled by re-running server activation. */
static BOOL update_send_desktop_resize(rdpContext* context)
{
	return rdp_server_reactivate(context->rdp);
}

/* Send a bitmap update as a fast-path BITMAP PDU, flushing the order batch
 * before and after to keep ordering. */
static BOOL update_send_bitmap_update(rdpContext* context, const BITMAP_UPDATE* bitmapUpdate)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;
	rdpUpdate* update = context->update;
	BOOL ret = TRUE;
	update_force_flush(context);
	s = fastpath_update_pdu_init(rdp->fastpath);

	if (!s)
		return
		    FALSE;

	if (!update_write_bitmap_update(update, s, bitmapUpdate) ||
	    !fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_BITMAP, s,
	                              bitmapUpdate->skipCompression))
	{
		ret = FALSE;
		goto out_fail;
	}

	update_force_flush(context);
out_fail:
	Stream_Release(s);
	return ret;
}

/* Send a Play Sound PDU; silently succeeds when the peer lacks the
 * sound capability. */
static BOOL update_send_play_sound(rdpContext* context, const PLAY_SOUND_UPDATE* play_sound)
{
	wStream* s;
	rdpRdp* rdp = context->rdp;

	if (!rdp->settings->ReceivedCapabilities[CAPSET_TYPE_SOUND])
	{
		return TRUE;
	}

	s = rdp_data_pdu_init(rdp);

	if (!s)
		return FALSE;

	Stream_Write_UINT32(s, play_sound->duration);
	Stream_Write_UINT32(s, play_sound->frequency);
	return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_PLAY_SOUND, rdp->mcs->userId);
}

/**
 * Primary Drawing Orders
 */

/* Queue a DstBlt order into the current batch: reserve the header, write the
 * body, then back-patch the header with the final field flags. */
static BOOL update_send_dstblt(rdpContext* context, const DSTBLT_ORDER* dstblt)
{
	wStream* s;
	UINT32 offset;
	UINT32 headerLength;
	ORDER_INFO orderInfo;
	int inf;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_DSTBLT);
	inf = update_approximate_dstblt_order(&orderInfo, dstblt);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_dstblt_order(s, &orderInfo, dstblt))
		return FALSE;

	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a PatBlt order into the current batch (same reserve/write/patch
 * pattern as update_send_dstblt). */
static BOOL update_send_patblt(rdpContext* context, PATBLT_ORDER* patblt)
{
	wStream* s;
	size_t offset;
	int headerLength;
	ORDER_INFO orderInfo;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_PATBLT);
	update_check_flush(context,
	                   headerLength + update_approximate_patblt_order(&orderInfo, patblt));
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_patblt_order(s, &orderInfo, patblt);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a ScrBlt order into the current batch. */
static BOOL update_send_scrblt(rdpContext* context, const SCRBLT_ORDER* scrblt)
{
	wStream* s;
	UINT32 offset;
	UINT32 headerLength;
	ORDER_INFO orderInfo;
	int inf;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_SCRBLT);
	inf = update_approximate_scrblt_order(&orderInfo, scrblt);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		/* NOTE(review): returns TRUE here while the sibling senders return
		 * FALSE on a missing batch stream — looks inconsistent; confirm
		 * whether this is intentional before changing. */
		return TRUE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_scrblt_order(s, &orderInfo, scrblt);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue an Opaque Rect order into the current batch. */
static BOOL update_send_opaque_rect(rdpContext* context, const OPAQUE_RECT_ORDER* opaque_rect)
{
	wStream* s;
	size_t offset;
	int headerLength;
	ORDER_INFO orderInfo;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_OPAQUE_RECT);
	update_check_flush(context, headerLength + update_approximate_opaque_rect_order(
	                                               &orderInfo, opaque_rect));
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_opaque_rect_order(s, &orderInfo, opaque_rect);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Line To order into the current batch. */
static BOOL update_send_line_to(rdpContext* context, const LINE_TO_ORDER* line_to)
{
	wStream* s;
	int offset;
	int headerLength;
	ORDER_INFO orderInfo;
	int inf;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_LINE_TO);
	inf = update_approximate_line_to_order(&orderInfo, line_to);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_line_to_order(s, &orderInfo, line_to);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a MemBlt order into the current batch. */
static BOOL update_send_memblt(rdpContext* context, MEMBLT_ORDER* memblt)
{
	wStream* s;
	size_t offset;
	int headerLength;
	ORDER_INFO orderInfo;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_MEMBLT);
	update_check_flush(context,
	                   headerLength + update_approximate_memblt_order(&orderInfo, memblt));
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_memblt_order(s, &orderInfo, memblt);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Glyph Index order into the current batch. */
static BOOL update_send_glyph_index(rdpContext* context, GLYPH_INDEX_ORDER* glyph_index)
{
	wStream* s;
	size_t offset;
	int headerLength;
	int inf;
	ORDER_INFO orderInfo;
	rdpUpdate* update = context->update;
	headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_GLYPH_INDEX);
	inf = update_approximate_glyph_index_order(&orderInfo, glyph_index);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	offset = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);
	update_write_glyph_index_order(s, &orderInfo, glyph_index);
	update_write_order_info(context, s, &orderInfo, offset);
	update->numberOrders++;
	return TRUE;
}

/*
 * Secondary Drawing Orders
 */

/* Queue a Cache Bitmap (v1) secondary order: reserve the 6-byte secondary
 * header, write the body, then back-patch control flags, orderLength,
 * extraFlags and orderType. */
static BOOL update_send_cache_bitmap(rdpContext* context, const CACHE_BITMAP_ORDER* cache_bitmap)
{
	wStream* s;
	size_t bm, em;
	BYTE orderType;
	int headerLength;
	int inf;
	UINT16 extraFlags;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	extraFlags = 0;
	headerLength = 6;
	orderType
	    = cache_bitmap->compressed ? ORDER_TYPE_CACHE_BITMAP_COMPRESSED
	                               : ORDER_TYPE_BITMAP_UNCOMPRESSED;
	inf =
	    update_approximate_cache_bitmap_order(cache_bitmap, cache_bitmap->compressed, &extraFlags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s); /* header start, patched below */

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_bitmap_order(s, cache_bitmap, cache_bitmap->compressed, &extraFlags))
		return FALSE;

	em = Stream_GetPosition(s);
	/* orderLength excludes 13 bytes of header per the secondary order format */
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, extraFlags);                      /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, orderType);                        /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Bitmap V2 secondary order (same reserve/write/patch
 * pattern as update_send_cache_bitmap). */
static BOOL update_send_cache_bitmap_v2(rdpContext* context,
                                        CACHE_BITMAP_V2_ORDER* cache_bitmap_v2)
{
	wStream* s;
	size_t bm, em;
	BYTE orderType;
	int headerLength;
	UINT16 extraFlags;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	extraFlags = 0;
	headerLength = 6;
	orderType = cache_bitmap_v2->compressed ? ORDER_TYPE_BITMAP_COMPRESSED_V2
	                                        : ORDER_TYPE_BITMAP_UNCOMPRESSED_V2;

	if (context->settings->NoBitmapCompressionHeader)
		cache_bitmap_v2->flags |= CBR2_NO_BITMAP_COMPRESSION_HDR;

	update_check_flush(context,
	                   headerLength + update_approximate_cache_bitmap_v2_order(
	                                      cache_bitmap_v2, cache_bitmap_v2->compressed,
	                                      &extraFlags));
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_bitmap_v2_order(s, cache_bitmap_v2, cache_bitmap_v2->compressed,
	                                        &extraFlags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, extraFlags);                      /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, orderType);                        /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Bitmap V3 secondary order. */
static BOOL update_send_cache_bitmap_v3(rdpContext* context,
                                        CACHE_BITMAP_V3_ORDER* cache_bitmap_v3)
{
	wStream* s;
	size_t bm, em;
	BYTE orderType;
	int headerLength;
	UINT16 extraFlags;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	extraFlags = 0;
	headerLength = 6;
	orderType = ORDER_TYPE_BITMAP_COMPRESSED_V3;
	update_check_flush(context, headerLength + update_approximate_cache_bitmap_v3_order(
	                                               cache_bitmap_v3, &extraFlags));
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_bitmap_v3_order(s, cache_bitmap_v3, &extraFlags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, extraFlags);                      /* extraFlags
	                                                            (2 bytes) */
	Stream_Write_UINT8(s, orderType); /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Color Table secondary order. */
static BOOL update_send_cache_color_table(rdpContext* context,
                                          const CACHE_COLOR_TABLE_ORDER* cache_color_table)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_color_table_order(cache_color_table, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s); /* header start, patched below */

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_color_table_order(s, cache_color_table, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	/* orderLength excludes 13 bytes of header per the secondary order format */
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags);                           /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_COLOR_TABLE);     /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Glyph secondary order. */
static BOOL update_send_cache_glyph(rdpContext* context, const CACHE_GLYPH_ORDER* cache_glyph)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_glyph_order(cache_glyph, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_glyph_order(s, cache_glyph, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags);                           /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_GLYPH);           /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Glyph V2 secondary order. */
static BOOL update_send_cache_glyph_v2(rdpContext* context,
                                       const CACHE_GLYPH_V2_ORDER* cache_glyph_v2)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_glyph_v2_order(s, cache_glyph_v2, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength);                     /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags);                           /* extraFlags (2 bytes) */
	/* NOTE(review): v2 uses the same CACHE_GLYPH order type as v1 here —
	 * presumably the v2 revision is signalled through extraFlags; confirm
	 * against [MS-RDPEGDI] before changing. */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_GLYPH); /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}

/* Queue a Cache Brush secondary order. */
static BOOL update_send_cache_brush(rdpContext* context, const CACHE_BRUSH_ORDER* cache_brush)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	inf = update_approximate_cache_brush_order(cache_brush, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	Stream_Seek(s, headerLength);

	if (!update_write_cache_brush_order(s, cache_brush, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, flags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, ORDER_TYPE_CACHE_BRUSH); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } /** * Alternate Secondary Drawing Orders */ static BOOL update_send_create_offscreen_bitmap_order( rdpContext* context, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { wStream* s; size_t bm, em, inf; BYTE orderType; BYTE controlFlags; int headerLength; rdpUpdate* update = context->update; headerLength = 1; orderType = ORDER_TYPE_CREATE_OFFSCREEN_BITMAP; controlFlags = ORDER_SECONDARY | (orderType << 2); inf = update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_create_offscreen_bitmap_order(s, create_offscreen_bitmap)) return FALSE; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); Stream_Write_UINT8(s, controlFlags); /* controlFlags (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_switch_surface_order(rdpContext* context, const SWITCH_SURFACE_ORDER* switch_surface) { wStream* s; size_t bm, em, inf; BYTE orderType; BYTE controlFlags; int headerLength; rdpUpdate* update; if (!context || !switch_surface || !context->update) return FALSE; update = context->update; headerLength = 1; orderType = ORDER_TYPE_SWITCH_SURFACE; controlFlags = ORDER_SECONDARY | (orderType << 2); inf = update_approximate_switch_surface_order(switch_surface); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return 
FALSE; Stream_Seek(s, headerLength); if (!update_write_switch_surface_order(s, switch_surface)) return FALSE; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); Stream_Write_UINT8(s, controlFlags); /* controlFlags (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_pointer_system(rdpContext* context, const POINTER_SYSTEM_UPDATE* pointer_system) { wStream* s; BYTE updateCode; rdpRdp* rdp = context->rdp; BOOL ret; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (pointer_system->type == SYSPTR_NULL) updateCode = FASTPATH_UPDATETYPE_PTR_NULL; else updateCode = FASTPATH_UPDATETYPE_PTR_DEFAULT; ret = fastpath_send_update_pdu(rdp->fastpath, updateCode, s, FALSE); Stream_Release(s); return ret; } static BOOL update_send_pointer_position(rdpContext* context, const POINTER_POSITION_UPDATE* pointerPosition) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, 16)) goto out_fail; Stream_Write_UINT16(s, pointerPosition->xPos); /* xPos (2 bytes) */ Stream_Write_UINT16(s, pointerPosition->yPos); /* yPos (2 bytes) */ ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_PTR_POSITION, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_write_pointer_color(wStream* s, const POINTER_COLOR_UPDATE* pointer_color) { if (!Stream_EnsureRemainingCapacity(s, 32 + pointer_color->lengthAndMask + pointer_color->lengthXorMask)) return FALSE; Stream_Write_UINT16(s, pointer_color->cacheIndex); Stream_Write_UINT16(s, pointer_color->xPos); Stream_Write_UINT16(s, pointer_color->yPos); Stream_Write_UINT16(s, pointer_color->width); Stream_Write_UINT16(s, pointer_color->height); Stream_Write_UINT16(s, pointer_color->lengthAndMask); Stream_Write_UINT16(s, pointer_color->lengthXorMask); if (pointer_color->lengthXorMask > 0) Stream_Write(s, pointer_color->xorMaskData, 
pointer_color->lengthXorMask); if (pointer_color->lengthAndMask > 0) Stream_Write(s, pointer_color->andMaskData, pointer_color->lengthAndMask); Stream_Write_UINT8(s, 0); /* pad (1 byte) */ return TRUE; } static BOOL update_send_pointer_color(rdpContext* context, const POINTER_COLOR_UPDATE* pointer_color) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!update_write_pointer_color(s, pointer_color)) goto out_fail; ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_COLOR, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_write_pointer_large(wStream* s, const POINTER_LARGE_UPDATE* pointer) { if (!Stream_EnsureRemainingCapacity(s, 32 + pointer->lengthAndMask + pointer->lengthXorMask)) return FALSE; Stream_Write_UINT16(s, pointer->xorBpp); Stream_Write_UINT16(s, pointer->cacheIndex); Stream_Write_UINT16(s, pointer->hotSpotX); Stream_Write_UINT16(s, pointer->hotSpotY); Stream_Write_UINT16(s, pointer->width); Stream_Write_UINT16(s, pointer->height); Stream_Write_UINT32(s, pointer->lengthAndMask); Stream_Write_UINT32(s, pointer->lengthXorMask); Stream_Write(s, pointer->xorMaskData, pointer->lengthXorMask); Stream_Write(s, pointer->andMaskData, pointer->lengthAndMask); Stream_Write_UINT8(s, 0); /* pad (1 byte) */ return TRUE; } static BOOL update_send_pointer_large(rdpContext* context, const POINTER_LARGE_UPDATE* pointer) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!update_write_pointer_large(s, pointer)) goto out_fail; ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_LARGE_POINTER, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_send_pointer_new(rdpContext* context, const POINTER_NEW_UPDATE* pointer_new) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; 
if (!Stream_EnsureRemainingCapacity(s, 16)) goto out_fail; Stream_Write_UINT16(s, pointer_new->xorBpp); /* xorBpp (2 bytes) */ update_write_pointer_color(s, &pointer_new->colorPtrAttr); ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_POINTER, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_send_pointer_cached(rdpContext* context, const POINTER_CACHED_UPDATE* pointer_cached) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; Stream_Write_UINT16(s, pointer_cached->cacheIndex); /* cacheIndex (2 bytes) */ ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_CACHED, s, FALSE); Stream_Release(s); return ret; } BOOL update_read_refresh_rect(rdpUpdate* update, wStream* s) { int index; BYTE numberOfAreas; RECTANGLE_16* areas; if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT8(s, numberOfAreas); Stream_Seek(s, 3); /* pad3Octects */ if (Stream_GetRemainingLength(s) < ((size_t)numberOfAreas * 4 * 2)) return FALSE; areas = (RECTANGLE_16*)calloc(numberOfAreas, sizeof(RECTANGLE_16)); if (!areas) return FALSE; for (index = 0; index < numberOfAreas; index++) { Stream_Read_UINT16(s, areas[index].left); Stream_Read_UINT16(s, areas[index].top); Stream_Read_UINT16(s, areas[index].right); Stream_Read_UINT16(s, areas[index].bottom); } if (update->context->settings->RefreshRect) IFCALL(update->RefreshRect, update->context, numberOfAreas, areas); else WLog_Print(update->log, WLOG_WARN, "ignoring refresh rect request from client"); free(areas); return TRUE; } BOOL update_read_suppress_output(rdpUpdate* update, wStream* s) { RECTANGLE_16* prect = NULL; RECTANGLE_16 rect = { 0 }; BYTE allowDisplayUpdates; if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT8(s, allowDisplayUpdates); Stream_Seek(s, 3); /* pad3Octects */ if (allowDisplayUpdates > 0) { if (Stream_GetRemainingLength(s) < sizeof(RECTANGLE_16)) return FALSE; 
Stream_Read_UINT16(s, rect.left); Stream_Read_UINT16(s, rect.top); Stream_Read_UINT16(s, rect.right); Stream_Read_UINT16(s, rect.bottom); prect = &rect; } if (update->context->settings->SuppressOutput) IFCALL(update->SuppressOutput, update->context, allowDisplayUpdates, prect); else WLog_Print(update->log, WLOG_WARN, "ignoring suppress output request from client"); return TRUE; } static BOOL update_send_set_keyboard_indicators(rdpContext* context, UINT16 led_flags) { wStream* s; rdpRdp* rdp = context->rdp; s = rdp_data_pdu_init(rdp); if (!s) return FALSE; Stream_Write_UINT16(s, 0); /* unitId should be 0 according to MS-RDPBCGR 2.2.8.2.1.1 */ Stream_Write_UINT16(s, led_flags); /* ledFlags (2 bytes) */ return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SET_KEYBOARD_INDICATORS, rdp->mcs->userId); } static BOOL update_send_set_keyboard_ime_status(rdpContext* context, UINT16 imeId, UINT32 imeState, UINT32 imeConvMode) { wStream* s; rdpRdp* rdp = context->rdp; s = rdp_data_pdu_init(rdp); if (!s) return FALSE; /* unitId should be 0 according to MS-RDPBCGR 2.2.8.2.2.1 */ Stream_Write_UINT16(s, imeId); Stream_Write_UINT32(s, imeState); Stream_Write_UINT32(s, imeConvMode); return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SET_KEYBOARD_IME_STATUS, rdp->mcs->userId); } static UINT16 update_calculate_new_or_existing_window(const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { UINT16 orderSize = 11; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OWNER) != 0) orderSize += 4; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_STYLE) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_SHOW) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TITLE) != 0) orderSize += 2 + stateOrder->titleInfo.length; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_OFFSET) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_SIZE) != 0) orderSize += 8; if ((orderInfo->fieldFlags & 
WINDOW_ORDER_FIELD_RESIZE_MARGIN_X) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_Y) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RP_CONTENT) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ROOT_PARENT) != 0) orderSize += 4; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_OFFSET) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_CLIENT_DELTA) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_SIZE) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_RECTS) != 0) orderSize += 2 + stateOrder->numWindowRects * sizeof(RECTANGLE_16); if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VIS_OFFSET) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VISIBILITY) != 0) orderSize += 2 + stateOrder->numVisibilityRects * sizeof(RECTANGLE_16); if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OVERLAY_DESCRIPTION) != 0) orderSize += 2 + stateOrder->OverlayDescription.length; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TASKBAR_BUTTON) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ENFORCE_SERVER_ZORDER) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_STATE) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_EDGE) != 0) orderSize += 1; return orderSize; } static BOOL update_send_new_or_existing_window(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = update_calculate_new_or_existing_window(orderInfo, stateOrder); update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize 
(2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OWNER) != 0) Stream_Write_UINT32(s, stateOrder->ownerWindowId); if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_STYLE) != 0) { Stream_Write_UINT32(s, stateOrder->style); Stream_Write_UINT32(s, stateOrder->extendedStyle); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_SHOW) != 0) { Stream_Write_UINT8(s, stateOrder->showState); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TITLE) != 0) { Stream_Write_UINT16(s, stateOrder->titleInfo.length); Stream_Write(s, stateOrder->titleInfo.string, stateOrder->titleInfo.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_OFFSET) != 0) { Stream_Write_INT32(s, stateOrder->clientOffsetX); Stream_Write_INT32(s, stateOrder->clientOffsetY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_SIZE) != 0) { Stream_Write_UINT32(s, stateOrder->clientAreaWidth); Stream_Write_UINT32(s, stateOrder->clientAreaHeight); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_X) != 0) { Stream_Write_UINT32(s, stateOrder->resizeMarginLeft); Stream_Write_UINT32(s, stateOrder->resizeMarginRight); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_Y) != 0) { Stream_Write_UINT32(s, stateOrder->resizeMarginTop); Stream_Write_UINT32(s, stateOrder->resizeMarginBottom); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RP_CONTENT) != 0) { Stream_Write_UINT8(s, stateOrder->RPContent); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ROOT_PARENT) != 0) { Stream_Write_UINT32(s, stateOrder->rootParentHandle); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_OFFSET) != 0) { Stream_Write_INT32(s, stateOrder->windowOffsetX); Stream_Write_INT32(s, stateOrder->windowOffsetY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_CLIENT_DELTA) != 0) { Stream_Write_INT32(s, 
stateOrder->windowClientDeltaX); Stream_Write_INT32(s, stateOrder->windowClientDeltaY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_SIZE) != 0) { Stream_Write_UINT32(s, stateOrder->windowWidth); Stream_Write_UINT32(s, stateOrder->windowHeight); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_RECTS) != 0) { Stream_Write_UINT16(s, stateOrder->numWindowRects); Stream_Write(s, stateOrder->windowRects, stateOrder->numWindowRects * sizeof(RECTANGLE_16)); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VIS_OFFSET) != 0) { Stream_Write_UINT32(s, stateOrder->visibleOffsetX); Stream_Write_UINT32(s, stateOrder->visibleOffsetY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VISIBILITY) != 0) { Stream_Write_UINT16(s, stateOrder->numVisibilityRects); Stream_Write(s, stateOrder->visibilityRects, stateOrder->numVisibilityRects * sizeof(RECTANGLE_16)); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OVERLAY_DESCRIPTION) != 0) { Stream_Write_UINT16(s, stateOrder->OverlayDescription.length); Stream_Write(s, stateOrder->OverlayDescription.string, stateOrder->OverlayDescription.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TASKBAR_BUTTON) != 0) { Stream_Write_UINT8(s, stateOrder->TaskbarButton); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ENFORCE_SERVER_ZORDER) != 0) { Stream_Write_UINT8(s, stateOrder->EnforceServerZOrder); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_STATE) != 0) { Stream_Write_UINT8(s, stateOrder->AppBarState); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_EDGE) != 0) { Stream_Write_UINT8(s, stateOrder->AppBarEdge); } update->numberOrders++; return TRUE; } static BOOL update_send_window_create(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { return update_send_new_or_existing_window(context, orderInfo, stateOrder); } static BOOL update_send_window_update(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { 
return update_send_new_or_existing_window(context, orderInfo, stateOrder); } static UINT16 update_calculate_window_icon_order(const WINDOW_ORDER_INFO* orderInfo, const WINDOW_ICON_ORDER* iconOrder) { UINT16 orderSize = 23; ICON_INFO* iconInfo = iconOrder->iconInfo; orderSize += iconInfo->cbBitsColor + iconInfo->cbBitsMask; if (iconInfo->bpp <= 8) orderSize += 2 + iconInfo->cbColorTable; return orderSize; } static BOOL update_send_window_icon(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_ICON_ORDER* iconOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); ICON_INFO* iconInfo = iconOrder->iconInfo; UINT16 orderSize = update_calculate_window_icon_order(orderInfo, iconOrder); update_check_flush(context, orderSize); s = update->us; if (!s || !iconInfo) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ /* Write body */ Stream_Write_UINT16(s, iconInfo->cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, iconInfo->cacheId); /* CacheId (1 byte) */ Stream_Write_UINT8(s, iconInfo->bpp); /* Bpp (1 byte) */ Stream_Write_UINT16(s, iconInfo->width); /* Width (2 bytes) */ Stream_Write_UINT16(s, iconInfo->height); /* Height (2 bytes) */ if (iconInfo->bpp <= 8) { Stream_Write_UINT16(s, iconInfo->cbColorTable); /* CbColorTable (2 bytes) */ } Stream_Write_UINT16(s, iconInfo->cbBitsMask); /* CbBitsMask (2 bytes) */ Stream_Write_UINT16(s, iconInfo->cbBitsColor); /* CbBitsColor (2 bytes) */ Stream_Write(s, iconInfo->bitsMask, iconInfo->cbBitsMask); /* BitsMask (variable) */ if (iconInfo->bpp <= 8) { Stream_Write(s, iconInfo->colorTable, iconInfo->cbColorTable); /* ColorTable 
(variable) */ } Stream_Write(s, iconInfo->bitsColor, iconInfo->cbBitsColor); /* BitsColor (variable) */ update->numberOrders++; return TRUE; } static BOOL update_send_window_cached_icon(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_CACHED_ICON_ORDER* cachedIconOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 14; CACHED_ICON_INFO cachedIcon = cachedIconOrder->cachedIcon; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ /* Write body */ Stream_Write_UINT16(s, cachedIcon.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, cachedIcon.cacheId); /* CacheId (1 byte) */ update->numberOrders++; return TRUE; } static BOOL update_send_window_delete(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 11; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ update->numberOrders++; return TRUE; } static UINT16 update_calculate_new_or_existing_notification_icons_order( const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { UINT16 orderSize = 15; 
if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_VERSION) != 0) orderSize += 4; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_TIP) != 0) { orderSize += 2 + iconStateOrder->toolTip.length; } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_INFO_TIP) != 0) { NOTIFY_ICON_INFOTIP infoTip = iconStateOrder->infoTip; orderSize += 12 + infoTip.text.length + infoTip.title.length; } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_STATE) != 0) { orderSize += 4; } if ((orderInfo->fieldFlags & WINDOW_ORDER_ICON) != 0) { ICON_INFO iconInfo = iconStateOrder->icon; orderSize += 12; if (iconInfo.bpp <= 8) orderSize += 2 + iconInfo.cbColorTable; orderSize += iconInfo.cbBitsMask + iconInfo.cbBitsColor; } else if ((orderInfo->fieldFlags & WINDOW_ORDER_CACHED_ICON) != 0) { orderSize += 3; } return orderSize; } static BOOL update_send_new_or_existing_notification_icons(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); BOOL versionFieldPresent = FALSE; UINT16 orderSize = update_calculate_new_or_existing_notification_icons_order(orderInfo, iconStateOrder); update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_INT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ Stream_Write_UINT32(s, orderInfo->notifyIconId); /* NotifyIconId (4 bytes) */ /* Write body */ if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_VERSION) != 0) { versionFieldPresent = TRUE; Stream_Write_UINT32(s, iconStateOrder->version); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_TIP) != 0) { 
Stream_Write_UINT16(s, iconStateOrder->toolTip.length); Stream_Write(s, iconStateOrder->toolTip.string, iconStateOrder->toolTip.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_INFO_TIP) != 0) { NOTIFY_ICON_INFOTIP infoTip = iconStateOrder->infoTip; /* info tip should not be sent when version is 0 */ if (versionFieldPresent && iconStateOrder->version == 0) return FALSE; Stream_Write_UINT32(s, infoTip.timeout); /* Timeout (4 bytes) */ Stream_Write_UINT32(s, infoTip.flags); /* InfoFlags (4 bytes) */ Stream_Write_UINT16(s, infoTip.text.length); /* InfoTipText (variable) */ Stream_Write(s, infoTip.text.string, infoTip.text.length); Stream_Write_UINT16(s, infoTip.title.length); /* Title (variable) */ Stream_Write(s, infoTip.title.string, infoTip.title.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_STATE) != 0) { /* notify state should not be sent when version is 0 */ if (versionFieldPresent && iconStateOrder->version == 0) return FALSE; Stream_Write_UINT32(s, iconStateOrder->state); } if ((orderInfo->fieldFlags & WINDOW_ORDER_ICON) != 0) { ICON_INFO iconInfo = iconStateOrder->icon; Stream_Write_UINT16(s, iconInfo.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, iconInfo.cacheId); /* CacheId (1 byte) */ Stream_Write_UINT8(s, iconInfo.bpp); /* Bpp (1 byte) */ Stream_Write_UINT16(s, iconInfo.width); /* Width (2 bytes) */ Stream_Write_UINT16(s, iconInfo.height); /* Height (2 bytes) */ if (iconInfo.bpp <= 8) { Stream_Write_UINT16(s, iconInfo.cbColorTable); /* CbColorTable (2 bytes) */ } Stream_Write_UINT16(s, iconInfo.cbBitsMask); /* CbBitsMask (2 bytes) */ Stream_Write_UINT16(s, iconInfo.cbBitsColor); /* CbBitsColor (2 bytes) */ Stream_Write(s, iconInfo.bitsMask, iconInfo.cbBitsMask); /* BitsMask (variable) */ orderSize += iconInfo.cbBitsMask; if (iconInfo.bpp <= 8) { Stream_Write(s, iconInfo.colorTable, iconInfo.cbColorTable); /* ColorTable (variable) */ } Stream_Write(s, iconInfo.bitsColor, iconInfo.cbBitsColor); /* 
BitsColor (variable) */ } else if ((orderInfo->fieldFlags & WINDOW_ORDER_CACHED_ICON) != 0) { CACHED_ICON_INFO cachedIcon = iconStateOrder->cachedIcon; Stream_Write_UINT16(s, cachedIcon.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, cachedIcon.cacheId); /* CacheId (1 byte) */ } update->numberOrders++; return TRUE; } static BOOL update_send_notify_icon_create(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { return update_send_new_or_existing_notification_icons(context, orderInfo, iconStateOrder); } static BOOL update_send_notify_icon_update(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { return update_send_new_or_existing_notification_icons(context, orderInfo, iconStateOrder); } static BOOL update_send_notify_icon_delete(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 15; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ Stream_Write_UINT32(s, orderInfo->notifyIconId); /* NotifyIconId (4 bytes) */ update->numberOrders++; return TRUE; } static UINT16 update_calculate_monitored_desktop(const WINDOW_ORDER_INFO* orderInfo, const MONITORED_DESKTOP_ORDER* monitoredDesktop) { UINT16 orderSize = 7; if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ACTIVE_WND) { orderSize += 4; } if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ZORDER) { orderSize += 1 + (4 * monitoredDesktop->numWindowIds); } return orderSize; } static BOOL update_send_monitored_desktop(rdpContext* context, const 
WINDOW_ORDER_INFO* orderInfo, const MONITORED_DESKTOP_ORDER* monitoredDesktop) { UINT32 i; wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = update_calculate_monitored_desktop(orderInfo, monitoredDesktop); update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ACTIVE_WND) { Stream_Write_UINT32(s, monitoredDesktop->activeWindowId); /* activeWindowId (4 bytes) */ } if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ZORDER) { Stream_Write_UINT8(s, monitoredDesktop->numWindowIds); /* numWindowIds (1 byte) */ /* windowIds */ for (i = 0; i < monitoredDesktop->numWindowIds; i++) { Stream_Write_UINT32(s, monitoredDesktop->windowIds[i]); } } update->numberOrders++; return TRUE; } static BOOL update_send_non_monitored_desktop(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 7; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ update->numberOrders++; return TRUE; } void update_register_server_callbacks(rdpUpdate* update) { update->BeginPaint = _update_begin_paint; update->EndPaint = _update_end_paint; update->SetBounds = update_set_bounds; update->Synchronize = update_send_synchronize; update->DesktopResize = update_send_desktop_resize; update->BitmapUpdate = update_send_bitmap_update; update->SurfaceBits = update_send_surface_bits; 
update->SurfaceFrameMarker = update_send_surface_frame_marker; update->SurfaceCommand = update_send_surface_command; update->SurfaceFrameBits = update_send_surface_frame_bits; update->PlaySound = update_send_play_sound; update->SetKeyboardIndicators = update_send_set_keyboard_indicators; update->SetKeyboardImeStatus = update_send_set_keyboard_ime_status; update->SaveSessionInfo = rdp_send_save_session_info; update->ServerStatusInfo = rdp_send_server_status_info; update->primary->DstBlt = update_send_dstblt; update->primary->PatBlt = update_send_patblt; update->primary->ScrBlt = update_send_scrblt; update->primary->OpaqueRect = update_send_opaque_rect; update->primary->LineTo = update_send_line_to; update->primary->MemBlt = update_send_memblt; update->primary->GlyphIndex = update_send_glyph_index; update->secondary->CacheBitmap = update_send_cache_bitmap; update->secondary->CacheBitmapV2 = update_send_cache_bitmap_v2; update->secondary->CacheBitmapV3 = update_send_cache_bitmap_v3; update->secondary->CacheColorTable = update_send_cache_color_table; update->secondary->CacheGlyph = update_send_cache_glyph; update->secondary->CacheGlyphV2 = update_send_cache_glyph_v2; update->secondary->CacheBrush = update_send_cache_brush; update->altsec->CreateOffscreenBitmap = update_send_create_offscreen_bitmap_order; update->altsec->SwitchSurface = update_send_switch_surface_order; update->pointer->PointerSystem = update_send_pointer_system; update->pointer->PointerPosition = update_send_pointer_position; update->pointer->PointerColor = update_send_pointer_color; update->pointer->PointerLarge = update_send_pointer_large; update->pointer->PointerNew = update_send_pointer_new; update->pointer->PointerCached = update_send_pointer_cached; update->window->WindowCreate = update_send_window_create; update->window->WindowUpdate = update_send_window_update; update->window->WindowIcon = update_send_window_icon; update->window->WindowCachedIcon = update_send_window_cached_icon; 
update->window->WindowDelete = update_send_window_delete; update->window->NotifyIconCreate = update_send_notify_icon_create; update->window->NotifyIconUpdate = update_send_notify_icon_update; update->window->NotifyIconDelete = update_send_notify_icon_delete; update->window->MonitoredDesktop = update_send_monitored_desktop; update->window->NonMonitoredDesktop = update_send_non_monitored_desktop; } void update_register_client_callbacks(rdpUpdate* update) { update->RefreshRect = update_send_refresh_rect; update->SuppressOutput = update_send_suppress_output; update->SurfaceFrameAcknowledge = update_send_frame_acknowledge; } int update_process_messages(rdpUpdate* update) { return update_message_queue_process_pending_messages(update); } static void update_free_queued_message(void* obj) { wMessage* msg = (wMessage*)obj; update_message_queue_free_message(msg); } void update_free_window_state(WINDOW_STATE_ORDER* window_state) { if (!window_state) return; free(window_state->OverlayDescription.string); free(window_state->titleInfo.string); free(window_state->windowRects); free(window_state->visibilityRects); memset(window_state, 0, sizeof(WINDOW_STATE_ORDER)); } rdpUpdate* update_new(rdpRdp* rdp) { const wObject cb = { NULL, NULL, NULL, update_free_queued_message, NULL }; rdpUpdate* update; OFFSCREEN_DELETE_LIST* deleteList; WINPR_UNUSED(rdp); update = (rdpUpdate*)calloc(1, sizeof(rdpUpdate)); if (!update) return NULL; update->log = WLog_Get("com.freerdp.core.update"); InitializeCriticalSection(&(update->mux)); update->pointer = (rdpPointerUpdate*)calloc(1, sizeof(rdpPointerUpdate)); if (!update->pointer) goto fail; update->primary = (rdpPrimaryUpdate*)calloc(1, sizeof(rdpPrimaryUpdate)); if (!update->primary) goto fail; update->secondary = (rdpSecondaryUpdate*)calloc(1, sizeof(rdpSecondaryUpdate)); if (!update->secondary) goto fail; update->altsec = (rdpAltSecUpdate*)calloc(1, sizeof(rdpAltSecUpdate)); if (!update->altsec) goto fail; update->window = 
(rdpWindowUpdate*)calloc(1, sizeof(rdpWindowUpdate)); if (!update->window) goto fail; deleteList = &(update->altsec->create_offscreen_bitmap.deleteList); deleteList->sIndices = 64; deleteList->indices = calloc(deleteList->sIndices, 2); if (!deleteList->indices) goto fail; deleteList->cIndices = 0; update->SuppressOutput = update_send_suppress_output; update->initialState = TRUE; update->autoCalculateBitmapData = TRUE; update->queue = MessageQueue_New(&cb); if (!update->queue) goto fail; return update; fail: update_free(update); return NULL; } void update_free(rdpUpdate* update) { if (update != NULL) { OFFSCREEN_DELETE_LIST* deleteList = &(update->altsec->create_offscreen_bitmap.deleteList); if (deleteList) free(deleteList->indices); free(update->pointer); if (update->primary) { free(update->primary->polyline.points); free(update->primary->polygon_sc.points); free(update->primary->fast_glyph.glyphData.aj); free(update->primary); } free(update->secondary); free(update->altsec); if (update->window) { free(update->window); } MessageQueue_Free(update->queue); DeleteCriticalSection(&update->mux); free(update); } } BOOL update_begin_paint(rdpUpdate* update) { if (!update) return FALSE; EnterCriticalSection(&update->mux); if (!update->BeginPaint) return TRUE; return update->BeginPaint(update->context); } BOOL update_end_paint(rdpUpdate* update) { BOOL rc = FALSE; if (!update) return FALSE; if (update->EndPaint) rc = update->EndPaint(update->context); LeaveCriticalSection(&update->mux); return rc; }
static void update_read_synchronize(rdpUpdate* update, wStream* s) { WINPR_UNUSED(update); Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ /** * The Synchronize Update is an artifact from the * T.128 protocol and should be ignored. */ }
static BOOL update_read_synchronize(rdpUpdate* update, wStream* s) { WINPR_UNUSED(update); return Stream_SafeSeek(s, 2); /* pad2Octets (2 bytes) */ /** * The Synchronize Update is an artifact from the * T.128 protocol and should be ignored. */ }
{'added': [(290, 'static BOOL update_read_synchronize(rdpUpdate* update, wStream* s)'), (293, '\treturn Stream_SafeSeek(s, 2); /* pad2Octets (2 bytes) */'), (294, '\t /**'), (295, '\t * The Synchronize Update is an artifact from the'), (296, '\t * T.128 protocol and should be ignored.'), (297, '\t */'), (810, '\t\t\tif (!update_read_synchronize(update, s))'), (811, '\t\t\t\tgoto fail;')], 'deleted': [(290, 'static void update_read_synchronize(rdpUpdate* update, wStream* s)'), (293, '\tStream_Seek_UINT16(s); /* pad2Octets (2 bytes) */'), (294, '\t /**'), (295, '\t * The Synchronize Update is an artifact from the'), (296, '\t * T.128 protocol and should be ignored.'), (297, '\t */'), (810, '\t\t\tupdate_read_synchronize(update, s);')]}
8
7
2,319
14,272
5
22
1
https://github.com/FreeRDP/FreeRDP
CVE-2020-11046
CWE-119
1,224
ac3_parser.c
C
avpriv_ac3_parse_header
/* * AC-3 parser * Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2003 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include "libavutil/channel_layout.h" #include "parser.h" #include "ac3_parser.h" #include "ac3_parser_internal.h" #include "aac_ac3_parser.h" #include "get_bits.h" #define AC3_HEADER_SIZE 7 #if CONFIG_AC3_PARSER static const uint8_t eac3_blocks[4] = { 1, 2, 3, 6 }; /** * Table for center mix levels * reference: Section 5.4.2.4 cmixlev */ static const uint8_t center_levels[4] = { 4, 5, 6, 5 }; /** * Table for surround mix levels * reference: Section 5.4.2.5 surmixlev */ static const uint8_t surround_levels[4] = { 4, 6, 7, 6 }; int ff_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr) { int frame_size_code; memset(hdr, 0, sizeof(*hdr)); hdr->sync_word = get_bits(gbc, 16); if(hdr->sync_word != 0x0B77) return AAC_AC3_PARSE_ERROR_SYNC; /* read ahead to bsid to distinguish between AC-3 and E-AC-3 */ hdr->bitstream_id = show_bits_long(gbc, 29) & 0x1F; if(hdr->bitstream_id > 16) return AAC_AC3_PARSE_ERROR_BSID; hdr->num_blocks = 6; /* set default mix levels */ hdr->center_mix_level = 5; // -4.5dB hdr->surround_mix_level = 6; // -6.0dB /* set default dolby surround mode */ hdr->dolby_surround_mode = 
AC3_DSURMOD_NOTINDICATED; if(hdr->bitstream_id <= 10) { /* Normal AC-3 */ hdr->crc1 = get_bits(gbc, 16); hdr->sr_code = get_bits(gbc, 2); if(hdr->sr_code == 3) return AAC_AC3_PARSE_ERROR_SAMPLE_RATE; frame_size_code = get_bits(gbc, 6); if(frame_size_code > 37) return AAC_AC3_PARSE_ERROR_FRAME_SIZE; skip_bits(gbc, 5); // skip bsid, already got it hdr->bitstream_mode = get_bits(gbc, 3); hdr->channel_mode = get_bits(gbc, 3); if(hdr->channel_mode == AC3_CHMODE_STEREO) { hdr->dolby_surround_mode = get_bits(gbc, 2); } else { if((hdr->channel_mode & 1) && hdr->channel_mode != AC3_CHMODE_MONO) hdr-> center_mix_level = center_levels[get_bits(gbc, 2)]; if(hdr->channel_mode & 4) hdr->surround_mix_level = surround_levels[get_bits(gbc, 2)]; } hdr->lfe_on = get_bits1(gbc); hdr->sr_shift = FFMAX(hdr->bitstream_id, 8) - 8; hdr->sample_rate = ff_ac3_sample_rate_tab[hdr->sr_code] >> hdr->sr_shift; hdr->bit_rate = (ff_ac3_bitrate_tab[frame_size_code>>1] * 1000) >> hdr->sr_shift; hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on; hdr->frame_size = ff_ac3_frame_size_tab[frame_size_code][hdr->sr_code] * 2; hdr->frame_type = EAC3_FRAME_TYPE_AC3_CONVERT; //EAC3_FRAME_TYPE_INDEPENDENT; hdr->substreamid = 0; } else { /* Enhanced AC-3 */ hdr->crc1 = 0; hdr->frame_type = get_bits(gbc, 2); if(hdr->frame_type == EAC3_FRAME_TYPE_RESERVED) return AAC_AC3_PARSE_ERROR_FRAME_TYPE; hdr->substreamid = get_bits(gbc, 3); hdr->frame_size = (get_bits(gbc, 11) + 1) << 1; if(hdr->frame_size < AC3_HEADER_SIZE) return AAC_AC3_PARSE_ERROR_FRAME_SIZE; hdr->sr_code = get_bits(gbc, 2); if (hdr->sr_code == 3) { int sr_code2 = get_bits(gbc, 2); if(sr_code2 == 3) return AAC_AC3_PARSE_ERROR_SAMPLE_RATE; hdr->sample_rate = ff_ac3_sample_rate_tab[sr_code2] / 2; hdr->sr_shift = 1; } else { hdr->num_blocks = eac3_blocks[get_bits(gbc, 2)]; hdr->sample_rate = ff_ac3_sample_rate_tab[hdr->sr_code]; hdr->sr_shift = 0; } hdr->channel_mode = get_bits(gbc, 3); hdr->lfe_on = get_bits1(gbc); hdr->bit_rate = 8LL 
* hdr->frame_size * hdr->sample_rate / (hdr->num_blocks * 256); hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on; } hdr->channel_layout = avpriv_ac3_channel_layout_tab[hdr->channel_mode]; if (hdr->lfe_on) hdr->channel_layout |= AV_CH_LOW_FREQUENCY; return 0; } // TODO: Better way to pass AC3HeaderInfo fields to mov muxer. int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf, size_t size) { GetBitContext gb; AC3HeaderInfo *hdr; int err; if (!*phdr) *phdr = av_mallocz(sizeof(AC3HeaderInfo)); if (!*phdr) return AVERROR(ENOMEM); hdr = *phdr; init_get_bits8(&gb, buf, size); err = ff_ac3_parse_header(&gb, hdr); if (err < 0) return AVERROR_INVALIDDATA; return get_bits_count(&gb); } int av_ac3_parse_header(const uint8_t *buf, size_t size, uint8_t *bitstream_id, uint16_t *frame_size) { GetBitContext gb; AC3HeaderInfo hdr; int err; init_get_bits8(&gb, buf, size); err = ff_ac3_parse_header(&gb, &hdr); if (err < 0) return AVERROR_INVALIDDATA; *bitstream_id = hdr.bitstream_id; *frame_size = hdr.frame_size; return 0; } static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info, int *need_next_header, int *new_frame_start) { int err; union { uint64_t u64; uint8_t u8[8 + AV_INPUT_BUFFER_PADDING_SIZE]; } tmp = { av_be2ne64(state) }; AC3HeaderInfo hdr; GetBitContext gbc; init_get_bits(&gbc, tmp.u8+8-AC3_HEADER_SIZE, 54); err = ff_ac3_parse_header(&gbc, &hdr); if(err < 0) return 0; hdr_info->sample_rate = hdr.sample_rate; hdr_info->bit_rate = hdr.bit_rate; hdr_info->channels = hdr.channels; hdr_info->channel_layout = hdr.channel_layout; hdr_info->samples = hdr.num_blocks * 256; hdr_info->service_type = hdr.bitstream_mode; if (hdr.bitstream_mode == 0x7 && hdr.channels > 1) hdr_info->service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE; if(hdr.bitstream_id>10) hdr_info->codec_id = AV_CODEC_ID_EAC3; else if (hdr_info->codec_id == AV_CODEC_ID_NONE) hdr_info->codec_id = AV_CODEC_ID_AC3; *new_frame_start = (hdr.frame_type != EAC3_FRAME_TYPE_DEPENDENT); 
*need_next_header = *new_frame_start || (hdr.frame_type != EAC3_FRAME_TYPE_AC3_CONVERT); return hdr.frame_size; } static av_cold int ac3_parse_init(AVCodecParserContext *s1) { AACAC3ParseContext *s = s1->priv_data; s->header_size = AC3_HEADER_SIZE; s->sync = ac3_sync; return 0; } AVCodecParser ff_ac3_parser = { .codec_ids = { AV_CODEC_ID_AC3, AV_CODEC_ID_EAC3 }, .priv_data_size = sizeof(AACAC3ParseContext), .parser_init = ac3_parse_init, .parser_parse = ff_aac_ac3_parse, .parser_close = ff_parse_close, }; #else int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf, size_t size) { return AVERROR(ENOSYS); } int av_ac3_parse_header(const uint8_t *buf, size_t size, uint8_t *bitstream_id, uint16_t *frame_size) { return AVERROR(ENOSYS); } #endif
/* * AC-3 parser * Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2003 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include "libavutil/channel_layout.h" #include "parser.h" #include "ac3_parser.h" #include "ac3_parser_internal.h" #include "aac_ac3_parser.h" #include "get_bits.h" #define AC3_HEADER_SIZE 7 #if CONFIG_AC3_PARSER static const uint8_t eac3_blocks[4] = { 1, 2, 3, 6 }; /** * Table for center mix levels * reference: Section 5.4.2.4 cmixlev */ static const uint8_t center_levels[4] = { 4, 5, 6, 5 }; /** * Table for surround mix levels * reference: Section 5.4.2.5 surmixlev */ static const uint8_t surround_levels[4] = { 4, 6, 7, 6 }; int ff_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr) { int frame_size_code; memset(hdr, 0, sizeof(*hdr)); hdr->sync_word = get_bits(gbc, 16); if(hdr->sync_word != 0x0B77) return AAC_AC3_PARSE_ERROR_SYNC; /* read ahead to bsid to distinguish between AC-3 and E-AC-3 */ hdr->bitstream_id = show_bits_long(gbc, 29) & 0x1F; if(hdr->bitstream_id > 16) return AAC_AC3_PARSE_ERROR_BSID; hdr->num_blocks = 6; /* set default mix levels */ hdr->center_mix_level = 5; // -4.5dB hdr->surround_mix_level = 6; // -6.0dB /* set default dolby surround mode */ hdr->dolby_surround_mode = 
AC3_DSURMOD_NOTINDICATED; if(hdr->bitstream_id <= 10) { /* Normal AC-3 */ hdr->crc1 = get_bits(gbc, 16); hdr->sr_code = get_bits(gbc, 2); if(hdr->sr_code == 3) return AAC_AC3_PARSE_ERROR_SAMPLE_RATE; frame_size_code = get_bits(gbc, 6); if(frame_size_code > 37) return AAC_AC3_PARSE_ERROR_FRAME_SIZE; skip_bits(gbc, 5); // skip bsid, already got it hdr->bitstream_mode = get_bits(gbc, 3); hdr->channel_mode = get_bits(gbc, 3); if(hdr->channel_mode == AC3_CHMODE_STEREO) { hdr->dolby_surround_mode = get_bits(gbc, 2); } else { if((hdr->channel_mode & 1) && hdr->channel_mode != AC3_CHMODE_MONO) hdr-> center_mix_level = center_levels[get_bits(gbc, 2)]; if(hdr->channel_mode & 4) hdr->surround_mix_level = surround_levels[get_bits(gbc, 2)]; } hdr->lfe_on = get_bits1(gbc); hdr->sr_shift = FFMAX(hdr->bitstream_id, 8) - 8; hdr->sample_rate = ff_ac3_sample_rate_tab[hdr->sr_code] >> hdr->sr_shift; hdr->bit_rate = (ff_ac3_bitrate_tab[frame_size_code>>1] * 1000) >> hdr->sr_shift; hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on; hdr->frame_size = ff_ac3_frame_size_tab[frame_size_code][hdr->sr_code] * 2; hdr->frame_type = EAC3_FRAME_TYPE_AC3_CONVERT; //EAC3_FRAME_TYPE_INDEPENDENT; hdr->substreamid = 0; } else { /* Enhanced AC-3 */ hdr->crc1 = 0; hdr->frame_type = get_bits(gbc, 2); if(hdr->frame_type == EAC3_FRAME_TYPE_RESERVED) return AAC_AC3_PARSE_ERROR_FRAME_TYPE; hdr->substreamid = get_bits(gbc, 3); hdr->frame_size = (get_bits(gbc, 11) + 1) << 1; if(hdr->frame_size < AC3_HEADER_SIZE) return AAC_AC3_PARSE_ERROR_FRAME_SIZE; hdr->sr_code = get_bits(gbc, 2); if (hdr->sr_code == 3) { int sr_code2 = get_bits(gbc, 2); if(sr_code2 == 3) return AAC_AC3_PARSE_ERROR_SAMPLE_RATE; hdr->sample_rate = ff_ac3_sample_rate_tab[sr_code2] / 2; hdr->sr_shift = 1; } else { hdr->num_blocks = eac3_blocks[get_bits(gbc, 2)]; hdr->sample_rate = ff_ac3_sample_rate_tab[hdr->sr_code]; hdr->sr_shift = 0; } hdr->channel_mode = get_bits(gbc, 3); hdr->lfe_on = get_bits1(gbc); hdr->bit_rate = 8LL 
* hdr->frame_size * hdr->sample_rate / (hdr->num_blocks * 256); hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on; } hdr->channel_layout = avpriv_ac3_channel_layout_tab[hdr->channel_mode]; if (hdr->lfe_on) hdr->channel_layout |= AV_CH_LOW_FREQUENCY; return 0; } // TODO: Better way to pass AC3HeaderInfo fields to mov muxer. int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf, size_t size) { GetBitContext gb; AC3HeaderInfo *hdr; int err; if (!*phdr) *phdr = av_mallocz(sizeof(AC3HeaderInfo)); if (!*phdr) return AVERROR(ENOMEM); hdr = *phdr; err = init_get_bits8(&gb, buf, size); if (err < 0) return AVERROR_INVALIDDATA; err = ff_ac3_parse_header(&gb, hdr); if (err < 0) return AVERROR_INVALIDDATA; return get_bits_count(&gb); } int av_ac3_parse_header(const uint8_t *buf, size_t size, uint8_t *bitstream_id, uint16_t *frame_size) { GetBitContext gb; AC3HeaderInfo hdr; int err; init_get_bits8(&gb, buf, size); err = ff_ac3_parse_header(&gb, &hdr); if (err < 0) return AVERROR_INVALIDDATA; *bitstream_id = hdr.bitstream_id; *frame_size = hdr.frame_size; return 0; } static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info, int *need_next_header, int *new_frame_start) { int err; union { uint64_t u64; uint8_t u8[8 + AV_INPUT_BUFFER_PADDING_SIZE]; } tmp = { av_be2ne64(state) }; AC3HeaderInfo hdr; GetBitContext gbc; init_get_bits(&gbc, tmp.u8+8-AC3_HEADER_SIZE, 54); err = ff_ac3_parse_header(&gbc, &hdr); if(err < 0) return 0; hdr_info->sample_rate = hdr.sample_rate; hdr_info->bit_rate = hdr.bit_rate; hdr_info->channels = hdr.channels; hdr_info->channel_layout = hdr.channel_layout; hdr_info->samples = hdr.num_blocks * 256; hdr_info->service_type = hdr.bitstream_mode; if (hdr.bitstream_mode == 0x7 && hdr.channels > 1) hdr_info->service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE; if(hdr.bitstream_id>10) hdr_info->codec_id = AV_CODEC_ID_EAC3; else if (hdr_info->codec_id == AV_CODEC_ID_NONE) hdr_info->codec_id = AV_CODEC_ID_AC3; *new_frame_start = 
(hdr.frame_type != EAC3_FRAME_TYPE_DEPENDENT); *need_next_header = *new_frame_start || (hdr.frame_type != EAC3_FRAME_TYPE_AC3_CONVERT); return hdr.frame_size; } static av_cold int ac3_parse_init(AVCodecParserContext *s1) { AACAC3ParseContext *s = s1->priv_data; s->header_size = AC3_HEADER_SIZE; s->sync = ac3_sync; return 0; } AVCodecParser ff_ac3_parser = { .codec_ids = { AV_CODEC_ID_AC3, AV_CODEC_ID_EAC3 }, .priv_data_size = sizeof(AACAC3ParseContext), .parser_init = ac3_parse_init, .parser_parse = ff_aac_ac3_parse, .parser_close = ff_parse_close, }; #else int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf, size_t size) { return AVERROR(ENOSYS); } int av_ac3_parse_header(const uint8_t *buf, size_t size, uint8_t *bitstream_id, uint16_t *frame_size) { return AVERROR(ENOSYS); } #endif
int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf, size_t size) { GetBitContext gb; AC3HeaderInfo *hdr; int err; if (!*phdr) *phdr = av_mallocz(sizeof(AC3HeaderInfo)); if (!*phdr) return AVERROR(ENOMEM); hdr = *phdr; init_get_bits8(&gb, buf, size); err = ff_ac3_parse_header(&gb, hdr); if (err < 0) return AVERROR_INVALIDDATA; return get_bits_count(&gb); }
int avpriv_ac3_parse_header(AC3HeaderInfo **phdr, const uint8_t *buf, size_t size) { GetBitContext gb; AC3HeaderInfo *hdr; int err; if (!*phdr) *phdr = av_mallocz(sizeof(AC3HeaderInfo)); if (!*phdr) return AVERROR(ENOMEM); hdr = *phdr; err = init_get_bits8(&gb, buf, size); if (err < 0) return AVERROR_INVALIDDATA; err = ff_ac3_parse_header(&gb, hdr); if (err < 0) return AVERROR_INVALIDDATA; return get_bits_count(&gb); }
{'added': [(165, ' err = init_get_bits8(&gb, buf, size);'), (166, ' if (err < 0)'), (167, ' return AVERROR_INVALIDDATA;')], 'deleted': [(165, ' init_get_bits8(&gb, buf, size);')]}
3
1
172
1,207
17
96
4
https://github.com/FFmpeg/FFmpeg
CVE-2018-13303
CWE-476
342
print-ppp.c
C
ppp_hdlc
/* * Copyright (c) 1990, 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Extensively modified by Motonori Shindo (mshindo@mshindo.net) for more * complete PPP support. */ /* * TODO: * o resolve XXX as much as possible * o MP support * o BAP support */ #define NETDISSECT_REWORKED #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <tcpdump-stdinc.h> #ifdef __bsdi__ #include <net/slcompress.h> #include <net/if_ppp.h> #endif #include <stdlib.h> #include "interface.h" #include "extract.h" #include "addrtoname.h" #include "ppp.h" #include "chdlc.h" #include "ethertype.h" #include "oui.h" /* * The following constatns are defined by IANA. Please refer to * http://www.isi.edu/in-notes/iana/assignments/ppp-numbers * for the up-to-date information. 
*/ /* Protocol Codes defined in ppp.h */ static const struct tok ppptype2str[] = { { PPP_IP, "IP" }, { PPP_OSI, "OSI" }, { PPP_NS, "NS" }, { PPP_DECNET, "DECNET" }, { PPP_APPLE, "APPLE" }, { PPP_IPX, "IPX" }, { PPP_VJC, "VJC IP" }, { PPP_VJNC, "VJNC IP" }, { PPP_BRPDU, "BRPDU" }, { PPP_STII, "STII" }, { PPP_VINES, "VINES" }, { PPP_MPLS_UCAST, "MPLS" }, { PPP_MPLS_MCAST, "MPLS" }, { PPP_COMP, "Compressed"}, { PPP_ML, "MLPPP"}, { PPP_IPV6, "IP6"}, { PPP_HELLO, "HELLO" }, { PPP_LUXCOM, "LUXCOM" }, { PPP_SNS, "SNS" }, { PPP_IPCP, "IPCP" }, { PPP_OSICP, "OSICP" }, { PPP_NSCP, "NSCP" }, { PPP_DECNETCP, "DECNETCP" }, { PPP_APPLECP, "APPLECP" }, { PPP_IPXCP, "IPXCP" }, { PPP_STIICP, "STIICP" }, { PPP_VINESCP, "VINESCP" }, { PPP_IPV6CP, "IP6CP" }, { PPP_MPLSCP, "MPLSCP" }, { PPP_LCP, "LCP" }, { PPP_PAP, "PAP" }, { PPP_LQM, "LQM" }, { PPP_CHAP, "CHAP" }, { PPP_EAP, "EAP" }, { PPP_SPAP, "SPAP" }, { PPP_SPAP_OLD, "Old-SPAP" }, { PPP_BACP, "BACP" }, { PPP_BAP, "BAP" }, { PPP_MPCP, "MLPPP-CP" }, { PPP_CCP, "CCP" }, { 0, NULL } }; /* Control Protocols (LCP/IPCP/CCP etc.) 
Codes defined in RFC 1661 */ #define CPCODES_VEXT 0 /* Vendor-Specific (RFC2153) */ #define CPCODES_CONF_REQ 1 /* Configure-Request */ #define CPCODES_CONF_ACK 2 /* Configure-Ack */ #define CPCODES_CONF_NAK 3 /* Configure-Nak */ #define CPCODES_CONF_REJ 4 /* Configure-Reject */ #define CPCODES_TERM_REQ 5 /* Terminate-Request */ #define CPCODES_TERM_ACK 6 /* Terminate-Ack */ #define CPCODES_CODE_REJ 7 /* Code-Reject */ #define CPCODES_PROT_REJ 8 /* Protocol-Reject (LCP only) */ #define CPCODES_ECHO_REQ 9 /* Echo-Request (LCP only) */ #define CPCODES_ECHO_RPL 10 /* Echo-Reply (LCP only) */ #define CPCODES_DISC_REQ 11 /* Discard-Request (LCP only) */ #define CPCODES_ID 12 /* Identification (LCP only) RFC1570 */ #define CPCODES_TIME_REM 13 /* Time-Remaining (LCP only) RFC1570 */ #define CPCODES_RESET_REQ 14 /* Reset-Request (CCP only) RFC1962 */ #define CPCODES_RESET_REP 15 /* Reset-Reply (CCP only) */ static const struct tok cpcodes[] = { {CPCODES_VEXT, "Vendor-Extension"}, /* RFC2153 */ {CPCODES_CONF_REQ, "Conf-Request"}, {CPCODES_CONF_ACK, "Conf-Ack"}, {CPCODES_CONF_NAK, "Conf-Nack"}, {CPCODES_CONF_REJ, "Conf-Reject"}, {CPCODES_TERM_REQ, "Term-Request"}, {CPCODES_TERM_ACK, "Term-Ack"}, {CPCODES_CODE_REJ, "Code-Reject"}, {CPCODES_PROT_REJ, "Prot-Reject"}, {CPCODES_ECHO_REQ, "Echo-Request"}, {CPCODES_ECHO_RPL, "Echo-Reply"}, {CPCODES_DISC_REQ, "Disc-Req"}, {CPCODES_ID, "Ident"}, /* RFC1570 */ {CPCODES_TIME_REM, "Time-Rem"}, /* RFC1570 */ {CPCODES_RESET_REQ, "Reset-Req"}, /* RFC1962 */ {CPCODES_RESET_REP, "Reset-Ack"}, /* RFC1962 */ {0, NULL} }; /* LCP Config Options */ #define LCPOPT_VEXT 0 #define LCPOPT_MRU 1 #define LCPOPT_ACCM 2 #define LCPOPT_AP 3 #define LCPOPT_QP 4 #define LCPOPT_MN 5 #define LCPOPT_DEP6 6 #define LCPOPT_PFC 7 #define LCPOPT_ACFC 8 #define LCPOPT_FCSALT 9 #define LCPOPT_SDP 10 #define LCPOPT_NUMMODE 11 #define LCPOPT_DEP12 12 #define LCPOPT_CBACK 13 #define LCPOPT_DEP14 14 #define LCPOPT_DEP15 15 #define LCPOPT_DEP16 16 #define LCPOPT_MLMRRU 17 
#define LCPOPT_MLSSNHF 18 #define LCPOPT_MLED 19 #define LCPOPT_PROP 20 #define LCPOPT_DCEID 21 #define LCPOPT_MPP 22 #define LCPOPT_LD 23 #define LCPOPT_LCPAOPT 24 #define LCPOPT_COBS 25 #define LCPOPT_PE 26 #define LCPOPT_MLHF 27 #define LCPOPT_I18N 28 #define LCPOPT_SDLOS 29 #define LCPOPT_PPPMUX 30 #define LCPOPT_MIN LCPOPT_VEXT #define LCPOPT_MAX LCPOPT_PPPMUX static const char *lcpconfopts[] = { "Vend-Ext", /* (0) */ "MRU", /* (1) */ "ACCM", /* (2) */ "Auth-Prot", /* (3) */ "Qual-Prot", /* (4) */ "Magic-Num", /* (5) */ "deprecated(6)", /* used to be a Quality Protocol */ "PFC", /* (7) */ "ACFC", /* (8) */ "FCS-Alt", /* (9) */ "SDP", /* (10) */ "Num-Mode", /* (11) */ "deprecated(12)", /* used to be a Multi-Link-Procedure*/ "Call-Back", /* (13) */ "deprecated(14)", /* used to be a Connect-Time */ "deprecated(15)", /* used to be a Compund-Frames */ "deprecated(16)", /* used to be a Nominal-Data-Encap */ "MRRU", /* (17) */ "12-Bit seq #", /* (18) */ "End-Disc", /* (19) */ "Proprietary", /* (20) */ "DCE-Id", /* (21) */ "MP+", /* (22) */ "Link-Disc", /* (23) */ "LCP-Auth-Opt", /* (24) */ "COBS", /* (25) */ "Prefix-elision", /* (26) */ "Multilink-header-Form",/* (27) */ "I18N", /* (28) */ "SDL-over-SONET/SDH", /* (29) */ "PPP-Muxing", /* (30) */ }; /* ECP - to be supported */ /* CCP Config Options */ #define CCPOPT_OUI 0 /* RFC1962 */ #define CCPOPT_PRED1 1 /* RFC1962 */ #define CCPOPT_PRED2 2 /* RFC1962 */ #define CCPOPT_PJUMP 3 /* RFC1962 */ /* 4-15 unassigned */ #define CCPOPT_HPPPC 16 /* RFC1962 */ #define CCPOPT_STACLZS 17 /* RFC1974 */ #define CCPOPT_MPPC 18 /* RFC2118 */ #define CCPOPT_GFZA 19 /* RFC1962 */ #define CCPOPT_V42BIS 20 /* RFC1962 */ #define CCPOPT_BSDCOMP 21 /* RFC1977 */ /* 22 unassigned */ #define CCPOPT_LZSDCP 23 /* RFC1967 */ #define CCPOPT_MVRCA 24 /* RFC1975 */ #define CCPOPT_DEC 25 /* RFC1976 */ #define CCPOPT_DEFLATE 26 /* RFC1979 */ /* 27-254 unassigned */ #define CCPOPT_RESV 255 /* RFC1962 */ static const struct tok ccpconfopts_values[] 
= { { CCPOPT_OUI, "OUI" }, { CCPOPT_PRED1, "Pred-1" }, { CCPOPT_PRED2, "Pred-2" }, { CCPOPT_PJUMP, "Puddle" }, { CCPOPT_HPPPC, "HP-PPC" }, { CCPOPT_STACLZS, "Stac-LZS" }, { CCPOPT_MPPC, "MPPC" }, { CCPOPT_GFZA, "Gand-FZA" }, { CCPOPT_V42BIS, "V.42bis" }, { CCPOPT_BSDCOMP, "BSD-Comp" }, { CCPOPT_LZSDCP, "LZS-DCP" }, { CCPOPT_MVRCA, "MVRCA" }, { CCPOPT_DEC, "DEC" }, { CCPOPT_DEFLATE, "Deflate" }, { CCPOPT_RESV, "Reserved"}, {0, NULL} }; /* BACP Config Options */ #define BACPOPT_FPEER 1 /* RFC2125 */ static const struct tok bacconfopts_values[] = { { BACPOPT_FPEER, "Favored-Peer" }, {0, NULL} }; /* SDCP - to be supported */ /* IPCP Config Options */ #define IPCPOPT_2ADDR 1 /* RFC1172, RFC1332 (deprecated) */ #define IPCPOPT_IPCOMP 2 /* RFC1332 */ #define IPCPOPT_ADDR 3 /* RFC1332 */ #define IPCPOPT_MOBILE4 4 /* RFC2290 */ #define IPCPOPT_PRIDNS 129 /* RFC1877 */ #define IPCPOPT_PRINBNS 130 /* RFC1877 */ #define IPCPOPT_SECDNS 131 /* RFC1877 */ #define IPCPOPT_SECNBNS 132 /* RFC1877 */ static const struct tok ipcpopt_values[] = { { IPCPOPT_2ADDR, "IP-Addrs" }, { IPCPOPT_IPCOMP, "IP-Comp" }, { IPCPOPT_ADDR, "IP-Addr" }, { IPCPOPT_MOBILE4, "Home-Addr" }, { IPCPOPT_PRIDNS, "Pri-DNS" }, { IPCPOPT_PRINBNS, "Pri-NBNS" }, { IPCPOPT_SECDNS, "Sec-DNS" }, { IPCPOPT_SECNBNS, "Sec-NBNS" }, { 0, NULL } }; #define IPCPOPT_IPCOMP_HDRCOMP 0x61 /* rfc3544 */ #define IPCPOPT_IPCOMP_MINLEN 14 static const struct tok ipcpopt_compproto_values[] = { { PPP_VJC, "VJ-Comp" }, { IPCPOPT_IPCOMP_HDRCOMP, "IP Header Compression" }, { 0, NULL } }; static const struct tok ipcpopt_compproto_subopt_values[] = { { 1, "RTP-Compression" }, { 2, "Enhanced RTP-Compression" }, { 0, NULL } }; /* IP6CP Config Options */ #define IP6CP_IFID 1 static const struct tok ip6cpopt_values[] = { { IP6CP_IFID, "Interface-ID" }, { 0, NULL } }; /* ATCP - to be supported */ /* OSINLCP - to be supported */ /* BVCP - to be supported */ /* BCP - to be supported */ /* IPXCP - to be supported */ /* MPLSCP - to be supported */ 
/* Auth Algorithms */ /* 0-4 Reserved (RFC1994) */ #define AUTHALG_CHAPMD5 5 /* RFC1994 */ #define AUTHALG_MSCHAP1 128 /* RFC2433 */ #define AUTHALG_MSCHAP2 129 /* RFC2795 */ static const struct tok authalg_values[] = { { AUTHALG_CHAPMD5, "MD5" }, { AUTHALG_MSCHAP1, "MS-CHAPv1" }, { AUTHALG_MSCHAP2, "MS-CHAPv2" }, { 0, NULL } }; /* FCS Alternatives - to be supported */ /* Multilink Endpoint Discriminator (RFC1717) */ #define MEDCLASS_NULL 0 /* Null Class */ #define MEDCLASS_LOCAL 1 /* Locally Assigned */ #define MEDCLASS_IPV4 2 /* Internet Protocol (IPv4) */ #define MEDCLASS_MAC 3 /* IEEE 802.1 global MAC address */ #define MEDCLASS_MNB 4 /* PPP Magic Number Block */ #define MEDCLASS_PSNDN 5 /* Public Switched Network Director Number */ /* PPP LCP Callback */ #define CALLBACK_AUTH 0 /* Location determined by user auth */ #define CALLBACK_DSTR 1 /* Dialing string */ #define CALLBACK_LID 2 /* Location identifier */ #define CALLBACK_E164 3 /* E.164 number */ #define CALLBACK_X500 4 /* X.500 distinguished name */ #define CALLBACK_CBCP 6 /* Location is determined during CBCP nego */ static const struct tok ppp_callback_values[] = { { CALLBACK_AUTH, "UserAuth" }, { CALLBACK_DSTR, "DialString" }, { CALLBACK_LID, "LocalID" }, { CALLBACK_E164, "E.164" }, { CALLBACK_X500, "X.500" }, { CALLBACK_CBCP, "CBCP" }, { 0, NULL } }; /* CHAP */ #define CHAP_CHAL 1 #define CHAP_RESP 2 #define CHAP_SUCC 3 #define CHAP_FAIL 4 static const struct tok chapcode_values[] = { { CHAP_CHAL, "Challenge" }, { CHAP_RESP, "Response" }, { CHAP_SUCC, "Success" }, { CHAP_FAIL, "Fail" }, { 0, NULL} }; /* PAP */ #define PAP_AREQ 1 #define PAP_AACK 2 #define PAP_ANAK 3 static const struct tok papcode_values[] = { { PAP_AREQ, "Auth-Req" }, { PAP_AACK, "Auth-ACK" }, { PAP_ANAK, "Auth-NACK" }, { 0, NULL } }; /* BAP */ #define BAP_CALLREQ 1 #define BAP_CALLRES 2 #define BAP_CBREQ 3 #define BAP_CBRES 4 #define BAP_LDQREQ 5 #define BAP_LDQRES 6 #define BAP_CSIND 7 #define BAP_CSRES 8 static int 
print_lcp_config_options(netdissect_options *, const u_char *p, int); static int print_ipcp_config_options(netdissect_options *, const u_char *p, int); static int print_ip6cp_config_options(netdissect_options *, const u_char *p, int); static int print_ccp_config_options(netdissect_options *, const u_char *p, int); static int print_bacp_config_options(netdissect_options *, const u_char *p, int); static void handle_ppp(netdissect_options *, u_int proto, const u_char *p, int length); /* generic Control Protocol (e.g. LCP, IPCP, CCP, etc.) handler */ static void handle_ctrl_proto(netdissect_options *ndo, u_int proto, const u_char *pptr, int length) { const char *typestr; u_int code, len; int (*pfunc)(netdissect_options *, const u_char *, int); int x, j; const u_char *tptr; tptr=pptr; typestr = tok2str(ppptype2str, "unknown ctrl-proto (0x%04x)", proto); ND_PRINT((ndo, "%s, ", typestr)); if (length < 4) /* FIXME weak boundary checking */ goto trunc; ND_TCHECK2(*tptr, 2); code = *tptr++; ND_PRINT((ndo, "%s (0x%02x), id %u, length %u", tok2str(cpcodes, "Unknown Opcode",code), code, *tptr++, /* ID */ length + 2)); if (!ndo->ndo_vflag) return; if (length <= 4) return; /* there may be a NULL confreq etc. */ ND_TCHECK2(*tptr, 2); len = EXTRACT_16BITS(tptr); tptr += 2; ND_PRINT((ndo, "\n\tencoded length %u (=Option(s) length %u)", len, len - 4)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr - 2, "\n\t", 6); switch (code) { case CPCODES_VEXT: if (length < 11) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); tptr += 4; ND_TCHECK2(*tptr, 3); ND_PRINT((ndo, " Vendor: %s (%u)", tok2str(oui_values,"Unknown",EXTRACT_24BITS(tptr)), EXTRACT_24BITS(tptr))); /* XXX: need to decode Kind and Value(s)? 
*/ break; case CPCODES_CONF_REQ: case CPCODES_CONF_ACK: case CPCODES_CONF_NAK: case CPCODES_CONF_REJ: x = len - 4; /* Code(1), Identifier(1) and Length(2) */ do { switch (proto) { case PPP_LCP: pfunc = print_lcp_config_options; break; case PPP_IPCP: pfunc = print_ipcp_config_options; break; case PPP_IPV6CP: pfunc = print_ip6cp_config_options; break; case PPP_CCP: pfunc = print_ccp_config_options; break; case PPP_BACP: pfunc = print_bacp_config_options; break; default: /* * No print routine for the options for * this protocol. */ pfunc = NULL; break; } if (pfunc == NULL) /* catch the above null pointer if unknown CP */ break; if ((j = (*pfunc)(ndo, tptr, len)) == 0) break; x -= j; tptr += j; } while (x > 0); break; case CPCODES_TERM_REQ: case CPCODES_TERM_ACK: /* XXX: need to decode Data? */ break; case CPCODES_CODE_REJ: /* XXX: need to decode Rejected-Packet? */ break; case CPCODES_PROT_REJ: if (length < 6) break; ND_TCHECK2(*tptr, 2); ND_PRINT((ndo, "\n\t Rejected %s Protocol (0x%04x)", tok2str(ppptype2str,"unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); /* XXX: need to decode Rejected-Information? - hexdump for now */ if (len > 6) { ND_PRINT((ndo, "\n\t Rejected Packet")); print_unknown_data(ndo, tptr + 2, "\n\t ", len - 2); } break; case CPCODES_ECHO_REQ: case CPCODES_ECHO_RPL: case CPCODES_DISC_REQ: if (length < 8) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); /* XXX: need to decode Data? 
- hexdump for now */ if (len > 8) { ND_PRINT((ndo, "\n\t -----trailing data-----")); ND_TCHECK2(tptr[4], len - 8); print_unknown_data(ndo, tptr + 4, "\n\t ", len - 8); } break; case CPCODES_ID: if (length < 8) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); /* RFC 1661 says this is intended to be human readable */ if (len > 8) { ND_PRINT((ndo, "\n\t Message\n\t ")); if (fn_printn(ndo, tptr + 4, len - 4, ndo->ndo_snapend)) goto trunc; } break; case CPCODES_TIME_REM: if (length < 12) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); ND_TCHECK2(*(tptr + 4), 4); ND_PRINT((ndo, ", Seconds-Remaining %us", EXTRACT_32BITS(tptr + 4))); /* XXX: need to decode Message? */ break; default: /* XXX this is dirty but we do not get the * original pointer passed to the begin * the PPP packet */ if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, pptr - 2, "\n\t ", length + 2); break; } return; trunc: ND_PRINT((ndo, "[|%s]", typestr)); } /* LCP config options */ static int print_lcp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { if ((opt >= LCPOPT_MIN) && (opt <= LCPOPT_MAX)) ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", lcpconfopts[opt], opt, len)); else ND_PRINT((ndo, "\n\tunknown LCP option 0x%02x", opt)); return 0; } if ((opt >= LCPOPT_MIN) && (opt <= LCPOPT_MAX)) ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", lcpconfopts[opt], opt, len)); else { ND_PRINT((ndo, "\n\tunknown LCP option 0x%02x", opt)); return len; } switch (opt) { case LCPOPT_VEXT: if (len < 6) { ND_PRINT((ndo, " (length bogus, should be >= 6)")); return len; } ND_TCHECK2(*(p + 2), 3); ND_PRINT((ndo, ": Vendor: %s (%u)", tok2str(oui_values,"Unknown",EXTRACT_24BITS(p+2)), EXTRACT_24BITS(p + 2))); #if 0 ND_TCHECK(p[5]); ND_PRINT((ndo, ", kind: 
0x%02x", p[5])); ND_PRINT((ndo, ", Value: 0x")); for (i = 0; i < len - 6; i++) { ND_TCHECK(p[6 + i]); ND_PRINT((ndo, "%02x", p[6 + i])); } #endif break; case LCPOPT_MRU: if (len != 4) { ND_PRINT((ndo, " (length bogus, should be = 4)")); return len; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": %u", EXTRACT_16BITS(p + 2))); break; case LCPOPT_ACCM: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return len; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": 0x%08x", EXTRACT_32BITS(p + 2))); break; case LCPOPT_AP: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return len; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": %s", tok2str(ppptype2str, "Unknown Auth Proto (0x04x)", EXTRACT_16BITS(p + 2)))); switch (EXTRACT_16BITS(p+2)) { case PPP_CHAP: ND_TCHECK(p[4]); ND_PRINT((ndo, ", %s", tok2str(authalg_values, "Unknown Auth Alg %u", p[4]))); break; case PPP_PAP: /* fall through */ case PPP_EAP: case PPP_SPAP: case PPP_SPAP_OLD: break; default: print_unknown_data(ndo, p, "\n\t", len); } break; case LCPOPT_QP: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return 0; } ND_TCHECK2(*(p + 2), 2); if (EXTRACT_16BITS(p+2) == PPP_LQM) ND_PRINT((ndo, ": LQR")); else ND_PRINT((ndo, ": unknown")); break; case LCPOPT_MN: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return 0; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": 0x%08x", EXTRACT_32BITS(p + 2))); break; case LCPOPT_PFC: break; case LCPOPT_ACFC: break; case LCPOPT_LD: if (len != 4) { ND_PRINT((ndo, " (length bogus, should be = 4)")); return 0; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": 0x%04x", EXTRACT_16BITS(p + 2))); break; case LCPOPT_CBACK: if (len < 3) { ND_PRINT((ndo, " (length bogus, should be >= 3)")); return 0; } ND_PRINT((ndo, ": ")); ND_TCHECK(p[2]); ND_PRINT((ndo, ": Callback Operation %s (%u)", tok2str(ppp_callback_values, "Unknown", p[2]), p[2])); break; case LCPOPT_MLMRRU: if (len != 4) { ND_PRINT((ndo, " (length bogus, should be = 4)")); 
return 0; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": %u", EXTRACT_16BITS(p + 2))); break; case LCPOPT_MLED: if (len < 3) { ND_PRINT((ndo, " (length bogus, should be >= 3)")); return 0; } ND_TCHECK(p[2]); switch (p[2]) { /* class */ case MEDCLASS_NULL: ND_PRINT((ndo, ": Null")); break; case MEDCLASS_LOCAL: ND_PRINT((ndo, ": Local")); /* XXX */ break; case MEDCLASS_IPV4: if (len != 7) { ND_PRINT((ndo, " (length bogus, should be = 7)")); return 0; } ND_TCHECK2(*(p + 3), 4); ND_PRINT((ndo, ": IPv4 %s", ipaddr_string(ndo, p + 3))); break; case MEDCLASS_MAC: if (len != 9) { ND_PRINT((ndo, " (length bogus, should be = 9)")); return 0; } ND_TCHECK2(*(p + 3), 6); ND_PRINT((ndo, ": MAC %s", etheraddr_string(ndo, p + 3))); break; case MEDCLASS_MNB: ND_PRINT((ndo, ": Magic-Num-Block")); /* XXX */ break; case MEDCLASS_PSNDN: ND_PRINT((ndo, ": PSNDN")); /* XXX */ break; default: ND_PRINT((ndo, ": Unknown class %u", p[2])); break; } break; /* XXX: to be supported */ #if 0 case LCPOPT_DEP6: case LCPOPT_FCSALT: case LCPOPT_SDP: case LCPOPT_NUMMODE: case LCPOPT_DEP12: case LCPOPT_DEP14: case LCPOPT_DEP15: case LCPOPT_DEP16: case LCPOPT_MLSSNHF: case LCPOPT_PROP: case LCPOPT_DCEID: case LCPOPT_MPP: case LCPOPT_LCPAOPT: case LCPOPT_COBS: case LCPOPT_PE: case LCPOPT_MLHF: case LCPOPT_I18N: case LCPOPT_SDLOS: case LCPOPT_PPPMUX: break; #endif default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. 
*/ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|lcp]")); return 0; } /* ML-PPP*/ static const struct tok ppp_ml_flag_values[] = { { 0x80, "begin" }, { 0x40, "end" }, { 0, NULL } }; static void handle_mlppp(netdissect_options *ndo, const u_char *p, int length) { if (!ndo->ndo_eflag) ND_PRINT((ndo, "MLPPP, ")); ND_PRINT((ndo, "seq 0x%03x, Flags [%s], length %u", (EXTRACT_16BITS(p))&0x0fff, /* only support 12-Bit sequence space for now */ bittok2str(ppp_ml_flag_values, "none", *p & 0xc0), length)); } /* CHAP */ static void handle_chap(netdissect_options *ndo, const u_char *p, int length) { u_int code, len; int val_size, name_size, msg_size; const u_char *p0; int i; p0 = p; if (length < 1) { ND_PRINT((ndo, "[|chap]")); return; } else if (length < 4) { ND_TCHECK(*p); ND_PRINT((ndo, "[|chap 0x%02x]", *p)); return; } ND_TCHECK(*p); code = *p; ND_PRINT((ndo, "CHAP, %s (0x%02x)", tok2str(chapcode_values,"unknown",code), code)); p++; ND_TCHECK(*p); ND_PRINT((ndo, ", id %u", *p)); /* ID */ p++; ND_TCHECK2(*p, 2); len = EXTRACT_16BITS(p); p += 2; /* * Note that this is a generic CHAP decoding routine. Since we * don't know which flavor of CHAP (i.e. CHAP-MD5, MS-CHAPv1, * MS-CHAPv2) is used at this point, we can't decode packet * specifically to each algorithms. Instead, we simply decode * the GCD (Gratest Common Denominator) for all algorithms. 
*/ switch (code) { case CHAP_CHAL: case CHAP_RESP: if (length - (p - p0) < 1) return; ND_TCHECK(*p); val_size = *p; /* value size */ p++; if (length - (p - p0) < val_size) return; ND_PRINT((ndo, ", Value ")); for (i = 0; i < val_size; i++) { ND_TCHECK(*p); ND_PRINT((ndo, "%02x", *p++)); } name_size = len - (p - p0); ND_PRINT((ndo, ", Name ")); for (i = 0; i < name_size; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; case CHAP_SUCC: case CHAP_FAIL: msg_size = len - (p - p0); ND_PRINT((ndo, ", Msg ")); for (i = 0; i< msg_size; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; } return; trunc: ND_PRINT((ndo, "[|chap]")); } /* PAP (see RFC 1334) */ static void handle_pap(netdissect_options *ndo, const u_char *p, int length) { u_int code, len; int peerid_len, passwd_len, msg_len; const u_char *p0; int i; p0 = p; if (length < 1) { ND_PRINT((ndo, "[|pap]")); return; } else if (length < 4) { ND_TCHECK(*p); ND_PRINT((ndo, "[|pap 0x%02x]", *p)); return; } ND_TCHECK(*p); code = *p; ND_PRINT((ndo, "PAP, %s (0x%02x)", tok2str(papcode_values, "unknown", code), code)); p++; ND_TCHECK(*p); ND_PRINT((ndo, ", id %u", *p)); /* ID */ p++; ND_TCHECK2(*p, 2); len = EXTRACT_16BITS(p); p += 2; if ((int)len > length) { ND_PRINT((ndo, ", length %u > packet size", len)); return; } length = len; if (length < (p - p0)) { ND_PRINT((ndo, ", length %u < PAP header length", length)); return; } switch (code) { case PAP_AREQ: if (length - (p - p0) < 1) return; ND_TCHECK(*p); peerid_len = *p; /* Peer-ID Length */ p++; if (length - (p - p0) < peerid_len) return; ND_PRINT((ndo, ", Peer ")); for (i = 0; i < peerid_len; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } if (length - (p - p0) < 1) return; ND_TCHECK(*p); passwd_len = *p; /* Password Length */ p++; if (length - (p - p0) < passwd_len) return; ND_PRINT((ndo, ", Name ")); for (i = 0; i < passwd_len; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; case PAP_AACK: case PAP_ANAK: if (length - (p - p0) < 1) return; ND_TCHECK(*p); 
msg_len = *p; /* Msg-Length */ p++; if (length - (p - p0) < msg_len) return; ND_PRINT((ndo, ", Msg ")); for (i = 0; i< msg_len; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; } return; trunc: ND_PRINT((ndo, "[|pap]")); } /* BAP */ static void handle_bap(netdissect_options *ndo _U_, const u_char *p _U_, int length _U_) { /* XXX: to be supported!! */ } /* IPCP config options */ static int print_ipcp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; u_int compproto, ipcomp_subopttotallen, ipcomp_subopt, ipcomp_suboptlen; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", tok2str(ipcpopt_values,"unknown",opt), opt, len)); return 0; } ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", tok2str(ipcpopt_values,"unknown",opt), opt, len)); switch (opt) { case IPCPOPT_2ADDR: /* deprecated */ if (len != 10) { ND_PRINT((ndo, " (length bogus, should be = 10)")); return len; } ND_TCHECK2(*(p + 6), 4); ND_PRINT((ndo, ": src %s, dst %s", ipaddr_string(ndo, p + 2), ipaddr_string(ndo, p + 6))); break; case IPCPOPT_IPCOMP: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return 0; } ND_TCHECK2(*(p + 2), 2); compproto = EXTRACT_16BITS(p+2); ND_PRINT((ndo, ": %s (0x%02x):", tok2str(ipcpopt_compproto_values, "Unknown", compproto), compproto)); switch (compproto) { case PPP_VJC: /* XXX: VJ-Comp parameters should be decoded */ break; case IPCPOPT_IPCOMP_HDRCOMP: if (len < IPCPOPT_IPCOMP_MINLEN) { ND_PRINT((ndo, " (length bogus, should be >= %u)", IPCPOPT_IPCOMP_MINLEN)); return 0; } ND_TCHECK2(*(p + 2), IPCPOPT_IPCOMP_MINLEN); ND_PRINT((ndo, "\n\t TCP Space %u, non-TCP Space %u" \ ", maxPeriod %u, maxTime %u, maxHdr %u", EXTRACT_16BITS(p+4), EXTRACT_16BITS(p+6), EXTRACT_16BITS(p+8), EXTRACT_16BITS(p+10), EXTRACT_16BITS(p+12))); /* suboptions present ? 
*/ if (len > IPCPOPT_IPCOMP_MINLEN) { ipcomp_subopttotallen = len - IPCPOPT_IPCOMP_MINLEN; p += IPCPOPT_IPCOMP_MINLEN; ND_PRINT((ndo, "\n\t Suboptions, length %u", ipcomp_subopttotallen)); while (ipcomp_subopttotallen >= 2) { ND_TCHECK2(*p, 2); ipcomp_subopt = *p; ipcomp_suboptlen = *(p+1); /* sanity check */ if (ipcomp_subopt == 0 || ipcomp_suboptlen == 0 ) break; /* XXX: just display the suboptions for now */ ND_PRINT((ndo, "\n\t\t%s Suboption #%u, length %u", tok2str(ipcpopt_compproto_subopt_values, "Unknown", ipcomp_subopt), ipcomp_subopt, ipcomp_suboptlen)); ipcomp_subopttotallen -= ipcomp_suboptlen; p += ipcomp_suboptlen; } } break; default: break; } break; case IPCPOPT_ADDR: /* those options share the same format - fall through */ case IPCPOPT_MOBILE4: case IPCPOPT_PRIDNS: case IPCPOPT_PRINBNS: case IPCPOPT_SECDNS: case IPCPOPT_SECNBNS: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return 0; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": %s", ipaddr_string(ndo, p + 2))); break; default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. 
 */
		if (ndo->ndo_vflag < 2)
			print_unknown_data(ndo, &p[2], "\n\t ", len - 2);
		break;
	}
	/* at higher verbosity, also dump the option value as raw bytes */
	if (ndo->ndo_vflag > 1)
		print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */
	return len;

trunc:
	ND_PRINT((ndo, "[|ipcp]"));
	return 0;
}

/* IP6CP config options */
/*
 * Print one IP6CP configuration option TLV starting at p.
 * Returns the option length consumed, or 0 on a bogus/truncated option
 * so the caller stops walking the option list.
 */
static int
print_ip6cp_config_options(netdissect_options *ndo,
                           const u_char *p, int length)
{
	int len, opt;

	if (length < 2)
		return 0;
	ND_TCHECK2(*p, 2);
	len = p[1];	/* TLV length, includes the 2-byte header */
	opt = p[0];	/* TLV type */
	if (length < len)
		return 0;
	if (len < 2) {
		ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)",
		       tok2str(ip6cpopt_values,"unknown",opt),
		       opt,
		       len));
		return 0;
	}

	ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u",
	       tok2str(ip6cpopt_values,"unknown",opt),
	       opt,
	       len));

	switch (opt) {
	case IP6CP_IFID:
		/* 64-bit interface identifier: fixed 10-byte TLV */
		if (len != 10) {
			ND_PRINT((ndo, " (length bogus, should be = 10)"));
			return len;
		}
		ND_TCHECK2(*(p + 2), 8);
		ND_PRINT((ndo, ": %04x:%04x:%04x:%04x",
		       EXTRACT_16BITS(p + 2),
		       EXTRACT_16BITS(p + 4),
		       EXTRACT_16BITS(p + 6),
		       EXTRACT_16BITS(p + 8)));
		break;
	default:
		/*
		 * Unknown option; dump it as raw bytes now if we're
		 * not going to do so below.
*/ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|ip6cp]")); return 0; } /* CCP config options */ static int print_ccp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", tok2str(ccpconfopts_values, "Unknown", opt), opt, len)); return 0; } ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", tok2str(ccpconfopts_values, "Unknown", opt), opt, len)); switch (opt) { case CCPOPT_BSDCOMP: if (len < 3) { ND_PRINT((ndo, " (length bogus, should be >= 3)")); return len; } ND_TCHECK2(*(p + 2), 1); ND_PRINT((ndo, ": Version: %u, Dictionary Bits: %u", p[2] >> 5, p[2] & 0x1f)); break; case CCPOPT_MVRCA: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return len; } ND_TCHECK2(*(p + 2), 1); ND_PRINT((ndo, ": Features: %u, PxP: %s, History: %u, #CTX-ID: %u", (p[2] & 0xc0) >> 6, (p[2] & 0x20) ? "Enabled" : "Disabled", p[2] & 0x1f, p[3])); break; case CCPOPT_DEFLATE: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return len; } ND_TCHECK2(*(p + 2), 1); ND_PRINT((ndo, ": Window: %uK, Method: %s (0x%x), MBZ: %u, CHK: %u", (p[2] & 0xf0) >> 4, ((p[2] & 0x0f) == 8) ? "zlib" : "unkown", p[2] & 0x0f, (p[3] & 0xfc) >> 2, p[3] & 0x03)); break; /* XXX: to be supported */ #if 0 case CCPOPT_OUI: case CCPOPT_PRED1: case CCPOPT_PRED2: case CCPOPT_PJUMP: case CCPOPT_HPPPC: case CCPOPT_STACLZS: case CCPOPT_MPPC: case CCPOPT_GFZA: case CCPOPT_V42BIS: case CCPOPT_LZSDCP: case CCPOPT_DEC: case CCPOPT_RESV: break; #endif default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. 
*/ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|ccp]")); return 0; } /* BACP config options */ static int print_bacp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", tok2str(bacconfopts_values, "Unknown", opt), opt, len)); return 0; } ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", tok2str(bacconfopts_values, "Unknown", opt), opt, len)); switch (opt) { case BACPOPT_FPEER: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return len; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": Magic-Num 0x%08x", EXTRACT_32BITS(p + 2))); break; default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. */ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|bacp]")); return 0; } static void ppp_hdlc(netdissect_options *ndo, const u_char *p, int length) { u_char *b, *s, *t, c; int i, proto; const void *se; if (length <= 0) return; b = (uint8_t *)malloc(length); if (b == NULL) return; /* * Unescape all the data into a temporary, private, buffer. * Do this so that we dont overwrite the original packet * contents. 
*/ for (s = (u_char *)p, t = b, i = length; i > 0; i--) { c = *s++; if (c == 0x7d) { if (i > 1) { i--; c = *s++ ^ 0x20; } else continue; } *t++ = c; } se = ndo->ndo_snapend; ndo->ndo_snapend = t; length = t - b; /* now lets guess about the payload codepoint format */ if (length < 1) goto trunc; proto = *b; /* start with a one-octet codepoint guess */ switch (proto) { case PPP_IP: ip_print(ndo, b + 1, length - 1); goto cleanup; case PPP_IPV6: ip6_print(ndo, b + 1, length - 1); goto cleanup; default: /* no luck - try next guess */ break; } if (length < 2) goto trunc; proto = EXTRACT_16BITS(b); /* next guess - load two octets */ switch (proto) { case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */ if (length < 4) goto trunc; proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */ handle_ppp(ndo, proto, b + 4, length - 4); break; default: /* last guess - proto must be a PPP proto-id */ handle_ppp(ndo, proto, b + 2, length - 2); break; } cleanup: ndo->ndo_snapend = se; free(b); return; trunc: ndo->ndo_snapend = se; free(b); ND_PRINT((ndo, "[|ppp]")); } /* PPP */ static void handle_ppp(netdissect_options *ndo, u_int proto, const u_char *p, int length) { if ((proto & 0xff00) == 0x7e00) { /* is this an escape code ? 
 */
		/* back up one octet so ppp_hdlc() sees the escape octet too */
		ppp_hdlc(ndo, p - 1, length);
		return;
	}

	/* dispatch on the PPP protocol identifier */
	switch (proto) {
	case PPP_LCP: /* fall through */
	case PPP_IPCP:
	case PPP_OSICP:
	case PPP_MPLSCP:
	case PPP_IPV6CP:
	case PPP_CCP:
	case PPP_BACP:
		/* all control protocols share one generic printer */
		handle_ctrl_proto(ndo, proto, p, length);
		break;
	case PPP_ML:
		handle_mlppp(ndo, p, length);
		break;
	case PPP_CHAP:
		handle_chap(ndo, p, length);
		break;
	case PPP_PAP:
		handle_pap(ndo, p, length);
		break;
	case PPP_BAP:		/* XXX: not yet completed */
		handle_bap(ndo, p, length);
		break;
	case ETHERTYPE_IP:	/*XXX*/
	case PPP_VJNC:
	case PPP_IP:
		ip_print(ndo, p, length);
		break;
	case ETHERTYPE_IPV6:	/*XXX*/
	case PPP_IPV6:
		ip6_print(ndo, p, length);
		break;
	case ETHERTYPE_IPX:	/*XXX*/
	case PPP_IPX:
		ipx_print(ndo, p, length);
		break;
	case PPP_OSI:
		isoclns_print(ndo, p, length, length);
		break;
	case PPP_MPLS_UCAST:
	case PPP_MPLS_MCAST:
		mpls_print(ndo, p, length);
		break;
	case PPP_COMP:
		ND_PRINT((ndo, "compressed PPP data"));
		break;
	default:
		ND_PRINT((ndo, "%s ", tok2str(ppptype2str, "unknown PPP protocol (0x%04x)", proto)));
		print_unknown_data(ndo, p, "\n\t", length);
		break;
	}
}

/* Standard PPP printer */
/*
 * Entry point for DLT_PPP frames; strips the optional Address/Control
 * and direction octets, then hands the payload to handle_ppp().
 * Returns the number of header octets consumed.
 */
u_int
ppp_print(netdissect_options *ndo,
          register const u_char *p, u_int length)
{
	u_int proto,ppp_header;
	u_int olen = length; /* _o_riginal length */
	u_int hdr_len = 0;

	/*
	 * Here, we assume that p points to the Address and Control
	 * field (if they present.
*/ if (length < 2) goto trunc; ND_TCHECK2(*p, 2); ppp_header = EXTRACT_16BITS(p); switch(ppp_header) { case (PPP_WITHDIRECTION_IN << 8 | PPP_CONTROL): if (ndo->ndo_eflag) ND_PRINT((ndo, "In ")); p += 2; length -= 2; hdr_len += 2; break; case (PPP_WITHDIRECTION_OUT << 8 | PPP_CONTROL): if (ndo->ndo_eflag) ND_PRINT((ndo, "Out ")); p += 2; length -= 2; hdr_len += 2; break; case (PPP_ADDRESS << 8 | PPP_CONTROL): p += 2; /* ACFC not used */ length -= 2; hdr_len += 2; break; default: break; } if (length < 2) goto trunc; ND_TCHECK(*p); if (*p % 2) { proto = *p; /* PFC is used */ p++; length--; hdr_len++; } else { ND_TCHECK2(*p, 2); proto = EXTRACT_16BITS(p); p += 2; length -= 2; hdr_len += 2; } if (ndo->ndo_eflag) ND_PRINT((ndo, "%s (0x%04x), length %u: ", tok2str(ppptype2str, "unknown", proto), proto, olen)); handle_ppp(ndo, proto, p, length); return (hdr_len); trunc: ND_PRINT((ndo, "[|ppp]")); return (0); } /* PPP I/F printer */ u_int ppp_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p) { register u_int length = h->len; register u_int caplen = h->caplen; if (caplen < PPP_HDRLEN) { ND_PRINT((ndo, "[|ppp]")); return (caplen); } #if 0 /* * XXX: seems to assume that there are 2 octets prepended to an * actual PPP frame. The 1st octet looks like Input/Output flag * while 2nd octet is unknown, at least to me * (mshindo@mshindo.net). * * That was what the original tcpdump code did. * * FreeBSD's "if_ppp.c" *does* set the first octet to 1 for outbound * packets and 0 for inbound packets - but only if the * protocol field has the 0x8000 bit set (i.e., it's a network * control protocol); it does so before running the packet through * "bpf_filter" to see if it should be discarded, and to see * if we should update the time we sent the most recent packet... * * ...but it puts the original address field back after doing * so. * * NetBSD's "if_ppp.c" doesn't set the first octet in that fashion. 
* * I don't know if any PPP implementation handed up to a BPF * device packets with the first octet being 1 for outbound and * 0 for inbound packets, so I (guy@alum.mit.edu) don't know * whether that ever needs to be checked or not. * * Note that NetBSD has a DLT_PPP_SERIAL, which it uses for PPP, * and its tcpdump appears to assume that the frame always * begins with an address field and a control field, and that * the address field might be 0x0f or 0x8f, for Cisco * point-to-point with HDLC framing as per section 4.3.1 of RFC * 1547, as well as 0xff, for PPP in HDLC-like framing as per * RFC 1662. * * (Is the Cisco framing in question what DLT_C_HDLC, in * BSD/OS, is?) */ if (ndo->ndo_eflag) ND_PRINT((ndo, "%c %4d %02x ", p[0] ? 'O' : 'I', length, p[1])); #endif ppp_print(ndo, p, length); return (0); } /* * PPP I/F printer to use if we know that RFC 1662-style PPP in HDLC-like * framing, or Cisco PPP with HDLC framing as per section 4.3.1 of RFC 1547, * is being used (i.e., we don't check for PPP_ADDRESS and PPP_CONTROL, * discard them *if* those are the first two octets, and parse the remaining * packet as a PPP packet, as "ppp_print()" does). * * This handles, for example, DLT_PPP_SERIAL in NetBSD. 
*/ u_int ppp_hdlc_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p) { register u_int length = h->len; register u_int caplen = h->caplen; u_int proto; u_int hdrlen = 0; if (caplen < 2) { ND_PRINT((ndo, "[|ppp]")); return (caplen); } switch (p[0]) { case PPP_ADDRESS: if (caplen < 4) { ND_PRINT((ndo, "[|ppp]")); return (caplen); } if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x %d ", p[0], p[1], length)); p += 2; length -= 2; hdrlen += 2; proto = EXTRACT_16BITS(p); p += 2; length -= 2; hdrlen += 2; ND_PRINT((ndo, "%s: ", tok2str(ppptype2str, "unknown PPP protocol (0x%04x)", proto))); handle_ppp(ndo, proto, p, length); break; case CHDLC_UNICAST: case CHDLC_BCAST: return (chdlc_if_print(ndo, h, p)); default: if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x %d ", p[0], p[1], length)); p += 2; hdrlen += 2; /* * XXX - NetBSD's "ppp_netbsd_serial_if_print()" treats * the next two octets as an Ethernet type; does that * ever happen? */ ND_PRINT((ndo, "unknown addr %02x; ctrl %02x", p[0], p[1])); break; } return (hdrlen); } #define PPP_BSDI_HDRLEN 24 /* BSD/OS specific PPP printer */ u_int ppp_bsdos_if_print(netdissect_options *ndo _U_, const struct pcap_pkthdr *h _U_, register const u_char *p _U_) { register int hdrlength; #ifdef __bsdi__ register u_int length = h->len; register u_int caplen = h->caplen; uint16_t ptype; const u_char *q; int i; if (caplen < PPP_BSDI_HDRLEN) { ND_PRINT((ndo, "[|ppp]")); return (caplen) } hdrlength = 0; #if 0 if (p[0] == PPP_ADDRESS && p[1] == PPP_CONTROL) { if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x ", p[0], p[1])); p += 2; hdrlength = 2; } if (ndo->ndo_eflag) ND_PRINT((ndo, "%d ", length)); /* Retrieve the protocol type */ if (*p & 01) { /* Compressed protocol field */ ptype = *p; if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x ", ptype)); p++; hdrlength += 1; } else { /* Un-compressed protocol field */ ptype = EXTRACT_16BITS(p); if (ndo->ndo_eflag) ND_PRINT((ndo, "%04x ", ptype)); p += 2; hdrlength += 2; } #else 
ptype = 0; /*XXX*/ if (ndo->ndo_eflag) ND_PRINT((ndo, "%c ", p[SLC_DIR] ? 'O' : 'I')); if (p[SLC_LLHL]) { /* link level header */ struct ppp_header *ph; q = p + SLC_BPFHDRLEN; ph = (struct ppp_header *)q; if (ph->phdr_addr == PPP_ADDRESS && ph->phdr_ctl == PPP_CONTROL) { if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x ", q[0], q[1])); ptype = EXTRACT_16BITS(&ph->phdr_type); if (ndo->ndo_eflag && (ptype == PPP_VJC || ptype == PPP_VJNC)) { ND_PRINT((ndo, "%s ", tok2str(ppptype2str, "proto-#%d", ptype))); } } else { if (ndo->ndo_eflag) { ND_PRINT((ndo, "LLH=[")); for (i = 0; i < p[SLC_LLHL]; i++) ND_PRINT((ndo, "%02x", q[i])); ND_PRINT((ndo, "] ")); } } } if (ndo->ndo_eflag) ND_PRINT((ndo, "%d ", length)); if (p[SLC_CHL]) { q = p + SLC_BPFHDRLEN + p[SLC_LLHL]; switch (ptype) { case PPP_VJC: ptype = vjc_print(ndo, q, ptype); hdrlength = PPP_BSDI_HDRLEN; p += hdrlength; switch (ptype) { case PPP_IP: ip_print(ndo, p, length); break; case PPP_IPV6: ip6_print(ndo, p, length); break; case PPP_MPLS_UCAST: case PPP_MPLS_MCAST: mpls_print(ndo, p, length); break; } goto printx; case PPP_VJNC: ptype = vjc_print(ndo, q, ptype); hdrlength = PPP_BSDI_HDRLEN; p += hdrlength; switch (ptype) { case PPP_IP: ip_print(ndo, p, length); break; case PPP_IPV6: ip6_print(ndo, p, length); break; case PPP_MPLS_UCAST: case PPP_MPLS_MCAST: mpls_print(ndo, p, length); break; } goto printx; default: if (ndo->ndo_eflag) { ND_PRINT((ndo, "CH=[")); for (i = 0; i < p[SLC_LLHL]; i++) ND_PRINT((ndo, "%02x", q[i])); ND_PRINT((ndo, "] ")); } break; } } hdrlength = PPP_BSDI_HDRLEN; #endif length -= hdrlength; p += hdrlength; switch (ptype) { case PPP_IP: ip_print(p, length); break; case PPP_IPV6: ip6_print(ndo, p, length); break; case PPP_MPLS_UCAST: case PPP_MPLS_MCAST: mpls_print(ndo, p, length); break; default: ND_PRINT((ndo, "%s ", tok2str(ppptype2str, "unknown PPP protocol (0x%04x)", ptype))); } printx: #else /* __bsdi */ hdrlength = 0; #endif /* __bsdi__ */ return (hdrlength); } /* * Local Variables: * 
c-style: whitesmith * c-basic-offset: 8 * End: */
/* * Copyright (c) 1990, 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Extensively modified by Motonori Shindo (mshindo@mshindo.net) for more * complete PPP support. */ /* * TODO: * o resolve XXX as much as possible * o MP support * o BAP support */ #define NETDISSECT_REWORKED #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <tcpdump-stdinc.h> #ifdef __bsdi__ #include <net/slcompress.h> #include <net/if_ppp.h> #endif #include <stdlib.h> #include "interface.h" #include "extract.h" #include "addrtoname.h" #include "ppp.h" #include "chdlc.h" #include "ethertype.h" #include "oui.h" /* * The following constatns are defined by IANA. Please refer to * http://www.isi.edu/in-notes/iana/assignments/ppp-numbers * for the up-to-date information. 
*/ /* Protocol Codes defined in ppp.h */ static const struct tok ppptype2str[] = { { PPP_IP, "IP" }, { PPP_OSI, "OSI" }, { PPP_NS, "NS" }, { PPP_DECNET, "DECNET" }, { PPP_APPLE, "APPLE" }, { PPP_IPX, "IPX" }, { PPP_VJC, "VJC IP" }, { PPP_VJNC, "VJNC IP" }, { PPP_BRPDU, "BRPDU" }, { PPP_STII, "STII" }, { PPP_VINES, "VINES" }, { PPP_MPLS_UCAST, "MPLS" }, { PPP_MPLS_MCAST, "MPLS" }, { PPP_COMP, "Compressed"}, { PPP_ML, "MLPPP"}, { PPP_IPV6, "IP6"}, { PPP_HELLO, "HELLO" }, { PPP_LUXCOM, "LUXCOM" }, { PPP_SNS, "SNS" }, { PPP_IPCP, "IPCP" }, { PPP_OSICP, "OSICP" }, { PPP_NSCP, "NSCP" }, { PPP_DECNETCP, "DECNETCP" }, { PPP_APPLECP, "APPLECP" }, { PPP_IPXCP, "IPXCP" }, { PPP_STIICP, "STIICP" }, { PPP_VINESCP, "VINESCP" }, { PPP_IPV6CP, "IP6CP" }, { PPP_MPLSCP, "MPLSCP" }, { PPP_LCP, "LCP" }, { PPP_PAP, "PAP" }, { PPP_LQM, "LQM" }, { PPP_CHAP, "CHAP" }, { PPP_EAP, "EAP" }, { PPP_SPAP, "SPAP" }, { PPP_SPAP_OLD, "Old-SPAP" }, { PPP_BACP, "BACP" }, { PPP_BAP, "BAP" }, { PPP_MPCP, "MLPPP-CP" }, { PPP_CCP, "CCP" }, { 0, NULL } }; /* Control Protocols (LCP/IPCP/CCP etc.) 
Codes defined in RFC 1661 */ #define CPCODES_VEXT 0 /* Vendor-Specific (RFC2153) */ #define CPCODES_CONF_REQ 1 /* Configure-Request */ #define CPCODES_CONF_ACK 2 /* Configure-Ack */ #define CPCODES_CONF_NAK 3 /* Configure-Nak */ #define CPCODES_CONF_REJ 4 /* Configure-Reject */ #define CPCODES_TERM_REQ 5 /* Terminate-Request */ #define CPCODES_TERM_ACK 6 /* Terminate-Ack */ #define CPCODES_CODE_REJ 7 /* Code-Reject */ #define CPCODES_PROT_REJ 8 /* Protocol-Reject (LCP only) */ #define CPCODES_ECHO_REQ 9 /* Echo-Request (LCP only) */ #define CPCODES_ECHO_RPL 10 /* Echo-Reply (LCP only) */ #define CPCODES_DISC_REQ 11 /* Discard-Request (LCP only) */ #define CPCODES_ID 12 /* Identification (LCP only) RFC1570 */ #define CPCODES_TIME_REM 13 /* Time-Remaining (LCP only) RFC1570 */ #define CPCODES_RESET_REQ 14 /* Reset-Request (CCP only) RFC1962 */ #define CPCODES_RESET_REP 15 /* Reset-Reply (CCP only) */ static const struct tok cpcodes[] = { {CPCODES_VEXT, "Vendor-Extension"}, /* RFC2153 */ {CPCODES_CONF_REQ, "Conf-Request"}, {CPCODES_CONF_ACK, "Conf-Ack"}, {CPCODES_CONF_NAK, "Conf-Nack"}, {CPCODES_CONF_REJ, "Conf-Reject"}, {CPCODES_TERM_REQ, "Term-Request"}, {CPCODES_TERM_ACK, "Term-Ack"}, {CPCODES_CODE_REJ, "Code-Reject"}, {CPCODES_PROT_REJ, "Prot-Reject"}, {CPCODES_ECHO_REQ, "Echo-Request"}, {CPCODES_ECHO_RPL, "Echo-Reply"}, {CPCODES_DISC_REQ, "Disc-Req"}, {CPCODES_ID, "Ident"}, /* RFC1570 */ {CPCODES_TIME_REM, "Time-Rem"}, /* RFC1570 */ {CPCODES_RESET_REQ, "Reset-Req"}, /* RFC1962 */ {CPCODES_RESET_REP, "Reset-Ack"}, /* RFC1962 */ {0, NULL} }; /* LCP Config Options */ #define LCPOPT_VEXT 0 #define LCPOPT_MRU 1 #define LCPOPT_ACCM 2 #define LCPOPT_AP 3 #define LCPOPT_QP 4 #define LCPOPT_MN 5 #define LCPOPT_DEP6 6 #define LCPOPT_PFC 7 #define LCPOPT_ACFC 8 #define LCPOPT_FCSALT 9 #define LCPOPT_SDP 10 #define LCPOPT_NUMMODE 11 #define LCPOPT_DEP12 12 #define LCPOPT_CBACK 13 #define LCPOPT_DEP14 14 #define LCPOPT_DEP15 15 #define LCPOPT_DEP16 16 #define LCPOPT_MLMRRU 17 
#define LCPOPT_MLSSNHF 18 #define LCPOPT_MLED 19 #define LCPOPT_PROP 20 #define LCPOPT_DCEID 21 #define LCPOPT_MPP 22 #define LCPOPT_LD 23 #define LCPOPT_LCPAOPT 24 #define LCPOPT_COBS 25 #define LCPOPT_PE 26 #define LCPOPT_MLHF 27 #define LCPOPT_I18N 28 #define LCPOPT_SDLOS 29 #define LCPOPT_PPPMUX 30 #define LCPOPT_MIN LCPOPT_VEXT #define LCPOPT_MAX LCPOPT_PPPMUX static const char *lcpconfopts[] = { "Vend-Ext", /* (0) */ "MRU", /* (1) */ "ACCM", /* (2) */ "Auth-Prot", /* (3) */ "Qual-Prot", /* (4) */ "Magic-Num", /* (5) */ "deprecated(6)", /* used to be a Quality Protocol */ "PFC", /* (7) */ "ACFC", /* (8) */ "FCS-Alt", /* (9) */ "SDP", /* (10) */ "Num-Mode", /* (11) */ "deprecated(12)", /* used to be a Multi-Link-Procedure*/ "Call-Back", /* (13) */ "deprecated(14)", /* used to be a Connect-Time */ "deprecated(15)", /* used to be a Compund-Frames */ "deprecated(16)", /* used to be a Nominal-Data-Encap */ "MRRU", /* (17) */ "12-Bit seq #", /* (18) */ "End-Disc", /* (19) */ "Proprietary", /* (20) */ "DCE-Id", /* (21) */ "MP+", /* (22) */ "Link-Disc", /* (23) */ "LCP-Auth-Opt", /* (24) */ "COBS", /* (25) */ "Prefix-elision", /* (26) */ "Multilink-header-Form",/* (27) */ "I18N", /* (28) */ "SDL-over-SONET/SDH", /* (29) */ "PPP-Muxing", /* (30) */ }; /* ECP - to be supported */ /* CCP Config Options */ #define CCPOPT_OUI 0 /* RFC1962 */ #define CCPOPT_PRED1 1 /* RFC1962 */ #define CCPOPT_PRED2 2 /* RFC1962 */ #define CCPOPT_PJUMP 3 /* RFC1962 */ /* 4-15 unassigned */ #define CCPOPT_HPPPC 16 /* RFC1962 */ #define CCPOPT_STACLZS 17 /* RFC1974 */ #define CCPOPT_MPPC 18 /* RFC2118 */ #define CCPOPT_GFZA 19 /* RFC1962 */ #define CCPOPT_V42BIS 20 /* RFC1962 */ #define CCPOPT_BSDCOMP 21 /* RFC1977 */ /* 22 unassigned */ #define CCPOPT_LZSDCP 23 /* RFC1967 */ #define CCPOPT_MVRCA 24 /* RFC1975 */ #define CCPOPT_DEC 25 /* RFC1976 */ #define CCPOPT_DEFLATE 26 /* RFC1979 */ /* 27-254 unassigned */ #define CCPOPT_RESV 255 /* RFC1962 */ static const struct tok ccpconfopts_values[] 
= { { CCPOPT_OUI, "OUI" }, { CCPOPT_PRED1, "Pred-1" }, { CCPOPT_PRED2, "Pred-2" }, { CCPOPT_PJUMP, "Puddle" }, { CCPOPT_HPPPC, "HP-PPC" }, { CCPOPT_STACLZS, "Stac-LZS" }, { CCPOPT_MPPC, "MPPC" }, { CCPOPT_GFZA, "Gand-FZA" }, { CCPOPT_V42BIS, "V.42bis" }, { CCPOPT_BSDCOMP, "BSD-Comp" }, { CCPOPT_LZSDCP, "LZS-DCP" }, { CCPOPT_MVRCA, "MVRCA" }, { CCPOPT_DEC, "DEC" }, { CCPOPT_DEFLATE, "Deflate" }, { CCPOPT_RESV, "Reserved"}, {0, NULL} }; /* BACP Config Options */ #define BACPOPT_FPEER 1 /* RFC2125 */ static const struct tok bacconfopts_values[] = { { BACPOPT_FPEER, "Favored-Peer" }, {0, NULL} }; /* SDCP - to be supported */ /* IPCP Config Options */ #define IPCPOPT_2ADDR 1 /* RFC1172, RFC1332 (deprecated) */ #define IPCPOPT_IPCOMP 2 /* RFC1332 */ #define IPCPOPT_ADDR 3 /* RFC1332 */ #define IPCPOPT_MOBILE4 4 /* RFC2290 */ #define IPCPOPT_PRIDNS 129 /* RFC1877 */ #define IPCPOPT_PRINBNS 130 /* RFC1877 */ #define IPCPOPT_SECDNS 131 /* RFC1877 */ #define IPCPOPT_SECNBNS 132 /* RFC1877 */ static const struct tok ipcpopt_values[] = { { IPCPOPT_2ADDR, "IP-Addrs" }, { IPCPOPT_IPCOMP, "IP-Comp" }, { IPCPOPT_ADDR, "IP-Addr" }, { IPCPOPT_MOBILE4, "Home-Addr" }, { IPCPOPT_PRIDNS, "Pri-DNS" }, { IPCPOPT_PRINBNS, "Pri-NBNS" }, { IPCPOPT_SECDNS, "Sec-DNS" }, { IPCPOPT_SECNBNS, "Sec-NBNS" }, { 0, NULL } }; #define IPCPOPT_IPCOMP_HDRCOMP 0x61 /* rfc3544 */ #define IPCPOPT_IPCOMP_MINLEN 14 static const struct tok ipcpopt_compproto_values[] = { { PPP_VJC, "VJ-Comp" }, { IPCPOPT_IPCOMP_HDRCOMP, "IP Header Compression" }, { 0, NULL } }; static const struct tok ipcpopt_compproto_subopt_values[] = { { 1, "RTP-Compression" }, { 2, "Enhanced RTP-Compression" }, { 0, NULL } }; /* IP6CP Config Options */ #define IP6CP_IFID 1 static const struct tok ip6cpopt_values[] = { { IP6CP_IFID, "Interface-ID" }, { 0, NULL } }; /* ATCP - to be supported */ /* OSINLCP - to be supported */ /* BVCP - to be supported */ /* BCP - to be supported */ /* IPXCP - to be supported */ /* MPLSCP - to be supported */ 
/* Auth Algorithms */ /* 0-4 Reserved (RFC1994) */ #define AUTHALG_CHAPMD5 5 /* RFC1994 */ #define AUTHALG_MSCHAP1 128 /* RFC2433 */ #define AUTHALG_MSCHAP2 129 /* RFC2795 */ static const struct tok authalg_values[] = { { AUTHALG_CHAPMD5, "MD5" }, { AUTHALG_MSCHAP1, "MS-CHAPv1" }, { AUTHALG_MSCHAP2, "MS-CHAPv2" }, { 0, NULL } }; /* FCS Alternatives - to be supported */ /* Multilink Endpoint Discriminator (RFC1717) */ #define MEDCLASS_NULL 0 /* Null Class */ #define MEDCLASS_LOCAL 1 /* Locally Assigned */ #define MEDCLASS_IPV4 2 /* Internet Protocol (IPv4) */ #define MEDCLASS_MAC 3 /* IEEE 802.1 global MAC address */ #define MEDCLASS_MNB 4 /* PPP Magic Number Block */ #define MEDCLASS_PSNDN 5 /* Public Switched Network Director Number */ /* PPP LCP Callback */ #define CALLBACK_AUTH 0 /* Location determined by user auth */ #define CALLBACK_DSTR 1 /* Dialing string */ #define CALLBACK_LID 2 /* Location identifier */ #define CALLBACK_E164 3 /* E.164 number */ #define CALLBACK_X500 4 /* X.500 distinguished name */ #define CALLBACK_CBCP 6 /* Location is determined during CBCP nego */ static const struct tok ppp_callback_values[] = { { CALLBACK_AUTH, "UserAuth" }, { CALLBACK_DSTR, "DialString" }, { CALLBACK_LID, "LocalID" }, { CALLBACK_E164, "E.164" }, { CALLBACK_X500, "X.500" }, { CALLBACK_CBCP, "CBCP" }, { 0, NULL } }; /* CHAP */ #define CHAP_CHAL 1 #define CHAP_RESP 2 #define CHAP_SUCC 3 #define CHAP_FAIL 4 static const struct tok chapcode_values[] = { { CHAP_CHAL, "Challenge" }, { CHAP_RESP, "Response" }, { CHAP_SUCC, "Success" }, { CHAP_FAIL, "Fail" }, { 0, NULL} }; /* PAP */ #define PAP_AREQ 1 #define PAP_AACK 2 #define PAP_ANAK 3 static const struct tok papcode_values[] = { { PAP_AREQ, "Auth-Req" }, { PAP_AACK, "Auth-ACK" }, { PAP_ANAK, "Auth-NACK" }, { 0, NULL } }; /* BAP */ #define BAP_CALLREQ 1 #define BAP_CALLRES 2 #define BAP_CBREQ 3 #define BAP_CBRES 4 #define BAP_LDQREQ 5 #define BAP_LDQRES 6 #define BAP_CSIND 7 #define BAP_CSRES 8 static int 
print_lcp_config_options(netdissect_options *, const u_char *p, int); static int print_ipcp_config_options(netdissect_options *, const u_char *p, int); static int print_ip6cp_config_options(netdissect_options *, const u_char *p, int); static int print_ccp_config_options(netdissect_options *, const u_char *p, int); static int print_bacp_config_options(netdissect_options *, const u_char *p, int); static void handle_ppp(netdissect_options *, u_int proto, const u_char *p, int length); /* generic Control Protocol (e.g. LCP, IPCP, CCP, etc.) handler */ static void handle_ctrl_proto(netdissect_options *ndo, u_int proto, const u_char *pptr, int length) { const char *typestr; u_int code, len; int (*pfunc)(netdissect_options *, const u_char *, int); int x, j; const u_char *tptr; tptr=pptr; typestr = tok2str(ppptype2str, "unknown ctrl-proto (0x%04x)", proto); ND_PRINT((ndo, "%s, ", typestr)); if (length < 4) /* FIXME weak boundary checking */ goto trunc; ND_TCHECK2(*tptr, 2); code = *tptr++; ND_PRINT((ndo, "%s (0x%02x), id %u, length %u", tok2str(cpcodes, "Unknown Opcode",code), code, *tptr++, /* ID */ length + 2)); if (!ndo->ndo_vflag) return; if (length <= 4) return; /* there may be a NULL confreq etc. */ ND_TCHECK2(*tptr, 2); len = EXTRACT_16BITS(tptr); tptr += 2; ND_PRINT((ndo, "\n\tencoded length %u (=Option(s) length %u)", len, len - 4)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr - 2, "\n\t", 6); switch (code) { case CPCODES_VEXT: if (length < 11) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); tptr += 4; ND_TCHECK2(*tptr, 3); ND_PRINT((ndo, " Vendor: %s (%u)", tok2str(oui_values,"Unknown",EXTRACT_24BITS(tptr)), EXTRACT_24BITS(tptr))); /* XXX: need to decode Kind and Value(s)? 
*/ break; case CPCODES_CONF_REQ: case CPCODES_CONF_ACK: case CPCODES_CONF_NAK: case CPCODES_CONF_REJ: x = len - 4; /* Code(1), Identifier(1) and Length(2) */ do { switch (proto) { case PPP_LCP: pfunc = print_lcp_config_options; break; case PPP_IPCP: pfunc = print_ipcp_config_options; break; case PPP_IPV6CP: pfunc = print_ip6cp_config_options; break; case PPP_CCP: pfunc = print_ccp_config_options; break; case PPP_BACP: pfunc = print_bacp_config_options; break; default: /* * No print routine for the options for * this protocol. */ pfunc = NULL; break; } if (pfunc == NULL) /* catch the above null pointer if unknown CP */ break; if ((j = (*pfunc)(ndo, tptr, len)) == 0) break; x -= j; tptr += j; } while (x > 0); break; case CPCODES_TERM_REQ: case CPCODES_TERM_ACK: /* XXX: need to decode Data? */ break; case CPCODES_CODE_REJ: /* XXX: need to decode Rejected-Packet? */ break; case CPCODES_PROT_REJ: if (length < 6) break; ND_TCHECK2(*tptr, 2); ND_PRINT((ndo, "\n\t Rejected %s Protocol (0x%04x)", tok2str(ppptype2str,"unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); /* XXX: need to decode Rejected-Information? - hexdump for now */ if (len > 6) { ND_PRINT((ndo, "\n\t Rejected Packet")); print_unknown_data(ndo, tptr + 2, "\n\t ", len - 2); } break; case CPCODES_ECHO_REQ: case CPCODES_ECHO_RPL: case CPCODES_DISC_REQ: if (length < 8) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); /* XXX: need to decode Data? 
- hexdump for now */ if (len > 8) { ND_PRINT((ndo, "\n\t -----trailing data-----")); ND_TCHECK2(tptr[4], len - 8); print_unknown_data(ndo, tptr + 4, "\n\t ", len - 8); } break; case CPCODES_ID: if (length < 8) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); /* RFC 1661 says this is intended to be human readable */ if (len > 8) { ND_PRINT((ndo, "\n\t Message\n\t ")); if (fn_printn(ndo, tptr + 4, len - 4, ndo->ndo_snapend)) goto trunc; } break; case CPCODES_TIME_REM: if (length < 12) break; ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Magic-Num 0x%08x", EXTRACT_32BITS(tptr))); ND_TCHECK2(*(tptr + 4), 4); ND_PRINT((ndo, ", Seconds-Remaining %us", EXTRACT_32BITS(tptr + 4))); /* XXX: need to decode Message? */ break; default: /* XXX this is dirty but we do not get the * original pointer passed to the begin * the PPP packet */ if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, pptr - 2, "\n\t ", length + 2); break; } return; trunc: ND_PRINT((ndo, "[|%s]", typestr)); } /* LCP config options */ static int print_lcp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { if ((opt >= LCPOPT_MIN) && (opt <= LCPOPT_MAX)) ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", lcpconfopts[opt], opt, len)); else ND_PRINT((ndo, "\n\tunknown LCP option 0x%02x", opt)); return 0; } if ((opt >= LCPOPT_MIN) && (opt <= LCPOPT_MAX)) ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", lcpconfopts[opt], opt, len)); else { ND_PRINT((ndo, "\n\tunknown LCP option 0x%02x", opt)); return len; } switch (opt) { case LCPOPT_VEXT: if (len < 6) { ND_PRINT((ndo, " (length bogus, should be >= 6)")); return len; } ND_TCHECK2(*(p + 2), 3); ND_PRINT((ndo, ": Vendor: %s (%u)", tok2str(oui_values,"Unknown",EXTRACT_24BITS(p+2)), EXTRACT_24BITS(p + 2))); #if 0 ND_TCHECK(p[5]); ND_PRINT((ndo, ", kind: 
0x%02x", p[5])); ND_PRINT((ndo, ", Value: 0x")); for (i = 0; i < len - 6; i++) { ND_TCHECK(p[6 + i]); ND_PRINT((ndo, "%02x", p[6 + i])); } #endif break; case LCPOPT_MRU: if (len != 4) { ND_PRINT((ndo, " (length bogus, should be = 4)")); return len; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": %u", EXTRACT_16BITS(p + 2))); break; case LCPOPT_ACCM: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return len; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": 0x%08x", EXTRACT_32BITS(p + 2))); break; case LCPOPT_AP: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return len; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": %s", tok2str(ppptype2str, "Unknown Auth Proto (0x04x)", EXTRACT_16BITS(p + 2)))); switch (EXTRACT_16BITS(p+2)) { case PPP_CHAP: ND_TCHECK(p[4]); ND_PRINT((ndo, ", %s", tok2str(authalg_values, "Unknown Auth Alg %u", p[4]))); break; case PPP_PAP: /* fall through */ case PPP_EAP: case PPP_SPAP: case PPP_SPAP_OLD: break; default: print_unknown_data(ndo, p, "\n\t", len); } break; case LCPOPT_QP: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return 0; } ND_TCHECK2(*(p + 2), 2); if (EXTRACT_16BITS(p+2) == PPP_LQM) ND_PRINT((ndo, ": LQR")); else ND_PRINT((ndo, ": unknown")); break; case LCPOPT_MN: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return 0; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": 0x%08x", EXTRACT_32BITS(p + 2))); break; case LCPOPT_PFC: break; case LCPOPT_ACFC: break; case LCPOPT_LD: if (len != 4) { ND_PRINT((ndo, " (length bogus, should be = 4)")); return 0; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": 0x%04x", EXTRACT_16BITS(p + 2))); break; case LCPOPT_CBACK: if (len < 3) { ND_PRINT((ndo, " (length bogus, should be >= 3)")); return 0; } ND_PRINT((ndo, ": ")); ND_TCHECK(p[2]); ND_PRINT((ndo, ": Callback Operation %s (%u)", tok2str(ppp_callback_values, "Unknown", p[2]), p[2])); break; case LCPOPT_MLMRRU: if (len != 4) { ND_PRINT((ndo, " (length bogus, should be = 4)")); 
return 0; } ND_TCHECK2(*(p + 2), 2); ND_PRINT((ndo, ": %u", EXTRACT_16BITS(p + 2))); break; case LCPOPT_MLED: if (len < 3) { ND_PRINT((ndo, " (length bogus, should be >= 3)")); return 0; } ND_TCHECK(p[2]); switch (p[2]) { /* class */ case MEDCLASS_NULL: ND_PRINT((ndo, ": Null")); break; case MEDCLASS_LOCAL: ND_PRINT((ndo, ": Local")); /* XXX */ break; case MEDCLASS_IPV4: if (len != 7) { ND_PRINT((ndo, " (length bogus, should be = 7)")); return 0; } ND_TCHECK2(*(p + 3), 4); ND_PRINT((ndo, ": IPv4 %s", ipaddr_string(ndo, p + 3))); break; case MEDCLASS_MAC: if (len != 9) { ND_PRINT((ndo, " (length bogus, should be = 9)")); return 0; } ND_TCHECK2(*(p + 3), 6); ND_PRINT((ndo, ": MAC %s", etheraddr_string(ndo, p + 3))); break; case MEDCLASS_MNB: ND_PRINT((ndo, ": Magic-Num-Block")); /* XXX */ break; case MEDCLASS_PSNDN: ND_PRINT((ndo, ": PSNDN")); /* XXX */ break; default: ND_PRINT((ndo, ": Unknown class %u", p[2])); break; } break; /* XXX: to be supported */ #if 0 case LCPOPT_DEP6: case LCPOPT_FCSALT: case LCPOPT_SDP: case LCPOPT_NUMMODE: case LCPOPT_DEP12: case LCPOPT_DEP14: case LCPOPT_DEP15: case LCPOPT_DEP16: case LCPOPT_MLSSNHF: case LCPOPT_PROP: case LCPOPT_DCEID: case LCPOPT_MPP: case LCPOPT_LCPAOPT: case LCPOPT_COBS: case LCPOPT_PE: case LCPOPT_MLHF: case LCPOPT_I18N: case LCPOPT_SDLOS: case LCPOPT_PPPMUX: break; #endif default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. 
*/ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|lcp]")); return 0; } /* ML-PPP*/ static const struct tok ppp_ml_flag_values[] = { { 0x80, "begin" }, { 0x40, "end" }, { 0, NULL } }; static void handle_mlppp(netdissect_options *ndo, const u_char *p, int length) { if (!ndo->ndo_eflag) ND_PRINT((ndo, "MLPPP, ")); ND_PRINT((ndo, "seq 0x%03x, Flags [%s], length %u", (EXTRACT_16BITS(p))&0x0fff, /* only support 12-Bit sequence space for now */ bittok2str(ppp_ml_flag_values, "none", *p & 0xc0), length)); } /* CHAP */ static void handle_chap(netdissect_options *ndo, const u_char *p, int length) { u_int code, len; int val_size, name_size, msg_size; const u_char *p0; int i; p0 = p; if (length < 1) { ND_PRINT((ndo, "[|chap]")); return; } else if (length < 4) { ND_TCHECK(*p); ND_PRINT((ndo, "[|chap 0x%02x]", *p)); return; } ND_TCHECK(*p); code = *p; ND_PRINT((ndo, "CHAP, %s (0x%02x)", tok2str(chapcode_values,"unknown",code), code)); p++; ND_TCHECK(*p); ND_PRINT((ndo, ", id %u", *p)); /* ID */ p++; ND_TCHECK2(*p, 2); len = EXTRACT_16BITS(p); p += 2; /* * Note that this is a generic CHAP decoding routine. Since we * don't know which flavor of CHAP (i.e. CHAP-MD5, MS-CHAPv1, * MS-CHAPv2) is used at this point, we can't decode packet * specifically to each algorithms. Instead, we simply decode * the GCD (Gratest Common Denominator) for all algorithms. 
*/ switch (code) { case CHAP_CHAL: case CHAP_RESP: if (length - (p - p0) < 1) return; ND_TCHECK(*p); val_size = *p; /* value size */ p++; if (length - (p - p0) < val_size) return; ND_PRINT((ndo, ", Value ")); for (i = 0; i < val_size; i++) { ND_TCHECK(*p); ND_PRINT((ndo, "%02x", *p++)); } name_size = len - (p - p0); ND_PRINT((ndo, ", Name ")); for (i = 0; i < name_size; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; case CHAP_SUCC: case CHAP_FAIL: msg_size = len - (p - p0); ND_PRINT((ndo, ", Msg ")); for (i = 0; i< msg_size; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; } return; trunc: ND_PRINT((ndo, "[|chap]")); } /* PAP (see RFC 1334) */ static void handle_pap(netdissect_options *ndo, const u_char *p, int length) { u_int code, len; int peerid_len, passwd_len, msg_len; const u_char *p0; int i; p0 = p; if (length < 1) { ND_PRINT((ndo, "[|pap]")); return; } else if (length < 4) { ND_TCHECK(*p); ND_PRINT((ndo, "[|pap 0x%02x]", *p)); return; } ND_TCHECK(*p); code = *p; ND_PRINT((ndo, "PAP, %s (0x%02x)", tok2str(papcode_values, "unknown", code), code)); p++; ND_TCHECK(*p); ND_PRINT((ndo, ", id %u", *p)); /* ID */ p++; ND_TCHECK2(*p, 2); len = EXTRACT_16BITS(p); p += 2; if ((int)len > length) { ND_PRINT((ndo, ", length %u > packet size", len)); return; } length = len; if (length < (p - p0)) { ND_PRINT((ndo, ", length %u < PAP header length", length)); return; } switch (code) { case PAP_AREQ: if (length - (p - p0) < 1) return; ND_TCHECK(*p); peerid_len = *p; /* Peer-ID Length */ p++; if (length - (p - p0) < peerid_len) return; ND_PRINT((ndo, ", Peer ")); for (i = 0; i < peerid_len; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } if (length - (p - p0) < 1) return; ND_TCHECK(*p); passwd_len = *p; /* Password Length */ p++; if (length - (p - p0) < passwd_len) return; ND_PRINT((ndo, ", Name ")); for (i = 0; i < passwd_len; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; case PAP_AACK: case PAP_ANAK: if (length - (p - p0) < 1) return; ND_TCHECK(*p); 
msg_len = *p; /* Msg-Length */ p++; if (length - (p - p0) < msg_len) return; ND_PRINT((ndo, ", Msg ")); for (i = 0; i< msg_len; i++) { ND_TCHECK(*p); safeputchar(ndo, *p++); } break; } return; trunc: ND_PRINT((ndo, "[|pap]")); } /* BAP */ static void handle_bap(netdissect_options *ndo _U_, const u_char *p _U_, int length _U_) { /* XXX: to be supported!! */ } /* IPCP config options */ static int print_ipcp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; u_int compproto, ipcomp_subopttotallen, ipcomp_subopt, ipcomp_suboptlen; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", tok2str(ipcpopt_values,"unknown",opt), opt, len)); return 0; } ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", tok2str(ipcpopt_values,"unknown",opt), opt, len)); switch (opt) { case IPCPOPT_2ADDR: /* deprecated */ if (len != 10) { ND_PRINT((ndo, " (length bogus, should be = 10)")); return len; } ND_TCHECK2(*(p + 6), 4); ND_PRINT((ndo, ": src %s, dst %s", ipaddr_string(ndo, p + 2), ipaddr_string(ndo, p + 6))); break; case IPCPOPT_IPCOMP: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return 0; } ND_TCHECK2(*(p + 2), 2); compproto = EXTRACT_16BITS(p+2); ND_PRINT((ndo, ": %s (0x%02x):", tok2str(ipcpopt_compproto_values, "Unknown", compproto), compproto)); switch (compproto) { case PPP_VJC: /* XXX: VJ-Comp parameters should be decoded */ break; case IPCPOPT_IPCOMP_HDRCOMP: if (len < IPCPOPT_IPCOMP_MINLEN) { ND_PRINT((ndo, " (length bogus, should be >= %u)", IPCPOPT_IPCOMP_MINLEN)); return 0; } ND_TCHECK2(*(p + 2), IPCPOPT_IPCOMP_MINLEN); ND_PRINT((ndo, "\n\t TCP Space %u, non-TCP Space %u" \ ", maxPeriod %u, maxTime %u, maxHdr %u", EXTRACT_16BITS(p+4), EXTRACT_16BITS(p+6), EXTRACT_16BITS(p+8), EXTRACT_16BITS(p+10), EXTRACT_16BITS(p+12))); /* suboptions present ? 
*/ if (len > IPCPOPT_IPCOMP_MINLEN) { ipcomp_subopttotallen = len - IPCPOPT_IPCOMP_MINLEN; p += IPCPOPT_IPCOMP_MINLEN; ND_PRINT((ndo, "\n\t Suboptions, length %u", ipcomp_subopttotallen)); while (ipcomp_subopttotallen >= 2) { ND_TCHECK2(*p, 2); ipcomp_subopt = *p; ipcomp_suboptlen = *(p+1); /* sanity check */ if (ipcomp_subopt == 0 || ipcomp_suboptlen == 0 ) break; /* XXX: just display the suboptions for now */ ND_PRINT((ndo, "\n\t\t%s Suboption #%u, length %u", tok2str(ipcpopt_compproto_subopt_values, "Unknown", ipcomp_subopt), ipcomp_subopt, ipcomp_suboptlen)); ipcomp_subopttotallen -= ipcomp_suboptlen; p += ipcomp_suboptlen; } } break; default: break; } break; case IPCPOPT_ADDR: /* those options share the same format - fall through */ case IPCPOPT_MOBILE4: case IPCPOPT_PRIDNS: case IPCPOPT_PRINBNS: case IPCPOPT_SECDNS: case IPCPOPT_SECNBNS: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return 0; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": %s", ipaddr_string(ndo, p + 2))); break; default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. 
*/ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|ipcp]")); return 0; } /* IP6CP config options */ static int print_ip6cp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", tok2str(ip6cpopt_values,"unknown",opt), opt, len)); return 0; } ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", tok2str(ip6cpopt_values,"unknown",opt), opt, len)); switch (opt) { case IP6CP_IFID: if (len != 10) { ND_PRINT((ndo, " (length bogus, should be = 10)")); return len; } ND_TCHECK2(*(p + 2), 8); ND_PRINT((ndo, ": %04x:%04x:%04x:%04x", EXTRACT_16BITS(p + 2), EXTRACT_16BITS(p + 4), EXTRACT_16BITS(p + 6), EXTRACT_16BITS(p + 8))); break; default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. 
*/ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|ip6cp]")); return 0; } /* CCP config options */ static int print_ccp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", tok2str(ccpconfopts_values, "Unknown", opt), opt, len)); return 0; } ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", tok2str(ccpconfopts_values, "Unknown", opt), opt, len)); switch (opt) { case CCPOPT_BSDCOMP: if (len < 3) { ND_PRINT((ndo, " (length bogus, should be >= 3)")); return len; } ND_TCHECK2(*(p + 2), 1); ND_PRINT((ndo, ": Version: %u, Dictionary Bits: %u", p[2] >> 5, p[2] & 0x1f)); break; case CCPOPT_MVRCA: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return len; } ND_TCHECK2(*(p + 2), 1); ND_PRINT((ndo, ": Features: %u, PxP: %s, History: %u, #CTX-ID: %u", (p[2] & 0xc0) >> 6, (p[2] & 0x20) ? "Enabled" : "Disabled", p[2] & 0x1f, p[3])); break; case CCPOPT_DEFLATE: if (len < 4) { ND_PRINT((ndo, " (length bogus, should be >= 4)")); return len; } ND_TCHECK2(*(p + 2), 1); ND_PRINT((ndo, ": Window: %uK, Method: %s (0x%x), MBZ: %u, CHK: %u", (p[2] & 0xf0) >> 4, ((p[2] & 0x0f) == 8) ? "zlib" : "unkown", p[2] & 0x0f, (p[3] & 0xfc) >> 2, p[3] & 0x03)); break; /* XXX: to be supported */ #if 0 case CCPOPT_OUI: case CCPOPT_PRED1: case CCPOPT_PRED2: case CCPOPT_PJUMP: case CCPOPT_HPPPC: case CCPOPT_STACLZS: case CCPOPT_MPPC: case CCPOPT_GFZA: case CCPOPT_V42BIS: case CCPOPT_LZSDCP: case CCPOPT_DEC: case CCPOPT_RESV: break; #endif default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. 
*/ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|ccp]")); return 0; } /* BACP config options */ static int print_bacp_config_options(netdissect_options *ndo, const u_char *p, int length) { int len, opt; if (length < 2) return 0; ND_TCHECK2(*p, 2); len = p[1]; opt = p[0]; if (length < len) return 0; if (len < 2) { ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u (length bogus, should be >= 2)", tok2str(bacconfopts_values, "Unknown", opt), opt, len)); return 0; } ND_PRINT((ndo, "\n\t %s Option (0x%02x), length %u", tok2str(bacconfopts_values, "Unknown", opt), opt, len)); switch (opt) { case BACPOPT_FPEER: if (len != 6) { ND_PRINT((ndo, " (length bogus, should be = 6)")); return len; } ND_TCHECK2(*(p + 2), 4); ND_PRINT((ndo, ": Magic-Num 0x%08x", EXTRACT_32BITS(p + 2))); break; default: /* * Unknown option; dump it as raw bytes now if we're * not going to do so below. */ if (ndo->ndo_vflag < 2) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, &p[2], "\n\t ", len - 2); /* exclude TLV header */ return len; trunc: ND_PRINT((ndo, "[|bacp]")); return 0; } static void ppp_hdlc(netdissect_options *ndo, const u_char *p, int length) { u_char *b, *t, c; const u_char *s; int i, proto; const void *se; if (length <= 0) return; b = (u_char *)malloc(length); if (b == NULL) return; /* * Unescape all the data into a temporary, private, buffer. * Do this so that we dont overwrite the original packet * contents. 
*/ for (s = p, t = b, i = length; i > 0 && ND_TTEST(*s); i--) { c = *s++; if (c == 0x7d) { if (i <= 1 || !ND_TTEST(*s)) break; i--; c = *s++ ^ 0x20; } *t++ = c; } se = ndo->ndo_snapend; ndo->ndo_snapend = t; length = t - b; /* now lets guess about the payload codepoint format */ if (length < 1) goto trunc; proto = *b; /* start with a one-octet codepoint guess */ switch (proto) { case PPP_IP: ip_print(ndo, b + 1, length - 1); goto cleanup; case PPP_IPV6: ip6_print(ndo, b + 1, length - 1); goto cleanup; default: /* no luck - try next guess */ break; } if (length < 2) goto trunc; proto = EXTRACT_16BITS(b); /* next guess - load two octets */ switch (proto) { case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */ if (length < 4) goto trunc; proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */ handle_ppp(ndo, proto, b + 4, length - 4); break; default: /* last guess - proto must be a PPP proto-id */ handle_ppp(ndo, proto, b + 2, length - 2); break; } cleanup: ndo->ndo_snapend = se; free(b); return; trunc: ndo->ndo_snapend = se; free(b); ND_PRINT((ndo, "[|ppp]")); } /* PPP */ static void handle_ppp(netdissect_options *ndo, u_int proto, const u_char *p, int length) { if ((proto & 0xff00) == 0x7e00) { /* is this an escape code ? 
*/ ppp_hdlc(ndo, p - 1, length); return; } switch (proto) { case PPP_LCP: /* fall through */ case PPP_IPCP: case PPP_OSICP: case PPP_MPLSCP: case PPP_IPV6CP: case PPP_CCP: case PPP_BACP: handle_ctrl_proto(ndo, proto, p, length); break; case PPP_ML: handle_mlppp(ndo, p, length); break; case PPP_CHAP: handle_chap(ndo, p, length); break; case PPP_PAP: handle_pap(ndo, p, length); break; case PPP_BAP: /* XXX: not yet completed */ handle_bap(ndo, p, length); break; case ETHERTYPE_IP: /*XXX*/ case PPP_VJNC: case PPP_IP: ip_print(ndo, p, length); break; case ETHERTYPE_IPV6: /*XXX*/ case PPP_IPV6: ip6_print(ndo, p, length); break; case ETHERTYPE_IPX: /*XXX*/ case PPP_IPX: ipx_print(ndo, p, length); break; case PPP_OSI: isoclns_print(ndo, p, length, length); break; case PPP_MPLS_UCAST: case PPP_MPLS_MCAST: mpls_print(ndo, p, length); break; case PPP_COMP: ND_PRINT((ndo, "compressed PPP data")); break; default: ND_PRINT((ndo, "%s ", tok2str(ppptype2str, "unknown PPP protocol (0x%04x)", proto))); print_unknown_data(ndo, p, "\n\t", length); break; } } /* Standard PPP printer */ u_int ppp_print(netdissect_options *ndo, register const u_char *p, u_int length) { u_int proto,ppp_header; u_int olen = length; /* _o_riginal length */ u_int hdr_len = 0; /* * Here, we assume that p points to the Address and Control * field (if they present). 
*/ if (length < 2) goto trunc; ND_TCHECK2(*p, 2); ppp_header = EXTRACT_16BITS(p); switch(ppp_header) { case (PPP_WITHDIRECTION_IN << 8 | PPP_CONTROL): if (ndo->ndo_eflag) ND_PRINT((ndo, "In ")); p += 2; length -= 2; hdr_len += 2; break; case (PPP_WITHDIRECTION_OUT << 8 | PPP_CONTROL): if (ndo->ndo_eflag) ND_PRINT((ndo, "Out ")); p += 2; length -= 2; hdr_len += 2; break; case (PPP_ADDRESS << 8 | PPP_CONTROL): p += 2; /* ACFC not used */ length -= 2; hdr_len += 2; break; default: break; } if (length < 2) goto trunc; ND_TCHECK(*p); if (*p % 2) { proto = *p; /* PFC is used */ p++; length--; hdr_len++; } else { ND_TCHECK2(*p, 2); proto = EXTRACT_16BITS(p); p += 2; length -= 2; hdr_len += 2; } if (ndo->ndo_eflag) ND_PRINT((ndo, "%s (0x%04x), length %u: ", tok2str(ppptype2str, "unknown", proto), proto, olen)); handle_ppp(ndo, proto, p, length); return (hdr_len); trunc: ND_PRINT((ndo, "[|ppp]")); return (0); } /* PPP I/F printer */ u_int ppp_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p) { register u_int length = h->len; register u_int caplen = h->caplen; if (caplen < PPP_HDRLEN) { ND_PRINT((ndo, "[|ppp]")); return (caplen); } #if 0 /* * XXX: seems to assume that there are 2 octets prepended to an * actual PPP frame. The 1st octet looks like Input/Output flag * while 2nd octet is unknown, at least to me * (mshindo@mshindo.net). * * That was what the original tcpdump code did. * * FreeBSD's "if_ppp.c" *does* set the first octet to 1 for outbound * packets and 0 for inbound packets - but only if the * protocol field has the 0x8000 bit set (i.e., it's a network * control protocol); it does so before running the packet through * "bpf_filter" to see if it should be discarded, and to see * if we should update the time we sent the most recent packet... * * ...but it puts the original address field back after doing * so. * * NetBSD's "if_ppp.c" doesn't set the first octet in that fashion. 
* * I don't know if any PPP implementation handed up to a BPF * device packets with the first octet being 1 for outbound and * 0 for inbound packets, so I (guy@alum.mit.edu) don't know * whether that ever needs to be checked or not. * * Note that NetBSD has a DLT_PPP_SERIAL, which it uses for PPP, * and its tcpdump appears to assume that the frame always * begins with an address field and a control field, and that * the address field might be 0x0f or 0x8f, for Cisco * point-to-point with HDLC framing as per section 4.3.1 of RFC * 1547, as well as 0xff, for PPP in HDLC-like framing as per * RFC 1662. * * (Is the Cisco framing in question what DLT_C_HDLC, in * BSD/OS, is?) */ if (ndo->ndo_eflag) ND_PRINT((ndo, "%c %4d %02x ", p[0] ? 'O' : 'I', length, p[1])); #endif ppp_print(ndo, p, length); return (0); } /* * PPP I/F printer to use if we know that RFC 1662-style PPP in HDLC-like * framing, or Cisco PPP with HDLC framing as per section 4.3.1 of RFC 1547, * is being used (i.e., we don't check for PPP_ADDRESS and PPP_CONTROL, * discard them *if* those are the first two octets, and parse the remaining * packet as a PPP packet, as "ppp_print()" does). * * This handles, for example, DLT_PPP_SERIAL in NetBSD. 
*/ u_int ppp_hdlc_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p) { register u_int length = h->len; register u_int caplen = h->caplen; u_int proto; u_int hdrlen = 0; if (caplen < 2) { ND_PRINT((ndo, "[|ppp]")); return (caplen); } switch (p[0]) { case PPP_ADDRESS: if (caplen < 4) { ND_PRINT((ndo, "[|ppp]")); return (caplen); } if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x %d ", p[0], p[1], length)); p += 2; length -= 2; hdrlen += 2; proto = EXTRACT_16BITS(p); p += 2; length -= 2; hdrlen += 2; ND_PRINT((ndo, "%s: ", tok2str(ppptype2str, "unknown PPP protocol (0x%04x)", proto))); handle_ppp(ndo, proto, p, length); break; case CHDLC_UNICAST: case CHDLC_BCAST: return (chdlc_if_print(ndo, h, p)); default: if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x %d ", p[0], p[1], length)); p += 2; hdrlen += 2; /* * XXX - NetBSD's "ppp_netbsd_serial_if_print()" treats * the next two octets as an Ethernet type; does that * ever happen? */ ND_PRINT((ndo, "unknown addr %02x; ctrl %02x", p[0], p[1])); break; } return (hdrlen); } #define PPP_BSDI_HDRLEN 24 /* BSD/OS specific PPP printer */ u_int ppp_bsdos_if_print(netdissect_options *ndo _U_, const struct pcap_pkthdr *h _U_, register const u_char *p _U_) { register int hdrlength; #ifdef __bsdi__ register u_int length = h->len; register u_int caplen = h->caplen; uint16_t ptype; const u_char *q; int i; if (caplen < PPP_BSDI_HDRLEN) { ND_PRINT((ndo, "[|ppp]")); return (caplen) } hdrlength = 0; #if 0 if (p[0] == PPP_ADDRESS && p[1] == PPP_CONTROL) { if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x ", p[0], p[1])); p += 2; hdrlength = 2; } if (ndo->ndo_eflag) ND_PRINT((ndo, "%d ", length)); /* Retrieve the protocol type */ if (*p & 01) { /* Compressed protocol field */ ptype = *p; if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x ", ptype)); p++; hdrlength += 1; } else { /* Un-compressed protocol field */ ptype = EXTRACT_16BITS(p); if (ndo->ndo_eflag) ND_PRINT((ndo, "%04x ", ptype)); p += 2; hdrlength += 2; } #else 
ptype = 0; /*XXX*/ if (ndo->ndo_eflag) ND_PRINT((ndo, "%c ", p[SLC_DIR] ? 'O' : 'I')); if (p[SLC_LLHL]) { /* link level header */ struct ppp_header *ph; q = p + SLC_BPFHDRLEN; ph = (struct ppp_header *)q; if (ph->phdr_addr == PPP_ADDRESS && ph->phdr_ctl == PPP_CONTROL) { if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x %02x ", q[0], q[1])); ptype = EXTRACT_16BITS(&ph->phdr_type); if (ndo->ndo_eflag && (ptype == PPP_VJC || ptype == PPP_VJNC)) { ND_PRINT((ndo, "%s ", tok2str(ppptype2str, "proto-#%d", ptype))); } } else { if (ndo->ndo_eflag) { ND_PRINT((ndo, "LLH=[")); for (i = 0; i < p[SLC_LLHL]; i++) ND_PRINT((ndo, "%02x", q[i])); ND_PRINT((ndo, "] ")); } } } if (ndo->ndo_eflag) ND_PRINT((ndo, "%d ", length)); if (p[SLC_CHL]) { q = p + SLC_BPFHDRLEN + p[SLC_LLHL]; switch (ptype) { case PPP_VJC: ptype = vjc_print(ndo, q, ptype); hdrlength = PPP_BSDI_HDRLEN; p += hdrlength; switch (ptype) { case PPP_IP: ip_print(ndo, p, length); break; case PPP_IPV6: ip6_print(ndo, p, length); break; case PPP_MPLS_UCAST: case PPP_MPLS_MCAST: mpls_print(ndo, p, length); break; } goto printx; case PPP_VJNC: ptype = vjc_print(ndo, q, ptype); hdrlength = PPP_BSDI_HDRLEN; p += hdrlength; switch (ptype) { case PPP_IP: ip_print(ndo, p, length); break; case PPP_IPV6: ip6_print(ndo, p, length); break; case PPP_MPLS_UCAST: case PPP_MPLS_MCAST: mpls_print(ndo, p, length); break; } goto printx; default: if (ndo->ndo_eflag) { ND_PRINT((ndo, "CH=[")); for (i = 0; i < p[SLC_LLHL]; i++) ND_PRINT((ndo, "%02x", q[i])); ND_PRINT((ndo, "] ")); } break; } } hdrlength = PPP_BSDI_HDRLEN; #endif length -= hdrlength; p += hdrlength; switch (ptype) { case PPP_IP: ip_print(p, length); break; case PPP_IPV6: ip6_print(ndo, p, length); break; case PPP_MPLS_UCAST: case PPP_MPLS_MCAST: mpls_print(ndo, p, length); break; default: ND_PRINT((ndo, "%s ", tok2str(ppptype2str, "unknown PPP protocol (0x%04x)", ptype))); } printx: #else /* __bsdi */ hdrlength = 0; #endif /* __bsdi__ */ return (hdrlength); } /* * Local Variables: * 
c-style: whitesmith * c-basic-offset: 8 * End: */
ppp_hdlc(netdissect_options *ndo, const u_char *p, int length) { u_char *b, *s, *t, c; int i, proto; const void *se; if (length <= 0) return; b = (uint8_t *)malloc(length); if (b == NULL) return; /* * Unescape all the data into a temporary, private, buffer. * Do this so that we dont overwrite the original packet * contents. */ for (s = (u_char *)p, t = b, i = length; i > 0; i--) { c = *s++; if (c == 0x7d) { if (i > 1) { i--; c = *s++ ^ 0x20; } else continue; } *t++ = c; } se = ndo->ndo_snapend; ndo->ndo_snapend = t; length = t - b; /* now lets guess about the payload codepoint format */ if (length < 1) goto trunc; proto = *b; /* start with a one-octet codepoint guess */ switch (proto) { case PPP_IP: ip_print(ndo, b + 1, length - 1); goto cleanup; case PPP_IPV6: ip6_print(ndo, b + 1, length - 1); goto cleanup; default: /* no luck - try next guess */ break; } if (length < 2) goto trunc; proto = EXTRACT_16BITS(b); /* next guess - load two octets */ switch (proto) { case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */ if (length < 4) goto trunc; proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */ handle_ppp(ndo, proto, b + 4, length - 4); break; default: /* last guess - proto must be a PPP proto-id */ handle_ppp(ndo, proto, b + 2, length - 2); break; } cleanup: ndo->ndo_snapend = se; free(b); return; trunc: ndo->ndo_snapend = se; free(b); ND_PRINT((ndo, "[|ppp]")); }
ppp_hdlc(netdissect_options *ndo, const u_char *p, int length) { u_char *b, *t, c; const u_char *s; int i, proto; const void *se; if (length <= 0) return; b = (u_char *)malloc(length); if (b == NULL) return; /* * Unescape all the data into a temporary, private, buffer. * Do this so that we dont overwrite the original packet * contents. */ for (s = p, t = b, i = length; i > 0 && ND_TTEST(*s); i--) { c = *s++; if (c == 0x7d) { if (i <= 1 || !ND_TTEST(*s)) break; i--; c = *s++ ^ 0x20; } *t++ = c; } se = ndo->ndo_snapend; ndo->ndo_snapend = t; length = t - b; /* now lets guess about the payload codepoint format */ if (length < 1) goto trunc; proto = *b; /* start with a one-octet codepoint guess */ switch (proto) { case PPP_IP: ip_print(ndo, b + 1, length - 1); goto cleanup; case PPP_IPV6: ip6_print(ndo, b + 1, length - 1); goto cleanup; default: /* no luck - try next guess */ break; } if (length < 2) goto trunc; proto = EXTRACT_16BITS(b); /* next guess - load two octets */ switch (proto) { case (PPP_ADDRESS << 8 | PPP_CONTROL): /* looks like a PPP frame */ if (length < 4) goto trunc; proto = EXTRACT_16BITS(b+2); /* load the PPP proto-id */ handle_ppp(ndo, proto, b + 4, length - 4); break; default: /* last guess - proto must be a PPP proto-id */ handle_ppp(ndo, proto, b + 2, length - 2); break; } cleanup: ndo->ndo_snapend = se; free(b); return; trunc: ndo->ndo_snapend = se; free(b); ND_PRINT((ndo, "[|ppp]")); }
{'added': [(1354, '\tu_char *b, *t, c;'), (1355, '\tconst u_char *s;'), (1362, '\tb = (u_char *)malloc(length);'), (1371, '\tfor (s = p, t = b, i = length; i > 0 && ND_TTEST(*s); i--) {'), (1374, '\t\t\tif (i <= 1 || !ND_TTEST(*s))'), (1375, '\t\t\t\tbreak;'), (1376, '\t\t\ti--;'), (1377, '\t\t\tc = *s++ ^ 0x20;')], 'deleted': [(1354, '\tu_char *b, *s, *t, c;'), (1361, '\tb = (uint8_t *)malloc(length);'), (1370, '\tfor (s = (u_char *)p, t = b, i = length; i > 0; i--) {'), (1373, '\t\t\tif (i > 1) {'), (1374, '\t\t\t\ti--;'), (1375, '\t\t\t\tc = *s++ ^ 0x20;'), (1376, '\t\t\t} else'), (1377, '\t\t\t\tcontinue;')]}
8
8
1,349
7,624
61
337
12
https://github.com/the-tcpdump-group/tcpdump
CVE-2014-9140
CWE-119
790
hv_kvp.c
C
kvp_respond_to_host
/* * An implementation of key value pair (KVP) functionality for Linux. * * * Copyright (C) 2010, Novell, Inc. * Author : K. Y. Srinivasan <ksrinivasan@novell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/net.h> #include <linux/nls.h> #include <linux/connector.h> #include <linux/workqueue.h> #include <linux/hyperv.h> #include "hv_kvp.h" /* * Global state maintained for transaction that is being processed. * Note that only one transaction can be active at any point in time. * * This state is set when we receive a request from the host; we * cleanup this state when the transaction is completed - when we respond * to the host with the key value. */ static struct { bool active; /* transaction status - active or not */ int recv_len; /* number of bytes received. */ int index; /* current index */ struct vmbus_channel *recv_channel; /* chn we got the request */ u64 recv_req_id; /* request ID. 
*/ } kvp_transaction; static void kvp_send_key(struct work_struct *dummy); #define TIMEOUT_FIRED 1 static void kvp_respond_to_host(char *key, char *value, int error); static void kvp_work_func(struct work_struct *dummy); static void kvp_register(void); static DECLARE_DELAYED_WORK(kvp_work, kvp_work_func); static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); static struct cb_id kvp_id = { CN_KVP_IDX, CN_KVP_VAL }; static const char kvp_name[] = "kvp_kernel_module"; static u8 *recv_buffer; /* * Register the kernel component with the user-level daemon. * As part of this registration, pass the LIC version number. */ static void kvp_register(void) { struct cn_msg *msg; msg = kzalloc(sizeof(*msg) + strlen(HV_DRV_VERSION) + 1 , GFP_ATOMIC); if (msg) { msg->id.idx = CN_KVP_IDX; msg->id.val = CN_KVP_VAL; msg->seq = KVP_REGISTER; strcpy(msg->data, HV_DRV_VERSION); msg->len = strlen(HV_DRV_VERSION) + 1; cn_netlink_send(msg, 0, GFP_ATOMIC); kfree(msg); } } static void kvp_work_func(struct work_struct *dummy) { /* * If the timer fires, the user-mode component has not responded; * process the pending transaction. */ kvp_respond_to_host("Unknown key", "Guest timed out", TIMEOUT_FIRED); } /* * Callback when data is received from user mode. */ static void kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct hv_ku_msg *message; message = (struct hv_ku_msg *)msg->data; if (msg->seq == KVP_REGISTER) { pr_info("KVP: user-mode registering done.\n"); kvp_register(); } if (msg->seq == KVP_USER_SET) { /* * Complete the transaction by forwarding the key value * to the host. But first, cancel the timeout. 
*/ if (cancel_delayed_work_sync(&kvp_work)) kvp_respond_to_host(message->kvp_key, message->kvp_value, !strlen(message->kvp_key)); } } static void kvp_send_key(struct work_struct *dummy) { struct cn_msg *msg; int index = kvp_transaction.index; msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC); if (msg) { msg->id.idx = CN_KVP_IDX; msg->id.val = CN_KVP_VAL; msg->seq = KVP_KERNEL_GET; ((struct hv_ku_msg *)msg->data)->kvp_index = index; msg->len = sizeof(struct hv_ku_msg); cn_netlink_send(msg, 0, GFP_ATOMIC); kfree(msg); } return; } /* * Send a response back to the host. */ static void kvp_respond_to_host(char *key, char *value, int error) { struct hv_kvp_msg *kvp_msg; struct hv_kvp_msg_enumerate *kvp_data; char *key_name; struct icmsg_hdr *icmsghdrp; int keylen, valuelen; u32 buf_len; struct vmbus_channel *channel; u64 req_id; /* * If a transaction is not active; log and return. */ if (!kvp_transaction.active) { /* * This is a spurious call! */ pr_warn("KVP: Transaction not active\n"); return; } /* * Copy the global state for completing the transaction. Note that * only one transaction can be active at a time. */ buf_len = kvp_transaction.recv_len; channel = kvp_transaction.recv_channel; req_id = kvp_transaction.recv_req_id; kvp_transaction.active = false; if (channel->onchannel_callback == NULL) /* * We have raced with util driver being unloaded; * silently return. */ return; icmsghdrp = (struct icmsg_hdr *) &recv_buffer[sizeof(struct vmbuspipe_hdr)]; kvp_msg = (struct hv_kvp_msg *) &recv_buffer[sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; kvp_data = &kvp_msg->kvp_data; key_name = key; /* * If the error parameter is set, terminate the host's enumeration. */ if (error) { /* * We don't support this index or the we have timedout; * terminate the host-side iteration by returning an error. */ icmsghdrp->status = HV_E_FAIL; goto response_done; } /* * The windows host expects the key/value pair to be encoded * in utf16. 
*/ keylen = utf8s_to_utf16s(key_name, strlen(key_name), (wchar_t *)kvp_data->data.key); kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */ valuelen = utf8s_to_utf16s(value, strlen(value), (wchar_t *)kvp_data->data.value); kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */ kvp_data->data.value_type = REG_SZ; /* all our values are strings */ icmsghdrp->status = HV_S_OK; response_done: icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, VM_PKT_DATA_INBAND, 0); } /* * This callback is invoked when we get a KVP message from the host. * The host ensures that only one KVP transaction can be active at a time. * KVP implementation in Linux needs to forward the key to a user-mde * component to retrive the corresponding value. Consequently, we cannot * respond to the host in the conext of this callback. Since the host * guarantees that at most only one transaction can be active at a time, * we stash away the transaction state in a set of global variables. */ void hv_kvp_onchannelcallback(void *context) { struct vmbus_channel *channel = context; u32 recvlen; u64 requestid; struct hv_kvp_msg *kvp_msg; struct hv_kvp_msg_enumerate *kvp_data; struct icmsg_hdr *icmsghdrp; struct icmsg_negotiate *negop = NULL; vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE, &recvlen, &requestid); if (recvlen > 0) { icmsghdrp = (struct icmsg_hdr *)&recv_buffer[ sizeof(struct vmbuspipe_hdr)]; if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer); } else { kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; kvp_data = &kvp_msg->kvp_data; /* * We only support the "get" operation on * "KVP_POOL_AUTO" pool. 
*/ if ((kvp_msg->kvp_hdr.pool != KVP_POOL_AUTO) || (kvp_msg->kvp_hdr.operation != KVP_OP_ENUMERATE)) { icmsghdrp->status = HV_E_FAIL; goto callback_done; } /* * Stash away this global state for completing the * transaction; note transactions are serialized. */ kvp_transaction.recv_len = recvlen; kvp_transaction.recv_channel = channel; kvp_transaction.recv_req_id = requestid; kvp_transaction.active = true; kvp_transaction.index = kvp_data->index; /* * Get the information from the * user-mode component. * component. This transaction will be * completed when we get the value from * the user-mode component. * Set a timeout to deal with * user-mode not responding. */ schedule_work(&kvp_sendkey_work); schedule_delayed_work(&kvp_work, 5*HZ); return; } callback_done: icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; vmbus_sendpacket(channel, recv_buffer, recvlen, requestid, VM_PKT_DATA_INBAND, 0); } } int hv_kvp_init(struct hv_util_service *srv) { int err; err = cn_add_callback(&kvp_id, kvp_name, kvp_cn_callback); if (err) return err; recv_buffer = srv->recv_buffer; return 0; } void hv_kvp_deinit(void) { cn_del_callback(&kvp_id); cancel_delayed_work_sync(&kvp_work); cancel_work_sync(&kvp_sendkey_work); }
/* * An implementation of key value pair (KVP) functionality for Linux. * * * Copyright (C) 2010, Novell, Inc. * Author : K. Y. Srinivasan <ksrinivasan@novell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/net.h> #include <linux/nls.h> #include <linux/connector.h> #include <linux/workqueue.h> #include <linux/hyperv.h> #include "hv_kvp.h" /* * Global state maintained for transaction that is being processed. * Note that only one transaction can be active at any point in time. * * This state is set when we receive a request from the host; we * cleanup this state when the transaction is completed - when we respond * to the host with the key value. */ static struct { bool active; /* transaction status - active or not */ int recv_len; /* number of bytes received. */ int index; /* current index */ struct vmbus_channel *recv_channel; /* chn we got the request */ u64 recv_req_id; /* request ID. 
*/ } kvp_transaction; static void kvp_send_key(struct work_struct *dummy); #define TIMEOUT_FIRED 1 static void kvp_respond_to_host(char *key, char *value, int error); static void kvp_work_func(struct work_struct *dummy); static void kvp_register(void); static DECLARE_DELAYED_WORK(kvp_work, kvp_work_func); static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); static struct cb_id kvp_id = { CN_KVP_IDX, CN_KVP_VAL }; static const char kvp_name[] = "kvp_kernel_module"; static u8 *recv_buffer; /* * Register the kernel component with the user-level daemon. * As part of this registration, pass the LIC version number. */ static void kvp_register(void) { struct cn_msg *msg; msg = kzalloc(sizeof(*msg) + strlen(HV_DRV_VERSION) + 1 , GFP_ATOMIC); if (msg) { msg->id.idx = CN_KVP_IDX; msg->id.val = CN_KVP_VAL; msg->seq = KVP_REGISTER; strcpy(msg->data, HV_DRV_VERSION); msg->len = strlen(HV_DRV_VERSION) + 1; cn_netlink_send(msg, 0, GFP_ATOMIC); kfree(msg); } } static void kvp_work_func(struct work_struct *dummy) { /* * If the timer fires, the user-mode component has not responded; * process the pending transaction. */ kvp_respond_to_host("Unknown key", "Guest timed out", TIMEOUT_FIRED); } /* * Callback when data is received from user mode. */ static void kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct hv_ku_msg *message; message = (struct hv_ku_msg *)msg->data; if (msg->seq == KVP_REGISTER) { pr_info("KVP: user-mode registering done.\n"); kvp_register(); } if (msg->seq == KVP_USER_SET) { /* * Complete the transaction by forwarding the key value * to the host. But first, cancel the timeout. 
*/ if (cancel_delayed_work_sync(&kvp_work)) kvp_respond_to_host(message->kvp_key, message->kvp_value, !strlen(message->kvp_key)); } } static void kvp_send_key(struct work_struct *dummy) { struct cn_msg *msg; int index = kvp_transaction.index; msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC); if (msg) { msg->id.idx = CN_KVP_IDX; msg->id.val = CN_KVP_VAL; msg->seq = KVP_KERNEL_GET; ((struct hv_ku_msg *)msg->data)->kvp_index = index; msg->len = sizeof(struct hv_ku_msg); cn_netlink_send(msg, 0, GFP_ATOMIC); kfree(msg); } return; } /* * Send a response back to the host. */ static void kvp_respond_to_host(char *key, char *value, int error) { struct hv_kvp_msg *kvp_msg; struct hv_kvp_msg_enumerate *kvp_data; char *key_name; struct icmsg_hdr *icmsghdrp; int keylen, valuelen; u32 buf_len; struct vmbus_channel *channel; u64 req_id; /* * If a transaction is not active; log and return. */ if (!kvp_transaction.active) { /* * This is a spurious call! */ pr_warn("KVP: Transaction not active\n"); return; } /* * Copy the global state for completing the transaction. Note that * only one transaction can be active at a time. */ buf_len = kvp_transaction.recv_len; channel = kvp_transaction.recv_channel; req_id = kvp_transaction.recv_req_id; kvp_transaction.active = false; if (channel->onchannel_callback == NULL) /* * We have raced with util driver being unloaded; * silently return. */ return; icmsghdrp = (struct icmsg_hdr *) &recv_buffer[sizeof(struct vmbuspipe_hdr)]; kvp_msg = (struct hv_kvp_msg *) &recv_buffer[sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; kvp_data = &kvp_msg->kvp_data; key_name = key; /* * If the error parameter is set, terminate the host's enumeration. */ if (error) { /* * We don't support this index or the we have timedout; * terminate the host-side iteration by returning an error. */ icmsghdrp->status = HV_E_FAIL; goto response_done; } /* * The windows host expects the key/value pair to be encoded * in utf16. 
*/ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN, (wchar_t *) kvp_data->data.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2); kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN, (wchar_t *) kvp_data->data.value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2); kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */ kvp_data->data.value_type = REG_SZ; /* all our values are strings */ icmsghdrp->status = HV_S_OK; response_done: icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, VM_PKT_DATA_INBAND, 0); } /* * This callback is invoked when we get a KVP message from the host. * The host ensures that only one KVP transaction can be active at a time. * KVP implementation in Linux needs to forward the key to a user-mde * component to retrive the corresponding value. Consequently, we cannot * respond to the host in the conext of this callback. Since the host * guarantees that at most only one transaction can be active at a time, * we stash away the transaction state in a set of global variables. */ void hv_kvp_onchannelcallback(void *context) { struct vmbus_channel *channel = context; u32 recvlen; u64 requestid; struct hv_kvp_msg *kvp_msg; struct hv_kvp_msg_enumerate *kvp_data; struct icmsg_hdr *icmsghdrp; struct icmsg_negotiate *negop = NULL; vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE, &recvlen, &requestid); if (recvlen > 0) { icmsghdrp = (struct icmsg_hdr *)&recv_buffer[ sizeof(struct vmbuspipe_hdr)]; if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer); } else { kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; kvp_data = &kvp_msg->kvp_data; /* * We only support the "get" operation on * "KVP_POOL_AUTO" pool. 
*/ if ((kvp_msg->kvp_hdr.pool != KVP_POOL_AUTO) || (kvp_msg->kvp_hdr.operation != KVP_OP_ENUMERATE)) { icmsghdrp->status = HV_E_FAIL; goto callback_done; } /* * Stash away this global state for completing the * transaction; note transactions are serialized. */ kvp_transaction.recv_len = recvlen; kvp_transaction.recv_channel = channel; kvp_transaction.recv_req_id = requestid; kvp_transaction.active = true; kvp_transaction.index = kvp_data->index; /* * Get the information from the * user-mode component. * component. This transaction will be * completed when we get the value from * the user-mode component. * Set a timeout to deal with * user-mode not responding. */ schedule_work(&kvp_sendkey_work); schedule_delayed_work(&kvp_work, 5*HZ); return; } callback_done: icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; vmbus_sendpacket(channel, recv_buffer, recvlen, requestid, VM_PKT_DATA_INBAND, 0); } } int hv_kvp_init(struct hv_util_service *srv) { int err; err = cn_add_callback(&kvp_id, kvp_name, kvp_cn_callback); if (err) return err; recv_buffer = srv->recv_buffer; return 0; } void hv_kvp_deinit(void) { cn_del_callback(&kvp_id); cancel_delayed_work_sync(&kvp_work); cancel_work_sync(&kvp_sendkey_work); }
kvp_respond_to_host(char *key, char *value, int error) { struct hv_kvp_msg *kvp_msg; struct hv_kvp_msg_enumerate *kvp_data; char *key_name; struct icmsg_hdr *icmsghdrp; int keylen, valuelen; u32 buf_len; struct vmbus_channel *channel; u64 req_id; /* * If a transaction is not active; log and return. */ if (!kvp_transaction.active) { /* * This is a spurious call! */ pr_warn("KVP: Transaction not active\n"); return; } /* * Copy the global state for completing the transaction. Note that * only one transaction can be active at a time. */ buf_len = kvp_transaction.recv_len; channel = kvp_transaction.recv_channel; req_id = kvp_transaction.recv_req_id; kvp_transaction.active = false; if (channel->onchannel_callback == NULL) /* * We have raced with util driver being unloaded; * silently return. */ return; icmsghdrp = (struct icmsg_hdr *) &recv_buffer[sizeof(struct vmbuspipe_hdr)]; kvp_msg = (struct hv_kvp_msg *) &recv_buffer[sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; kvp_data = &kvp_msg->kvp_data; key_name = key; /* * If the error parameter is set, terminate the host's enumeration. */ if (error) { /* * We don't support this index or the we have timedout; * terminate the host-side iteration by returning an error. */ icmsghdrp->status = HV_E_FAIL; goto response_done; } /* * The windows host expects the key/value pair to be encoded * in utf16. */ keylen = utf8s_to_utf16s(key_name, strlen(key_name), (wchar_t *)kvp_data->data.key); kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */ valuelen = utf8s_to_utf16s(value, strlen(value), (wchar_t *)kvp_data->data.value); kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */ kvp_data->data.value_type = REG_SZ; /* all our values are strings */ icmsghdrp->status = HV_S_OK; response_done: icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, VM_PKT_DATA_INBAND, 0); }
kvp_respond_to_host(char *key, char *value, int error) { struct hv_kvp_msg *kvp_msg; struct hv_kvp_msg_enumerate *kvp_data; char *key_name; struct icmsg_hdr *icmsghdrp; int keylen, valuelen; u32 buf_len; struct vmbus_channel *channel; u64 req_id; /* * If a transaction is not active; log and return. */ if (!kvp_transaction.active) { /* * This is a spurious call! */ pr_warn("KVP: Transaction not active\n"); return; } /* * Copy the global state for completing the transaction. Note that * only one transaction can be active at a time. */ buf_len = kvp_transaction.recv_len; channel = kvp_transaction.recv_channel; req_id = kvp_transaction.recv_req_id; kvp_transaction.active = false; if (channel->onchannel_callback == NULL) /* * We have raced with util driver being unloaded; * silently return. */ return; icmsghdrp = (struct icmsg_hdr *) &recv_buffer[sizeof(struct vmbuspipe_hdr)]; kvp_msg = (struct hv_kvp_msg *) &recv_buffer[sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; kvp_data = &kvp_msg->kvp_data; key_name = key; /* * If the error parameter is set, terminate the host's enumeration. */ if (error) { /* * We don't support this index or the we have timedout; * terminate the host-side iteration by returning an error. */ icmsghdrp->status = HV_E_FAIL; goto response_done; } /* * The windows host expects the key/value pair to be encoded * in utf16. 
*/ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN, (wchar_t *) kvp_data->data.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2); kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN, (wchar_t *) kvp_data->data.value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2); kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */ kvp_data->data.value_type = REG_SZ; /* all our values are strings */ icmsghdrp->status = HV_S_OK; response_done: icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, VM_PKT_DATA_INBAND, 0); }
{'added': [(215, '\tkeylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,'), (216, '\t\t\t\t(wchar_t *) kvp_data->data.key,'), (217, '\t\t\t\tHV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);'), (219, '\tvaluelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,'), (220, '\t\t\t\t(wchar_t *) kvp_data->data.value,'), (221, '\t\t\t\tHV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);')], 'deleted': [(215, '\tkeylen = utf8s_to_utf16s(key_name, strlen(key_name),'), (216, '\t\t\t\t(wchar_t *)kvp_data->data.key);'), (218, '\tvaluelen = utf8s_to_utf16s(value, strlen(value),'), (219, '\t\t\t\t(wchar_t *)kvp_data->data.value);')]}
6
4
181
1,034
44
277
4
https://github.com/torvalds/linux
CVE-2013-1773
CWE-119
1,740
xdelta3-test.h
C
mt_random
/* xdelta 3 - delta compression tools and library Copyright (C) 2001, * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012. * Joshua P. MacDonald * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This is public-domain Mersenne Twister code, * attributed to Michael Brundage. Thanks! * http://www.qbrundage.com/michaelb/pubs/essays/random_number_generation.html */ static const uint32_t TEST_SEED1 = 5489UL; #define MT_LEN 624 #define MT_IA 397 static const uint32_t UPPER_MASK = 0x80000000; static const uint32_t LOWER_MASK = 0x7FFFFFFF; static const uint32_t MATRIX_A = 0x9908B0DF; #ifndef SHELL_TESTS #define SHELL_TESTS 1 #endif typedef struct mtrand mtrand; struct mtrand { int mt_index_; uint32_t mt_buffer_[MT_LEN]; }; int test_compare_files (const char* tgt, const char *rec); void mt_init(mtrand *mt, uint32_t seed); uint32_t mt_random (mtrand *mt); int test_setup (void); void mt_init(mtrand *mt, uint32_t seed) { int i; mt->mt_buffer_[0] = seed; mt->mt_index_ = MT_LEN; for (i = 1; i < MT_LEN; i++) { /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous versions, MSBs of the seed affect */ /* only MSBs of the array mt[]. 
*/ /* 2002/01/09 modified by Makoto Matsumoto */ mt->mt_buffer_[i] = (1812433253UL * (mt->mt_buffer_[i-1] ^ (mt->mt_buffer_[i-1] >> 30)) + i); } } uint32_t mt_random (mtrand *mt) { uint32_t y; unsigned long mag01[2]; mag01[0] = 0; mag01[1] = MATRIX_A; if (mt->mt_index_ >= MT_LEN) { int kk; for (kk = 0; kk < MT_LEN - MT_IA; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^ (y >> 1) ^ mag01[y & 0x1UL]; } for (;kk < MT_LEN - 1; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^ (y >> 1) ^ mag01[y & 0x1UL]; } y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) | (mt->mt_buffer_[0] & LOWER_MASK); mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mt->mt_index_ = 0; } y = mt->mt_buffer_[mt->mt_index_++]; y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; } static mtrand static_mtrand; #include <math.h> static uint32_t mt_exp_rand (uint32_t mean, uint32_t max_value) { double mean_d = mean; double erand = log (1.0 / (mt_random (&static_mtrand) / (double)UINT32_MAX)); uint32_t x = (uint32_t) (mean_d * erand + 0.5); return min (x, max_value); } #if SHELL_TESTS #include <sys/wait.h> #endif #define MSG_IS(x) (stream->msg != NULL && strcmp ((x), stream->msg) == 0) static const usize_t TWO_MEGS_AND_DELTA = (3 << 20); static const usize_t ADDR_CACHE_ROUNDS = 10000; static const usize_t TEST_FILE_MEAN = 16384; static const double TEST_ADD_MEAN = 128; static const double TEST_ADD_MAX = 512; static const double TEST_ADD_RATIO = 0.1; static const double TEST_EPSILON = 0.25; #define TESTBUFSIZE (1024 * 16) #define TESTFILESIZE (1024) static char TEST_TARGET_FILE[TESTFILESIZE]; static char TEST_SOURCE_FILE[TESTFILESIZE]; static char TEST_DELTA_FILE[TESTFILESIZE]; static char TEST_RECON_FILE[TESTFILESIZE]; static char 
TEST_RECON2_FILE[TESTFILESIZE]; static char TEST_COPY_FILE[TESTFILESIZE]; static char TEST_NOPERM_FILE[TESTFILESIZE]; #define CHECK(cond) if (!(cond)) { XPR(NT "check failure: " #cond); abort(); } #if SHELL_TESTS /* Use a fixed soft config so that test values are fixed. See also * test_compress_text(). */ static const char* test_softcfg_str = "-C9,3,4,8,2,36,70"; #endif /*********************************************************************** TEST HELPERS ***********************************************************************/ static void DOT (void) { XPR(NTR "."); } static int do_cmd (xd3_stream *stream, const char *buf) { int ret; if ((ret = system (buf)) != 0) { if (WIFEXITED (ret)) { stream->msg = "command exited non-zero"; IF_DEBUG1 (XPR(NT "command was: %s\n", buf)); } else { stream->msg = "abnormal command termination"; } return XD3_INTERNAL; } return 0; } static int do_fail (xd3_stream *stream, const char *buf) { int ret; ret = system (buf); if (! WIFEXITED (ret) || WEXITSTATUS (ret) != 1) { stream->msg = "command should have not succeeded"; XPR(NT "command was %s\n", buf); return XD3_INTERNAL; } return 0; } /* Test that the exponential distribution actually produces its mean. 
*/ static int test_random_numbers (xd3_stream *stream, int ignore) { usize_t i; usize_t sum = 0; usize_t mean = 50; usize_t n_rounds = 1000000; double average, error; double allowed_error = 0.1; mt_init (& static_mtrand, 0x9f73f7fe); for (i = 0; i < n_rounds; i += 1) { sum += mt_exp_rand (mean, USIZE_T_MAX); } average = (double) sum / (double) n_rounds; error = average - (double) mean; if (error < allowed_error && error > -allowed_error) { return 0; } /*XPR(NT "error is %f\n", error);*/ stream->msg = "random distribution looks broken"; return XD3_INTERNAL; } static void test_unlink (char* file) { int ret; if ((ret = unlink (file)) != 0 && errno != ENOENT) { XPR(NT "unlink %s failed: %s\n", file, strerror(ret)); } } static void test_cleanup (void) { #if 1 test_unlink (TEST_TARGET_FILE); test_unlink (TEST_SOURCE_FILE); test_unlink (TEST_DELTA_FILE); test_unlink (TEST_RECON_FILE); test_unlink (TEST_RECON2_FILE); test_unlink (TEST_COPY_FILE); test_unlink (TEST_NOPERM_FILE); #endif } int test_setup (void) { static int x = 0; x++; snprintf_func (TEST_TARGET_FILE, TESTFILESIZE, "/tmp/xdtest.target.%d", x); snprintf_func (TEST_SOURCE_FILE, TESTFILESIZE, "/tmp/xdtest.source.%d", x); snprintf_func (TEST_DELTA_FILE, TESTFILESIZE, "/tmp/xdtest.delta.%d", x); snprintf_func (TEST_RECON_FILE, TESTFILESIZE, "/tmp/xdtest.recon.%d", x); snprintf_func (TEST_RECON2_FILE, TESTFILESIZE, "/tmp/xdtest.recon2.%d", x); snprintf_func (TEST_COPY_FILE, TESTFILESIZE, "/tmp/xdtest.copy.%d", x); snprintf_func (TEST_NOPERM_FILE, TESTFILESIZE, "/tmp/xdtest.noperm.%d", x); test_cleanup(); return 0; } static int test_make_inputs (xd3_stream *stream, xoff_t *ss_out, xoff_t *ts_out) { usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; uint8_t *buf = (uint8_t*) malloc (ts + ss), *sbuf = buf, *tbuf = buf + ss; usize_t sadd = 0, sadd_max = (usize_t)(ss * TEST_ADD_RATIO); FILE *tf = NULL, *sf = 
NULL; usize_t i, j; int ret; if (buf == NULL) { return ENOMEM; } if ((tf = fopen (TEST_TARGET_FILE, "w")) == NULL || (ss_out != NULL && (sf = fopen (TEST_SOURCE_FILE, "w")) == NULL)) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if (ss_out != NULL) { for (i = 0; i < ss; ) { sbuf[i++] = (uint8_t) mt_random (&static_mtrand); } } /* Then modify the data to produce copies, everything not copied is * an add. The following logic produces the TEST_ADD_RATIO. The * variable SADD contains the number of adds so far, which should * not exceed SADD_MAX. */ /* XPR(NT "ss = %u ts = %u\n", ss, ts); */ for (i = 0; i < ts; ) { usize_t left = ts - i; usize_t next = mt_exp_rand ((uint32_t) TEST_ADD_MEAN, (uint32_t) TEST_ADD_MAX); usize_t add_left = sadd_max - sadd; double add_prob = (left == 0) ? 0 : (add_left / (double) left); int do_copy; next = min (left, next); do_copy = (next > add_left || (mt_random (&static_mtrand) / \ (double)USIZE_T_MAX) >= add_prob); if (ss_out == NULL) { do_copy &= (i > 0); } else { do_copy &= (ss - next) > 0; } if (do_copy) { /* Copy */ size_t offset = mt_random (&static_mtrand) % ((ss_out == NULL) ? i : (ss - next)); /* XPR(NT "[%u] copy %u at %u ", i, next, offset); */ for (j = 0; j < next; j += 1) { char c = ((ss_out == NULL) ? 
tbuf : sbuf)[offset + j]; /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ } else { /* Add */ /* XPR(NT "[%u] add %u ", i, next); */ for (j = 0; j < next; j += 1) { char c = (char) mt_random (&static_mtrand); /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ sadd += next; } } /* XPR(NT "sadd = %u max = %u\n", sadd, sadd_max); */ if ((fwrite (tbuf, 1, ts, tf) != ts) || (ss_out != NULL && (fwrite (sbuf, 1, ss, sf) != ss))) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if ((ret = fclose (tf)) || (ss_out != NULL && (ret = fclose (sf)))) { stream->msg = "close failed"; ret = get_errno (); goto failure; } if (ts_out) { (*ts_out) = ts; } if (ss_out) { (*ss_out) = ss; } failure: free (buf); return ret; } int test_compare_files (const char* tgt, const char *rec) { FILE *orig, *recons; static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE]; xoff_t offset = 0; size_t i; size_t oc, rc; xoff_t diffs = 0; if ((orig = fopen (tgt, "r")) == NULL) { XPR(NT "open %s failed\n", tgt); return get_errno (); } if ((recons = fopen (rec, "r")) == NULL) { XPR(NT "open %s failed\n", rec); return get_errno (); } for (;;) { oc = fread (obuf, 1, TESTBUFSIZE, orig); rc = fread (rbuf, 1, TESTBUFSIZE, recons); if (oc != rc) { return XD3_INTERNAL; } if (oc == 0) { break; } for (i = 0; i < oc; i += 1) { if (obuf[i] != rbuf[i]) { XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n", (int)i, (int)oc, offset, obuf[i], rbuf[i]); diffs++; return XD3_INTERNAL; } } offset += oc; } fclose (orig); fclose (recons); if (diffs != 0) { return XD3_INTERNAL; } return 0; } static int test_save_copy (const char *origname) { char buf[TESTBUFSIZE]; int ret; snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", origname, TEST_COPY_FILE); if ((ret = system (buf)) != 0) { return XD3_INTERNAL; } return 0; } static int test_file_size (const char* file, xoff_t *size) { struct stat sbuf; int ret; (*size) = 0; if (stat (file, & sbuf) < 0) { ret = 
get_errno (); XPR(NT "stat failed: %s: %s\n", file, strerror (ret)); return ret; } if (! S_ISREG (sbuf.st_mode)) { ret = XD3_INTERNAL; XPR(NT "not a regular file: %s: %s\n", file, strerror (ret)); return ret; } (*size) = sbuf.st_size; return 0; } /*********************************************************************** READ OFFSET ***********************************************************************/ /* Common test for read_integer errors: encodes a 64-bit value and * then attempts to read as a 32-bit value. If TRUNC is non-zero, * attempts to get errors by shortening the input, otherwise it should * overflow. Expects XD3_INTERNAL and MSG. */ static int test_read_integer_error (xd3_stream *stream, usize_t trunto, const char *msg) { uint64_t eval = 1ULL << 34; uint32_t rval; xd3_output *buf = NULL; const uint8_t *max; const uint8_t *inp; int ret; buf = xd3_alloc_output (stream, buf); if ((ret = xd3_emit_uint64_t (stream, & buf, eval))) { goto fail; } again: inp = buf->base; max = buf->base + buf->next - trunto; if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) != XD3_INVALID_INPUT || !MSG_IS (msg)) { ret = XD3_INTERNAL; } else if (trunto && trunto < buf->next) { trunto += 1; goto again; } else { ret = 0; } fail: xd3_free_output (stream, buf); return ret; } /* Test integer overflow using the above routine. */ static int test_decode_integer_overflow (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 0, "overflow in read_intger"); } /* Test integer EOI using the above routine. */ static int test_decode_integer_end_of_input (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 1, "end-of-input in read_integer"); } /* Test that emit_integer/decode_integer/sizeof_integer/read_integer * work on correct inputs. Tests powers of (2^7), plus or minus, up * to the maximum value. 
*/ #define TEST_ENCODE_DECODE_INTEGER(TYPE,ONE,MAX) \ xd3_output *rbuf = NULL; \ xd3_output *dbuf = NULL; \ TYPE values[64]; \ usize_t nvalues = 0; \ usize_t i; \ int ret = 0; \ \ for (i = 0; i < (sizeof (TYPE) * 8); i += 7) \ { \ values[nvalues++] = (ONE << i) - ONE; \ values[nvalues++] = (ONE << i); \ values[nvalues++] = (ONE << i) + ONE; \ } \ \ values[nvalues++] = MAX-ONE; \ values[nvalues++] = MAX; \ \ rbuf = xd3_alloc_output (stream, rbuf); \ dbuf = xd3_alloc_output (stream, dbuf); \ \ for (i = 0; i < nvalues; i += 1) \ { \ const uint8_t *max; \ const uint8_t *inp; \ TYPE val; \ \ DOT (); \ rbuf->next = 0; \ \ if ((ret = xd3_emit_ ## TYPE (stream, & rbuf, values[i])) || \ (ret = xd3_emit_ ## TYPE (stream, & dbuf, values[i]))) \ { \ goto fail; \ } \ \ inp = rbuf->base; \ max = rbuf->base + rbuf->next; \ \ if (rbuf->next != xd3_sizeof_ ## TYPE (values[i])) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ \ if ((ret = xd3_read_ ## TYPE (stream, & inp, max, & val))) \ { \ goto fail; \ } \ \ if (val != values[i]) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ \ DOT (); \ } \ \ stream->next_in = dbuf->base; \ stream->avail_in = dbuf->next; \ \ for (i = 0; i < nvalues; i += 1) \ { \ TYPE val; \ \ if ((ret = xd3_decode_ ## TYPE (stream, & val))) \ { \ goto fail; \ } \ \ if (val != values[i]) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ } \ \ if (stream->avail_in != 0) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ \ fail: \ xd3_free_output (stream, rbuf); \ xd3_free_output (stream, dbuf); \ \ return ret static int test_encode_decode_uint32_t (xd3_stream *stream, int unused) { TEST_ENCODE_DECODE_INTEGER(uint32_t,1U,UINT32_MAX); } static int test_encode_decode_uint64_t (xd3_stream *stream, int unused) { TEST_ENCODE_DECODE_INTEGER(uint64_t,1ULL,UINT64_MAX); } static int test_usize_t_overflow (xd3_stream *stream, int unused) { if (USIZE_T_OVERFLOW (USIZE_T_MAX, 0)) { goto fail; } if (USIZE_T_OVERFLOW (0, USIZE_T_MAX)) { goto fail; } if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, 
USIZE_T_MAX / 2)) { goto fail; } if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2 + 1)) { goto fail; } if (! USIZE_T_OVERFLOW (USIZE_T_MAX, 1)) { goto fail; } if (! USIZE_T_OVERFLOW (1, USIZE_T_MAX)) { goto fail; } if (! USIZE_T_OVERFLOW (USIZE_T_MAX / 2 + 1, USIZE_T_MAX / 2 + 1)) { goto fail; } return 0; fail: stream->msg = "incorrect overflow computation"; return XD3_INTERNAL; } static int test_forward_match (xd3_stream *stream, int unused) { usize_t i; uint8_t buf1[256], buf2[256]; memset(buf1, 0, 256); memset(buf2, 0, 256); for (i = 0; i < 256; i++) { CHECK(xd3_forward_match(buf1, buf2, i) == (int)i); } for (i = 0; i < 255; i++) { buf2[i] = 1; CHECK(xd3_forward_match(buf1, buf2, 256) == (int)i); buf2[i] = 0; } return 0; } /*********************************************************************** Address cache ***********************************************************************/ static int test_address_cache (xd3_stream *stream, int unused) { int ret; usize_t i; usize_t offset; usize_t *addrs; uint8_t *big_buf, *buf_max; const uint8_t *buf; xd3_output *outp; uint8_t *modes; int mode_counts[16]; stream->acache.s_near = stream->code_table_desc->near_modes; stream->acache.s_same = stream->code_table_desc->same_modes; if ((ret = xd3_encode_init_partial (stream))) { return ret; } addrs = (usize_t*) xd3_alloc (stream, sizeof (usize_t), ADDR_CACHE_ROUNDS); modes = (uint8_t*) xd3_alloc (stream, sizeof (uint8_t), ADDR_CACHE_ROUNDS); memset (mode_counts, 0, sizeof (mode_counts)); memset (modes, 0, ADDR_CACHE_ROUNDS); addrs[0] = 0; mt_init (& static_mtrand, 0x9f73f7fc); /* First pass: encode addresses */ xd3_init_cache (& stream->acache); for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1) { double p; usize_t addr; usize_t prev_i; usize_t nearby; p = (mt_random (&static_mtrand) / (double)USIZE_T_MAX); prev_i = mt_random (&static_mtrand) % offset; nearby = (mt_random (&static_mtrand) % 256) % offset; nearby = max (1U, nearby); if (p < 0.1) { addr = 
addrs[offset-nearby]; } else if (p < 0.4) { addr = min (addrs[prev_i] + nearby, offset-1); } else { addr = prev_i; } if ((ret = xd3_encode_address (stream, addr, offset, & modes[offset]))) { return ret; } addrs[offset] = addr; mode_counts[modes[offset]] += 1; } /* Copy addresses into a contiguous buffer. */ big_buf = (uint8_t*) xd3_alloc (stream, xd3_sizeof_output (ADDR_HEAD (stream)), 1); for (offset = 0, outp = ADDR_HEAD (stream); outp != NULL; offset += outp->next, outp = outp->next_page) { memcpy (big_buf + offset, outp->base, outp->next); } buf_max = big_buf + offset; buf = big_buf; /* Second pass: decode addresses */ xd3_init_cache (& stream->acache); for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1) { uint32_t addr; if ((ret = xd3_decode_address (stream, offset, modes[offset], & buf, buf_max, & addr))) { return ret; } if (addr != addrs[offset]) { stream->msg = "incorrect decoded address"; return XD3_INTERNAL; } } /* Check that every byte, mode was used. */ if (buf != buf_max) { stream->msg = "address bytes not used"; return XD3_INTERNAL; } for (i = 0; i < (2 + stream->acache.s_same + stream->acache.s_near); i += 1) { if (mode_counts[i] == 0) { stream->msg = "address mode not used"; return XD3_INTERNAL; } } xd3_free (stream, modes); xd3_free (stream, addrs); xd3_free (stream, big_buf); return 0; } /*********************************************************************** Encode and decode with single bit error ***********************************************************************/ /* It compresses from 256 to around 185 bytes. * Avoids matching addresses that are a single-bit difference. * Avoids matching address 0. */ static const uint8_t test_text[] = "this is a story\n" "abouttttttttttt\n" "- his is a stor\n" "- about nothing " " all. 
boutique -" "his story is a -" "about " "what happens all" " the time what -" "am I ttttttt the" " person said, so" " what, per son -" " gory story is -" " about nothing -" "tttttt to test -" "his sto nothing"; static const uint8_t test_apphead[] = "header test"; static int test_compress_text (xd3_stream *stream, uint8_t *encoded, usize_t *encoded_size) { int ret; xd3_config cfg; int oflags = stream->flags; int flags = stream->flags | XD3_FLUSH; xd3_free_stream (stream); xd3_init_config (& cfg, flags); /* This configuration is fixed so that the "expected non-error" the counts in * decompress_single_bit_errors are too. See test_coftcfg_str. */ cfg.smatch_cfg = XD3_SMATCH_SOFT; cfg.smatcher_soft.name = "test"; cfg.smatcher_soft.large_look = 64; /* no source, not used */ cfg.smatcher_soft.large_step = 64; /* no source, not used */ cfg.smatcher_soft.small_look = 4; cfg.smatcher_soft.small_chain = 128; cfg.smatcher_soft.small_lchain = 16; cfg.smatcher_soft.max_lazy = 8; cfg.smatcher_soft.long_enough = 128; xd3_config_stream (stream, & cfg); (*encoded_size) = 0; xd3_set_appheader (stream, test_apphead, (usize_t) strlen ((char*) test_apphead)); if ((ret = xd3_encode_stream (stream, test_text, sizeof (test_text), encoded, encoded_size, 4*sizeof (test_text)))) { goto fail; } if ((ret = xd3_close_stream (stream))) { goto fail; } fail: xd3_free_stream (stream); xd3_init_config (& cfg, oflags); xd3_config_stream (stream, & cfg); return ret; } static int test_decompress_text (xd3_stream *stream, uint8_t *enc, usize_t enc_size, usize_t test_desize) { xd3_config cfg; char decoded[sizeof (test_text)]; uint8_t *apphead; usize_t apphead_size; usize_t decoded_size; const char *msg; int ret; usize_t pos = 0; int flags = stream->flags; usize_t take; input: /* Test decoding test_desize input bytes at a time */ take = min (enc_size - pos, test_desize); CHECK(take > 0); xd3_avail_input (stream, enc + pos, take); again: ret = xd3_decode_input (stream); pos += take; take = 0; switch (ret) { 
case XD3_OUTPUT: break; case XD3_WINSTART: case XD3_GOTHEADER: goto again; case XD3_INPUT: if (pos < enc_size) { goto input; } /* else fallthrough */ case XD3_WINFINISH: default: goto fail; } CHECK(ret == XD3_OUTPUT); CHECK(pos == enc_size); if (stream->avail_out != sizeof (test_text)) { stream->msg = "incorrect output size"; ret = XD3_INTERNAL; goto fail; } decoded_size = stream->avail_out; memcpy (decoded, stream->next_out, stream->avail_out); xd3_consume_output (stream); if ((ret = xd3_get_appheader (stream, & apphead, & apphead_size))) { goto fail; } if (apphead_size != strlen ((char*) test_apphead) || memcmp (apphead, test_apphead, strlen ((char*) test_apphead)) != 0) { stream->msg = "incorrect appheader"; ret = XD3_INTERNAL; goto fail; } if ((ret = xd3_decode_input (stream)) != XD3_WINFINISH || (ret = xd3_close_stream (stream)) != 0) { goto fail; } if (decoded_size != sizeof (test_text) || memcmp (decoded, test_text, sizeof (test_text)) != 0) { stream->msg = "incorrect output text"; ret = EIO; } fail: msg = stream->msg; xd3_free_stream (stream); xd3_init_config (& cfg, flags); xd3_config_stream (stream, & cfg); stream->msg = msg; return ret; } static int test_decompress_single_bit_error (xd3_stream *stream, int expected_non_failures) { int ret; usize_t i; uint8_t encoded[4*sizeof (test_text)]; /* make room for alt code table */ usize_t encoded_size; int non_failures = 0; int cksum = (stream->flags & XD3_ADLER32) != 0; //#define DEBUG_TEST_FAILURES #ifndef DEBUG_TEST_FAILURES #define TEST_FAILURES() #else /* For checking non-failure cases by hand, enable this macro and run * xdelta printdelta with print_cpymode disabled. Every non-failure * should change a copy address mode, which doesn't cause a failure * because the address cache starts out with all zeros. 
./xdelta3 test for i in test_text.xz.*; do ./xdelta3 printdelta $i > $i.out; diff $i.out test_text.xz.0.out; done */ system ("rm -rf test_text.*"); { char buf[TESTBUFSIZE]; FILE *f; snprintf_func (buf, TESTBUFSIZE, "test_text"); f = fopen (buf, "w"); fwrite (test_text,1,sizeof (test_text),f); fclose (f); } #define TEST_FAILURES() \ do { \ char buf[TESTBUFSIZE]; \ FILE *f; \ snprintf_func (buf, TESTBUFSIZE, "test_text.xz.%d", non_failures); \ f = fopen (buf, "w"); \ fwrite (encoded,1,encoded_size,f); \ fclose (f); \ } while (0) #endif stream->sec_data.inefficient = 1; stream->sec_inst.inefficient = 1; stream->sec_addr.inefficient = 1; /* Encode text, test correct input */ if ((ret = test_compress_text (stream, encoded, & encoded_size))) { /*stream->msg = "without error: encode failure";*/ return ret; } if ((ret = test_decompress_text (stream, encoded, encoded_size, sizeof (test_text) / 4))) { /*stream->msg = "without error: decode failure";*/ return ret; } TEST_FAILURES(); for (i = 0; i < encoded_size*8; i += 1) { /* Single bit error. */ encoded[i/8] ^= 1 << (i%8); if ((ret = test_decompress_text (stream, encoded, encoded_size, sizeof (test_text))) == 0) { non_failures += 1; #ifdef DEBUG_TEST_FAILURES XPR(NT "%u[%u] non-failure %u\n", i/8, i%8, non_failures); #endif TEST_FAILURES(); } else { /*XPR(NT "%u[%u] failure: %s\n", i/8, i%8, stream->msg);*/ } /* decompress_text returns EIO when the final memcmp() fails, but that * should never happen with checksumming on. */ if (cksum && ret == EIO) { /*XPR(NT "%u[%u] cksum mismatch\n", i/8, i%8);*/ stream->msg = "checksum mismatch"; return XD3_INTERNAL; } /* Undo single bit error. 
*/ encoded[i/8] ^= 1 << (i%8); } /* Test correct input again */ if ((ret = test_decompress_text (stream, encoded, encoded_size, 1))) { /*stream->msg = "without error: decode failure";*/ return ret; } /* Check expected non-failures */ if (non_failures != expected_non_failures) { XPR(NT "non-failures %u; expected %u", non_failures, expected_non_failures); stream->msg = "incorrect"; return XD3_INTERNAL; } DOT (); return 0; } /*********************************************************************** Secondary compression tests ***********************************************************************/ #if SECONDARY_ANY typedef int (*sec_dist_func) (xd3_stream *stream, xd3_output *data); static int sec_dist_func1 (xd3_stream *stream, xd3_output *data); static int sec_dist_func2 (xd3_stream *stream, xd3_output *data); static int sec_dist_func3 (xd3_stream *stream, xd3_output *data); static int sec_dist_func4 (xd3_stream *stream, xd3_output *data); static int sec_dist_func5 (xd3_stream *stream, xd3_output *data); static int sec_dist_func6 (xd3_stream *stream, xd3_output *data); static int sec_dist_func7 (xd3_stream *stream, xd3_output *data); static int sec_dist_func8 (xd3_stream *stream, xd3_output *data); static int sec_dist_func9 (xd3_stream *stream, xd3_output *data); static int sec_dist_func10 (xd3_stream *stream, xd3_output *data); static int sec_dist_func11 (xd3_stream *stream, xd3_output *data); static sec_dist_func sec_dists[] = { sec_dist_func1, sec_dist_func2, sec_dist_func3, sec_dist_func4, sec_dist_func5, sec_dist_func6, sec_dist_func7, sec_dist_func8, sec_dist_func9, sec_dist_func10, sec_dist_func11, }; /* Test ditsribution: 100 bytes of the same character (13). */ static int sec_dist_func1 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < 100; i += 1) { if ((ret = xd3_emit_byte (stream, & data, 13))) { return ret; } } return 0; } /* Test ditsribution: uniform covering half the alphabet. 
*/ static int sec_dist_func2 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { if ((ret = xd3_emit_byte (stream, & data, i%(ALPHABET_SIZE/2)))) { return ret; } } return 0; } /* Test ditsribution: uniform covering the entire alphabet. */ static int sec_dist_func3 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { if ((ret = xd3_emit_byte (stream, & data, i%ALPHABET_SIZE))) { return ret; } } return 0; } /* Test distribution: An exponential distribution covering half the alphabet */ static int sec_dist_func4 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_exp_rand (10, ALPHABET_SIZE/2); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An exponential distribution covering the entire alphabet */ static int sec_dist_func5 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_exp_rand (10, ALPHABET_SIZE-1); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An uniform random distribution covering half the alphabet */ static int sec_dist_func6 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_random (&static_mtrand) % (ALPHABET_SIZE/2); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An uniform random distribution covering the entire alphabet */ static int sec_dist_func7 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*200; i += 1) { x = mt_random (&static_mtrand) % ALPHABET_SIZE; if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: A small number of frequent characters, difficult * to divide into many groups */ static int sec_dist_func8 (xd3_stream *stream, xd3_output *data) { int i, ret; for 
(i = 0; i < ALPHABET_SIZE*5; i += 1) { if ((ret = xd3_emit_byte (stream, & data, 0))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 64))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 128))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 255))) { return ret; } } return 0; } /* Test distribution: One that causes many FGK block promotions (found a bug) */ static int sec_dist_func9 (xd3_stream *stream, xd3_output *data) { int i, ret; int ramp = 0; int rcount = 0; int prom = 0; int pcount = 0; /* 200 was long enough to trigger it--only when stricter checking * that counted all blocks was turned on, but it seems I deleted * this code. (missing fgk_free_block on line 398). */ for (i = 0; i < ALPHABET_SIZE*200; i += 1) { repeat: if (ramp < ALPHABET_SIZE) { /* Initially Nth symbol has (N+1) frequency */ if (rcount <= ramp) { rcount += 1; if ((ret = xd3_emit_byte (stream, & data, ramp))) { return ret; } continue; } ramp += 1; rcount = 0; goto repeat; } /* Thereafter, promote least freq to max freq */ if (pcount == ALPHABET_SIZE) { pcount = 0; prom = (prom + 1) % ALPHABET_SIZE; } pcount += 1; if ((ret = xd3_emit_byte (stream, & data, prom))) { return ret; } } return 0; } /* Test distribution: freq[i] == i*i, creates a 21-bit code length, fixed in 3.0r. 
 */
static int
sec_dist_func10 (xd3_stream *stream, xd3_output *data)
{
  int i, j, ret;
  /* Symbol i appears (i*i + 1) times: quadratically skewed counts. */
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      for (j = 0; j <= (i*i); j += 1)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
    }
  return 0;
}

/* Test distribution: fibonacci.  Symbol i appears fib(i+2) times. */
static int
sec_dist_func11 (xd3_stream *stream, xd3_output *data)
{
  int sum0 = 0;
  int sum1 = 1;
  int i, j, ret;
  for (i = 0; i < 33; ++i)
    {
      for (j = 0; j < (sum0 + sum1); ++j)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
      /* Advance the fibonacci pair; j holds sum0+sum1 after the loop. */
      sum0 = sum1;
      sum1 = j;
    }
  return 0;
}

/* Decode COMPRESS_SIZE bytes of DEC_INPUT with secondary codec SEC,
 * expecting exactly INPUT_SIZE bytes of output equal to DEC_CORRECT.
 * DEC_OUTPUT is caller-provided scratch space of INPUT_SIZE bytes.
 * Returns 0 on success; the codec's sec_stream is always destroyed. */
static int
test_secondary_decode (xd3_stream         *stream,
		       const xd3_sec_type *sec,
		       usize_t             input_size,
		       usize_t             compress_size,
		       const uint8_t      *dec_input,
		       const uint8_t      *dec_correct,
		       uint8_t            *dec_output)
{
  int ret;
  xd3_sec_stream *dec_stream;
  const uint8_t *dec_input_used, *dec_input_end;
  uint8_t *dec_output_used, *dec_output_end;

  if ((dec_stream = sec->alloc (stream)) == NULL) { return ENOMEM; }

  /* 0 = decode mode. */
  if ((ret = sec->init (stream, dec_stream, 0)) != 0) { goto fail; }

  dec_input_used = dec_input;
  dec_input_end  = dec_input + compress_size;

  dec_output_used = dec_output;
  dec_output_end  = dec_output + input_size;

  if ((ret = sec->decode (stream, dec_stream,
			  & dec_input_used, dec_input_end,
			  & dec_output_used, dec_output_end)))
    {
      goto fail;
    }

  /* The decoder must consume all input and fill all output exactly. */
  if (dec_input_used != dec_input_end)
    {
      stream->msg = "unused input";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (dec_output_used != dec_output_end)
    {
      stream->msg = "unfinished output";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (memcmp (dec_output, dec_correct, input_size) != 0)
    {
      stream->msg = "incorrect output";
      ret = XD3_INTERNAL;
      goto fail;
    }

 fail:
  sec->destroy (stream, dec_stream);
  return ret;
}

/* Round-trip every sec_dists[] distribution through secondary codec
 * SEC for 1..GROUPS groups, then fuzz the first bytes of the encoding
 * with single-bit errors (faults are expected, not required).
 * NOTE: the `if (0) { nomem: ... }` construct routes allocation
 * failures into the shared `fail:` cleanup — do not reorder. */
static int
test_secondary (xd3_stream *stream, const xd3_sec_type *sec, usize_t groups)
{
  usize_t test_i;
  int ret;
  xd3_output *in_head, *out_head, *p;
  usize_t p_off, input_size, compress_size;
  uint8_t *dec_input = NULL, *dec_output = NULL, *dec_correct = NULL;
  xd3_sec_stream *enc_stream;
  xd3_sec_cfg cfg;

  memset (& cfg, 0, sizeof (cfg));

  cfg.inefficient = 1;

  for (cfg.ngroups = 1; cfg.ngroups <= groups; cfg.ngroups += 1)
    {
      XPR(NTR "\n...");
      for (test_i = 0; test_i < SIZEOF_ARRAY (sec_dists); test_i += 1)
	{
	  /* Fixed seed: every distribution is reproducible. */
	  mt_init (& static_mtrand, 0x9f73f7fc);

	  in_head  = xd3_alloc_output (stream, NULL);
	  out_head = xd3_alloc_output (stream, NULL);
	  enc_stream = sec->alloc (stream);
	  dec_input = NULL;
	  dec_output = NULL;
	  dec_correct = NULL;

	  if (in_head == NULL || out_head == NULL || enc_stream == NULL)
	    {
	      goto nomem;
	    }

	  /* Generate the test distribution into in_head. */
	  if ((ret = sec_dists[test_i] (stream, in_head)))
	    {
	      goto fail;
	    }

	  /* 1 = encode mode. */
	  if ((ret = sec->init (stream, enc_stream, 1)) != 0)
	    {
	      goto fail;
	    }

	  /* Encode data */
	  if ((ret = sec->encode (stream, enc_stream, in_head,
				  out_head, & cfg)))
	    {
	      XPR(NT "test %u: encode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Calculate sizes, allocate contiguous arrays for decoding */
	  input_size    = xd3_sizeof_output (in_head);
	  compress_size = xd3_sizeof_output (out_head);

	  /* Report bits-per-input-byte achieved by the codec. */
	  XPR(NTR "%.3f", 8.0 * (double) compress_size / (double) input_size);

	  if ((dec_input =
	       (uint8_t*) xd3_alloc (stream, compress_size, 1)) == NULL ||
	      (dec_output =
	       (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL ||
	      (dec_correct =
	       (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL)
	    {
	      goto nomem;
	    }

	  /* Fill the compressed data array */
	  for (p_off = 0, p = out_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_input + p_off, p->base, p->next);
	    }

	  CHECK(p_off == compress_size);

	  /* Fill the input data array */
	  for (p_off = 0, p = in_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_correct + p_off, p->base, p->next);
	    }

	  CHECK(p_off == input_size);

	  if ((ret = test_secondary_decode (stream, sec, input_size,
					    compress_size, dec_input,
					    dec_correct, dec_output)))
	    {
	      XPR(NT "test %u: decode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Single-bit error test, only cover the first 10 bytes.
	   * Some non-failures are expected in the Huffman case:
	   * Changing the clclen array, for example, may not harm the
	   * decoding.  Really looking for faults here. */
	  {
	    int i;
	    int bytes = min (compress_size, 10U);
	    for (i = 0; i < bytes * 8; i += 1)
	      {
		/* Flip one bit, attempt a decode, flip it back. */
		dec_input[i/8] ^= 1 << (i%8);

		if ((ret = test_secondary_decode (stream, sec, input_size,
						  compress_size, dec_input,
						  dec_correct,
						  dec_output)) == 0)
		  {
		    /*XPR(NT "test %u: decode single-bit [%u/%u]
		      error non-failure", test_i, i/8, i%8);*/
		  }

		dec_input[i/8] ^= 1 << (i%8);

		if ((i % (2*bytes)) == (2*bytes)-1)
		  {
		    DOT ();
		  }
	      }
	    ret = 0;
	  }

	  if (0)
	    {
	    nomem:
	      ret = ENOMEM;
	    }

	fail:
	  sec->destroy (stream, enc_stream);
	  xd3_free_output (stream, in_head);
	  xd3_free_output (stream, out_head);
	  xd3_free (stream, dec_input);
	  xd3_free (stream, dec_output);
	  xd3_free (stream, dec_correct);

	  if (ret != 0)
	    {
	      return ret;
	    }
	}
    }

  return 0;
}

/* Per-codec entry points, compiled only when the codec is enabled. */
IF_FGK (static int test_secondary_fgk (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & fgk_sec_type, gp); })
IF_DJW (static int test_secondary_huff (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & djw_sec_type, gp); })
IF_LZMA (static int test_secondary_lzma (xd3_stream *stream, usize_t gp)
	 { return test_secondary (stream, & lzma_sec_type, gp); })

#endif

/***********************************************************************
 TEST INSTRUCTION TABLE
 ***********************************************************************/

/* Test that xd3_choose_instruction() does the right thing for its code
 * table.
*/ static int test_choose_instruction (xd3_stream *stream, int ignore) { int i; stream->code_table = (*stream->code_table_func) (); for (i = 0; i < 256; i += 1) { const xd3_dinst *d = stream->code_table + i; xd3_rinst prev, inst; CHECK(d->type1 > 0); memset (& prev, 0, sizeof (prev)); memset (& inst, 0, sizeof (inst)); if (d->type2 == 0) { inst.type = d->type1; if ((inst.size = d->size1) == 0) { inst.size = TESTBUFSIZE; } XD3_CHOOSE_INSTRUCTION (stream, NULL, & inst); if (inst.code2 != 0 || inst.code1 != i) { stream->msg = "wrong single instruction"; return XD3_INTERNAL; } } else { prev.type = d->type1; prev.size = d->size1; inst.type = d->type2; inst.size = d->size2; XD3_CHOOSE_INSTRUCTION (stream, & prev, & inst); if (prev.code2 != i) { stream->msg = "wrong double instruction"; return XD3_INTERNAL; } } } return 0; } /*********************************************************************** TEST INSTRUCTION TABLE CODING ***********************************************************************/ #if GENERIC_ENCODE_TABLES /* Test that encoding and decoding a code table works */ static int test_encode_code_table (xd3_stream *stream, int ignore) { int ret; const uint8_t *comp_data; usize_t comp_size; if ((ret = xd3_compute_alternate_table_encoding (stream, & comp_data, & comp_size))) { return ret; } stream->acache.s_near = __alternate_code_table_desc.near_modes; stream->acache.s_same = __alternate_code_table_desc.same_modes; if ((ret = xd3_apply_table_encoding (stream, comp_data, comp_size))) { return ret; } if (memcmp (stream->code_table, xd3_alternate_code_table (), sizeof (xd3_dinst) * 256) != 0) { stream->msg = "wrong code table reconstruction"; return XD3_INTERNAL; } return 0; } #endif /*********************************************************************** 64BIT STREAMING ***********************************************************************/ /* This test encodes and decodes a series of 1 megabyte windows, each * containing a long run of zeros along with a single 
 * xoff_t size
 * record to indicate the sequence. */
static int
test_streaming (xd3_stream *in_stream, uint8_t *encbuf, uint8_t *decbuf,
		uint8_t *delbuf, usize_t megs)
{
  /* Separate encode and decode streams; IN_STREAM only supplies the
   * configuration flags and receives error messages. */
  xd3_stream estream, dstream;
  int ret;
  usize_t i, delsize, decsize;
  xd3_config cfg;

  xd3_init_config (& cfg, in_stream->flags);
  cfg.flags |= XD3_COMPLEVEL_6;

  if ((ret = xd3_config_stream (& estream, & cfg)) ||
      (ret = xd3_config_stream (& dstream, & cfg)))
    {
      goto fail;
    }

  for (i = 0; i < megs; i += 1)
    {
      /* Stamp the window number at the head of the 1MB buffer so each
       * window's content is unique. */
      ((usize_t*) encbuf)[0] = i;

      if ((i % 200) == 199) { DOT (); }

      /* Encode one 1MB window into delbuf. */
      if ((ret = xd3_process_stream (1, & estream, xd3_encode_input, 0,
				     encbuf, 1 << 20,
				     delbuf,
				     & delsize, 1 << 20)))
	{
	  in_stream->msg = estream.msg;
	  goto fail;
	}

      /* Decode it back into decbuf. */
      if ((ret = xd3_process_stream (0, & dstream, xd3_decode_input, 0,
				     delbuf, delsize,
				     decbuf,
				     & decsize, 1 << 20)))
	{
	  in_stream->msg = dstream.msg;
	  goto fail;
	}

      /* The round trip must reproduce the window exactly. */
      if (decsize != 1 << 20 ||
	  memcmp (encbuf, decbuf, 1 << 20) != 0)
	{
	  in_stream->msg = "wrong result";
	  ret = XD3_INTERNAL;
	  goto fail;
	}
    }

  if ((ret = xd3_close_stream (& estream)) ||
      (ret = xd3_close_stream (& dstream)))
    {
      goto fail;
    }

 fail:
  xd3_free_stream (& estream);
  xd3_free_stream (& dstream);

  return ret;
}

/* Run tests of data streaming of over and around 4GB of data. */
static int
test_compressed_stream_overflow (xd3_stream *stream, int ignore)
{
  int ret;
  int i;
  uint8_t *buf;

  if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL)
    {
      return ENOMEM;
    }

  memset (buf, 0, TWO_MEGS_AND_DELTA);

  /* Fill the first 2MB with short ramps at random offsets so the data
   * is compressible but not trivial. */
  for (i = 0; i < (2 << 20); i += 256)
    {
      int j;
      int off = mt_random(& static_mtrand) % 10;
      for (j = 0; j < 256; j++)
	{
	  buf[i + j] = j + off;
	}
    }

  /* Test overflow of a 32-bit file offset: one window past 4GB must
   * produce the library's overflow error. */
  if (SIZEOF_XOFF_T == 4)
    {
      ret = test_streaming (stream,
			    buf,
			    buf + (1 << 20),
			    buf + (2 << 20),
			    (1 << 12) + 1);

      if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow"))
	{
	  ret = 0;
	}
      else
	{
	  XPR(NT XD3_LIB_ERRMSG (stream, ret));
	  stream->msg = "expected overflow condition";
	  ret = XD3_INTERNAL;
	  goto fail;
	}
    }

  /* Test transfer of exactly 32bits worth of data. */
  if ((ret = test_streaming (stream,
			     buf,
			     buf + (1 << 20),
			     buf + (2 << 20),
			     1 << 12)))
    {
      goto fail;
    }
 fail:
  free (buf);
  return ret;
}

/***********************************************************************
 COMMAND LINE
 ***********************************************************************/

#if SHELL_TESTS
/* For each pair of command templates in the array below, test that
 * encoding and decoding commands work.  Also check for the expected
 * size delta, which should be approximately TEST_ADD_RATIO times the
 * file size created by test_make_inputs.  Due to differences in the
 * application header, it is suppressed (-A) so that all delta files
 * are the same.
*/ static int test_command_line_arguments (xd3_stream *stream, int ignore) { int i, ret; static const char* cmdpairs[] = { /* standard input, output */ "%s %s -A < %s > %s", "%s -d < %s > %s", "%s %s -A -e < %s > %s", "%s -d < %s > %s", "%s %s -A= encode < %s > %s", "%s decode < %s > %s", "%s %s -A -q encode < %s > %s", "%s -qdq < %s > %s", /* file input, standard output */ "%s %s -A= %s > %s", "%s -d %s > %s", "%s %s -A -e %s > %s", "%s -d %s > %s", "%s %s encode -A= %s > %s", "%s decode %s > %s", /* file input, output */ "%s %s -A= %s %s", "%s -d %s %s", "%s %s -A -e %s %s", "%s -d %s %s", "%s %s -A= encode %s %s", "%s decode %s %s", /* option placement */ "%s %s -A -f %s %s", "%s -f -d %s %s", "%s %s -e -A= %s %s", "%s -d -f %s %s", "%s %s -f encode -A= %s %s", "%s -f decode -f %s %s", }; char ecmd[TESTBUFSIZE], dcmd[TESTBUFSIZE]; int pairs = SIZEOF_ARRAY (cmdpairs) / 2; xoff_t tsize; xoff_t dsize; double ratio; mt_init (& static_mtrand, 0x9f73f7fc); for (i = 0; i < pairs; i += 1) { test_setup (); if ((ret = test_make_inputs (stream, NULL, & tsize))) { return ret; } snprintf_func (ecmd, TESTBUFSIZE, cmdpairs[2*i], program_name, test_softcfg_str, TEST_TARGET_FILE, TEST_DELTA_FILE); snprintf_func (dcmd, TESTBUFSIZE, cmdpairs[2*i+1], program_name, TEST_DELTA_FILE, TEST_RECON_FILE); /* Encode and decode. */ if ((ret = system (ecmd)) != 0) { XPR(NT "encode command: %s\n", ecmd); stream->msg = "encode cmd failed"; return XD3_INTERNAL; } if ((ret = system (dcmd)) != 0) { XPR(NT "decode command: %s\n", dcmd); stream->msg = "decode cmd failed"; return XD3_INTERNAL; } /* Compare the target file. */ if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; } ratio = (double) dsize / (double) tsize; /* Check that it is not too small, not too large. 
*/ if (ratio >= TEST_ADD_RATIO + TEST_EPSILON) { XPR(NT "test encode with size ratio %.4f, " "expected < %.4f (%"Q"u, %"Q"u)\n", ratio, TEST_ADD_RATIO + TEST_EPSILON, dsize, tsize); stream->msg = "strange encoding"; return XD3_INTERNAL; } if (ratio <= TEST_ADD_RATIO * (1.0 - 2 * TEST_EPSILON)) { XPR(NT "test encode with size ratio %.4f, " "expected > %.4f\n", ratio, TEST_ADD_RATIO - TEST_EPSILON); stream->msg = "strange encoding"; return XD3_INTERNAL; } /* Also check that test_compare_files works. The delta and original should * not be identical. */ if ((ret = test_compare_files (TEST_DELTA_FILE, TEST_TARGET_FILE)) == 0) { stream->msg = "broken test_compare_files"; return XD3_INTERNAL; } test_cleanup (); DOT (); } return 0; } static int check_vcdiff_header (xd3_stream *stream, const char *input, const char *line_start, const char *matches, int yes_or_no) { int ret; char vcmd[TESTBUFSIZE], gcmd[TESTBUFSIZE]; snprintf_func (vcmd, TESTBUFSIZE, "%s printhdr -f %s %s", program_name, input, TEST_RECON2_FILE); if ((ret = system (vcmd)) != 0) { XPR(NT "printhdr command: %s\n", vcmd); stream->msg = "printhdr cmd failed"; return XD3_INTERNAL; } snprintf_func (gcmd, TESTBUFSIZE, "grep \"%s.*%s.*\" %s > /dev/null", line_start, matches, TEST_RECON2_FILE); if (yes_or_no) { if ((ret = do_cmd (stream, gcmd))) { XPR(NT "%s\n", gcmd); return ret; } } else { if ((ret = do_fail (stream, gcmd))) { XPR(NT "%s\n", gcmd); return ret; } } return 0; } static int test_recode_command2 (xd3_stream *stream, int has_source, int variant, int change) { int has_adler32 = (variant & 0x1) != 0; int has_apphead = (variant & 0x2) != 0; int has_secondary = (variant & 0x4) != 0; int change_adler32 = (change & 0x1) != 0; int change_apphead = (change & 0x2) != 0; int change_secondary = (change & 0x4) != 0; int recoded_adler32 = change_adler32 ? !has_adler32 : has_adler32; int recoded_apphead = change_apphead ? !has_apphead : has_apphead; int recoded_secondary = change_secondary ? 
!has_secondary : has_secondary; char ecmd[TESTBUFSIZE], recmd[TESTBUFSIZE], dcmd[TESTBUFSIZE]; xoff_t tsize, ssize; int ret; test_setup (); if ((ret = test_make_inputs (stream, has_source ? & ssize : NULL, & tsize))) { return ret; } /* First encode */ snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s", program_name, test_softcfg_str, has_adler32 ? "" : "-n ", has_apphead ? "-A=encode_apphead " : "-A= ", has_secondary ? "-S djw " : "-S none ", has_source ? "-s " : "", has_source ? TEST_SOURCE_FILE : "", TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = system (ecmd)) != 0) { XPR(NT "encode command: %s\n", ecmd); stream->msg = "encode cmd failed"; return XD3_INTERNAL; } /* Now recode */ snprintf_func (recmd, TESTBUFSIZE, "%s recode %s -f %s %s %s %s %s", program_name, test_softcfg_str, recoded_adler32 ? "" : "-n ", !change_apphead ? "" : (recoded_apphead ? "-A=recode_apphead " : "-A= "), recoded_secondary ? "-S djw " : "-S none ", TEST_DELTA_FILE, TEST_COPY_FILE); if ((ret = system (recmd)) != 0) { XPR(NT "recode command: %s\n", recmd); stream->msg = "recode cmd failed"; return XD3_INTERNAL; } /* Check recode changes. */ if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF window indicator", "VCD_SOURCE", has_source))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_SECONDARY", recoded_secondary))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF window indicator", "VCD_ADLER32", /* Recode can't generate an adler32 * checksum, it can only preserve it or * remove it. 
*/ has_adler32 && recoded_adler32))) { return ret; } if (!change_apphead) { if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_APPHEADER", has_apphead))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF application header", "encode_apphead", has_apphead))) { return ret; } } else { if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_APPHEADER", recoded_apphead))) { return ret; } if (recoded_apphead && (ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF application header", "recode_apphead", 1))) { return ret; } } /* Now decode */ snprintf_func (dcmd, TESTBUFSIZE, "%s -fd %s %s %s %s ", program_name, has_source ? "-s " : "", has_source ? TEST_SOURCE_FILE : "", TEST_COPY_FILE, TEST_RECON_FILE); if ((ret = system (dcmd)) != 0) { XPR(NT "decode command: %s\n", dcmd); stream->msg = "decode cmd failed"; return XD3_INTERNAL; } /* Now compare. */ if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } return 0; } static int test_recode_command (xd3_stream *stream, int ignore) { /* Things to test: * - with and without a source file (recode does not change) * * (recode may or may not change -- 8 variations) * - with and without adler32 * - with and without app header * - with and without secondary */ int has_source; int variant; int change; int ret; for (has_source = 0; has_source < 2; has_source++) { for (variant = 0; variant < 8; variant++) { for (change = 0; change < 8; change++) { if ((ret = test_recode_command2 (stream, has_source, variant, change))) { return ret; } } DOT (); } } return 0; } #endif /*********************************************************************** EXTERNAL I/O DECOMPRESSION/RECOMPRESSION ***********************************************************************/ #if EXTERNAL_COMPRESSION /* This performs one step of the test_externally_compressed_io * function described below. 
It builds a pipe containing both Xdelta * and external compression/decompression that should not modify the * data passing through. */ static int test_compressed_pipe (xd3_stream *stream, main_extcomp *ext, char* buf, const char* comp_options, const char* decomp_options, int do_ext_recomp, const char* msg) { int ret; char decomp_buf[TESTBUFSIZE]; if (do_ext_recomp) { snprintf_func (decomp_buf, TESTBUFSIZE, " | %s %s", ext->decomp_cmdname, ext->decomp_options); } else { decomp_buf[0] = 0; } snprintf_func (buf, TESTBUFSIZE, "%s %s < %s | %s %s | %s %s%s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_TARGET_FILE, program_name, comp_options, program_name, decomp_options, decomp_buf, TEST_RECON_FILE); if ((ret = system (buf)) != 0) { stream->msg = msg; return XD3_INTERNAL; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return XD3_INTERNAL; } DOT (); return 0; } /* We want to test that a pipe such as: * * --> | gzip -cf | xdelta3 -cf | xdelta3 -dcf | gzip -dcf | --> * * is transparent, i.e., does not modify the stream of data. However, * we also want to verify that at the center the data is properly * compressed, i.e., that we do not just have a re-compressed gzip * format, that we have an VCDIFF format. We do this in two steps. * First test the above pipe, then test with suppressed output * recompression (-D). The result should be the original input: * * --> | gzip -cf | xdelta3 -cf | xdelta3 -Ddcf | --> * * Finally we want to test that -D also disables input decompression: * * --> | gzip -cf | xdelta3 -Dcf | xdelta3 -Ddcf | gzip -dcf | --> */ static int test_externally_compressed_io (xd3_stream *stream, int ignore) { usize_t i; int ret; char buf[TESTBUFSIZE]; mt_init (& static_mtrand, 0x9f73f7fc); if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1) { main_extcomp *ext = & extcomp_types[i]; /* Test for the existence of the external command first, if not skip. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s %s < /dev/null > /dev/null", ext->recomp_cmdname, ext->recomp_options); if ((ret = system (buf)) != 0) { XPR(NT "%s=0", ext->recomp_cmdname); continue; } if ((ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-dcfq", 1, "compression failed: identity pipe")) || (ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-Rdcfq", 0, "compression failed: without recompression")) || (ret = test_compressed_pipe (stream, ext, buf, "-Dcfq", "-Rdcfq", 1, "compression failed: without decompression"))) { return ret; } } return 0; } /* This tests the proper functioning of external decompression for * source files. The source and target files are identical and * compressed by gzip. Decoding such a delta with recompression * disbaled (-R) should produce the original, uncompressed * source/target file. Then it checks with output recompression * enabled--in this case the output should be a compressed copy of the * original source/target file. Then it checks that encoding with * decompression disabled works--the compressed files are identical * and decoding them should always produce a compressed output, * regardless of -R since the encoded delta file had decompression * disabled.. */ static int test_source_decompression (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; const main_extcomp *ext; xoff_t dsize; mt_init (& static_mtrand, 0x9f73f7fc); test_setup (); if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Use gzip. */ if ((ext = main_get_compressor ("G")) == NULL) { XPR(NT "skipped"); return 0; } /* Save an uncompressed copy. */ if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; } /* Compress the source. */ snprintf_func (buf, TESTBUFSIZE, "%s -1 %s < %s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_COPY_FILE, TEST_SOURCE_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Compress the target. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -9 %s < %s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_COPY_FILE, TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now the two identical files are compressed. Delta-encode the target, * with decompression. */ snprintf_func (buf, TESTBUFSIZE, "%s -e -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Check that the compressed file is small (b/c inputs are * identical). */ if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; } /* Deltas for identical files should be very small. */ if (dsize > 200) { XPR(NT "external compression did not happen\n"); stream->msg = "external compression did not happen"; return XD3_INTERNAL; } /* Decode the delta file with recompression disabled, should get an * uncompressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -v -dq -R -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON_FILE))) { return ret; } /* Decode the delta file with recompression, should get a compressed file * out. But we can't compare compressed files directly. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -v -dqf -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s %s < %s > %s", ext->decomp_cmdname, ext->decomp_options, TEST_RECON_FILE, TEST_RECON2_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON2_FILE))) { return ret; } /* Encode with decompression disabled */ snprintf_func (buf, TESTBUFSIZE, "%s -e -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Decode the delta file with decompression disabled, should get the * identical compressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -d -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } test_cleanup(); return 0; } #endif /*********************************************************************** FORCE, STDOUT ***********************************************************************/ /* This tests that output will not overwrite an existing file unless * -f was specified. The test is for encoding (the same code handles * it for decoding). */ static int test_force_behavior (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; /* Create empty target file */ test_setup (); snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode to delta file */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode again, should fail. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -q -e %s %s ", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_fail (stream, buf))) { return ret; } /* Force it, should succeed. */ snprintf_func (buf, TESTBUFSIZE, "%s -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup(); return 0; } /* This checks the proper operation of the -c flag. When specified * the default output becomes stdout, otherwise the input must be * provided (encode) or it may be defaulted (decode w/ app header). */ static int test_stdout_behavior (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup(); snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Without -c, encode writes to delta file */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* With -c, encode writes to stdout */ snprintf_func (buf, TESTBUFSIZE, "%s -e -c %s > %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Without -c, decode writes to target file name, but it fails because the * file exists. */ snprintf_func (buf, TESTBUFSIZE, "%s -q -d %s ", program_name, TEST_DELTA_FILE); if ((ret = do_fail (stream, buf))) { return ret; } /* With -c, decode writes to stdout */ snprintf_func (buf, TESTBUFSIZE, "%s -d -c %s > /dev/null", program_name, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup(); return 0; } /* This tests that the no-output flag (-J) works. 
*/ static int test_no_output (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup (); snprintf_func (buf, TESTBUFSIZE, "touch %s && chmod 0000 %s", TEST_NOPERM_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Try no_output encode w/out unwritable output file */ snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now really write the delta to test decode no-output */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup (); return 0; } /*********************************************************************** Source identical optimization ***********************************************************************/ /* Computing a delta should be fastest when the two inputs are * identical, this checks it. The library is called to compute a * delta between a 10000 byte file, 1000 byte winsize, 500 byte source * blocksize. The same buffer is used for both source and target. */ static int test_identical_behavior (xd3_stream *stream, int ignore) { #define IDB_TGTSZ 10000 /* Not a power of two b/c of hard-coded expectations below. 
*/ #define IDB_BLKSZ 512 #define IDB_WINSZ 1000 #define IDB_DELSZ 1000 #define IDB_WINCNT (IDB_TGTSZ / IDB_WINSZ) int ret, i; uint8_t buf[IDB_TGTSZ]; uint8_t del[IDB_DELSZ]; uint8_t rec[IDB_TGTSZ]; xd3_source source; int nextencwin = 0; int winstarts = 0, winfinishes = 0; usize_t delpos = 0, recsize; xd3_config config; memset(&source, 0, sizeof(source)); for (i = 0; i < IDB_TGTSZ; i += 1) { buf[i] = (uint8_t) mt_random (&static_mtrand); } stream->winsize = IDB_WINSZ; source.blksize = IDB_BLKSZ; source.name = ""; source.curblk = NULL; source.curblkno = 0; if ((ret = xd3_set_source (stream, & source))) { goto fail; } /* Compute an delta between identical source and targets. */ for (;;) { ret = xd3_encode_input (stream); if (ret == XD3_INPUT) { xd3_avail_input (stream, buf + (IDB_WINSZ * nextencwin), IDB_WINSZ); nextencwin += 1; continue; } if (ret == XD3_GETSRCBLK) { source.curblkno = source.getblkno; source.onblk = IDB_BLKSZ; source.curblk = buf + source.getblkno * IDB_BLKSZ; continue; } if (ret == XD3_WINSTART) { winstarts++; continue; } if (ret == XD3_WINFINISH) { winfinishes++; if (winfinishes == IDB_WINCNT) { break; } continue; } if (ret != XD3_OUTPUT) { goto fail; } CHECK(delpos + stream->avail_out <= IDB_DELSZ); memcpy (del + delpos, stream->next_out, stream->avail_out); delpos += stream->avail_out; xd3_consume_output (stream); } CHECK(winfinishes == IDB_WINCNT); CHECK(winstarts == IDB_WINCNT); CHECK(nextencwin == IDB_WINCNT); /* Reset. */ memset(&source, 0, sizeof(source)); source.blksize = IDB_TGTSZ; source.onblk = IDB_TGTSZ; source.curblk = buf; source.curblkno = 0; if ((ret = xd3_close_stream (stream))) { goto fail; } xd3_free_stream (stream); xd3_init_config (& config, 0); if ((ret = xd3_config_stream (stream, & config))) { goto fail; } if ((ret = xd3_set_source_and_size (stream, & source, IDB_TGTSZ))) { goto fail; } /* Decode. */ if ((ret = xd3_decode_stream (stream, del, delpos, rec, & recsize, IDB_TGTSZ))) { goto fail; } /* Check result size and data. 
*/ if (recsize != IDB_TGTSZ) { stream->msg = "wrong size reconstruction"; goto fail; } if (memcmp (rec, buf, IDB_TGTSZ) != 0) { stream->msg = "wrong data reconstruction"; goto fail; } /* Check that there was one copy per window. */ IF_DEBUG (if (stream->n_scpy != IDB_WINCNT || stream->n_add != 0 || stream->n_run != 0) { stream->msg = "wrong copy count"; goto fail; }); /* Check that no checksums were computed because the initial match was presumed. */ IF_DEBUG (if (stream->large_ckcnt != 0) { stream->msg = "wrong checksum behavior"; goto fail; }); ret = 0; fail: return ret; } /*********************************************************************** String matching test ***********************************************************************/ /* Check particular matching behaviors by calling * xd3_string_match_soft directly with specific arguments. */ typedef struct _string_match_test string_match_test; typedef enum { SM_NONE = 0, SM_LAZY = (1 << 1), } string_match_flags; struct _string_match_test { const char *input; int flags; const char *result; }; static const string_match_test match_tests[] = { /* nothing */ { "1234567890", SM_NONE, "" }, /* basic run, copy */ { "11111111112323232323", SM_NONE, "R0/10 C12/8@10" }, /* no run smaller than MIN_RUN=8 */ { "1111111", SM_NONE, "C1/6@0" }, { "11111111", SM_NONE, "R0/8" }, /* simple promotion: the third copy address depends on promotion */ { "ABCDEF_ABCDEF^ABCDEF", SM_NONE, "C7/6@0 C14/6@7" }, /* { "ABCDEF_ABCDEF^ABCDEF", SM_PROMOTE, "C7/6@0 C14/6@0" }, forgotten */ /* simple lazy: there is a better copy starting with "23 X" than "123 " */ { "123 23 XYZ 123 XYZ", SM_NONE, "C11/4@0" }, { "123 23 XYZ 123 XYZ", SM_LAZY, "C11/4@0 C12/6@4" }, /* trylazy: no lazy matches unless there are at least two characters beyond * the first match */ { "2123_121212", SM_LAZY, "C7/4@5" }, { "2123_1212123", SM_LAZY, "C7/4@5" }, { "2123_1212123_", SM_LAZY, "C7/4@5 C8/5@0" }, /* trylazy: no lazy matches if the copy is >= MAXLAZY=10 */ { 
"2123_121212123_", SM_LAZY, "C7/6@5 C10/5@0" }, { "2123_12121212123_", SM_LAZY, "C7/8@5 C12/5@0" }, { "2123_1212121212123_", SM_LAZY, "C7/10@5" }, /* lazy run: check a run overlapped by a longer copy */ { "11111112 111111112 1", SM_LAZY, "C1/6@0 R9/8 C10/10@0" }, /* lazy match: match_length,run_l >= min_match tests, shouldn't get any * copies within the run, no run within the copy */ { "^________^________ ", SM_LAZY, "R1/8 C9/9@0" }, /* chain depth: it only goes back 10. this checks that the 10th match hits * and the 11th misses. */ { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/5@0" }, { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234>1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/4@45 C55/4@50" }, /* ssmatch test */ { "ABCDE___ABCDE*** BCDE***", SM_NONE, "C8/5@0 C17/4@1" }, /*{ "ABCDE___ABCDE*** BCDE***", SM_SSMATCH, "C8/5@0 C17/7@9" }, forgotten */ }; static int test_string_matching (xd3_stream *stream, int ignore) { usize_t i; int ret; xd3_config config; char rbuf[TESTBUFSIZE]; for (i = 0; i < SIZEOF_ARRAY (match_tests); i += 1) { const string_match_test *test = & match_tests[i]; char *rptr = rbuf; usize_t len = (usize_t) strlen (test->input); xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 4; config.smatcher_soft.large_step = 4; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 10; config.smatcher_soft.small_lchain = 10; config.smatcher_soft.max_lazy = (test->flags & SM_LAZY) ? 10 : 0; config.smatcher_soft.long_enough = 10; if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_encode_init_full (stream))) { return ret; } xd3_avail_input (stream, (uint8_t*)test->input, len); if ((ret = stream->smatcher.string_match (stream))) { return ret; } *rptr = 0; while (! 
xd3_rlist_empty (& stream->iopt_used)) { xd3_rinst *inst = xd3_rlist_pop_front (& stream->iopt_used); switch (inst->type) { case XD3_RUN: *rptr++ = 'R'; break; case XD3_CPY: *rptr++ = 'C'; break; default: CHECK(0); } snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d", inst->pos, inst->size); rptr += strlen (rptr); if (inst->type == XD3_CPY) { *rptr++ = '@'; snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%"Q"d", inst->addr); rptr += strlen (rptr); } *rptr++ = ' '; xd3_rlist_push_back (& stream->iopt_free, inst); } if (rptr != rbuf) { rptr -= 1; *rptr = 0; } if (strcmp (rbuf, test->result) != 0) { XPR(NT "test %u: expected %s: got %s", i, test->result, rbuf); stream->msg = "wrong result"; return XD3_INTERNAL; } } return 0; } /* * This is a test for many overlapping instructions. It must be a lazy * matcher. */ static int test_iopt_flush_instructions (xd3_stream *stream, int ignore) { int ret, i; usize_t tpos = 0; usize_t delta_size, recon_size; xd3_config config; uint8_t target[TESTBUFSIZE]; uint8_t delta[TESTBUFSIZE]; uint8_t recon[TESTBUFSIZE]; xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 16; config.smatcher_soft.large_step = 16; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 128; config.smatcher_soft.small_lchain = 16; config.smatcher_soft.max_lazy = 8; config.smatcher_soft.long_enough = 128; if ((ret = xd3_config_stream (stream, & config))) { return ret; } for (i = 1; i < 250; i++) { target[tpos++] = i; target[tpos++] = i+1; target[tpos++] = i+2; target[tpos++] = i+3; target[tpos++] = 0; } for (i = 1; i < 253; i++) { target[tpos++] = i; } if ((ret = xd3_encode_stream (stream, target, tpos, delta, & delta_size, sizeof (delta)))) { return ret; } xd3_free_stream(stream); if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_decode_stream (stream, delta, delta_size, recon, & recon_size, sizeof (recon)))) { return ret; } CHECK(tpos == 
recon_size); CHECK(memcmp(target, recon, recon_size) == 0); return 0; } /* * This tests the 32/64bit ambiguity for source-window matching. */ static int test_source_cksum_offset (xd3_stream *stream, int ignore) { xd3_source source; // Inputs are: struct { xoff_t cpos; // stream->srcwin_cksum_pos; xoff_t ipos; // stream->total_in; xoff_t size; // stream->src->size; usize_t input; // input 32-bit offset xoff_t output; // output 64-bit offset } cksum_test[] = { // If cpos is <= 2^32 { 1, 1, 1, 1, 1 }, #if XD3_USE_LARGEFILE64 // cpos ipos size input output // 0x____xxxxxULL, 0x____xxxxxULL, 0x____xxxxxULL, 0x___xxxxxUL, 0x____xxxxxULL { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0x00000000UL, 0x100000000ULL }, { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0xF0000000UL, 0x0F0000000ULL }, { 0x100200000ULL, 0x100100000ULL, 0x100200000ULL, 0x00300000UL, 0x000300000ULL }, { 25771983104ULL, 25770000000ULL, 26414808769ULL, 2139216707UL, 23614053187ULL }, #endif { 0, 0, 0, 0, 0 }, }, *test_ptr; stream->src = &source; for (test_ptr = cksum_test; test_ptr->cpos; test_ptr++) { xoff_t r; stream->srcwin_cksum_pos = test_ptr->cpos; stream->total_in = test_ptr->ipos; r = xd3_source_cksum_offset(stream, test_ptr->input); CHECK(r == test_ptr->output); } return 0; } static int test_in_memory (xd3_stream *stream, int ignore) { // test_text is 256 bytes uint8_t ibuf[sizeof(test_text)]; uint8_t dbuf[sizeof(test_text)]; uint8_t obuf[sizeof(test_text)]; usize_t size = sizeof(test_text); usize_t dsize, osize; int r1, r2; int eflags = SECONDARY_DJW ? 
XD3_SEC_DJW : 0; memcpy(ibuf, test_text, size); memset(ibuf + 128, 0, 16); r1 = xd3_encode_memory(ibuf, size, test_text, size, dbuf, &dsize, size, eflags); r2 = xd3_decode_memory(dbuf, dsize, test_text, size, obuf, &osize, size, 0); if (r1 != 0 || r2 != 0 || dsize >= (size/2) || dsize < 1 || osize != size) { stream->msg = "encode/decode size error"; return XD3_INTERNAL; } if (memcmp(obuf, ibuf, size) != 0) { stream->msg = "encode/decode data error"; return XD3_INTERNAL; } return 0; } /*********************************************************************** TEST MAIN ***********************************************************************/ static int xd3_selftest (void) { #define DO_TEST(fn,flags,arg) \ do { \ xd3_stream stream; \ xd3_config config; \ xd3_init_config (& config, flags); \ XPR(NT "testing " #fn "%s...", \ flags ? (" (" #flags ")") : ""); \ if ((ret = xd3_config_stream (& stream, & config) == 0) && \ (ret = test_ ## fn (& stream, arg)) == 0) { \ XPR(NTR " success\n"); \ } else { \ XPR(NTR " failed: %s: %s\n", xd3_errstring (& stream), \ xd3_mainerror (ret)); } \ xd3_free_stream (& stream); \ if (ret != 0) { goto failure; } \ } while (0) int ret; DO_TEST (random_numbers, 0, 0); DO_TEST (decode_integer_end_of_input, 0, 0); DO_TEST (decode_integer_overflow, 0, 0); DO_TEST (encode_decode_uint32_t, 0, 0); DO_TEST (encode_decode_uint64_t, 0, 0); DO_TEST (usize_t_overflow, 0, 0); DO_TEST (forward_match, 0, 0); DO_TEST (address_cache, 0, 0); IF_GENCODETBL (DO_TEST (address_cache, XD3_ALT_CODE_TABLE, 0)); DO_TEST (string_matching, 0, 0); DO_TEST (choose_instruction, 0, 0); DO_TEST (identical_behavior, 0, 0); DO_TEST (in_memory, 0, 0); IF_GENCODETBL (DO_TEST (choose_instruction, XD3_ALT_CODE_TABLE, 0)); IF_GENCODETBL (DO_TEST (encode_code_table, 0, 0)); DO_TEST (iopt_flush_instructions, 0, 0); DO_TEST (source_cksum_offset, 0, 0); DO_TEST (decompress_single_bit_error, 0, 3); DO_TEST (decompress_single_bit_error, XD3_ADLER32, 3); IF_LZMA (DO_TEST 
(decompress_single_bit_error, XD3_SEC_LZMA, 54)); IF_FGK (DO_TEST (decompress_single_bit_error, XD3_SEC_FGK, 3)); IF_DJW (DO_TEST (decompress_single_bit_error, XD3_SEC_DJW, 8)); /* There are many expected non-failures for ALT_CODE_TABLE because * not all of the instruction codes are used. */ IF_GENCODETBL ( DO_TEST (decompress_single_bit_error, XD3_ALT_CODE_TABLE, 224)); #if SHELL_TESTS DO_TEST (force_behavior, 0, 0); DO_TEST (stdout_behavior, 0, 0); DO_TEST (no_output, 0, 0); DO_TEST (command_line_arguments, 0, 0); #if EXTERNAL_COMPRESSION DO_TEST (source_decompression, 0, 0); DO_TEST (externally_compressed_io, 0, 0); #endif DO_TEST (recode_command, 0, 0); #endif IF_LZMA (DO_TEST (secondary_lzma, 0, 1)); IF_DJW (DO_TEST (secondary_huff, 0, DJW_MAX_GROUPS)); IF_FGK (DO_TEST (secondary_fgk, 0, 1)); DO_TEST (compressed_stream_overflow, 0, 0); IF_LZMA (DO_TEST (compressed_stream_overflow, XD3_SEC_LZMA, 0)); failure: test_cleanup (); return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE; #undef DO_TEST }
/* xdelta 3 - delta compression tools and library Copyright (C) 2001,
 * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.
 * Joshua P. MacDonald
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* This is public-domain Mersenne Twister code,
 * attributed to Michael Brundage.  Thanks!
 * http://www.qbrundage.com/michaelb/pubs/essays/random_number_generation.html
 *
 * The tests depend on the exact output stream of this generator (e.g.
 * fixed seeds reproduce fixed test inputs), so it must not be altered. */
static const uint32_t TEST_SEED1 = 5489UL;
#define MT_LEN 624                /* MT19937 state size (words) */
#define MT_IA  397                /* MT19937 shift offset */
static const uint32_t UPPER_MASK = 0x80000000;
static const uint32_t LOWER_MASK = 0x7FFFFFFF;
static const uint32_t MATRIX_A   = 0x9908B0DF;

#ifndef SHELL_TESTS
#define SHELL_TESTS 1
#endif

typedef struct mtrand mtrand;

/* Generator state: a 624-word buffer plus the index of the next output. */
struct mtrand {
  int mt_index_;
  uint32_t mt_buffer_[MT_LEN];
};

int test_compare_files (const char* tgt, const char *rec);
void mt_init(mtrand *mt, uint32_t seed);
uint32_t mt_random (mtrand *mt);
int test_setup (void);

/* Seed the state buffer; mt_index_ = MT_LEN forces a regeneration on
 * the first call to mt_random(). */
void mt_init(mtrand *mt, uint32_t seed) {
  int i;
  mt->mt_buffer_[0] = seed;
  mt->mt_index_ = MT_LEN;
  for (i = 1; i < MT_LEN; i++) {
    /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
    /* In the previous versions, MSBs of the seed affect   */
    /* only MSBs of the array mt[].                        */
    /* 2002/01/09 modified by Makoto Matsumoto             */
    mt->mt_buffer_[i] =
      (1812433253UL * (mt->mt_buffer_[i-1] ^ (mt->mt_buffer_[i-1] >> 30)) + i);
  }
}

/* Return the next 32-bit pseudo-random value, regenerating the whole
 * state block in three strides when it is exhausted, then tempering
 * the raw word. */
uint32_t mt_random (mtrand *mt) {
  uint32_t y;
  unsigned long mag01[2];
  mag01[0] = 0;
  mag01[1] = MATRIX_A;

  if (mt->mt_index_ >= MT_LEN) {
    int kk;

    for (kk = 0; kk < MT_LEN - MT_IA; kk++) {
      y = (mt->mt_buffer_[kk] & UPPER_MASK) |
	(mt->mt_buffer_[kk + 1] & LOWER_MASK);
      mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^
	(y >> 1) ^ mag01[y & 0x1UL];
    }
    for (;kk < MT_LEN - 1; kk++) {
      y = (mt->mt_buffer_[kk] & UPPER_MASK) |
	(mt->mt_buffer_[kk + 1] & LOWER_MASK);
      mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^
	(y >> 1) ^ mag01[y & 0x1UL];
    }
    y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |
      (mt->mt_buffer_[0] & LOWER_MASK);
    mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^
      (y >> 1) ^ mag01[y & 0x1UL];
    mt->mt_index_ = 0;
  }

  /* Tempering transform. */
  y = mt->mt_buffer_[mt->mt_index_++];
  y ^= (y >> 11);
  y ^= (y << 7) & 0x9d2c5680UL;
  y ^= (y << 15) & 0xefc60000UL;
  y ^= (y >> 18);
  return y;
}

static mtrand static_mtrand;

#include <math.h>

/* Draw from an exponential distribution with the given mean, clamped
 * to max_value.  Used to generate "realistic" add/copy lengths. */
static uint32_t
mt_exp_rand (uint32_t mean, uint32_t max_value)
{
  double mean_d = mean;
  double erand  = log (1.0 / (mt_random (&static_mtrand) /
			      (double)UINT32_MAX));
  uint32_t x = (uint32_t) (mean_d * erand + 0.5);

  return min (x, max_value);
}

#if SHELL_TESTS
#include <sys/wait.h>
#endif

/* True when the stream's error message equals x exactly. */
#define MSG_IS(x) (stream->msg != NULL && strcmp ((x), stream->msg) == 0)

static const usize_t TWO_MEGS_AND_DELTA = (3 << 20);
static const usize_t ADDR_CACHE_ROUNDS = 10000;

static const usize_t TEST_FILE_MEAN   = 16384;
static const double TEST_ADD_MEAN     = 128;
static const double TEST_ADD_MAX      = 512;
static const double TEST_ADD_RATIO    = 0.1;
static const double TEST_EPSILON      = 0.25;

#define TESTBUFSIZE (1024 * 16)

#define TESTFILESIZE (1024)

/* Per-run scratch file names, filled in by test_setup(). */
static char TEST_TARGET_FILE[TESTFILESIZE];
static char TEST_SOURCE_FILE[TESTFILESIZE];
static char TEST_DELTA_FILE[TESTFILESIZE];
static char TEST_RECON_FILE[TESTFILESIZE];
static char TEST_RECON2_FILE[TESTFILESIZE];
static char TEST_COPY_FILE[TESTFILESIZE];
static char TEST_NOPERM_FILE[TESTFILESIZE];

/* Hard assertion: print the failed condition and abort. */
#define CHECK(cond) if (!(cond)) { XPR(NT "check failure: " #cond); abort(); }

#if SHELL_TESTS
/* Use a fixed soft config so that test values are fixed.  See also
 * test_compress_text(). */
static const char* test_softcfg_str = "-C9,3,4,8,2,36,70";
#endif

/***********************************************************************
 TEST HELPERS
 ***********************************************************************/

/* Progress indicator. */
static void DOT (void)
{ XPR(NTR "."); }

/* Run a shell command that is expected to succeed (exit status 0).
 * On failure, record a message in the stream and return the raw
 * system() status. */
static int do_cmd (xd3_stream *stream, const char *buf)
{
  int ret;
  if ((ret = system (buf)) != 0)
    {
      if (WIFEXITED (ret))
	{
	  stream->msg = "command exited non-zero";
	  IF_DEBUG1 (XPR(NT "command was: %s\n", buf));
	}
      else
	{
	  stream->msg = "abnormal command termination";
	}
      return ret;
    }
  return 0;
}

/* Run a shell command that is expected to fail with exit status
 * exactly 1 (the xdelta3 error exit); anything else is a test
 * failure. */
static int do_fail (xd3_stream *stream, const char *buf)
{
  int ret;
  ret = system (buf);
  if (! WIFEXITED (ret) || WEXITSTATUS (ret) != 1)
    {
      stream->msg = "command should have not succeeded";
      XPR(NT "command was %s\n", buf);
      return XD3_INTERNAL;
    }
  return 0;
}

/* Test that the exponential distribution actually produces its mean.
*/ static int test_random_numbers (xd3_stream *stream, int ignore) { usize_t i; usize_t sum = 0; usize_t mean = 50; usize_t n_rounds = 1000000; double average, error; double allowed_error = 0.1; mt_init (& static_mtrand, 0x9f73f7fe); for (i = 0; i < n_rounds; i += 1) { sum += mt_exp_rand (mean, USIZE_T_MAX); } average = (double) sum / (double) n_rounds; error = average - (double) mean; if (error < allowed_error && error > -allowed_error) { return 0; } /*XPR(NT "error is %f\n", error);*/ stream->msg = "random distribution looks broken"; return XD3_INTERNAL; } static void test_unlink (char* file) { int ret; if ((ret = unlink (file)) != 0 && errno != ENOENT) { XPR(NT "unlink %s failed: %s\n", file, strerror(ret)); } } static void test_cleanup (void) { #if 1 test_unlink (TEST_TARGET_FILE); test_unlink (TEST_SOURCE_FILE); test_unlink (TEST_DELTA_FILE); test_unlink (TEST_RECON_FILE); test_unlink (TEST_RECON2_FILE); test_unlink (TEST_COPY_FILE); test_unlink (TEST_NOPERM_FILE); #endif } int test_setup (void) { static int x = 0; x++; snprintf_func (TEST_TARGET_FILE, TESTFILESIZE, "/tmp/xdtest.target.%d", x); snprintf_func (TEST_SOURCE_FILE, TESTFILESIZE, "/tmp/xdtest.source.%d", x); snprintf_func (TEST_DELTA_FILE, TESTFILESIZE, "/tmp/xdtest.delta.%d", x); snprintf_func (TEST_RECON_FILE, TESTFILESIZE, "/tmp/xdtest.recon.%d", x); snprintf_func (TEST_RECON2_FILE, TESTFILESIZE, "/tmp/xdtest.recon2.%d", x); snprintf_func (TEST_COPY_FILE, TESTFILESIZE, "/tmp/xdtest.copy.%d", x); snprintf_func (TEST_NOPERM_FILE, TESTFILESIZE, "/tmp/xdtest.noperm.%d", x); test_cleanup(); return 0; } static int test_make_inputs (xd3_stream *stream, xoff_t *ss_out, xoff_t *ts_out) { usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; uint8_t *buf = (uint8_t*) malloc (ts + ss), *sbuf = buf, *tbuf = buf + ss; usize_t sadd = 0, sadd_max = (usize_t)(ss * TEST_ADD_RATIO); FILE *tf = NULL, *sf = 
NULL; usize_t i, j; int ret; if (buf == NULL) { return ENOMEM; } if ((tf = fopen (TEST_TARGET_FILE, "w")) == NULL || (ss_out != NULL && (sf = fopen (TEST_SOURCE_FILE, "w")) == NULL)) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if (ss_out != NULL) { for (i = 0; i < ss; ) { sbuf[i++] = (uint8_t) mt_random (&static_mtrand); } } /* Then modify the data to produce copies, everything not copied is * an add. The following logic produces the TEST_ADD_RATIO. The * variable SADD contains the number of adds so far, which should * not exceed SADD_MAX. */ /* XPR(NT "ss = %u ts = %u\n", ss, ts); */ for (i = 0; i < ts; ) { usize_t left = ts - i; usize_t next = mt_exp_rand ((uint32_t) TEST_ADD_MEAN, (uint32_t) TEST_ADD_MAX); usize_t add_left = sadd_max - sadd; double add_prob = (left == 0) ? 0 : (add_left / (double) left); int do_copy; next = min (left, next); do_copy = (next > add_left || (mt_random (&static_mtrand) / \ (double)USIZE_T_MAX) >= add_prob); if (ss_out == NULL) { do_copy &= (i > 0); } else { do_copy &= (ss - next) > 0; } if (do_copy) { /* Copy */ size_t offset = mt_random (&static_mtrand) % ((ss_out == NULL) ? i : (ss - next)); /* XPR(NT "[%u] copy %u at %u ", i, next, offset); */ for (j = 0; j < next; j += 1) { char c = ((ss_out == NULL) ? 
tbuf : sbuf)[offset + j]; /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ } else { /* Add */ /* XPR(NT "[%u] add %u ", i, next); */ for (j = 0; j < next; j += 1) { char c = (char) mt_random (&static_mtrand); /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ sadd += next; } } /* XPR(NT "sadd = %u max = %u\n", sadd, sadd_max); */ if ((fwrite (tbuf, 1, ts, tf) != ts) || (ss_out != NULL && (fwrite (sbuf, 1, ss, sf) != ss))) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if ((ret = fclose (tf)) || (ss_out != NULL && (ret = fclose (sf)))) { stream->msg = "close failed"; ret = get_errno (); goto failure; } if (ts_out) { (*ts_out) = ts; } if (ss_out) { (*ss_out) = ss; } failure: free (buf); return ret; } int test_compare_files (const char* tgt, const char *rec) { FILE *orig, *recons; static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE]; xoff_t offset = 0; size_t i; size_t oc, rc; xoff_t diffs = 0; if ((orig = fopen (tgt, "r")) == NULL) { XPR(NT "open %s failed\n", tgt); return get_errno (); } if ((recons = fopen (rec, "r")) == NULL) { XPR(NT "open %s failed\n", rec); return get_errno (); } for (;;) { oc = fread (obuf, 1, TESTBUFSIZE, orig); rc = fread (rbuf, 1, TESTBUFSIZE, recons); if (oc != rc) { return XD3_INTERNAL; } if (oc == 0) { break; } for (i = 0; i < oc; i += 1) { if (obuf[i] != rbuf[i]) { XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n", (int)i, (int)oc, offset, obuf[i], rbuf[i]); diffs++; return XD3_INTERNAL; } } offset += oc; } fclose (orig); fclose (recons); if (diffs != 0) { return XD3_INTERNAL; } return 0; } static int test_copy_to (const char *from, const char *to) { char buf[TESTBUFSIZE]; int ret; snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", from, to); if ((ret = system (buf)) != 0) { return XD3_INTERNAL; } return 0; } static int test_save_copy (const char *origname) { return test_copy_to(origname, TEST_COPY_FILE); } static int test_file_size (const char* file, 
xoff_t *size)
{
  /* Stat FILE, verify it is a regular file, and return its size. */
  struct stat sbuf;
  int ret;
  (*size) = 0;

  if (stat (file, & sbuf) < 0)
    {
      ret = get_errno ();
      XPR(NT "stat failed: %s: %s\n", file, strerror (ret));
      return ret;
    }

  if (! S_ISREG (sbuf.st_mode))
    {
      ret = XD3_INTERNAL;
      XPR(NT "not a regular file: %s: %s\n", file, strerror (ret));
      return ret;
    }

  (*size) = sbuf.st_size;
  return 0;
}

/***********************************************************************
 READ OFFSET
 ***********************************************************************/

/* Common test for read_integer errors: encodes a 64-bit value and
 * then attempts to read as a 32-bit value.  If TRUNC is non-zero,
 * attempts to get errors by shortening the input, otherwise it should
 * overflow.  Expects XD3_INTERNAL and MSG. */
static int
test_read_integer_error (xd3_stream *stream, usize_t trunto, const char *msg)
{
  /* 2^34 does not fit in 32 bits, so a 32-bit read must fail. */
  uint64_t eval = 1ULL << 34;
  uint32_t rval;
  xd3_output *buf = NULL;
  const uint8_t *max;
  const uint8_t *inp;
  int ret;

  buf = xd3_alloc_output (stream, buf);

  if ((ret = xd3_emit_uint64_t (stream, & buf, eval)))
    {
      goto fail;
    }

 again:
  inp = buf->base;
  max = buf->base + buf->next - trunto;

  if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !=
      XD3_INVALID_INPUT ||
      !MSG_IS (msg))
    {
      ret = XD3_INTERNAL;
    }
  else if (trunto && trunto < buf->next)
    {
      /* Keep shortening the input until only the first byte remains. */
      trunto += 1;
      goto again;
    }
  else
    {
      ret = 0;
    }

 fail:
  xd3_free_output (stream, buf);
  return ret;
}

/* Test integer overflow using the above routine. */
static int
test_decode_integer_overflow (xd3_stream *stream, int unused)
{
  /* NOTE: "read_intger" matches the (misspelled) message the library
   * actually produces; do not "fix" this string. */
  return test_read_integer_error (stream, 0, "overflow in read_intger");
}

/* Test integer EOI using the above routine. */
static int
test_decode_integer_end_of_input (xd3_stream *stream, int unused)
{
  return test_read_integer_error (stream, 1, "end-of-input in read_integer");
}

/* Test that emit_integer/decode_integer/sizeof_integer/read_integer
 * work on correct inputs.  Tests powers of (2^7), plus or minus, up
 * to the maximum value.
 */
#define TEST_ENCODE_DECODE_INTEGER(TYPE,ONE,MAX) \
  xd3_output *rbuf = NULL; \
  xd3_output *dbuf = NULL; \
  TYPE values[64]; \
  usize_t nvalues = 0; \
  usize_t i; \
  int ret = 0; \
 \
  for (i = 0; i < (sizeof (TYPE) * 8); i += 7) \
    { \
      values[nvalues++] = (ONE << i) - ONE; \
      values[nvalues++] = (ONE << i); \
      values[nvalues++] = (ONE << i) + ONE; \
    } \
 \
  values[nvalues++] = MAX-ONE; \
  values[nvalues++] = MAX; \
 \
  rbuf = xd3_alloc_output (stream, rbuf); \
  dbuf = xd3_alloc_output (stream, dbuf); \
 \
  for (i = 0; i < nvalues; i += 1) \
    { \
      const uint8_t *max; \
      const uint8_t *inp; \
      TYPE val; \
 \
      DOT (); \
      rbuf->next = 0; \
 \
      if ((ret = xd3_emit_ ## TYPE (stream, & rbuf, values[i])) || \
	  (ret = xd3_emit_ ## TYPE (stream, & dbuf, values[i]))) \
	{ \
	  goto fail; \
	} \
 \
      inp = rbuf->base; \
      max = rbuf->base + rbuf->next; \
 \
      if (rbuf->next != xd3_sizeof_ ## TYPE (values[i])) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
 \
      if ((ret = xd3_read_ ## TYPE (stream, & inp, max, & val))) \
	{ \
	  goto fail; \
	} \
 \
      if (val != values[i]) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
 \
      DOT (); \
    } \
 \
  stream->next_in  = dbuf->base; \
  stream->avail_in = dbuf->next; \
 \
  for (i = 0; i < nvalues; i += 1) \
    { \
      TYPE val; \
 \
      if ((ret = xd3_decode_ ## TYPE (stream, & val))) \
	{ \
	  goto fail; \
	} \
 \
      if (val != values[i]) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
    } \
 \
  if (stream->avail_in != 0) \
    { \
      ret = XD3_INTERNAL; \
      goto fail; \
    } \
 \
 fail: \
  xd3_free_output (stream, rbuf); \
  xd3_free_output (stream, dbuf); \
 \
  return ret

static int
test_encode_decode_uint32_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint32_t,1U,UINT32_MAX);
}

static int
test_encode_decode_uint64_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint64_t,1ULL,UINT64_MAX);
}

/* Exhaustively check the USIZE_T_OVERFLOW predicate at the boundary
 * values: no false positives below USIZE_T_MAX, no false negatives
 * just above it. */
static int
test_usize_t_overflow (xd3_stream *stream, int unused)
{
  if (USIZE_T_OVERFLOW (USIZE_T_MAX, 0)) { goto fail; }
  if (USIZE_T_OVERFLOW (0, USIZE_T_MAX)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2,
USIZE_T_MAX / 2)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2 + 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX, 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (1, USIZE_T_MAX)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX / 2 + 1, USIZE_T_MAX / 2 + 1)) { goto fail; }

  return 0;

 fail:
  stream->msg = "incorrect overflow computation";
  return XD3_INTERNAL;
}

/* Check xd3_forward_match() against the brute-force answer for every
 * prefix length and every single mismatching position. */
static int
test_forward_match (xd3_stream *stream, int unused)
{
  usize_t i;
  uint8_t buf1[256], buf2[256];

  memset(buf1, 0, 256);
  memset(buf2, 0, 256);

  for (i = 0; i < 256; i++)
    {
      CHECK(xd3_forward_match(buf1, buf2, i) == (int)i);
    }

  for (i = 0; i < 255; i++)
    {
      buf2[i] = 1;
      CHECK(xd3_forward_match(buf1, buf2, 256) == (int)i);
      buf2[i] = 0;
    }

  return 0;
}

/***********************************************************************
 Address cache
 ***********************************************************************/

/* Encode ADDR_CACHE_ROUNDS randomly-chosen COPY addresses, then decode
 * the produced byte stream and verify every address and that every
 * address mode was exercised at least once. */
static int
test_address_cache (xd3_stream *stream, int unused)
{
  int ret;
  usize_t i;
  usize_t offset;
  usize_t *addrs;
  uint8_t *big_buf, *buf_max;
  const uint8_t *buf;
  xd3_output *outp;
  uint8_t *modes;
  int mode_counts[16];

  stream->acache.s_near = stream->code_table_desc->near_modes;
  stream->acache.s_same = stream->code_table_desc->same_modes;

  if ((ret = xd3_encode_init_partial (stream))) { return ret; }

  addrs = (usize_t*) xd3_alloc (stream, sizeof (usize_t), ADDR_CACHE_ROUNDS);
  modes = (uint8_t*) xd3_alloc (stream, sizeof (uint8_t), ADDR_CACHE_ROUNDS);

  memset (mode_counts, 0, sizeof (mode_counts));
  memset (modes, 0, ADDR_CACHE_ROUNDS);

  addrs[0] = 0;

  mt_init (& static_mtrand, 0x9f73f7fc);

  /* First pass: encode addresses */
  xd3_init_cache (& stream->acache);

  for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1)
    {
      double p;
      usize_t addr;
      usize_t prev_i;
      usize_t nearby;

      p         = (mt_random (&static_mtrand) / (double)USIZE_T_MAX);
      prev_i    = mt_random (&static_mtrand) % offset;
      nearby    = (mt_random (&static_mtrand) % 256) % offset;
      nearby    = max (1U, nearby);

      /* Mix of same-address, near-address, and arbitrary addresses so
       * all cache modes get exercised. */
      if (p < 0.1)
	{
	  addr = addrs[offset-nearby];
	}
      else if (p < 0.4)
	{
	  addr = min (addrs[prev_i] + nearby, offset-1);
	}
      else
	{
	  addr = prev_i;
	}

      if ((ret = xd3_encode_address (stream, addr, offset, & modes[offset]))) { return ret; }

      addrs[offset] = addr;
      mode_counts[modes[offset]] += 1;
    }

  /* Copy addresses into a contiguous buffer. */
  big_buf = (uint8_t*) xd3_alloc (stream, xd3_sizeof_output (ADDR_HEAD (stream)), 1);

  for (offset = 0, outp = ADDR_HEAD (stream); outp != NULL;
       offset += outp->next, outp = outp->next_page)
    {
      memcpy (big_buf + offset, outp->base, outp->next);
    }

  buf_max = big_buf + offset;
  buf     = big_buf;

  /* Second pass: decode addresses */
  xd3_init_cache (& stream->acache);

  for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1)
    {
      uint32_t addr;

      if ((ret = xd3_decode_address (stream, offset, modes[offset],
				     & buf, buf_max, & addr)))
	{
	  return ret;
	}

      if (addr != addrs[offset])
	{
	  stream->msg = "incorrect decoded address";
	  return XD3_INTERNAL;
	}
    }

  /* Check that every byte, mode was used. */
  if (buf != buf_max)
    {
      stream->msg = "address bytes not used";
      return XD3_INTERNAL;
    }

  for (i = 0; i < (2 + stream->acache.s_same + stream->acache.s_near); i += 1)
    {
      if (mode_counts[i] == 0)
	{
	  stream->msg = "address mode not used";
	  return XD3_INTERNAL;
	}
    }

  xd3_free (stream, modes);
  xd3_free (stream, addrs);
  xd3_free (stream, big_buf);

  return 0;
}

/***********************************************************************
 Encode and decode with single bit error
 ***********************************************************************/

/* It compresses from 256 to around 185 bytes.
 * Avoids matching addresses that are a single-bit difference.
 * Avoids matching address 0. */
static const uint8_t test_text[] =
"this is a story\n"
"abouttttttttttt\n"
"- his is a stor\n"
"- about nothing "
" all. boutique -"
"his story is a -"
"about "
"what happens all"
" the time what -"
"am I ttttttt the"
" person said, so"
" what, per son -"
" gory story is -"
" about nothing -"
"tttttt to test -"
"his sto nothing";

static const uint8_t test_apphead[] = "header test";

/* Encode test_text with a fixed soft-matcher configuration so encoded
 * sizes are deterministic across runs; restores the stream's original
 * flags afterwards. */
static int
test_compress_text (xd3_stream *stream,
		    uint8_t *encoded,
		    usize_t *encoded_size)
{
  int ret;
  xd3_config cfg;
  int oflags = stream->flags;
  int flags  = stream->flags | XD3_FLUSH;

  xd3_free_stream (stream);
  xd3_init_config (& cfg, flags);

  /* This configuration is fixed so that the "expected non-error" the counts in
   * decompress_single_bit_errors are too.  See test_coftcfg_str. */
  cfg.smatch_cfg = XD3_SMATCH_SOFT;
  cfg.smatcher_soft.name = "test";
  cfg.smatcher_soft.large_look = 64; /* no source, not used */
  cfg.smatcher_soft.large_step = 64; /* no source, not used */
  cfg.smatcher_soft.small_look = 4;
  cfg.smatcher_soft.small_chain = 128;
  cfg.smatcher_soft.small_lchain = 16;
  cfg.smatcher_soft.max_lazy = 8;
  cfg.smatcher_soft.long_enough = 128;

  xd3_config_stream (stream, & cfg);

  (*encoded_size) = 0;

  xd3_set_appheader (stream, test_apphead,
		     (usize_t) strlen ((char*) test_apphead));

  if ((ret = xd3_encode_stream (stream, test_text, sizeof (test_text),
				encoded, encoded_size, 4*sizeof (test_text))))
    {
      goto fail;
    }

  if ((ret = xd3_close_stream (stream))) { goto fail; }

 fail:
  xd3_free_stream (stream);
  xd3_init_config (& cfg, oflags);
  xd3_config_stream (stream, & cfg);
  return ret;
}

/* Decode ENC in chunks of test_desize bytes, verifying the appheader
 * and that the output equals test_text exactly. */
static int
test_decompress_text (xd3_stream *stream, uint8_t *enc, usize_t enc_size,
		      usize_t test_desize)
{
  xd3_config cfg;
  char decoded[sizeof (test_text)];
  uint8_t *apphead;
  usize_t apphead_size;
  usize_t decoded_size;
  const char *msg;
  int  ret;
  usize_t pos = 0;
  int flags = stream->flags;
  usize_t take;

 input:
  /* Test decoding test_desize input bytes at a time */
  take = min (enc_size - pos, test_desize);
  CHECK(take > 0);

  xd3_avail_input (stream, enc + pos, take);
 again:
  ret = xd3_decode_input (stream);
  pos += take;
  take = 0;

  switch (ret)
    {
    case XD3_OUTPUT: break;
    case XD3_WINSTART:
    case XD3_GOTHEADER: goto again;
    case XD3_INPUT:
      if (pos < enc_size) { goto input; }
      /* else fallthrough */
    case XD3_WINFINISH:
    default: goto fail;
    }

  CHECK(ret == XD3_OUTPUT);
  CHECK(pos == enc_size);

  if (stream->avail_out != sizeof (test_text))
    {
      stream->msg = "incorrect output size";
      ret = XD3_INTERNAL;
      goto fail;
    }

  decoded_size = stream->avail_out;
  memcpy (decoded, stream->next_out, stream->avail_out);

  xd3_consume_output (stream);

  if ((ret = xd3_get_appheader (stream, & apphead, & apphead_size))) { goto fail; }

  if (apphead_size != strlen ((char*) test_apphead) ||
      memcmp (apphead, test_apphead, strlen ((char*) test_apphead)) != 0)
    {
      stream->msg = "incorrect appheader";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if ((ret = xd3_decode_input (stream)) != XD3_WINFINISH ||
      (ret = xd3_close_stream (stream)) != 0)
    {
      goto fail;
    }

  if (decoded_size != sizeof (test_text) ||
      memcmp (decoded, test_text, sizeof (test_text)) != 0)
    {
      stream->msg = "incorrect output text";
      /* EIO here distinguishes a content mismatch from other failures;
       * the single-bit-error driver checks for it specially. */
      ret = EIO;
    }

 fail:
  /* Preserve stream->msg across the free/re-config cycle. */
  msg = stream->msg;
  xd3_free_stream (stream);
  xd3_init_config (& cfg, flags);
  xd3_config_stream (stream, & cfg);
  stream->msg = msg;

  return ret;
}

/* Flip every bit of the encoded delta one at a time; a decode that
 * still succeeds is a "non-failure", and the total must match
 * expected_non_failures.  With checksumming on, a content mismatch
 * (EIO) is never acceptable. */
static int
test_decompress_single_bit_error (xd3_stream *stream, int expected_non_failures)
{
  int ret;
  usize_t i;
  uint8_t encoded[4*sizeof (test_text)]; /* make room for alt code table */
  usize_t  encoded_size;
  int non_failures = 0;
  int cksum = (stream->flags & XD3_ADLER32) != 0;

//#define DEBUG_TEST_FAILURES
#ifndef DEBUG_TEST_FAILURES
#define TEST_FAILURES()
#else
  /* For checking non-failure cases by hand, enable this macro and run
   * xdelta printdelta with print_cpymode disabled.  Every non-failure
   * should change a copy address mode, which doesn't cause a failure
   * because the address cache starts out with all zeros.

    ./xdelta3 test
    for i in test_text.xz.*; do ./xdelta3 printdelta $i > $i.out; diff $i.out test_text.xz.0.out; done

   */
  system ("rm -rf test_text.*");
  {
    char buf[TESTBUFSIZE];
    FILE *f;
    snprintf_func (buf, TESTBUFSIZE, "test_text");
    f = fopen (buf, "w");
    fwrite (test_text,1,sizeof (test_text),f);
    fclose (f);
  }
#define TEST_FAILURES()                                                 \
  do {                                                                  \
    char buf[TESTBUFSIZE];                                              \
    FILE *f;                                                            \
    snprintf_func (buf, TESTBUFSIZE, "test_text.xz.%d", non_failures);  \
    f = fopen (buf, "w");                                               \
    fwrite (encoded,1,encoded_size,f);                                  \
    fclose (f);                                                         \
  } while (0)
#endif

  stream->sec_data.inefficient = 1;
  stream->sec_inst.inefficient = 1;
  stream->sec_addr.inefficient = 1;

  /* Encode text, test correct input */
  if ((ret = test_compress_text (stream, encoded, & encoded_size)))
    {
      /*stream->msg = "without error: encode failure";*/
      return ret;
    }
  if ((ret = test_decompress_text (stream, encoded, encoded_size,
				   sizeof (test_text) / 4)))
    {
      /*stream->msg = "without error: decode failure";*/
      return ret;
    }

  TEST_FAILURES();

  for (i = 0; i < encoded_size*8; i += 1)
    {
      /* Single bit error. */
      encoded[i/8] ^= 1 << (i%8);

      if ((ret = test_decompress_text (stream, encoded, encoded_size,
				       sizeof (test_text))) == 0)
	{
	  non_failures += 1;
#ifdef DEBUG_TEST_FAILURES
	  XPR(NT "%u[%u] non-failure %u\n", i/8, i%8, non_failures);
#endif
	  TEST_FAILURES();
	}
      else
	{
	  /*XPR(NT "%u[%u] failure: %s\n", i/8, i%8, stream->msg);*/
	}

      /* decompress_text returns EIO when the final memcmp() fails, but that
       * should never happen with checksumming on. */
      if (cksum && ret == EIO)
	{
	  /*XPR(NT "%u[%u] cksum mismatch\n", i/8, i%8);*/
	  stream->msg = "checksum mismatch";
	  return XD3_INTERNAL;
	}

      /* Undo single bit error.
       */
      encoded[i/8] ^= 1 << (i%8);
    }

  /* Test correct input again */
  if ((ret = test_decompress_text (stream, encoded, encoded_size, 1)))
    {
      /*stream->msg = "without error: decode failure";*/
      return ret;
    }

  /* Check expected non-failures */
  if (non_failures != expected_non_failures)
    {
      XPR(NT "non-failures %u; expected %u",
	  non_failures, expected_non_failures);
      stream->msg = "incorrect";
      return XD3_INTERNAL;
    }

  DOT ();

  return 0;
}

/***********************************************************************
 Secondary compression tests
 ***********************************************************************/

#if SECONDARY_ANY

/* A distribution generator: fills DATA with a particular byte
 * distribution to exercise the secondary compressors. */
typedef int (*sec_dist_func) (xd3_stream *stream, xd3_output *data);

static int sec_dist_func1 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func2 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func3 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func4 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func5 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func6 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func7 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func8 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func9 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func10 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func11 (xd3_stream *stream, xd3_output *data);

static sec_dist_func sec_dists[] =
{
  sec_dist_func1,
  sec_dist_func2,
  sec_dist_func3,
  sec_dist_func4,
  sec_dist_func5,
  sec_dist_func6,
  sec_dist_func7,
  sec_dist_func8,
  sec_dist_func9,
  sec_dist_func10,
  sec_dist_func11,
};

/* Test distribution: 100 bytes of the same character (13). */
static int sec_dist_func1 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < 100; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, 13))) { return ret; }
    }
  return 0;
}

/* Test distribution: uniform covering half the alphabet.
*/ static int sec_dist_func2 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { if ((ret = xd3_emit_byte (stream, & data, i%(ALPHABET_SIZE/2)))) { return ret; } } return 0; } /* Test ditsribution: uniform covering the entire alphabet. */ static int sec_dist_func3 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { if ((ret = xd3_emit_byte (stream, & data, i%ALPHABET_SIZE))) { return ret; } } return 0; } /* Test distribution: An exponential distribution covering half the alphabet */ static int sec_dist_func4 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_exp_rand (10, ALPHABET_SIZE/2); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An exponential distribution covering the entire alphabet */ static int sec_dist_func5 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_exp_rand (10, ALPHABET_SIZE-1); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An uniform random distribution covering half the alphabet */ static int sec_dist_func6 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_random (&static_mtrand) % (ALPHABET_SIZE/2); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An uniform random distribution covering the entire alphabet */ static int sec_dist_func7 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*200; i += 1) { x = mt_random (&static_mtrand) % ALPHABET_SIZE; if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: A small number of frequent characters, difficult * to divide into many groups */ static int sec_dist_func8 (xd3_stream *stream, xd3_output *data) { int i, ret; for 
(i = 0; i < ALPHABET_SIZE*5; i += 1) { if ((ret = xd3_emit_byte (stream, & data, 0))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 64))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 128))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 255))) { return ret; } } return 0; } /* Test distribution: One that causes many FGK block promotions (found a bug) */ static int sec_dist_func9 (xd3_stream *stream, xd3_output *data) { int i, ret; int ramp = 0; int rcount = 0; int prom = 0; int pcount = 0; /* 200 was long enough to trigger it--only when stricter checking * that counted all blocks was turned on, but it seems I deleted * this code. (missing fgk_free_block on line 398). */ for (i = 0; i < ALPHABET_SIZE*200; i += 1) { repeat: if (ramp < ALPHABET_SIZE) { /* Initially Nth symbol has (N+1) frequency */ if (rcount <= ramp) { rcount += 1; if ((ret = xd3_emit_byte (stream, & data, ramp))) { return ret; } continue; } ramp += 1; rcount = 0; goto repeat; } /* Thereafter, promote least freq to max freq */ if (pcount == ALPHABET_SIZE) { pcount = 0; prom = (prom + 1) % ALPHABET_SIZE; } pcount += 1; if ((ret = xd3_emit_byte (stream, & data, prom))) { return ret; } } return 0; } /* Test distribution: freq[i] == i*i, creates a 21-bit code length, fixed in 3.0r. 
 */
static int sec_dist_func10 (xd3_stream *stream, xd3_output *data)
{
  /* Symbol i appears (i*i + 1) times: quadratically skewed counts. */
  int i, j, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      for (j = 0; j <= (i*i); j += 1)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
    }
  return 0;
}

/* Test distribution: fibonacci */
static int sec_dist_func11 (xd3_stream *stream, xd3_output *data)
{
  /* Symbol i appears fib(i) times: the worst case for code lengths. */
  int sum0 = 0;
  int sum1 = 1;
  int i, j, ret;
  for (i = 0; i < 33; ++i)
    {
      for (j = 0; j < (sum0 + sum1); ++j)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
      sum0 = sum1;
      sum1 = j;
    }
  return 0;
}

/* Decode COMPRESS_SIZE bytes of secondary-compressed input and verify
 * the result matches DEC_CORRECT exactly, with no input left over and
 * no output missing. */
static int
test_secondary_decode (xd3_stream         *stream,
		       const xd3_sec_type *sec,
		       usize_t             input_size,
		       usize_t             compress_size,
		       const uint8_t       *dec_input,
		       const uint8_t       *dec_correct,
		       uint8_t             *dec_output)
{
  int ret;
  xd3_sec_stream *dec_stream;
  const uint8_t *dec_input_used, *dec_input_end;
  uint8_t *dec_output_used, *dec_output_end;

  if ((dec_stream = sec->alloc (stream)) == NULL) { return ENOMEM; }

  if ((ret = sec->init (stream, dec_stream, 0)) != 0) { goto fail; }

  dec_input_used = dec_input;
  dec_input_end  = dec_input + compress_size;

  dec_output_used = dec_output;
  dec_output_end  = dec_output + input_size;

  if ((ret = sec->decode (stream, dec_stream,
			  & dec_input_used, dec_input_end,
			  & dec_output_used, dec_output_end)))
    {
      goto fail;
    }

  if (dec_input_used != dec_input_end)
    {
      stream->msg = "unused input";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (dec_output_used != dec_output_end)
    {
      stream->msg = "unfinished output";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (memcmp (dec_output, dec_correct, input_size) != 0)
    {
      stream->msg = "incorrect output";
      ret = XD3_INTERNAL;
      goto fail;
    }

 fail:
  sec->destroy (stream, dec_stream);
  return ret;
}

/* Drive one secondary compressor (SEC) over every distribution in
 * sec_dists, for group counts 1..GROUPS: encode, decode, verify, then
 * fuzz the first 10 compressed bytes bit-by-bit. */
static int
test_secondary (xd3_stream *stream, const xd3_sec_type *sec, usize_t groups)
{
  usize_t test_i;
  int ret;
  xd3_output *in_head, *out_head, *p;
  usize_t p_off, input_size, compress_size;
  uint8_t *dec_input = NULL, *dec_output = NULL, *dec_correct = NULL;
  xd3_sec_stream *enc_stream;
  xd3_sec_cfg cfg;

  memset (& cfg, 0, sizeof (cfg));

  cfg.inefficient = 1;

  for (cfg.ngroups = 1; cfg.ngroups <= groups; cfg.ngroups += 1)
    {
      XPR(NTR "\n...");
      for (test_i = 0; test_i < SIZEOF_ARRAY (sec_dists); test_i += 1)
	{
	  mt_init (& static_mtrand, 0x9f73f7fc);

	  in_head  = xd3_alloc_output (stream, NULL);
	  out_head = xd3_alloc_output (stream, NULL);
	  enc_stream = sec->alloc (stream);
	  dec_input = NULL;
	  dec_output = NULL;
	  dec_correct = NULL;

	  if (in_head == NULL || out_head == NULL || enc_stream == NULL)
	    {
	      goto nomem;
	    }

	  if ((ret = sec_dists[test_i] (stream, in_head))) { goto fail; }

	  if ((ret = sec->init (stream, enc_stream, 1)) != 0) { goto fail; }

	  /* Encode data */
	  if ((ret = sec->encode (stream, enc_stream, in_head, out_head, & cfg)))
	    {
	      XPR(NT "test %u: encode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Calculate sizes, allocate contiguous arrays for decoding */
	  input_size    = xd3_sizeof_output (in_head);
	  compress_size = xd3_sizeof_output (out_head);

	  XPR(NTR "%.3f", 8.0 * (double) compress_size / (double) input_size);

	  if ((dec_input   = (uint8_t*) xd3_alloc (stream, compress_size, 1)) == NULL ||
	      (dec_output  = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL ||
	      (dec_correct = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL)
	    {
	      goto nomem;
	    }

	  /* Fill the compressed data array */
	  for (p_off = 0, p = out_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_input + p_off, p->base, p->next);
	    }

	  CHECK(p_off == compress_size);

	  /* Fill the input data array */
	  for (p_off = 0, p = in_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_correct + p_off, p->base, p->next);
	    }

	  CHECK(p_off == input_size);

	  if ((ret = test_secondary_decode (stream, sec, input_size,
					    compress_size, dec_input,
					    dec_correct, dec_output)))
	    {
	      XPR(NT "test %u: decode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Single-bit error test, only cover the first 10 bytes.
	   * Some non-failures are expected in the Huffman case:
	   * Changing the clclen array, for example, may not harm the
	   * decoding.  Really looking for faults here.
	   */
	  {
	    int i;
	    int bytes = min (compress_size, 10U);
	    for (i = 0; i < bytes * 8; i += 1)
	      {
		dec_input[i/8] ^= 1 << (i%8);

		if ((ret = test_secondary_decode (stream, sec, input_size,
						  compress_size, dec_input,
						  dec_correct, dec_output)) == 0)
		  {
		    /*XPR(NT "test %u: decode single-bit [%u/%u] error non-failure", test_i, i/8, i%8);*/
		  }

		dec_input[i/8] ^= 1 << (i%8);

		if ((i % (2*bytes)) == (2*bytes)-1)
		  {
		    DOT ();
		  }
	      }
	    ret = 0;
	  }

	  if (0)
	    {
	    nomem:
	      ret = ENOMEM;
	    }

	fail:
	  sec->destroy (stream, enc_stream);
	  xd3_free_output (stream, in_head);
	  xd3_free_output (stream, out_head);
	  xd3_free (stream, dec_input);
	  xd3_free (stream, dec_output);
	  xd3_free (stream, dec_correct);

	  if (ret != 0)
	    {
	      return ret;
	    }
	}
    }

  return 0;
}

IF_FGK (static int test_secondary_fgk (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & fgk_sec_type, gp); })
IF_DJW (static int test_secondary_huff (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & djw_sec_type, gp); })
IF_LZMA (static int test_secondary_lzma (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & lzma_sec_type, gp); })

#endif

/***********************************************************************
 TEST INSTRUCTION TABLE
 ***********************************************************************/

/* Test that xd3_choose_instruction() does the right thing for its code
 * table.
*/ static int test_choose_instruction (xd3_stream *stream, int ignore) { int i; stream->code_table = (*stream->code_table_func) (); for (i = 0; i < 256; i += 1) { const xd3_dinst *d = stream->code_table + i; xd3_rinst prev, inst; CHECK(d->type1 > 0); memset (& prev, 0, sizeof (prev)); memset (& inst, 0, sizeof (inst)); if (d->type2 == 0) { inst.type = d->type1; if ((inst.size = d->size1) == 0) { inst.size = TESTBUFSIZE; } XD3_CHOOSE_INSTRUCTION (stream, NULL, & inst); if (inst.code2 != 0 || inst.code1 != i) { stream->msg = "wrong single instruction"; return XD3_INTERNAL; } } else { prev.type = d->type1; prev.size = d->size1; inst.type = d->type2; inst.size = d->size2; XD3_CHOOSE_INSTRUCTION (stream, & prev, & inst); if (prev.code2 != i) { stream->msg = "wrong double instruction"; return XD3_INTERNAL; } } } return 0; } /*********************************************************************** TEST INSTRUCTION TABLE CODING ***********************************************************************/ #if GENERIC_ENCODE_TABLES /* Test that encoding and decoding a code table works */ static int test_encode_code_table (xd3_stream *stream, int ignore) { int ret; const uint8_t *comp_data; usize_t comp_size; if ((ret = xd3_compute_alternate_table_encoding (stream, & comp_data, & comp_size))) { return ret; } stream->acache.s_near = __alternate_code_table_desc.near_modes; stream->acache.s_same = __alternate_code_table_desc.same_modes; if ((ret = xd3_apply_table_encoding (stream, comp_data, comp_size))) { return ret; } if (memcmp (stream->code_table, xd3_alternate_code_table (), sizeof (xd3_dinst) * 256) != 0) { stream->msg = "wrong code table reconstruction"; return XD3_INTERNAL; } return 0; } #endif /*********************************************************************** 64BIT STREAMING ***********************************************************************/ /* This test encodes and decodes a series of 1 megabyte windows, each * containing a long run of zeros along with a single 
xoff_t size
 * record to indicate the sequence. */
static int
test_streaming (xd3_stream *in_stream, uint8_t *encbuf, uint8_t *decbuf,
                uint8_t *delbuf, usize_t megs)
{
  xd3_stream estream, dstream;
  int ret;
  usize_t i, delsize, decsize;
  xd3_config cfg;

  xd3_init_config (& cfg, in_stream->flags);
  cfg.flags |= XD3_COMPLEVEL_6;

  if ((ret = xd3_config_stream (& estream, & cfg)) ||
      (ret = xd3_config_stream (& dstream, & cfg)))
    {
      goto fail;
    }

  for (i = 0; i < megs; i += 1)
    {
      /* Stamp the window number into the first word so each 1MB
       * window is distinct.  (Deliberate type-punned write into the
       * caller's byte buffer.) */
      ((usize_t*) encbuf)[0] = i;

      if ((i % 200) == 199) { DOT (); }

      if ((ret = xd3_process_stream (1, & estream, xd3_encode_input, 0,
                                     encbuf, 1 << 20,
                                     delbuf, & delsize, 1 << 20)))
        {
          in_stream->msg = estream.msg;
          goto fail;
        }

      if ((ret = xd3_process_stream (0, & dstream, xd3_decode_input, 0,
                                     delbuf, delsize,
                                     decbuf, & decsize, 1 << 20)))
        {
          in_stream->msg = dstream.msg;
          goto fail;
        }

      if (decsize != 1 << 20 ||
          memcmp (encbuf, decbuf, 1 << 20) != 0)
        {
          in_stream->msg = "wrong result";
          ret = XD3_INTERNAL;
          goto fail;
        }
    }

  if ((ret = xd3_close_stream (& estream)) ||
      (ret = xd3_close_stream (& dstream)))
    {
      goto fail;
    }

 fail:
  xd3_free_stream (& estream);
  xd3_free_stream (& dstream);

  return ret;
}

/* Run tests of data streaming of over and around 4GB of data. */
static int
test_compressed_stream_overflow (xd3_stream *stream, int ignore)
{
  int ret;
  int i;
  uint8_t *buf;

  if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; }

  memset (buf, 0, TWO_MEGS_AND_DELTA);

  /* Fill with mildly-random 256-byte ramps so the data is not a
   * trivially compressible run. */
  for (i = 0; i < (2 << 20); i += 256)
    {
      int j;
      int off = mt_random(& static_mtrand) % 10;
      for (j = 0; j < 256; j++)
        {
          buf[i + j] = j + off;
        }
    }

  /* Test overflow of a 32-bit file offset: (2^12 + 1) 1MB windows is
   * just past 4GB, which must be rejected when xoff_t is 32 bits. */
  if (SIZEOF_XOFF_T == 4)
    {
      ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), (1 << 12) + 1);

      if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow"))
        {
          ret = 0;
        }
      else
        {
          XPR(NT XD3_LIB_ERRMSG (stream, ret));
          stream->msg = "expected overflow condition";
          ret = XD3_INTERNAL;
          goto fail;
        }
    }

  /* Test transfer of exactly 32bits worth of data. */
  if ((ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), 1 << 12)))
    {
      goto fail;
    }
 fail:
  free (buf);
  return ret;
}

/***********************************************************************
 COMMAND LINE
 ***********************************************************************/

#if SHELL_TESTS
/* For each pair of command templates in the array below, test that
 * encoding and decoding commands work.  Also check for the expected
 * size delta, which should be approximately TEST_ADD_RATIO times the
 * file size created by test_make_inputs.  Due to differences in the
 * application header, it is suppressed (-A) so that all delta files
 * are the same.
*/
static int
test_command_line_arguments (xd3_stream *stream, int ignore)
{
  int i, ret;

  static const char* cmdpairs[] =
  {
    /* standard input, output */
    "%s %s -A < %s > %s", "%s -d < %s > %s",
    "%s %s -A -e < %s > %s", "%s -d < %s > %s",
    "%s %s -A= encode < %s > %s", "%s decode < %s > %s",
    "%s %s -A -q encode < %s > %s", "%s -qdq < %s > %s",

    /* file input, standard output */
    "%s %s -A= %s > %s", "%s -d %s > %s",
    "%s %s -A -e %s > %s", "%s -d %s > %s",
    "%s %s encode -A= %s > %s", "%s decode %s > %s",

    /* file input, output */
    "%s %s -A= %s %s", "%s -d %s %s",
    "%s %s -A -e %s %s", "%s -d %s %s",
    "%s %s -A= encode %s %s", "%s decode %s %s",

    /* option placement */
    "%s %s -A -f %s %s", "%s -f -d %s %s",
    "%s %s -e -A= %s %s", "%s -d -f %s %s",
    "%s %s -f encode -A= %s %s", "%s -f decode -f %s %s",
  };

  char ecmd[TESTBUFSIZE], dcmd[TESTBUFSIZE];
  int pairs = SIZEOF_ARRAY (cmdpairs) / 2;
  xoff_t tsize;
  xoff_t dsize;
  double ratio;

  mt_init (& static_mtrand, 0x9f73f7fc);

  for (i = 0; i < pairs; i += 1)
    {
      test_setup ();
      if ((ret = test_make_inputs (stream, NULL, & tsize))) { return ret; }

      /* Instantiate the encode/decode command templates for this pair. */
      snprintf_func (ecmd, TESTBUFSIZE, cmdpairs[2*i], program_name,
                     test_softcfg_str, TEST_TARGET_FILE, TEST_DELTA_FILE);
      snprintf_func (dcmd, TESTBUFSIZE, cmdpairs[2*i+1], program_name,
                     TEST_DELTA_FILE, TEST_RECON_FILE);

      /* Encode and decode. */
      if ((ret = system (ecmd)) != 0)
        {
          XPR(NT "encode command: %s\n", ecmd);
          stream->msg = "encode cmd failed";
          return XD3_INTERNAL;
        }

      if ((ret = system (dcmd)) != 0)
        {
          XPR(NT "decode command: %s\n", dcmd);
          stream->msg = "decode cmd failed";
          return XD3_INTERNAL;
        }

      /* Compare the target file. */
      if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
        {
          return ret;
        }

      if ((ret = test_file_size (TEST_DELTA_FILE, & dsize)))
        {
          return ret;
        }

      ratio = (double) dsize / (double) tsize;

      /* Check that it is not too small, not too large. */
      if (ratio >= TEST_ADD_RATIO + TEST_EPSILON)
        {
          XPR(NT "test encode with size ratio %.4f, "
              "expected < %.4f (%"Q"u, %"Q"u)\n",
              ratio, TEST_ADD_RATIO + TEST_EPSILON, dsize, tsize);
          stream->msg = "strange encoding";
          return XD3_INTERNAL;
        }

      if (ratio <= TEST_ADD_RATIO * (1.0 - 2 * TEST_EPSILON))
        {
          XPR(NT "test encode with size ratio %.4f, "
              "expected > %.4f\n",
              ratio, TEST_ADD_RATIO - TEST_EPSILON);
          stream->msg = "strange encoding";
          return XD3_INTERNAL;
        }

      /* Also check that test_compare_files works.  The delta and original should
       * not be identical. */
      if ((ret = test_compare_files (TEST_DELTA_FILE,
                                     TEST_TARGET_FILE)) == 0)
        {
          stream->msg = "broken test_compare_files";
          return XD3_INTERNAL;
        }

      test_cleanup ();
      DOT ();
    }

  return 0;
}

/* Run "xdelta3 printhdr" on INPUT and grep its output for a header
 * line beginning with LINE_START and containing MATCHES; YES_OR_NO
 * selects whether the grep is expected to succeed or fail. */
static int
check_vcdiff_header (xd3_stream *stream,
                     const char *input,
                     const char *line_start,
                     const char *matches,
                     int yes_or_no)
{
  int ret;
  char vcmd[TESTBUFSIZE], gcmd[TESTBUFSIZE];

  snprintf_func (vcmd, TESTBUFSIZE, "%s printhdr -f %s %s",
                 program_name, input, TEST_RECON2_FILE);

  if ((ret = system (vcmd)) != 0)
    {
      XPR(NT "printhdr command: %s\n", vcmd);
      stream->msg = "printhdr cmd failed";
      return XD3_INTERNAL;
    }

  snprintf_func (gcmd, TESTBUFSIZE, "grep \"%s.*%s.*\" %s > /dev/null",
                 line_start, matches, TEST_RECON2_FILE);

  if (yes_or_no)
    {
      if ((ret = do_cmd (stream, gcmd)))
        {
          XPR(NT "%s\n", gcmd);
          return ret;
        }
    }
  else
    {
      if ((ret = do_fail (stream, gcmd)))
        {
          XPR(NT "%s\n", gcmd);
          return ret;
        }
    }

  return 0;
}

/* One recode scenario: VARIANT selects the features of the original
 * encoding (adler32/apphead/secondary as bits 0/1/2) and CHANGE
 * selects which of those the recode step toggles.  Verifies the
 * recoded header flags and that the final decode reproduces the
 * target. */
static int
test_recode_command2 (xd3_stream *stream, int has_source,
                      int variant, int change)
{
  int has_adler32 = (variant & 0x1) != 0;
  int has_apphead = (variant & 0x2) != 0;
  int has_secondary = (variant & 0x4) != 0;

  int change_adler32 = (change & 0x1) != 0;
  int change_apphead = (change & 0x2) != 0;
  int change_secondary = (change & 0x4) != 0;

  int recoded_adler32 = change_adler32 ? !has_adler32 : has_adler32;
  int recoded_apphead = change_apphead ? !has_apphead : has_apphead;
  int recoded_secondary = change_secondary ? !has_secondary : has_secondary;

  char ecmd[TESTBUFSIZE], recmd[TESTBUFSIZE], dcmd[TESTBUFSIZE];
  xoff_t tsize, ssize;
  int ret;

  test_setup ();

  if ((ret = test_make_inputs (stream, has_source ? & ssize : NULL, & tsize)))
    {
      return ret;
    }

  /* First encode */
  snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",
                 program_name, test_softcfg_str,
                 has_adler32 ? "" : "-n ",
                 has_apphead ? "-A=encode_apphead " : "-A= ",
                 has_secondary ? "-S djw " : "-S none ",
                 has_source ? "-s " : "",
                 has_source ? TEST_SOURCE_FILE : "",
                 TEST_TARGET_FILE,
                 TEST_DELTA_FILE);

  if ((ret = system (ecmd)) != 0)
    {
      XPR(NT "encode command: %s\n", ecmd);
      stream->msg = "encode cmd failed";
      return XD3_INTERNAL;
    }

  /* Now recode */
  snprintf_func (recmd, TESTBUFSIZE,
                 "%s recode %s -f %s %s %s %s %s", program_name, test_softcfg_str,
                 recoded_adler32 ? "" : "-n ",
                 !change_apphead ? "" :
                     (recoded_apphead ? "-A=recode_apphead " : "-A= "),
                 recoded_secondary ? "-S djw " : "-S none ",
                 TEST_DELTA_FILE,
                 TEST_COPY_FILE);

  if ((ret = system (recmd)) != 0)
    {
      XPR(NT "recode command: %s\n", recmd);
      stream->msg = "recode cmd failed";
      return XD3_INTERNAL;
    }

  /* Check recode changes. */
  if ((ret = check_vcdiff_header (stream,
                                  TEST_COPY_FILE,
                                  "VCDIFF window indicator",
                                  "VCD_SOURCE",
                                  has_source))) { return ret; }

  if ((ret = check_vcdiff_header (stream,
                                  TEST_COPY_FILE,
                                  "VCDIFF header indicator",
                                  "VCD_SECONDARY",
                                  recoded_secondary))) { return ret; }

  if ((ret = check_vcdiff_header (stream,
                                  TEST_COPY_FILE,
                                  "VCDIFF window indicator",
                                  "VCD_ADLER32",
                                  /* Recode can't generate an adler32
                                   * checksum, it can only preserve it or
                                   * remove it. */
                                  has_adler32 && recoded_adler32)))
    {
      return ret;
    }

  if (!change_apphead)
    {
      if ((ret = check_vcdiff_header (stream,
                                      TEST_COPY_FILE,
                                      "VCDIFF header indicator",
                                      "VCD_APPHEADER",
                                      has_apphead))) { return ret; }
      if ((ret = check_vcdiff_header (stream,
                                      TEST_COPY_FILE,
                                      "VCDIFF application header",
                                      "encode_apphead",
                                      has_apphead))) { return ret; }
    }
  else
    {
      if ((ret = check_vcdiff_header (stream,
                                      TEST_COPY_FILE,
                                      "VCDIFF header indicator",
                                      "VCD_APPHEADER",
                                      recoded_apphead))) { return ret; }
      if (recoded_apphead &&
          (ret = check_vcdiff_header (stream,
                                      TEST_COPY_FILE,
                                      "VCDIFF application header",
                                      "recode_apphead", 1))) { return ret; }
    }

  /* Now decode */
  snprintf_func (dcmd, TESTBUFSIZE, "%s -fd %s %s %s %s ", program_name,
                 has_source ? "-s " : "",
                 has_source ? TEST_SOURCE_FILE : "",
                 TEST_COPY_FILE,
                 TEST_RECON_FILE);

  if ((ret = system (dcmd)) != 0)
    {
      XPR(NT "decode command: %s\n", dcmd);
      stream->msg = "decode cmd failed";
      return XD3_INTERNAL;
    }

  /* Now compare. */
  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
    {
      return ret;
    }

  return 0;
}

static int
test_recode_command (xd3_stream *stream, int ignore)
{
  /* Things to test:
   * - with and without a source file (recode does not change)
   *
   * (recode may or may not change -- 8 variations)
   * - with and without adler32
   * - with and without app header
   * - with and without secondary
   */
  int has_source;
  int variant;
  int change;
  int ret;

  for (has_source = 0; has_source < 2; has_source++)
    {
      for (variant = 0; variant < 8; variant++)
        {
          for (change = 0; change < 8; change++)
            {
              if ((ret = test_recode_command2 (stream, has_source,
                                               variant, change)))
                {
                  return ret;
                }
            }
          DOT ();
        }
    }

  return 0;
}
#endif

/***********************************************************************
 EXTERNAL I/O DECOMPRESSION/RECOMPRESSION
 ***********************************************************************/

#if EXTERNAL_COMPRESSION
/* This performs one step of the test_externally_compressed_io
 * function described below.
It builds a pipe containing both Xdelta
 * and external compression/decompression that should not modify the
 * data passing through. */
static int
test_compressed_pipe (xd3_stream *stream, main_extcomp *ext, char* buf,
                      const char* comp_options, const char* decomp_options,
                      int do_ext_recomp, const char* msg)
{
  int ret;
  char decomp_buf[TESTBUFSIZE];

  if (do_ext_recomp)
    {
      /* Append the external decompressor to the end of the pipeline. */
      snprintf_func (decomp_buf, TESTBUFSIZE,
                     " | %s %s", ext->decomp_cmdname, ext->decomp_options);
    }
  else
    {
      decomp_buf[0] = 0;
    }

  snprintf_func (buf, TESTBUFSIZE, "%s %s < %s | %s %s | %s %s%s > %s",
                 ext->recomp_cmdname, ext->recomp_options,
                 TEST_TARGET_FILE,
                 program_name, comp_options,
                 program_name, decomp_options,
                 decomp_buf,
                 TEST_RECON_FILE);

  if ((ret = system (buf)) != 0)
    {
      stream->msg = msg;
      return XD3_INTERNAL;
    }

  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
    {
      return XD3_INTERNAL;
    }

  DOT ();
  return 0;
}

/* We want to test that a pipe such as:
 *
 * --> | gzip -cf | xdelta3 -cf | xdelta3 -dcf | gzip -dcf | -->
 *
 * is transparent, i.e., does not modify the stream of data.  However,
 * we also want to verify that at the center the data is properly
 * compressed, i.e., that we do not just have a re-compressed gzip
 * format, that we have an VCDIFF format.  We do this in two steps.
 * First test the above pipe, then test with suppressed output
 * recompression (-D).  The result should be the original input:
 *
 * --> | gzip -cf | xdelta3 -cf | xdelta3 -Ddcf | -->
 *
 * Finally we want to test that -D also disables input decompression:
 *
 * --> | gzip -cf | xdelta3 -Dcf | xdelta3 -Ddcf | gzip -dcf | -->
 */
static int
test_externally_compressed_io (xd3_stream *stream, int ignore)
{
  usize_t i;
  int ret;
  char buf[TESTBUFSIZE];

  mt_init (& static_mtrand, 0x9f73f7fc);

  if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; }

  for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1)
    {
      main_extcomp *ext = & extcomp_types[i];

      /* Test for the existence of the external command first, if not skip. */
      snprintf_func (buf, TESTBUFSIZE, "%s %s < /dev/null > /dev/null",
                     ext->recomp_cmdname, ext->recomp_options);

      if ((ret = system (buf)) != 0)
        {
          XPR(NT "%s=0", ext->recomp_cmdname);
          continue;
        }

      if ((ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-dcfq", 1,
                                       "compression failed: identity pipe")) ||
          (ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-Rdcfq", 0,
                                       "compression failed: without recompression")) ||
          (ret = test_compressed_pipe (stream, ext, buf, "-Dcfq", "-Rdcfq", 1,
                                       "compression failed: without decompression")))
        {
          return ret;
        }
    }

  return 0;
}

/* This tests the proper functioning of external decompression for
 * source files.  The source and target files are identical and
 * compressed by gzip.  Decoding such a delta with recompression
 * disbaled (-R) should produce the original, uncompressed
 * source/target file.  Then it checks with output recompression
 * enabled--in this case the output should be a compressed copy of the
 * original source/target file.  Then it checks that encoding with
 * decompression disabled works--the compressed files are identical
 * and decoding them should always produce a compressed output,
 * regardless of -R since the encoded delta file had decompression
 * disabled.. */
static int
test_source_decompression (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];
  const main_extcomp *ext;
  xoff_t dsize;

  mt_init (& static_mtrand, 0x9f73f7fc);

  test_setup ();
  if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; }

  /* Use gzip. */
  if ((ext = main_get_compressor ("G")) == NULL)
    {
      XPR(NT "skipped");
      return 0;
    }

  /* Save an uncompressed copy. */
  if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; }

  /* Compress the source. */
  snprintf_func (buf, TESTBUFSIZE, "%s -1 %s < %s > %s",
                 ext->recomp_cmdname, ext->recomp_options,
                 TEST_COPY_FILE, TEST_SOURCE_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Compress the target. */
  snprintf_func (buf, TESTBUFSIZE, "%s -9 %s < %s > %s",
                 ext->recomp_cmdname, ext->recomp_options,
                 TEST_COPY_FILE, TEST_TARGET_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Now the two identical files are compressed.  Delta-encode the target,
   * with decompression. */
  snprintf_func (buf, TESTBUFSIZE, "%s -e -vfq -s%s %s %s", program_name,
                 TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Check that the compressed file is small (b/c inputs are
   * identical). */
  if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; }

  /* Deltas for identical files should be very small. */
  if (dsize > 200)
    {
      XPR(NT "external compression did not happen\n");
      stream->msg = "external compression did not happen";
      return XD3_INTERNAL;
    }

  /* Decode the delta file with recompression disabled, should get an
   * uncompressed file out. */
  snprintf_func (buf, TESTBUFSIZE, "%s -v -dq -R -s%s %s %s", program_name,
                 TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }
  if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON_FILE))) { return ret; }

  /* Decode the delta file with recompression, should get a compressed file
   * out.  But we can't compare compressed files directly. */
  snprintf_func (buf, TESTBUFSIZE, "%s -v -dqf -s%s %s %s", program_name,
                 TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }
  snprintf_func (buf, TESTBUFSIZE, "%s %s < %s > %s",
                 ext->decomp_cmdname, ext->decomp_options,
                 TEST_RECON_FILE, TEST_RECON2_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }
  if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON2_FILE))) { return ret; }

  /* Encode with decompression disabled */
  snprintf_func (buf, TESTBUFSIZE, "%s -e -D -vfq -s%s %s %s", program_name,
                 TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Decode the delta file with decompression disabled, should get the
   * identical compressed file out. */
  snprintf_func (buf, TESTBUFSIZE, "%s -d -D -vfq -s%s %s %s", program_name,
                 TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }
  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; }

  test_cleanup();
  return 0;
}
#endif

/***********************************************************************
 FORCE, STDOUT
 ***********************************************************************/

/* This tests that output will not overwrite an existing file unless
 * -f was specified.  The test is for encoding (the same code handles
 * it for decoding). */
static int
test_force_behavior (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];

  /* Create empty target file */
  test_setup ();
  snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Encode to delta file */
  snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name,
                 TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Encode again, should fail. */
  snprintf_func (buf, TESTBUFSIZE, "%s -q -e %s %s ", program_name,
                 TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_fail (stream, buf))) { return ret; }

  /* Force it, should succeed. */
  snprintf_func (buf, TESTBUFSIZE, "%s -f -e %s %s", program_name,
                 TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  test_cleanup();
  return 0;
}

/* This checks the proper operation of the -c flag.  When specified
 * the default output becomes stdout, otherwise the input must be
 * provided (encode) or it may be defaulted (decode w/ app header). */
static int
test_stdout_behavior (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];

  test_setup();
  snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Without -c, encode writes to delta file */
  snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name,
                 TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* With -c, encode writes to stdout */
  snprintf_func (buf, TESTBUFSIZE, "%s -e -c %s > %s", program_name,
                 TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Without -c, decode writes to target file name, but it fails because the
   * file exists. */
  snprintf_func (buf, TESTBUFSIZE, "%s -q -d %s ", program_name,
                 TEST_DELTA_FILE);
  if ((ret = do_fail (stream, buf))) { return ret; }

  /* With -c, decode writes to stdout */
  snprintf_func (buf, TESTBUFSIZE, "%s -d -c %s > /dev/null", program_name,
                 TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  test_cleanup();
  return 0;
}

/* This tests that the no-output flag (-J) works.
*/ static int test_no_output (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup (); snprintf_func (buf, TESTBUFSIZE, "touch %s && chmod 0000 %s", TEST_NOPERM_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Try no_output encode w/out unwritable output file */ snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now really write the delta to test decode no-output */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup (); return 0; } /* This tests that the default appheader works */ static int test_appheader (xd3_stream *stream, int ignore) { int i; int ret; char buf[TESTBUFSIZE]; char bogus[TESTBUFSIZE]; xoff_t ssize, tsize; test_setup (); if ((ret = test_make_inputs (stream, &ssize, &tsize))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e -s %s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_copy_to (program_name, TEST_RECON2_FILE))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "chmod 0700 %s", TEST_RECON2_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; } if ((ret = test_copy_to (TEST_SOURCE_FILE, 
TEST_TARGET_FILE))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) == 0) { return XD3_INVALID; // I.e., files are different! } // Test that the target file is restored. snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)", TEST_RECON2_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) != 0) { return ret; } // Test a malicious string w/ entries > 4 in the appheader by having // the encoder write it: for (i = 0; i < TESTBUFSIZE / 4; ++i) { bogus[2*i] = 'G'; bogus[2*i+1] = '/'; } bogus[TESTBUFSIZE/2-1] = 0; snprintf_func (buf, TESTBUFSIZE, "%s -q -f -A=%s -e -s %s %s %s", program_name, bogus, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } // Then read it: snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)", TEST_RECON2_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf)) == 0) { return XD3_INVALID; // Impossible } if (!WIFEXITED(ret)) { return XD3_INVALID; // Must have crashed! } return 0; } /*********************************************************************** Source identical optimization ***********************************************************************/ /* Computing a delta should be fastest when the two inputs are * identical, this checks it. The library is called to compute a * delta between a 10000 byte file, 1000 byte winsize, 500 byte source * blocksize. The same buffer is used for both source and target. */ static int test_identical_behavior (xd3_stream *stream, int ignore) { #define IDB_TGTSZ 10000 /* Not a power of two b/c of hard-coded expectations below. 
*/ #define IDB_BLKSZ 512 #define IDB_WINSZ 1000 #define IDB_DELSZ 1000 #define IDB_WINCNT (IDB_TGTSZ / IDB_WINSZ) int ret, i; uint8_t buf[IDB_TGTSZ]; uint8_t del[IDB_DELSZ]; uint8_t rec[IDB_TGTSZ]; xd3_source source; int nextencwin = 0; int winstarts = 0, winfinishes = 0; usize_t delpos = 0, recsize; xd3_config config; memset(&source, 0, sizeof(source)); for (i = 0; i < IDB_TGTSZ; i += 1) { buf[i] = (uint8_t) mt_random (&static_mtrand); } stream->winsize = IDB_WINSZ; source.blksize = IDB_BLKSZ; source.name = ""; source.curblk = NULL; source.curblkno = 0; if ((ret = xd3_set_source (stream, & source))) { goto fail; } /* Compute an delta between identical source and targets. */ for (;;) { ret = xd3_encode_input (stream); if (ret == XD3_INPUT) { xd3_avail_input (stream, buf + (IDB_WINSZ * nextencwin), IDB_WINSZ); nextencwin += 1; continue; } if (ret == XD3_GETSRCBLK) { source.curblkno = source.getblkno; source.onblk = IDB_BLKSZ; source.curblk = buf + source.getblkno * IDB_BLKSZ; continue; } if (ret == XD3_WINSTART) { winstarts++; continue; } if (ret == XD3_WINFINISH) { winfinishes++; if (winfinishes == IDB_WINCNT) { break; } continue; } if (ret != XD3_OUTPUT) { goto fail; } CHECK(delpos + stream->avail_out <= IDB_DELSZ); memcpy (del + delpos, stream->next_out, stream->avail_out); delpos += stream->avail_out; xd3_consume_output (stream); } CHECK(winfinishes == IDB_WINCNT); CHECK(winstarts == IDB_WINCNT); CHECK(nextencwin == IDB_WINCNT); /* Reset. */ memset(&source, 0, sizeof(source)); source.blksize = IDB_TGTSZ; source.onblk = IDB_TGTSZ; source.curblk = buf; source.curblkno = 0; if ((ret = xd3_close_stream (stream))) { goto fail; } xd3_free_stream (stream); xd3_init_config (& config, 0); if ((ret = xd3_config_stream (stream, & config))) { goto fail; } if ((ret = xd3_set_source_and_size (stream, & source, IDB_TGTSZ))) { goto fail; } /* Decode. */ if ((ret = xd3_decode_stream (stream, del, delpos, rec, & recsize, IDB_TGTSZ))) { goto fail; } /* Check result size and data. 
*/ if (recsize != IDB_TGTSZ) { stream->msg = "wrong size reconstruction"; goto fail; } if (memcmp (rec, buf, IDB_TGTSZ) != 0) { stream->msg = "wrong data reconstruction"; goto fail; } /* Check that there was one copy per window. */ IF_DEBUG (if (stream->n_scpy != IDB_WINCNT || stream->n_add != 0 || stream->n_run != 0) { stream->msg = "wrong copy count"; goto fail; }); /* Check that no checksums were computed because the initial match was presumed. */ IF_DEBUG (if (stream->large_ckcnt != 0) { stream->msg = "wrong checksum behavior"; goto fail; }); ret = 0; fail: return ret; } /*********************************************************************** String matching test ***********************************************************************/ /* Check particular matching behaviors by calling * xd3_string_match_soft directly with specific arguments. */ typedef struct _string_match_test string_match_test; typedef enum { SM_NONE = 0, SM_LAZY = (1 << 1), } string_match_flags; struct _string_match_test { const char *input; int flags; const char *result; }; static const string_match_test match_tests[] = { /* nothing */ { "1234567890", SM_NONE, "" }, /* basic run, copy */ { "11111111112323232323", SM_NONE, "R0/10 C12/8@10" }, /* no run smaller than MIN_RUN=8 */ { "1111111", SM_NONE, "C1/6@0" }, { "11111111", SM_NONE, "R0/8" }, /* simple promotion: the third copy address depends on promotion */ { "ABCDEF_ABCDEF^ABCDEF", SM_NONE, "C7/6@0 C14/6@7" }, /* { "ABCDEF_ABCDEF^ABCDEF", SM_PROMOTE, "C7/6@0 C14/6@0" }, forgotten */ /* simple lazy: there is a better copy starting with "23 X" than "123 " */ { "123 23 XYZ 123 XYZ", SM_NONE, "C11/4@0" }, { "123 23 XYZ 123 XYZ", SM_LAZY, "C11/4@0 C12/6@4" }, /* trylazy: no lazy matches unless there are at least two characters beyond * the first match */ { "2123_121212", SM_LAZY, "C7/4@5" }, { "2123_1212123", SM_LAZY, "C7/4@5" }, { "2123_1212123_", SM_LAZY, "C7/4@5 C8/5@0" }, /* trylazy: no lazy matches if the copy is >= MAXLAZY=10 */ { 
"2123_121212123_", SM_LAZY, "C7/6@5 C10/5@0" }, { "2123_12121212123_", SM_LAZY, "C7/8@5 C12/5@0" }, { "2123_1212121212123_", SM_LAZY, "C7/10@5" }, /* lazy run: check a run overlapped by a longer copy */ { "11111112 111111112 1", SM_LAZY, "C1/6@0 R9/8 C10/10@0" }, /* lazy match: match_length,run_l >= min_match tests, shouldn't get any * copies within the run, no run within the copy */ { "^________^________ ", SM_LAZY, "R1/8 C9/9@0" }, /* chain depth: it only goes back 10. this checks that the 10th match hits * and the 11th misses. */ { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/5@0" }, { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234>1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/4@45 C55/4@50" }, /* ssmatch test */ { "ABCDE___ABCDE*** BCDE***", SM_NONE, "C8/5@0 C17/4@1" }, /*{ "ABCDE___ABCDE*** BCDE***", SM_SSMATCH, "C8/5@0 C17/7@9" }, forgotten */ }; static int test_string_matching (xd3_stream *stream, int ignore) { usize_t i; int ret; xd3_config config; char rbuf[TESTBUFSIZE]; for (i = 0; i < SIZEOF_ARRAY (match_tests); i += 1) { const string_match_test *test = & match_tests[i]; char *rptr = rbuf; usize_t len = (usize_t) strlen (test->input); xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 4; config.smatcher_soft.large_step = 4; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 10; config.smatcher_soft.small_lchain = 10; config.smatcher_soft.max_lazy = (test->flags & SM_LAZY) ? 10 : 0; config.smatcher_soft.long_enough = 10; if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_encode_init_full (stream))) { return ret; } xd3_avail_input (stream, (uint8_t*)test->input, len); if ((ret = stream->smatcher.string_match (stream))) { return ret; } *rptr = 0; while (! 
xd3_rlist_empty (& stream->iopt_used)) { xd3_rinst *inst = xd3_rlist_pop_front (& stream->iopt_used); switch (inst->type) { case XD3_RUN: *rptr++ = 'R'; break; case XD3_CPY: *rptr++ = 'C'; break; default: CHECK(0); } snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d", inst->pos, inst->size); rptr += strlen (rptr); if (inst->type == XD3_CPY) { *rptr++ = '@'; snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%"Q"d", inst->addr); rptr += strlen (rptr); } *rptr++ = ' '; xd3_rlist_push_back (& stream->iopt_free, inst); } if (rptr != rbuf) { rptr -= 1; *rptr = 0; } if (strcmp (rbuf, test->result) != 0) { XPR(NT "test %u: expected %s: got %s", i, test->result, rbuf); stream->msg = "wrong result"; return XD3_INTERNAL; } } return 0; } /* * This is a test for many overlapping instructions. It must be a lazy * matcher. */ static int test_iopt_flush_instructions (xd3_stream *stream, int ignore) { int ret, i; usize_t tpos = 0; usize_t delta_size, recon_size; xd3_config config; uint8_t target[TESTBUFSIZE]; uint8_t delta[TESTBUFSIZE]; uint8_t recon[TESTBUFSIZE]; xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 16; config.smatcher_soft.large_step = 16; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 128; config.smatcher_soft.small_lchain = 16; config.smatcher_soft.max_lazy = 8; config.smatcher_soft.long_enough = 128; if ((ret = xd3_config_stream (stream, & config))) { return ret; } for (i = 1; i < 250; i++) { target[tpos++] = i; target[tpos++] = i+1; target[tpos++] = i+2; target[tpos++] = i+3; target[tpos++] = 0; } for (i = 1; i < 253; i++) { target[tpos++] = i; } if ((ret = xd3_encode_stream (stream, target, tpos, delta, & delta_size, sizeof (delta)))) { return ret; } xd3_free_stream(stream); if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_decode_stream (stream, delta, delta_size, recon, & recon_size, sizeof (recon)))) { return ret; } CHECK(tpos == 
recon_size); CHECK(memcmp(target, recon, recon_size) == 0); return 0; } /* * This tests the 32/64bit ambiguity for source-window matching. */ static int test_source_cksum_offset (xd3_stream *stream, int ignore) { xd3_source source; // Inputs are: struct { xoff_t cpos; // stream->srcwin_cksum_pos; xoff_t ipos; // stream->total_in; xoff_t size; // stream->src->size; usize_t input; // input 32-bit offset xoff_t output; // output 64-bit offset } cksum_test[] = { // If cpos is <= 2^32 { 1, 1, 1, 1, 1 }, #if XD3_USE_LARGEFILE64 // cpos ipos size input output // 0x____xxxxxULL, 0x____xxxxxULL, 0x____xxxxxULL, 0x___xxxxxUL, 0x____xxxxxULL { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0x00000000UL, 0x100000000ULL }, { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0xF0000000UL, 0x0F0000000ULL }, { 0x100200000ULL, 0x100100000ULL, 0x100200000ULL, 0x00300000UL, 0x000300000ULL }, { 25771983104ULL, 25770000000ULL, 26414808769ULL, 2139216707UL, 23614053187ULL }, #endif { 0, 0, 0, 0, 0 }, }, *test_ptr; stream->src = &source; for (test_ptr = cksum_test; test_ptr->cpos; test_ptr++) { xoff_t r; stream->srcwin_cksum_pos = test_ptr->cpos; stream->total_in = test_ptr->ipos; r = xd3_source_cksum_offset(stream, test_ptr->input); CHECK(r == test_ptr->output); } return 0; } static int test_in_memory (xd3_stream *stream, int ignore) { // test_text is 256 bytes uint8_t ibuf[sizeof(test_text)]; uint8_t dbuf[sizeof(test_text)]; uint8_t obuf[sizeof(test_text)]; usize_t size = sizeof(test_text); usize_t dsize, osize; int r1, r2; int eflags = SECONDARY_DJW ? 
XD3_SEC_DJW : 0; memcpy(ibuf, test_text, size); memset(ibuf + 128, 0, 16); r1 = xd3_encode_memory(ibuf, size, test_text, size, dbuf, &dsize, size, eflags); r2 = xd3_decode_memory(dbuf, dsize, test_text, size, obuf, &osize, size, 0); if (r1 != 0 || r2 != 0 || dsize >= (size/2) || dsize < 1 || osize != size) { stream->msg = "encode/decode size error"; return XD3_INTERNAL; } if (memcmp(obuf, ibuf, size) != 0) { stream->msg = "encode/decode data error"; return XD3_INTERNAL; } return 0; } /*********************************************************************** TEST MAIN ***********************************************************************/ static int xd3_selftest (void) { #define DO_TEST(fn,flags,arg) \ do { \ xd3_stream stream; \ xd3_config config; \ xd3_init_config (& config, flags); \ XPR(NT "testing " #fn "%s...", \ flags ? (" (" #flags ")") : ""); \ if ((ret = xd3_config_stream (& stream, & config) == 0) && \ (ret = test_ ## fn (& stream, arg)) == 0) { \ XPR(NTR " success\n"); \ } else { \ XPR(NTR " failed: %s: %s\n", xd3_errstring (& stream), \ xd3_mainerror (ret)); } \ xd3_free_stream (& stream); \ if (ret != 0) { goto failure; } \ } while (0) int ret; DO_TEST (random_numbers, 0, 0); DO_TEST (decode_integer_end_of_input, 0, 0); DO_TEST (decode_integer_overflow, 0, 0); DO_TEST (encode_decode_uint32_t, 0, 0); DO_TEST (encode_decode_uint64_t, 0, 0); DO_TEST (usize_t_overflow, 0, 0); DO_TEST (forward_match, 0, 0); DO_TEST (address_cache, 0, 0); IF_GENCODETBL (DO_TEST (address_cache, XD3_ALT_CODE_TABLE, 0)); DO_TEST (string_matching, 0, 0); DO_TEST (choose_instruction, 0, 0); DO_TEST (identical_behavior, 0, 0); DO_TEST (in_memory, 0, 0); IF_GENCODETBL (DO_TEST (choose_instruction, XD3_ALT_CODE_TABLE, 0)); IF_GENCODETBL (DO_TEST (encode_code_table, 0, 0)); DO_TEST (iopt_flush_instructions, 0, 0); DO_TEST (source_cksum_offset, 0, 0); DO_TEST (decompress_single_bit_error, 0, 3); DO_TEST (decompress_single_bit_error, XD3_ADLER32, 3); IF_LZMA (DO_TEST 
(decompress_single_bit_error, XD3_SEC_LZMA, 54)); IF_FGK (DO_TEST (decompress_single_bit_error, XD3_SEC_FGK, 3)); IF_DJW (DO_TEST (decompress_single_bit_error, XD3_SEC_DJW, 8)); /* There are many expected non-failures for ALT_CODE_TABLE because * not all of the instruction codes are used. */ IF_GENCODETBL ( DO_TEST (decompress_single_bit_error, XD3_ALT_CODE_TABLE, 224)); #if SHELL_TESTS DO_TEST (force_behavior, 0, 0); DO_TEST (stdout_behavior, 0, 0); DO_TEST (no_output, 0, 0); DO_TEST (appheader, 0, 0); DO_TEST (command_line_arguments, 0, 0); #if EXTERNAL_COMPRESSION DO_TEST (source_decompression, 0, 0); DO_TEST (externally_compressed_io, 0, 0); #endif DO_TEST (recode_command, 0, 0); #endif IF_LZMA (DO_TEST (secondary_lzma, 0, 1)); IF_DJW (DO_TEST (secondary_huff, 0, DJW_MAX_GROUPS)); IF_FGK (DO_TEST (secondary_fgk, 0, 1)); DO_TEST (compressed_stream_overflow, 0, 0); IF_LZMA (DO_TEST (compressed_stream_overflow, XD3_SEC_LZMA, 0)); failure: test_cleanup (); return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE; #undef DO_TEST }
uint32_t mt_random (mtrand *mt) { uint32_t y; unsigned long mag01[2]; mag01[0] = 0; mag01[1] = MATRIX_A; if (mt->mt_index_ >= MT_LEN) { int kk; for (kk = 0; kk < MT_LEN - MT_IA; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^ (y >> 1) ^ mag01[y & 0x1UL]; } for (;kk < MT_LEN - 1; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^ (y >> 1) ^ mag01[y & 0x1UL]; } y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) | (mt->mt_buffer_[0] & LOWER_MASK); mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mt->mt_index_ = 0; } y = mt->mt_buffer_[mt->mt_index_++]; y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; }
uint32_t mt_random (mtrand *mt) { uint32_t y; unsigned long mag01[2]; mag01[0] = 0; mag01[1] = MATRIX_A; if (mt->mt_index_ >= MT_LEN) { int kk; for (kk = 0; kk < MT_LEN - MT_IA; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^ (y >> 1) ^ mag01[y & 0x1UL]; } for (;kk < MT_LEN - 1; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^ (y >> 1) ^ mag01[y & 0x1UL]; } y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) | (mt->mt_buffer_[0] & LOWER_MASK); mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mt->mt_index_ = 0; } y = mt->mt_buffer_[mt->mt_index_++]; y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; }
{'added': [(2, ' * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.'), (57, '\t(1812433253UL * (mt->mt_buffer_[i-1] ^'), (72, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (74, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^'), (78, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (80, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^'), (83, ' y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |'), (85, ' mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^'), (169, ' return ret;'), (260, ' usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +'), (261, ' TEST_FILE_MEAN / 2;'), (262, ' usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +'), (263, ' TEST_FILE_MEAN / 2;'), (414, '\t XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\\n",'), (426, ' if (diffs != 0)'), (434, 'test_copy_to (const char *from, const char *to)'), (439, ' snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", from, to);'), (449, 'static int'), (450, 'test_save_copy (const char *origname)'), (451, '{'), (452, ' return test_copy_to(origname, TEST_COPY_FILE);'), (453, '}'), (454, ''), (510, ' if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !='), (1665, ' for (i = 0; i < (2 << 20); i += 256)'), (1669, ' for (j = 0; j < 256; j++)'), (1694, ' if ((ret = test_streaming (stream,'), (1695, '\t\t\t buf,'), (1696, '\t\t\t buf + (1 << 20),'), (1697, '\t\t\t buf + (2 << 20),'), (1698, '\t\t\t 1 << 12)))'), (1900, ' snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",'), (1921, '\t !change_apphead ? 
"" :'), (2372, '/* This tests that the default appheader works */'), (2373, 'static int'), (2374, 'test_appheader (xd3_stream *stream, int ignore)'), (2375, '{'), (2376, ' int i;'), (2377, ' int ret;'), (2378, ' char buf[TESTBUFSIZE];'), (2379, ' char bogus[TESTBUFSIZE];'), (2380, ' xoff_t ssize, tsize;'), (2381, ' test_setup ();'), (2382, ''), (2383, ' if ((ret = test_make_inputs (stream, &ssize, &tsize))) { return ret; }'), (2384, ''), (2385, ' snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e -s %s %s %s", program_name,'), (2386, '\t\t TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);'), (2387, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2388, ''), (2389, ' if ((ret = test_copy_to (program_name, TEST_RECON2_FILE))) { return ret; }'), (2390, ''), (2391, ' snprintf_func (buf, TESTBUFSIZE, "chmod 0700 %s", TEST_RECON2_FILE);'), (2392, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2393, ''), (2394, ' if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; }'), (2395, ' if ((ret = test_copy_to (TEST_SOURCE_FILE, TEST_TARGET_FILE))) { return ret; }'), (2396, ''), (2397, ' if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) == 0)'), (2398, ' {'), (2399, ' return XD3_INVALID; // I.e., files are different!'), (2400, ' }'), (2401, ''), (2402, ' // Test that the target file is restored.'), (2403, ' snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)",'), (2404, '\t\t TEST_RECON2_FILE,'), (2405, '\t\t TEST_DELTA_FILE);'), (2406, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2407, ''), (2408, ' if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) != 0)'), (2409, ' {'), (2410, ' return ret;'), (2411, ' }'), (2412, ''), (2413, ' // Test a malicious string w/ entries > 4 in the appheader by having'), (2414, ' // the encoder write it:'), (2415, ' for (i = 0; i < TESTBUFSIZE / 4; ++i)'), (2416, ' {'), (2417, " bogus[2*i] = 'G';"), (2418, " bogus[2*i+1] = '/';"), (2419, ' }'), (2420, ' bogus[TESTBUFSIZE/2-1] 
= 0;'), (2421, ''), (2422, ' snprintf_func (buf, TESTBUFSIZE,'), (2423, '\t\t "%s -q -f -A=%s -e -s %s %s %s", program_name, bogus,'), (2424, '\t\t TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);'), (2425, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2426, ' // Then read it:'), (2427, ' snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)",'), (2428, '\t\t TEST_RECON2_FILE,'), (2429, '\t\t TEST_DELTA_FILE);'), (2430, ' if ((ret = do_cmd (stream, buf)) == 0)'), (2431, ' {'), (2432, ' return XD3_INVALID; // Impossible'), (2433, ' }'), (2434, ' if (!WIFEXITED(ret))'), (2435, ' {'), (2436, ' return XD3_INVALID; // Must have crashed!'), (2437, ' }'), (2438, ''), (2439, ' return 0;'), (2440, '}'), (2441, ''), (2684, '\t snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d",'), (2929, ' DO_TEST (appheader, 0, 0);')], 'deleted': [(2, ' * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.'), (57, '\t(1812433253UL * (mt->mt_buffer_[i-1] ^'), (72, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (74, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^'), (78, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (80, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^'), (83, ' y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |'), (85, ' mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^'), (169, ' return XD3_INTERNAL;'), (260, ' usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2;'), (261, ' usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2;'), (412, '\t XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\\n",'), (424, ' if (diffs != 0)'), (432, 'test_save_copy (const char *origname)'), (437, ' snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", origname, TEST_COPY_FILE);'), (502, ' if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !='), (1657, ' for (i = 0; i < (2 << 20); i += 256)'), (1661, ' for (j = 0; j < 256; j++)'), (1686, ' if ((ret = test_streaming (stream,'), (1687, '\t\t\t 
buf,'), (1688, '\t\t\t buf + (1 << 20),'), (1689, '\t\t\t buf + (2 << 20),'), (1690, '\t\t\t 1 << 12)))'), (1892, ' snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",'), (1913, '\t !change_apphead ? "" :'), (2606, '\t snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d",')]}
105
26
2,023
12,828
32
311
4
https://github.com/jmacd/xdelta-devel
CVE-2014-9765
CWE-119
2,493
libraw_cxx.cpp
C++
LibRaw::dcraw_process
/* -*- C++ -*- * File: libraw_cxx.cpp * Copyright 2008-2013 LibRaw LLC (info@libraw.org) * Created: Sat Mar 8 , 2008 * * LibRaw C++ interface (implementation) LibRaw is free software; you can redistribute it and/or modify it under the terms of the one of three licenses as you choose: 1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1 (See file LICENSE.LGPL provided in LibRaw distribution archive for details). 2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 (See file LICENSE.CDDL provided in LibRaw distribution archive for details). 3. LibRaw Software License 27032010 (See file LICENSE.LibRaw.pdf provided in LibRaw distribution archive for details). */ #include <math.h> #include <errno.h> #include <float.h> #include <new> #include <exception> #include <sys/types.h> #include <sys/stat.h> #ifndef WIN32 #include <netinet/in.h> #else #include <winsock2.h> #endif #define LIBRAW_LIBRARY_BUILD #include "libraw/libraw.h" #include "internal/defines.h" #ifdef USE_RAWSPEED #include "../RawSpeed/rawspeed_xmldata.cpp" #include <RawSpeed/StdAfx.h> #include <RawSpeed/FileMap.h> #include <RawSpeed/RawParser.h> #include <RawSpeed/RawDecoder.h> #include <RawSpeed/CameraMetaData.h> #include <RawSpeed/ColorFilterArray.h> #endif #ifdef __cplusplus extern "C" { #endif void default_memory_callback(void *,const char *file,const char *where) { fprintf (stderr,"%s: Out of memory in %s\n", file?file:"unknown file", where); } void default_data_callback(void*,const char *file, const int offset) { if(offset < 0) fprintf (stderr,"%s: Unexpected end of file\n", file?file:"unknown file"); else fprintf (stderr,"%s: data corrupted at %d\n",file?file:"unknown file",offset); } const char *libraw_strerror(int e) { enum LibRaw_errors errorcode = (LibRaw_errors)e; switch(errorcode) { case LIBRAW_SUCCESS: return "No error"; case LIBRAW_UNSPECIFIED_ERROR: return "Unspecified error"; case LIBRAW_FILE_UNSUPPORTED: return "Unsupported file format or not RAW file"; case 
LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE: return "Request for nonexisting image number"; case LIBRAW_OUT_OF_ORDER_CALL: return "Out of order call of libraw function"; case LIBRAW_NO_THUMBNAIL: return "No thumbnail in file"; case LIBRAW_UNSUPPORTED_THUMBNAIL: return "Unsupported thumbnail format"; case LIBRAW_INPUT_CLOSED: return "No input stream, or input stream closed"; case LIBRAW_UNSUFFICIENT_MEMORY: return "Unsufficient memory"; case LIBRAW_DATA_ERROR: return "Corrupted data or unexpected EOF"; case LIBRAW_IO_ERROR: return "Input/output error"; case LIBRAW_CANCELLED_BY_CALLBACK: return "Cancelled by user callback"; case LIBRAW_BAD_CROP: return "Bad crop box"; default: return "Unknown error code"; } } #ifdef __cplusplus } #endif const double LibRaw_constants::xyz_rgb[3][3] = { { 0.412453, 0.357580, 0.180423 }, { 0.212671, 0.715160, 0.072169 }, { 0.019334, 0.119193, 0.950227 } }; const float LibRaw_constants::d65_white[3] = { 0.950456f, 1.0f, 1.088754f }; #define P1 imgdata.idata #define S imgdata.sizes #define O imgdata.params #define C imgdata.color #define T imgdata.thumbnail #define IO libraw_internal_data.internal_output_params #define ID libraw_internal_data.internal_data #define EXCEPTION_HANDLER(e) do{ \ /* fprintf(stderr,"Exception %d caught\n",e);*/ \ switch(e) \ { \ case LIBRAW_EXCEPTION_ALLOC: \ recycle(); \ return LIBRAW_UNSUFFICIENT_MEMORY; \ case LIBRAW_EXCEPTION_DECODE_RAW: \ case LIBRAW_EXCEPTION_DECODE_JPEG: \ recycle(); \ return LIBRAW_DATA_ERROR; \ case LIBRAW_EXCEPTION_DECODE_JPEG2000: \ recycle(); \ return LIBRAW_DATA_ERROR; \ case LIBRAW_EXCEPTION_IO_EOF: \ case LIBRAW_EXCEPTION_IO_CORRUPT: \ recycle(); \ return LIBRAW_IO_ERROR; \ case LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK:\ recycle(); \ return LIBRAW_CANCELLED_BY_CALLBACK; \ case LIBRAW_EXCEPTION_BAD_CROP: \ recycle(); \ return LIBRAW_BAD_CROP; \ default: \ return LIBRAW_UNSPECIFIED_ERROR; \ } \ }while(0) const char* LibRaw::version() { return LIBRAW_VERSION_STR;} int LibRaw::versionNumber() 
{ return LIBRAW_VERSION; } const char* LibRaw::strerror(int p) { return libraw_strerror(p);} void LibRaw::derror() { if (!libraw_internal_data.unpacker_data.data_error && libraw_internal_data.internal_data.input) { if (libraw_internal_data.internal_data.input->eof()) { if(callbacks.data_cb)(*callbacks.data_cb)(callbacks.datacb_data, libraw_internal_data.internal_data.input->fname(),-1); throw LIBRAW_EXCEPTION_IO_EOF; } else { if(callbacks.data_cb)(*callbacks.data_cb)(callbacks.datacb_data, libraw_internal_data.internal_data.input->fname(), libraw_internal_data.internal_data.input->tell()); throw LIBRAW_EXCEPTION_IO_CORRUPT; } } libraw_internal_data.unpacker_data.data_error++; } void LibRaw::dcraw_clear_mem(libraw_processed_image_t* p) { if(p) ::free(p); } #ifdef USE_RAWSPEED using namespace RawSpeed; class CameraMetaDataLR : public CameraMetaData { public: CameraMetaDataLR() : CameraMetaData() {} CameraMetaDataLR(char *filename) : CameraMetaData(filename){} CameraMetaDataLR(char *data, int sz); }; CameraMetaDataLR::CameraMetaDataLR(char *data, int sz) : CameraMetaData() { ctxt = xmlNewParserCtxt(); if (ctxt == NULL) { ThrowCME("CameraMetaData:Could not initialize context."); } xmlResetLastError(); doc = xmlCtxtReadMemory(ctxt, data,sz, "", NULL, XML_PARSE_DTDVALID); if (doc == NULL) { ThrowCME("CameraMetaData: XML Document could not be parsed successfully. Error was: %s", ctxt->lastError.message); } if (ctxt->valid == 0) { if (ctxt->lastError.code == 0x5e) { // printf("CameraMetaData: Unable to locate DTD, attempting to ignore."); } else { ThrowCME("CameraMetaData: XML file does not validate. 
DTD Error was: %s", ctxt->lastError.message); } } xmlNodePtr cur; cur = xmlDocGetRootElement(doc); if (xmlStrcmp(cur->name, (const xmlChar *) "Cameras")) { ThrowCME("CameraMetaData: XML document of the wrong type, root node is not cameras."); return; } cur = cur->xmlChildrenNode; while (cur != NULL) { if ((!xmlStrcmp(cur->name, (const xmlChar *)"Camera"))) { Camera *camera = new Camera(doc, cur); addCamera(camera); // Create cameras for aliases. for (uint32 i = 0; i < camera->aliases.size(); i++) { addCamera(new Camera(camera, i)); } } cur = cur->next; } if (doc) xmlFreeDoc(doc); doc = 0; if (ctxt) xmlFreeParserCtxt(ctxt); ctxt = 0; } #define RAWSPEED_DATA_COUNT (sizeof(_rawspeed_data_xml)/sizeof(_rawspeed_data_xml[0])) static CameraMetaDataLR* make_camera_metadata() { int len = 0,i; for(i=0;i<RAWSPEED_DATA_COUNT;i++) if(_rawspeed_data_xml[i]) { len+=strlen(_rawspeed_data_xml[i]); } char *rawspeed_xml = (char*)calloc(len+1,sizeof(_rawspeed_data_xml[0][0])); if(!rawspeed_xml) return NULL; int offt = 0; for(i=0;i<RAWSPEED_DATA_COUNT;i++) if(_rawspeed_data_xml[i]) { int ll = strlen(_rawspeed_data_xml[i]); if(offt+ll>len) break; memmove(rawspeed_xml+offt,_rawspeed_data_xml[i],ll); offt+=ll; } rawspeed_xml[offt]=0; CameraMetaDataLR *ret=NULL; try { ret = new CameraMetaDataLR(rawspeed_xml,offt); } catch (...) 
{ // Mask all exceptions } free(rawspeed_xml); return ret; } #endif #define ZERO(a) memset(&a,0,sizeof(a)) LibRaw:: LibRaw(unsigned int flags) { double aber[4] = {1,1,1,1}; double gamm[6] = { 0.45,4.5,0,0,0,0 }; unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX }; unsigned cropbox[4] = { 0, 0, UINT_MAX, UINT_MAX }; #ifdef DCRAW_VERBOSE verbose = 1; #else verbose = 0; #endif ZERO(imgdata); ZERO(libraw_internal_data); ZERO(callbacks); _rawspeed_camerameta = _rawspeed_decoder = NULL; #ifdef USE_RAWSPEED CameraMetaDataLR *camerameta = make_camera_metadata(); // May be NULL in case of exception in make_camera_metadata() _rawspeed_camerameta = static_cast<void*>(camerameta); #endif callbacks.mem_cb = (flags & LIBRAW_OPIONS_NO_MEMERR_CALLBACK) ? NULL: &default_memory_callback; callbacks.data_cb = (flags & LIBRAW_OPIONS_NO_DATAERR_CALLBACK)? NULL : &default_data_callback; memmove(&imgdata.params.aber,&aber,sizeof(aber)); memmove(&imgdata.params.gamm,&gamm,sizeof(gamm)); memmove(&imgdata.params.greybox,&greybox,sizeof(greybox)); memmove(&imgdata.params.cropbox,&cropbox,sizeof(cropbox)); imgdata.params.bright=1; imgdata.params.use_camera_matrix=-1; imgdata.params.user_flip=-1; imgdata.params.user_black=-1; imgdata.params.user_cblack[0]=imgdata.params.user_cblack[1]=imgdata.params.user_cblack[2]=imgdata.params.user_cblack[3]=-1000001; imgdata.params.user_sat=-1; imgdata.params.user_qual=-1; imgdata.params.output_color=1; imgdata.params.output_bps=8; imgdata.params.use_fuji_rotate=1; imgdata.params.exp_shift = 1.0; imgdata.params.auto_bright_thr = LIBRAW_DEFAULT_AUTO_BRIGHTNESS_THRESHOLD; imgdata.params.adjust_maximum_thr= LIBRAW_DEFAULT_ADJUST_MAXIMUM_THRESHOLD; imgdata.params.use_rawspeed = 1; imgdata.params.green_matching = 0; imgdata.parent_class = this; imgdata.progress_flags = 0; tls = new LibRaw_TLS; tls->init(); } int LibRaw::set_rawspeed_camerafile(char *filename) { #ifdef USE_RAWSPEED try { CameraMetaDataLR *camerameta = new CameraMetaDataLR(filename); 
if(_rawspeed_camerameta) { CameraMetaDataLR *d = static_cast<CameraMetaDataLR*>(_rawspeed_camerameta); delete d; } _rawspeed_camerameta = static_cast<void*>(camerameta); } catch (...) { //just return error code return -1; } #endif return 0; } LibRaw::~LibRaw() { recycle(); delete tls; #ifdef USE_RAWSPEED if(_rawspeed_camerameta) { CameraMetaDataLR *cmeta = static_cast<CameraMetaDataLR*>(_rawspeed_camerameta); delete cmeta; _rawspeed_camerameta = NULL; } #endif } void* LibRaw:: malloc(size_t t) { void *p = memmgr.malloc(t); if(!p) throw LIBRAW_EXCEPTION_ALLOC; return p; } void* LibRaw:: realloc(void *q,size_t t) { void *p = memmgr.realloc(q,t); if(!p) throw LIBRAW_EXCEPTION_ALLOC; return p; } void* LibRaw:: calloc(size_t n,size_t t) { void *p = memmgr.calloc(n,t); if(!p) throw LIBRAW_EXCEPTION_ALLOC; return p; } void LibRaw:: free(void *p) { memmgr.free(p); } void LibRaw:: recycle_datastream() { if(libraw_internal_data.internal_data.input && libraw_internal_data.internal_data.input_internal) { delete libraw_internal_data.internal_data.input; libraw_internal_data.internal_data.input = NULL; } libraw_internal_data.internal_data.input_internal = 0; } void LibRaw:: recycle() { recycle_datastream(); #define FREE(a) do { if(a) { free(a); a = NULL;} }while(0) FREE(imgdata.image); FREE(imgdata.thumbnail.thumb); FREE(libraw_internal_data.internal_data.meta_data); FREE(libraw_internal_data.output_data.histogram); FREE(libraw_internal_data.output_data.oprof); FREE(imgdata.color.profile); FREE(imgdata.rawdata.ph1_black); FREE(imgdata.rawdata.raw_alloc); #undef FREE ZERO(imgdata.rawdata); ZERO(imgdata.sizes); ZERO(imgdata.color); ZERO(libraw_internal_data); #ifdef USE_RAWSPEED if(_rawspeed_decoder) { RawDecoder *d = static_cast<RawDecoder*>(_rawspeed_decoder); delete d; } _rawspeed_decoder = 0; #endif memmgr.cleanup(); imgdata.thumbnail.tformat = LIBRAW_THUMBNAIL_UNKNOWN; imgdata.progress_flags = 0; tls->init(); } const char * LibRaw::unpack_function_name() { 
libraw_decoder_info_t decoder_info; get_decoder_info(&decoder_info); return decoder_info.decoder_name; } int LibRaw::get_decoder_info(libraw_decoder_info_t* d_info) { if(!d_info) return LIBRAW_UNSPECIFIED_ERROR; if(!load_raw) return LIBRAW_OUT_OF_ORDER_CALL; d_info->decoder_flags = LIBRAW_DECODER_NOTSET; int rawdata = (imgdata.idata.filters || P1.colors == 1); // dcraw.c names order if (load_raw == &LibRaw::canon_600_load_raw) { d_info->decoder_name = "canon_600_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; // WB set within decoder, no need to load raw } else if (load_raw == &LibRaw::canon_load_raw) { d_info->decoder_name = "canon_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::lossless_jpeg_load_raw) { // Check rbayer d_info->decoder_name = "lossless_jpeg_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_HASCURVE | LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::canon_sraw_load_raw) { d_info->decoder_name = "canon_sraw_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY | LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::lossless_dng_load_raw) { // Check rbayer d_info->decoder_name = "lossless_dng_load_raw()"; d_info->decoder_flags = rawdata? LIBRAW_DECODER_FLATFIELD : LIBRAW_DECODER_LEGACY ; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::packed_dng_load_raw) { // Check rbayer d_info->decoder_name = "packed_dng_load_raw()"; d_info->decoder_flags = rawdata ? 
LIBRAW_DECODER_FLATFIELD : LIBRAW_DECODER_LEGACY; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::pentax_load_raw ) { d_info->decoder_name = "pentax_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::nikon_load_raw) { // Check rbayer d_info->decoder_name = "nikon_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::rollei_load_raw ) { // UNTESTED d_info->decoder_name = "rollei_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::phase_one_load_raw ) { d_info->decoder_name = "phase_one_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::phase_one_load_raw_c ) { d_info->decoder_name = "phase_one_load_raw_c()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::hasselblad_load_raw ) { d_info->decoder_name = "hasselblad_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::leaf_hdr_load_raw ) { d_info->decoder_name = "leaf_hdr_load_raw()"; d_info->decoder_flags = imgdata.idata.filters? 
LIBRAW_DECODER_FLATFIELD:LIBRAW_DECODER_LEGACY; } else if (load_raw == &LibRaw::unpacked_load_raw ) { d_info->decoder_name = "unpacked_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_USEBAYER2; } else if (load_raw == &LibRaw::sinar_4shot_load_raw ) { // UNTESTED d_info->decoder_name = "sinar_4shot_load_raw()"; d_info->decoder_flags = (O.shot_select|| O.half_size)?LIBRAW_DECODER_FLATFIELD:LIBRAW_DECODER_LEGACY; } else if (load_raw == &LibRaw::imacon_full_load_raw ) { d_info->decoder_name = "imacon_full_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY; } else if (load_raw == &LibRaw::hasselblad_full_load_raw ) { d_info->decoder_name = "hasselblad_full_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY; } else if (load_raw == &LibRaw::packed_load_raw ) { d_info->decoder_name = "packed_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::nokia_load_raw ) { // UNTESTED d_info->decoder_name = "nokia_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::panasonic_load_raw ) { d_info->decoder_name = "panasonic_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::olympus_load_raw ) { d_info->decoder_name = "olympus_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED; } else if (load_raw == &LibRaw::minolta_rd175_load_raw ) { // UNTESTED d_info->decoder_name = "minolta_rd175_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::quicktake_100_load_raw ) { // UNTESTED d_info->decoder_name = "quicktake_100_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::kodak_radc_load_raw ) { d_info->decoder_name = "kodak_radc_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == 
&LibRaw::kodak_jpeg_load_raw ) { // UNTESTED + RBAYER d_info->decoder_name = "kodak_jpeg_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::lossy_dng_load_raw) { // Check rbayer d_info->decoder_name = "lossy_dng_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY | LIBRAW_DECODER_TRYRAWSPEED; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; } else if (load_raw == &LibRaw::kodak_dc120_load_raw ) { d_info->decoder_name = "kodak_dc120_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::eight_bit_load_raw ) { d_info->decoder_name = "eight_bit_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; } else if (load_raw == &LibRaw::kodak_yrgb_load_raw ) { d_info->decoder_name = "kodak_yrgb_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; } else if (load_raw == &LibRaw::kodak_262_load_raw ) { d_info->decoder_name = "kodak_262_load_raw()"; // UNTESTED! 
d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; } else if (load_raw == &LibRaw::kodak_65000_load_raw ) { d_info->decoder_name = "kodak_65000_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; } else if (load_raw == &LibRaw::kodak_ycbcr_load_raw ) { // UNTESTED d_info->decoder_name = "kodak_ycbcr_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; } else if (load_raw == &LibRaw::kodak_rgb_load_raw ) { // UNTESTED d_info->decoder_name = "kodak_rgb_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY; } else if (load_raw == &LibRaw::sony_load_raw ) { d_info->decoder_name = "sony_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::sony_arw_load_raw ) { d_info->decoder_name = "sony_arw_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; #ifndef NOSONY_RAWSPEED d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED; #endif } else if (load_raw == &LibRaw::sony_arw2_load_raw ) { d_info->decoder_name = "sony_arw2_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; #ifndef NOSONY_RAWSPEED d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED; #endif d_info->decoder_flags |= LIBRAW_DECODER_ITSASONY; } else if (load_raw == &LibRaw::smal_v6_load_raw ) { // UNTESTED d_info->decoder_name = "smal_v6_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::smal_v9_load_raw ) { // UNTESTED d_info->decoder_name = "smal_v9_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; } else if (load_raw == &LibRaw::redcine_load_raw) { d_info->decoder_name = "redcine_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE; } else if (load_raw == &LibRaw::foveon_sd_load_raw ) { d_info->decoder_name = 
"foveon_sd_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY; } else if (load_raw == &LibRaw::foveon_dp_load_raw ) { d_info->decoder_name = "foveon_dp_load_raw()"; d_info->decoder_flags = LIBRAW_DECODER_LEGACY; } else { d_info->decoder_name = "Unknown unpack function"; d_info->decoder_flags = LIBRAW_DECODER_NOTSET; } return LIBRAW_SUCCESS; } int LibRaw::adjust_maximum() { ushort real_max; float auto_threshold; if(O.adjust_maximum_thr < 0.00001) return LIBRAW_SUCCESS; else if (O.adjust_maximum_thr > 0.99999) auto_threshold = LIBRAW_DEFAULT_ADJUST_MAXIMUM_THRESHOLD; else auto_threshold = O.adjust_maximum_thr; real_max = C.data_maximum; if (real_max > 0 && real_max < C.maximum && real_max > C.maximum* auto_threshold) { C.maximum = real_max; } return LIBRAW_SUCCESS; } void LibRaw:: merror (void *ptr, const char *where) { if (ptr) return; if(callbacks.mem_cb)(*callbacks.mem_cb)(callbacks.memcb_data, libraw_internal_data.internal_data.input ?libraw_internal_data.internal_data.input->fname() :NULL, where); throw LIBRAW_EXCEPTION_ALLOC; } int LibRaw::open_file(const char *fname, INT64 max_buf_size) { #ifndef WIN32 struct stat st; if(stat(fname,&st)) return LIBRAW_IO_ERROR; int big = (st.st_size > max_buf_size)?1:0; #else struct _stati64 st; if(_stati64(fname,&st)) return LIBRAW_IO_ERROR; int big = (st.st_size > max_buf_size)?1:0; #endif LibRaw_abstract_datastream *stream; try { if(big) stream = new LibRaw_bigfile_datastream(fname); else stream = new LibRaw_file_datastream(fname); } catch (std::bad_alloc) { recycle(); return LIBRAW_UNSUFFICIENT_MEMORY; } if(!stream->valid()) { delete stream; return LIBRAW_IO_ERROR; } ID.input_internal = 0; // preserve from deletion on error int ret = open_datastream(stream); if (ret == LIBRAW_SUCCESS) { ID.input_internal =1 ; // flag to delete datastream on recycle } else { delete stream; ID.input_internal = 0; } return ret; } #ifdef WIN32 int LibRaw::open_file(const wchar_t *fname, INT64 max_buf_size) { struct _stati64 st; 
if(_wstati64(fname,&st)) return LIBRAW_IO_ERROR; int big = (st.st_size > max_buf_size)?1:0; LibRaw_abstract_datastream *stream; try { if(big) stream = new LibRaw_bigfile_datastream(fname); else stream = new LibRaw_file_datastream(fname); } catch (std::bad_alloc) { recycle(); return LIBRAW_UNSUFFICIENT_MEMORY; } if(!stream->valid()) { delete stream; return LIBRAW_IO_ERROR; } ID.input_internal = 0; // preserve from deletion on error int ret = open_datastream(stream); if (ret == LIBRAW_SUCCESS) { ID.input_internal =1 ; // flag to delete datastream on recycle } else { delete stream; ID.input_internal = 0; } return ret; } #endif int LibRaw::open_buffer(void *buffer, size_t size) { // this stream will close on recycle() if(!buffer || buffer==(void*)-1) return LIBRAW_IO_ERROR; LibRaw_buffer_datastream *stream; try { stream = new LibRaw_buffer_datastream(buffer,size); } catch (std::bad_alloc) { recycle(); return LIBRAW_UNSUFFICIENT_MEMORY; } if(!stream->valid()) { delete stream; return LIBRAW_IO_ERROR; } ID.input_internal = 0; // preserve from deletion on error int ret = open_datastream(stream); if (ret == LIBRAW_SUCCESS) { ID.input_internal =1 ; // flag to delete datastream on recycle } else { delete stream; ID.input_internal = 0; } return ret; } void LibRaw::hasselblad_full_load_raw() { int row, col; for (row=0; row < S.height; row++) for (col=0; col < S.width; col++) { read_shorts (&imgdata.image[row*S.width+col][2], 1); // B read_shorts (&imgdata.image[row*S.width+col][1], 1); // G read_shorts (&imgdata.image[row*S.width+col][0], 1); // R } } int LibRaw::open_datastream(LibRaw_abstract_datastream *stream) { if(!stream) return ENOENT; if(!stream->valid()) return LIBRAW_IO_ERROR; recycle(); try { ID.input = stream; SET_PROC_FLAG(LIBRAW_PROGRESS_OPEN); if (O.use_camera_matrix < 0) O.use_camera_matrix = O.use_camera_wb; identify(); #if 0 size_t bytes = ID.input->size()-libraw_internal_data.unpacker_data.data_offset; float bpp = 
float(bytes)/float(S.raw_width)/float(S.raw_height); float bpp2 = float(bytes)/float(S.width)/float(S.height); printf("RawSize: %dx%d data offset: %d data size:%d bpp: %g bpp2: %g\n",S.raw_width,S.raw_height,libraw_internal_data.unpacker_data.data_offset,bytes,bpp,bpp2); if(!strcasecmp(imgdata.idata.make,"Hasselblad") && bpp == 6.0f) { load_raw = &LibRaw::hasselblad_full_load_raw; S.width = S.raw_width; S.height = S.raw_height; P1.filters = 0; P1.colors=3; P1.raw_count=1; C.maximum=0xffff; printf("3 channel hassy found\n"); } #endif if(C.profile_length) { if(C.profile) free(C.profile); C.profile = malloc(C.profile_length); merror(C.profile,"LibRaw::open_file()"); ID.input->seek(ID.profile_offset,SEEK_SET); ID.input->read(C.profile,C.profile_length,1); } SET_PROC_FLAG(LIBRAW_PROGRESS_IDENTIFY); } catch ( LibRaw_exceptions err) { EXCEPTION_HANDLER(err); } catch (std::exception ee) { EXCEPTION_HANDLER(LIBRAW_EXCEPTION_IO_CORRUPT); } if(P1.raw_count < 1) return LIBRAW_FILE_UNSUPPORTED; write_fun = &LibRaw::write_ppm_tiff; if (load_raw == &LibRaw::kodak_ycbcr_load_raw) { S.height += S.height & 1; S.width += S.width & 1; } libraw_decoder_info_t dinfo; get_decoder_info(&dinfo); if(dinfo.decoder_flags & LIBRAW_DECODER_LEGACY) { // Adjust sizes according to image buffer size S.raw_width = S.width; S.left_margin = 0; S.raw_height = S.height; S.top_margin = 0; } IO.shrink = P1.filters && (O.half_size || ((O.threshold || O.aber[0] != 1 || O.aber[2] != 1) )); S.iheight = (S.height + IO.shrink) >> IO.shrink; S.iwidth = (S.width + IO.shrink) >> IO.shrink; if(imgdata.idata.filters == 303979333U) { //printf("BL=%d [%d,%d,%d,%d]\n",C.black,C.cblack[0],C.cblack[1],C.cblack[2],C.cblack[3]); C.black = C.cblack[0]; C.cblack[0]=C.cblack[1]=C.cblack[2]=C.cblack[3]=0; imgdata.idata.filters = 2; } // X20 if(imgdata.idata.filters == 0x5bb8445b) { C.black = 257; C.cblack[0]=C.cblack[1]=C.cblack[2]=C.cblack[3]=0; imgdata.idata.filters = 2; S.width = 4030; S.height = 3010; S.top_margin = 2; 
S.left_margin = 2; } // X100S if(imgdata.idata.filters == 0x5145bb84) { C.black = 1024; C.cblack[0]=C.cblack[1]=C.cblack[2]=C.cblack[3]=0; S.left_margin = 2; S.top_margin = 1; S.width = 4934; S.height = 3290; imgdata.idata.filters = 2; } // Save color,sizes and internal data into raw_image fields memmove(&imgdata.rawdata.color,&imgdata.color,sizeof(imgdata.color)); memmove(&imgdata.rawdata.sizes,&imgdata.sizes,sizeof(imgdata.sizes)); memmove(&imgdata.rawdata.iparams,&imgdata.idata,sizeof(imgdata.idata)); memmove(&imgdata.rawdata.ioparams,&libraw_internal_data.internal_output_params,sizeof(libraw_internal_data.internal_output_params)); SET_PROC_FLAG(LIBRAW_PROGRESS_SIZE_ADJUST); return LIBRAW_SUCCESS; } #ifdef USE_RAWSPEED void LibRaw::fix_after_rawspeed(int bl) { if (load_raw == &LibRaw::lossy_dng_load_raw) C.maximum = 0xffff; else if (load_raw == &LibRaw::sony_load_raw) C.maximum = 0x3ff0; else if ( (load_raw == &LibRaw::sony_arw2_load_raw || (load_raw == &LibRaw::packed_load_raw && !strcasecmp(imgdata.idata.make,"Sony"))) && bl >= (C.black+C.cblack[0])*2 ) { C.maximum *=4; C.black *=4; for(int c=0; c< 4; c++) C.cblack[c]*=4; } } #else void LibRaw::fix_after_rawspeed(int) { } #endif int LibRaw::unpack(void) { CHECK_ORDER_HIGH(LIBRAW_PROGRESS_LOAD_RAW); CHECK_ORDER_LOW(LIBRAW_PROGRESS_IDENTIFY); try { if(!libraw_internal_data.internal_data.input) return LIBRAW_INPUT_CLOSED; RUN_CALLBACK(LIBRAW_PROGRESS_LOAD_RAW,0,2); if (O.shot_select >= P1.raw_count) return LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE; if(!load_raw) return LIBRAW_UNSPECIFIED_ERROR; if (O.use_camera_matrix && C.cmatrix[0][0] > 0.25) { memcpy (C.rgb_cam, C.cmatrix, sizeof (C.cmatrix)); IO.raw_color = 0; } // already allocated ? 
if(imgdata.image) { free(imgdata.image); imgdata.image = 0; } if(imgdata.rawdata.raw_alloc) { free(imgdata.rawdata.raw_alloc); imgdata.rawdata.raw_alloc = 0; } if (libraw_internal_data.unpacker_data.meta_length) { libraw_internal_data.internal_data.meta_data = (char *) malloc (libraw_internal_data.unpacker_data.meta_length); merror (libraw_internal_data.internal_data.meta_data, "LibRaw::unpack()"); } libraw_decoder_info_t decoder_info; get_decoder_info(&decoder_info); int save_iwidth = S.iwidth, save_iheight = S.iheight, save_shrink = IO.shrink; int rwidth = S.raw_width, rheight = S.raw_height; if( !IO.fuji_width) { // adjust non-Fuji allocation if(rwidth < S.width + S.left_margin) rwidth = S.width + S.left_margin; if(rheight < S.height + S.top_margin) rheight = S.height + S.top_margin; } S.raw_pitch = S.raw_width*2; imgdata.rawdata.raw_image = 0; imgdata.rawdata.color4_image = 0; imgdata.rawdata.color3_image = 0; #ifdef USE_RAWSPEED // RawSpeed Supported, if(O.use_rawspeed && (decoder_info.decoder_flags & LIBRAW_DECODER_TRYRAWSPEED) && _rawspeed_camerameta) { INT64 spos = ID.input->tell(); try { // printf("Using rawspeed\n"); ID.input->seek(0,SEEK_SET); INT64 _rawspeed_buffer_sz = ID.input->size()+32; void *_rawspeed_buffer = malloc(_rawspeed_buffer_sz); if(!_rawspeed_buffer) throw LIBRAW_EXCEPTION_ALLOC; ID.input->read(_rawspeed_buffer,_rawspeed_buffer_sz,1); FileMap map((uchar8*)_rawspeed_buffer,_rawspeed_buffer_sz); RawParser t(&map); RawDecoder *d = 0; CameraMetaDataLR *meta = static_cast<CameraMetaDataLR*>(_rawspeed_camerameta); d = t.getDecoder(); try { d->checkSupport(meta); } catch (const RawDecoderException& e) { imgdata.process_warnings |= LIBRAW_WARN_RAWSPEED_UNSUPPORTED; throw e; } d->decodeRaw(); d->decodeMetaData(meta); RawImage r = d->mRaw; if (r->isCFA) { // Save pointer to decoder _rawspeed_decoder = static_cast<void*>(d); imgdata.rawdata.raw_image = (ushort*) r->getDataUncropped(0,0); S.raw_pitch = r->pitch; fix_after_rawspeed(r->blackLevel); } 
else if(r->getCpp()==4) { _rawspeed_decoder = static_cast<void*>(d); imgdata.rawdata.color4_image = (ushort(*)[4]) r->getDataUncropped(0,0); S.raw_pitch = r->pitch; C.maximum = r->whitePoint; fix_after_rawspeed(r->blackLevel); } else if(r->getCpp() == 3) { _rawspeed_decoder = static_cast<void*>(d); imgdata.rawdata.color3_image = (ushort(*)[3]) r->getDataUncropped(0,0); S.raw_pitch = r->pitch; C.maximum = r->whitePoint; fix_after_rawspeed(r->blackLevel); } else { delete d; } free(_rawspeed_buffer); imgdata.process_warnings |= LIBRAW_WARN_RAWSPEED_PROCESSED; } catch (...) { imgdata.process_warnings |= LIBRAW_WARN_RAWSPEED_PROBLEM; // no other actions: if raw_image is not set we'll try usual load_raw call } ID.input->seek(spos,SEEK_SET); } #endif if(!imgdata.rawdata.raw_image && !imgdata.rawdata.color4_image && !imgdata.rawdata.color3_image) // RawSpeed failed! { // Not allocated on RawSpeed call, try call LibRaw if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD) { imgdata.rawdata.raw_alloc = malloc(rwidth*(rheight+7)*sizeof(imgdata.rawdata.raw_image[0])); imgdata.rawdata.raw_image = (ushort*) imgdata.rawdata.raw_alloc; } else if (decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY) { // sRAW and Foveon only, so extra buffer size is just 1/4 // Legacy converters does not supports half mode! 
S.iwidth = S.width; S.iheight= S.height; IO.shrink = 0; S.raw_pitch = S.width*8; // allocate image as temporary buffer, size imgdata.rawdata.raw_alloc = calloc(S.iwidth*S.iheight,sizeof(*imgdata.image)); imgdata.image = (ushort (*)[4]) imgdata.rawdata.raw_alloc; } ID.input->seek(libraw_internal_data.unpacker_data.data_offset, SEEK_SET); unsigned m_save = C.maximum; if(load_raw == &LibRaw::unpacked_load_raw && !strcasecmp(imgdata.idata.make,"Nikon")) C.maximum=65535; (this->*load_raw)(); if(load_raw == &LibRaw::unpacked_load_raw && !strcasecmp(imgdata.idata.make,"Nikon")) C.maximum = m_save; } if(imgdata.rawdata.raw_image) crop_masked_pixels(); // calculate black levels // recover saved if( (decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY) && !imgdata.rawdata.color4_image) { imgdata.image = 0; imgdata.rawdata.color4_image = (ushort (*)[4]) imgdata.rawdata.raw_alloc; } // recover image sizes S.iwidth = save_iwidth; S.iheight = save_iheight; IO.shrink = save_shrink; // adjust black to possible maximum unsigned int i = C.cblack[3]; unsigned int c; for(c=0;c<3;c++) if (i > C.cblack[c]) i = C.cblack[c]; for (c=0;c<4;c++) C.cblack[c] -= i; C.black += i; // Save color,sizes and internal data into raw_image fields memmove(&imgdata.rawdata.color,&imgdata.color,sizeof(imgdata.color)); memmove(&imgdata.rawdata.sizes,&imgdata.sizes,sizeof(imgdata.sizes)); memmove(&imgdata.rawdata.iparams,&imgdata.idata,sizeof(imgdata.idata)); memmove(&imgdata.rawdata.ioparams,&libraw_internal_data.internal_output_params,sizeof(libraw_internal_data.internal_output_params)); SET_PROC_FLAG(LIBRAW_PROGRESS_LOAD_RAW); RUN_CALLBACK(LIBRAW_PROGRESS_LOAD_RAW,1,2); return 0; } catch ( LibRaw_exceptions err) { EXCEPTION_HANDLER(err); } catch (std::exception ee) { EXCEPTION_HANDLER(LIBRAW_EXCEPTION_IO_CORRUPT); } } void LibRaw::free_image(void) { if(imgdata.image) { free(imgdata.image); imgdata.image = 0; imgdata.progress_flags = LIBRAW_PROGRESS_START|LIBRAW_PROGRESS_OPEN 
|LIBRAW_PROGRESS_IDENTIFY|LIBRAW_PROGRESS_SIZE_ADJUST|LIBRAW_PROGRESS_LOAD_RAW; } } void LibRaw::raw2image_start() { // restore color,sizes and internal data into raw_image fields memmove(&imgdata.color,&imgdata.rawdata.color,sizeof(imgdata.color)); memmove(&imgdata.sizes,&imgdata.rawdata.sizes,sizeof(imgdata.sizes)); memmove(&imgdata.idata,&imgdata.rawdata.iparams,sizeof(imgdata.idata)); memmove(&libraw_internal_data.internal_output_params,&imgdata.rawdata.ioparams,sizeof(libraw_internal_data.internal_output_params)); if (O.user_flip >= 0) S.flip = O.user_flip; switch ((S.flip+3600) % 360) { case 270: S.flip = 5; break; case 180: S.flip = 3; break; case 90: S.flip = 6; break; } // adjust for half mode! IO.shrink = P1.filters && (O.half_size || ((O.threshold || O.aber[0] != 1 || O.aber[2] != 1) )); S.iheight = (S.height + IO.shrink) >> IO.shrink; S.iwidth = (S.width + IO.shrink) >> IO.shrink; } int LibRaw::is_phaseone_compressed() { return (load_raw == &LibRaw::phase_one_load_raw_c && imgdata.rawdata.ph1_black); } int LibRaw::raw2image(void) { CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW); try { raw2image_start(); if (is_phaseone_compressed()) { phase_one_allocate_tempbuffer(); phase_one_subtract_black((ushort*)imgdata.rawdata.raw_alloc,imgdata.rawdata.raw_image); phase_one_correct(); } // free and re-allocate image bitmap if(imgdata.image) { imgdata.image = (ushort (*)[4]) realloc (imgdata.image,S.iheight*S.iwidth *sizeof (*imgdata.image)); memset(imgdata.image,0,S.iheight*S.iwidth *sizeof (*imgdata.image)); } else imgdata.image = (ushort (*)[4]) calloc (S.iheight*S.iwidth, sizeof (*imgdata.image)); merror (imgdata.image, "raw2image()"); libraw_decoder_info_t decoder_info; get_decoder_info(&decoder_info); // Move saved bitmap to imgdata.image if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD) { if (IO.fuji_width) { unsigned r,c; int row,col; for (row=0; row < S.raw_height-S.top_margin*2; row++) { for (col=0; col < IO.fuji_width << 
!libraw_internal_data.unpacker_data.fuji_layout; col++) { if (libraw_internal_data.unpacker_data.fuji_layout) { r = IO.fuji_width - 1 - col + (row >> 1); c = col + ((row+1) >> 1); } else { r = IO.fuji_width - 1 + row - (col >> 1); c = row + ((col+1) >> 1); } if (r < S.height && c < S.width) imgdata.image[((r)>>IO.shrink)*S.iwidth+((c)>>IO.shrink)][FC(r,c)] = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)]; } } } else { int row,col; for (row=0; row < S.height; row++) for (col=0; col < S.width; col++) imgdata.image[((row) >> IO.shrink)*S.iwidth + ((col) >> IO.shrink)][fcol(row,col)] = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)]; } } else if(decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY) { if(imgdata.rawdata.color4_image) { if(S.width*8 == S.raw_pitch) memmove(imgdata.image,imgdata.rawdata.color4_image,S.width*S.height*sizeof(*imgdata.image)); else { for(int row = 0; row < S.height; row++) memmove(&imgdata.image[row*S.width], &imgdata.rawdata.color4_image[(row+S.top_margin)*S.raw_pitch/8+S.left_margin], S.width*sizeof(*imgdata.image)); } } else if(imgdata.rawdata.color3_image) { unsigned char *c3image = (unsigned char*) imgdata.rawdata.color3_image; for(int row = 0; row < S.height; row++) { ushort (*srcrow)[3] = (ushort (*)[3]) &c3image[(row+S.top_margin)*S.raw_pitch]; ushort (*dstrow)[4] = (ushort (*)[4]) &imgdata.image[row*S.width]; for(int col=0; col < S.width; col++) { for(int c=0; c< 3; c++) dstrow[col][c] = srcrow[S.left_margin+col][c]; dstrow[col][3]=0; } } } else { // legacy decoder, but no data? throw LIBRAW_EXCEPTION_DECODE_RAW; } } // Free PhaseOne separate copy allocated at function start if (is_phaseone_compressed()) { phase_one_free_tempbuffer(); } // hack - clear later flags! 
    // Canon 600 needs a post-pass when the visible area is narrower than raw.
    if (load_raw == &CLASS canon_600_load_raw && S.width < S.raw_width)
      {
        canon_600_correct();
      }

    // Mark the pipeline stages completed by raw2image().
    imgdata.progress_flags = LIBRAW_PROGRESS_START|LIBRAW_PROGRESS_OPEN
      | LIBRAW_PROGRESS_RAW2_IMAGE
      |LIBRAW_PROGRESS_IDENTIFY|LIBRAW_PROGRESS_SIZE_ADJUST|LIBRAW_PROGRESS_LOAD_RAW;
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

// Allocate a scratch raw_image buffer sized to the full raw frame; used as
// the destination for PhaseOne black-subtraction before correction.
void LibRaw::phase_one_allocate_tempbuffer()
{
  // Allocate temp raw_image buffer
  imgdata.rawdata.raw_image = (ushort*)malloc(S.raw_pitch*S.raw_height);
  merror (imgdata.rawdata.raw_image, "phase_one_prepare_to_correct()");
}

// Release the scratch buffer and point raw_image back at the original
// decoded allocation.
void LibRaw::phase_one_free_tempbuffer()
{
  free(imgdata.rawdata.raw_image);
  imgdata.rawdata.raw_image = (ushort*) imgdata.rawdata.raw_alloc;
}

// Subtract per-row PhaseOne black levels from src into dest (both sized
// S.raw_width x S.raw_height), clamping at zero. Each row has two black
// values split at phase_one_data.split_col; if the caller supplied explicit
// black levels (user_black/user_cblack), a 16-entry CFA-pattern table from
// C.cblack is used instead.
void LibRaw::phase_one_subtract_black(ushort *src, ushort *dest)
{
  // ushort *src = (ushort*)imgdata.rawdata.raw_alloc;

  // No user override (all sentinels untouched): use the per-row black data.
  if(O.user_black<0 && O.user_cblack[0] <= -1000000 && O.user_cblack[1] <= -1000000 && O.user_cblack[2] <= -1000000 && O.user_cblack[3] <= -1000000)
    {
      for(int row = 0; row < S.raw_height; row++)
        {
          // Left portion of the row (before split_col) uses black value [0].
          ushort bl = imgdata.color.phase_one_data.t_black - imgdata.rawdata.ph1_black[row][0];
          for(int col=0; col < imgdata.color.phase_one_data.split_col && col < S.raw_width; col++)
            {
              int idx = row*S.raw_width + col;
              ushort val = src[idx];
              dest[idx] = val>bl?val-bl:0;   // clamp at zero, no wraparound
            }
          // Right portion (from split_col on) uses black value [1].
          bl = imgdata.color.phase_one_data.t_black - imgdata.rawdata.ph1_black[row][1];
          for(int col=imgdata.color.phase_one_data.split_col; col < S.raw_width; col++)
            {
              int idx = row*S.raw_width + col;
              ushort val = src[idx];
              dest[idx] = val>bl?val-bl:0;
            }
        }
    }
  else // black set by user interaction
    {
      // Black level in cblack!
for(int row = 0; row < S.raw_height; row++) { unsigned short cblk[16]; for(int cc=0; cc<16;cc++) cblk[cc]=C.cblack[fcol(row,cc)]; for(int col = 0; col < S.raw_width; col++) { int idx = row*S.raw_width + col; ushort val = src[idx]; ushort bl = cblk[col&0xf]; dest[idx] = val>bl?val-bl:0; } } } } void LibRaw::copy_fuji_uncropped(unsigned short cblack[4],unsigned short *dmaxp) { int row; #if defined(LIBRAW_USE_OPENMP) #pragma omp parallel for default(shared) #endif for (row=0; row < S.raw_height-S.top_margin*2; row++) { int col; unsigned short ldmax = 0; for (col=0; col < IO.fuji_width << !libraw_internal_data.unpacker_data.fuji_layout; col++) { unsigned r,c; if (libraw_internal_data.unpacker_data.fuji_layout) { r = IO.fuji_width - 1 - col + (row >> 1); c = col + ((row+1) >> 1); } else { r = IO.fuji_width - 1 + row - (col >> 1); c = row + ((col+1) >> 1); } if (r < S.height && c < S.width) { unsigned short val = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)]; int cc = FC(r,c); if(val>cblack[cc]) { val-=cblack[cc]; if(val>ldmax)ldmax = val; } else val = 0; imgdata.image[((r)>>IO.shrink)*S.iwidth+((c)>>IO.shrink)][cc] = val; } } #if defined(LIBRAW_USE_OPENMP) #pragma omp critical(dataupdate) #endif { if(*dmaxp < ldmax) *dmaxp = ldmax; } } } void LibRaw::copy_bayer(unsigned short cblack[4],unsigned short *dmaxp) { // Both cropped and uncropped int row; #if defined(LIBRAW_USE_OPENMP) #pragma omp parallel for default(shared) #endif for (row=0; row < S.height; row++) { int col; unsigned short ldmax = 0; for (col=0; col < S.width; col++) { unsigned short val = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)]; int cc = fcol(row,col); if(val>cblack[cc]) { val-=cblack[cc]; if(val>ldmax)ldmax = val; } else val = 0; imgdata.image[((row) >> IO.shrink)*S.iwidth + ((col) >> IO.shrink)][cc] = val; } #if defined(LIBRAW_USE_OPENMP) #pragma omp critical(dataupdate) #endif { if(*dmaxp < ldmax) *dmaxp = ldmax; } } } int 
LibRaw::raw2image_ex(int do_subtract_black) { CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW); try { raw2image_start(); // Compressed P1 files with bl data! if (is_phaseone_compressed()) { phase_one_allocate_tempbuffer(); phase_one_subtract_black((ushort*)imgdata.rawdata.raw_alloc,imgdata.rawdata.raw_image); phase_one_correct(); } // process cropping int do_crop = 0; unsigned save_width = S.width; if (~O.cropbox[2] && ~O.cropbox[3] && load_raw != &LibRaw::foveon_sd_load_raw) // Foveon SD to be cropped later { int crop[4],c,filt; for(int c=0;c<4;c++) { crop[c] = O.cropbox[c]; if(crop[c]<0) crop[c]=0; } if(IO.fuji_width && imgdata.idata.filters >= 1000) { crop[0] = (crop[0]/4)*4; crop[1] = (crop[1]/4)*4; if(!libraw_internal_data.unpacker_data.fuji_layout) { crop[2]*=sqrt(2.0); crop[3]/=sqrt(2.0); } crop[2] = (crop[2]/4+1)*4; crop[3] = (crop[3]/4+1)*4; } else if (imgdata.idata.filters == 1) { crop[0] = (crop[0]/16)*16; crop[1] = (crop[1]/16)*16; } else if(imgdata.idata.filters == 2) { crop[0] = (crop[0]/6)*6; crop[1] = (crop[1]/6)*6; } do_crop = 1; crop[2] = MIN (crop[2], (signed) S.width-crop[0]); crop[3] = MIN (crop[3], (signed) S.height-crop[1]); if (crop[2] <= 0 || crop[3] <= 0) throw LIBRAW_EXCEPTION_BAD_CROP; // adjust sizes! 
S.left_margin+=crop[0]; S.top_margin+=crop[1]; S.width=crop[2]; S.height=crop[3]; S.iheight = (S.height + IO.shrink) >> IO.shrink; S.iwidth = (S.width + IO.shrink) >> IO.shrink; if(!IO.fuji_width && imgdata.idata.filters && imgdata.idata.filters >= 1000) { for (filt=c=0; c < 16; c++) filt |= FC((c >> 1)+(crop[1]), (c & 1)+(crop[0])) << c*2; imgdata.idata.filters = filt; } } int alloc_width = S.iwidth; int alloc_height = S.iheight; if(IO.fuji_width && do_crop) { int IO_fw = S.width >> !libraw_internal_data.unpacker_data.fuji_layout; int t_alloc_width = (S.height >> libraw_internal_data.unpacker_data.fuji_layout) + IO_fw; int t_alloc_height = t_alloc_width - 1; alloc_height = (t_alloc_height + IO.shrink) >> IO.shrink; alloc_width = (t_alloc_width + IO.shrink) >> IO.shrink; } int alloc_sz = alloc_width*alloc_height; if(imgdata.image) { imgdata.image = (ushort (*)[4]) realloc (imgdata.image,alloc_sz *sizeof (*imgdata.image)); memset(imgdata.image,0,alloc_sz *sizeof (*imgdata.image)); } else imgdata.image = (ushort (*)[4]) calloc (alloc_sz, sizeof (*imgdata.image)); merror (imgdata.image, "raw2image_ex()"); libraw_decoder_info_t decoder_info; get_decoder_info(&decoder_info); // Adjust black levels unsigned short cblack[4]={0,0,0,0}; unsigned short dmax = 0; if(do_subtract_black) { adjust_bl(); for(int i=0; i< 4; i++) cblack[i] = (unsigned short)C.cblack[i]; } // Move saved bitmap to imgdata.image if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD) { if (IO.fuji_width) { if(do_crop) { IO.fuji_width = S.width >> !libraw_internal_data.unpacker_data.fuji_layout; int IO_fwidth = (S.height >> libraw_internal_data.unpacker_data.fuji_layout) + IO.fuji_width; int IO_fheight = IO_fwidth - 1; int row,col; for(row=0;row<S.height;row++) { for(col=0;col<S.width;col++) { int r,c; if (libraw_internal_data.unpacker_data.fuji_layout) { r = IO.fuji_width - 1 - col + (row >> 1); c = col + ((row+1) >> 1); } else { r = IO.fuji_width - 1 + row - (col >> 1); c = row + ((col+1) >> 1); } 
unsigned short val = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2 +(col+S.left_margin)]; int cc = FCF(row,col); if(val > cblack[cc]) { val-=cblack[cc]; if(dmax < val) dmax = val; } else val = 0; imgdata.image[((r) >> IO.shrink)*alloc_width + ((c) >> IO.shrink)][cc] = val; } } S.height = IO_fheight; S.width = IO_fwidth; S.iheight = (S.height + IO.shrink) >> IO.shrink; S.iwidth = (S.width + IO.shrink) >> IO.shrink; S.raw_height -= 2*S.top_margin; } else { copy_fuji_uncropped(cblack,&dmax); } } // end Fuji else { copy_bayer(cblack,&dmax); } } else if(decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY) { if(imgdata.rawdata.color4_image) { if(S.raw_pitch != S.width*8) { for(int row = 0; row < S.height; row++) memmove(&imgdata.image[row*S.width], &imgdata.rawdata.color4_image[(row+S.top_margin)*S.raw_pitch/8+S.left_margin], S.width*sizeof(*imgdata.image)); } else { // legacy is always 4channel and not shrinked! memmove(imgdata.image,imgdata.rawdata.color4_image,S.width*S.height*sizeof(*imgdata.image)); } } else if(imgdata.rawdata.color3_image) { unsigned char *c3image = (unsigned char*) imgdata.rawdata.color3_image; for(int row = 0; row < S.height; row++) { ushort (*srcrow)[3] = (ushort (*)[3]) &c3image[(row+S.top_margin)*S.raw_pitch]; ushort (*dstrow)[4] = (ushort (*)[4]) &imgdata.image[row*S.width]; for(int col=0; col < S.width; col++) { for(int c=0; c< 3; c++) dstrow[col][c] = srcrow[S.left_margin+col][c]; dstrow[col][3]=0; } } } else { // legacy decoder, but no data? throw LIBRAW_EXCEPTION_DECODE_RAW; } } // Free PhaseOne separate copy allocated at function start if (is_phaseone_compressed()) { phase_one_free_tempbuffer(); } if (load_raw == &CLASS canon_600_load_raw && S.width < S.raw_width) { canon_600_correct(); } if(do_subtract_black) { C.data_maximum = (int)dmax; C.maximum -= C.black; ZERO(C.cblack); C.black = 0; } // hack - clear later flags! 
imgdata.progress_flags = LIBRAW_PROGRESS_START|LIBRAW_PROGRESS_OPEN | LIBRAW_PROGRESS_RAW2_IMAGE |LIBRAW_PROGRESS_IDENTIFY|LIBRAW_PROGRESS_SIZE_ADJUST|LIBRAW_PROGRESS_LOAD_RAW; return 0; } catch ( LibRaw_exceptions err) { EXCEPTION_HANDLER(err); } } #if 1 libraw_processed_image_t * LibRaw::dcraw_make_mem_thumb(int *errcode) { if(!T.thumb) { if ( !ID.toffset) { if(errcode) *errcode= LIBRAW_NO_THUMBNAIL; } else { if(errcode) *errcode= LIBRAW_OUT_OF_ORDER_CALL; } return NULL; } if (T.tformat == LIBRAW_THUMBNAIL_BITMAP) { libraw_processed_image_t * ret = (libraw_processed_image_t *)::malloc(sizeof(libraw_processed_image_t)+T.tlength); if(!ret) { if(errcode) *errcode= ENOMEM; return NULL; } memset(ret,0,sizeof(libraw_processed_image_t)); ret->type = LIBRAW_IMAGE_BITMAP; ret->height = T.theight; ret->width = T.twidth; ret->colors = 3; ret->bits = 8; ret->data_size = T.tlength; memmove(ret->data,T.thumb,T.tlength); if(errcode) *errcode= 0; return ret; } else if (T.tformat == LIBRAW_THUMBNAIL_JPEG) { ushort exif[5]; int mk_exif = 0; if(strcmp(T.thumb+6,"Exif")) mk_exif = 1; int dsize = T.tlength + mk_exif * (sizeof(exif)+sizeof(tiff_hdr)); libraw_processed_image_t * ret = (libraw_processed_image_t *)::malloc(sizeof(libraw_processed_image_t)+dsize); if(!ret) { if(errcode) *errcode= ENOMEM; return NULL; } memset(ret,0,sizeof(libraw_processed_image_t)); ret->type = LIBRAW_IMAGE_JPEG; ret->data_size = dsize; ret->data[0] = 0xff; ret->data[1] = 0xd8; if(mk_exif) { struct tiff_hdr th; memcpy (exif, "\xff\xe1 Exif\0\0", 10); exif[1] = htons (8 + sizeof th); memmove(ret->data+2,exif,sizeof(exif)); tiff_head (&th, 0); memmove(ret->data+(2+sizeof(exif)),&th,sizeof(th)); memmove(ret->data+(2+sizeof(exif)+sizeof(th)),T.thumb+2,T.tlength-2); } else { memmove(ret->data+2,T.thumb+2,T.tlength-2); } if(errcode) *errcode= 0; return ret; } else { if(errcode) *errcode= LIBRAW_UNSUPPORTED_THUMBNAIL; return NULL; } } // jlb // macros for copying pixels to either BGR or RGB formats #define 
FORBGR for(c=P1.colors-1; c >=0 ; c--) #define FORRGB for(c=0; c < P1.colors ; c++) void LibRaw::get_mem_image_format(int* width, int* height, int* colors, int* bps) const { if (S.flip & 4) { *width = S.height; *height = S.width; } else { *width = S.width; *height = S.height; } *colors = P1.colors; *bps = O.output_bps; } int LibRaw::copy_mem_image(void* scan0, int stride, int bgr) { // the image memory pointed to by scan0 is assumed to be in the format returned by get_mem_image_format if((imgdata.progress_flags & LIBRAW_PROGRESS_THUMB_MASK) < LIBRAW_PROGRESS_PRE_INTERPOLATE) return LIBRAW_OUT_OF_ORDER_CALL; if(libraw_internal_data.output_data.histogram) { int perc, val, total, t_white=0x2000,c; perc = S.width * S.height * 0.01; /* 99th percentile white level */ if (IO.fuji_width) perc /= 2; if (!((O.highlight & ~2) || O.no_auto_bright)) for (t_white=c=0; c < P1.colors; c++) { for (val=0x2000, total=0; --val > 32; ) if ((total += libraw_internal_data.output_data.histogram[c][val]) > perc) break; if (t_white < val) t_white = val; } gamma_curve (O.gamm[0], O.gamm[1], 2, (t_white << 3)/O.bright); } int s_iheight = S.iheight; int s_iwidth = S.iwidth; int s_width = S.width; int s_hwight = S.height; S.iheight = S.height; S.iwidth = S.width; if (S.flip & 4) SWAP(S.height,S.width); uchar *ppm; ushort *ppm2; int c, row, col, soff, rstep, cstep; soff = flip_index (0, 0); cstep = flip_index (0, 1) - soff; rstep = flip_index (1, 0) - flip_index (0, S.width); for (row=0; row < S.height; row++, soff += rstep) { uchar *bufp = ((uchar*)scan0)+row*stride; ppm2 = (ushort*) (ppm = bufp); // keep trivial decisions in the outer loop for speed if (bgr) { if (O.output_bps == 8) { for (col=0; col < S.width; col++, soff += cstep) FORBGR *ppm++ = imgdata.color.curve[imgdata.image[soff][c]]>>8; } else { for (col=0; col < S.width; col++, soff += cstep) FORBGR *ppm2++ = imgdata.color.curve[imgdata.image[soff][c]]; } } else { if (O.output_bps == 8) { for (col=0; col < S.width; col++, soff += 
cstep) FORRGB *ppm++ = imgdata.color.curve[imgdata.image[soff][c]]>>8; } else { for (col=0; col < S.width; col++, soff += cstep) FORRGB *ppm2++ = imgdata.color.curve[imgdata.image[soff][c]]; } } // bufp += stride; // go to the next line } S.iheight = s_iheight; S.iwidth = s_iwidth; S.width = s_width; S.height = s_hwight; return 0; } #undef FORBGR #undef FORRGB libraw_processed_image_t *LibRaw::dcraw_make_mem_image(int *errcode) { int width, height, colors, bps; get_mem_image_format(&width, &height, &colors, &bps); int stride = width * (bps/8) * colors; unsigned ds = height * stride; libraw_processed_image_t *ret = (libraw_processed_image_t*)::malloc(sizeof(libraw_processed_image_t)+ds); if(!ret) { if(errcode) *errcode= ENOMEM; return NULL; } memset(ret,0,sizeof(libraw_processed_image_t)); // metadata init ret->type = LIBRAW_IMAGE_BITMAP; ret->height = height; ret->width = width; ret->colors = colors; ret->bits = bps; ret->data_size = ds; copy_mem_image(ret->data, stride, 0); return ret; } #undef FORC #undef FORCC #undef SWAP #endif int LibRaw::dcraw_ppm_tiff_writer(const char *filename) { CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW); if(!imgdata.image) return LIBRAW_OUT_OF_ORDER_CALL; if(!filename) return ENOENT; FILE *f = fopen(filename,"wb"); if(!f) return errno; try { if(!libraw_internal_data.output_data.histogram) { libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4); merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_ppm_tiff_writer()"); } libraw_internal_data.internal_data.output = f; write_ppm_tiff(); SET_PROC_FLAG(LIBRAW_PROGRESS_FLIP); libraw_internal_data.internal_data.output = NULL; fclose(f); return 0; } catch ( LibRaw_exceptions err) { fclose(f); EXCEPTION_HANDLER(err); } } void LibRaw::kodak_thumb_loader() { // some kodak cameras ushort s_height = S.height, s_width = S.width,s_iwidth = S.iwidth,s_iheight=S.iheight; int s_colors = P1.colors; unsigned 
s_filters = P1.filters; ushort (*s_image)[4] = imgdata.image; S.height = T.theight; S.width = T.twidth; P1.filters = 0; if (thumb_load_raw == &CLASS kodak_ycbcr_load_raw) { S.height += S.height & 1; S.width += S.width & 1; } imgdata.image = (ushort (*)[4]) calloc (S.iheight*S.iwidth, sizeof (*imgdata.image)); merror (imgdata.image, "LibRaw::kodak_thumb_loader()"); ID.input->seek(ID.toffset, SEEK_SET); // read kodak thumbnail into T.image[] (this->*thumb_load_raw)(); // copy-n-paste from image pipe #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define LIM(x,min,max) MAX(min,MIN(x,max)) #define CLIP(x) LIM(x,0,65535) #define SWAP(a,b) { a ^= b; a ^= (b ^= a); } // from scale_colors { double dmax; float scale_mul[4]; int c,val; for (dmax=DBL_MAX, c=0; c < 3; c++) if (dmax > C.pre_mul[c]) dmax = C.pre_mul[c]; for( c=0; c< 3; c++) scale_mul[c] = (C.pre_mul[c] / dmax) * 65535.0 / C.maximum; scale_mul[3] = scale_mul[1]; size_t size = S.height * S.width; for (unsigned i=0; i < size*4 ; i++) { val = imgdata.image[0][i]; if(!val) continue; val *= scale_mul[i & 3]; imgdata.image[0][i] = CLIP(val); } } // from convert_to_rgb ushort *img; int row,col; int (*t_hist)[LIBRAW_HISTOGRAM_SIZE] = (int (*)[LIBRAW_HISTOGRAM_SIZE]) calloc(sizeof(*t_hist),4); merror (t_hist, "LibRaw::kodak_thumb_loader()"); float out[3], out_cam[3][4] = { {2.81761312, -1.98369181, 0.166078627, 0}, {-0.111855984, 1.73688626, -0.625030339, 0}, {-0.0379119813, -0.891268849, 1.92918086, 0} }; for (img=imgdata.image[0], row=0; row < S.height; row++) for (col=0; col < S.width; col++, img+=4) { out[0] = out[1] = out[2] = 0; int c; for(c=0;c<3;c++) { out[0] += out_cam[0][c] * img[c]; out[1] += out_cam[1][c] * img[c]; out[2] += out_cam[2][c] * img[c]; } for(c=0; c<3; c++) img[c] = CLIP((int) out[c]); for(c=0; c<P1.colors;c++) t_hist[c][img[c] >> 3]++; } // from gamma_lut int (*save_hist)[LIBRAW_HISTOGRAM_SIZE] = libraw_internal_data.output_data.histogram; 
libraw_internal_data.output_data.histogram = t_hist; // make curve output curve! ushort (*t_curve) = (ushort*) calloc(sizeof(C.curve),1); merror (t_curve, "LibRaw::kodak_thumb_loader()"); memmove(t_curve,C.curve,sizeof(C.curve)); memset(C.curve,0,sizeof(C.curve)); { int perc, val, total, t_white=0x2000,c; perc = S.width * S.height * 0.01; /* 99th percentile white level */ if (IO.fuji_width) perc /= 2; if (!((O.highlight & ~2) || O.no_auto_bright)) for (t_white=c=0; c < P1.colors; c++) { for (val=0x2000, total=0; --val > 32; ) if ((total += libraw_internal_data.output_data.histogram[c][val]) > perc) break; if (t_white < val) t_white = val; } gamma_curve (O.gamm[0], O.gamm[1], 2, (t_white << 3)/O.bright); } libraw_internal_data.output_data.histogram = save_hist; free(t_hist); // from write_ppm_tiff - copy pixels into bitmap S.iheight = S.height; S.iwidth = S.width; if (S.flip & 4) SWAP(S.height,S.width); if(T.thumb) free(T.thumb); T.thumb = (char*) calloc (S.width * S.height, P1.colors); merror (T.thumb, "LibRaw::kodak_thumb_loader()"); T.tlength = S.width * S.height * P1.colors; // from write_tiff_ppm { int soff = flip_index (0, 0); int cstep = flip_index (0, 1) - soff; int rstep = flip_index (1, 0) - flip_index (0, S.width); for (int row=0; row < S.height; row++, soff += rstep) { char *ppm = T.thumb + row*S.width*P1.colors; for (int col=0; col < S.width; col++, soff += cstep) for(int c = 0; c < P1.colors; c++) ppm [col*P1.colors+c] = imgdata.color.curve[imgdata.image[soff][c]]>>8; } } memmove(C.curve,t_curve,sizeof(C.curve)); free(t_curve); // restore variables free(imgdata.image); imgdata.image = s_image; T.twidth = S.width; S.width = s_width; S.iwidth = s_iwidth; S.iheight = s_iheight; T.theight = S.height; S.height = s_height; T.tcolors = P1.colors; P1.colors = s_colors; P1.filters = s_filters; } #undef MIN #undef MAX #undef LIM #undef CLIP #undef SWAP // thumbnail , thumb_format int LibRaw::unpack_thumb(void) { CHECK_ORDER_LOW(LIBRAW_PROGRESS_IDENTIFY); 
CHECK_ORDER_BIT(LIBRAW_PROGRESS_THUMB_LOAD); try { if(!libraw_internal_data.internal_data.input) return LIBRAW_INPUT_CLOSED; if ( !ID.toffset) { return LIBRAW_NO_THUMBNAIL; } else if (thumb_load_raw) { kodak_thumb_loader(); T.tformat = LIBRAW_THUMBNAIL_BITMAP; SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD); return 0; } else { ID.input->seek(ID.toffset, SEEK_SET); if ( write_thumb == &LibRaw::jpeg_thumb) { if(T.thumb) free(T.thumb); T.thumb = (char *) malloc (T.tlength); merror (T.thumb, "jpeg_thumb()"); ID.input->read (T.thumb, 1, T.tlength); T.tcolors = 3; T.tformat = LIBRAW_THUMBNAIL_JPEG; SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD); return 0; } else if (write_thumb == &LibRaw::ppm_thumb) { T.tlength = T.twidth * T.theight*3; if(T.thumb) free(T.thumb); T.thumb = (char *) malloc (T.tlength); merror (T.thumb, "ppm_thumb()"); ID.input->read(T.thumb, 1, T.tlength); T.tformat = LIBRAW_THUMBNAIL_BITMAP; SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD); return 0; } else if (write_thumb == &LibRaw::ppm16_thumb) { T.tlength = T.twidth * T.theight*3; ushort *t_thumb = (ushort*)calloc(T.tlength,2); ID.input->read(t_thumb,2,T.tlength); if ((libraw_internal_data.unpacker_data.order= 0x4949) == (ntohs(0x1234) == 0x1234)) swab ((char*)t_thumb, (char*)t_thumb, T.tlength*2); if(T.thumb) free(T.thumb); T.thumb = (char *) malloc (T.tlength); merror (T.thumb, "ppm_thumb()"); for (int i=0; i < T.tlength; i++) T.thumb[i] = t_thumb[i] >> 8; free(t_thumb); T.tformat = LIBRAW_THUMBNAIL_BITMAP; SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD); return 0; } else if (write_thumb == &LibRaw::foveon_thumb) { foveon_thumb_loader(); // may return with error, so format is set in // foveon thumb loader itself SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD); return 0; } // else if -- all other write_thumb cases! 
        else
          {
            // Any other write_thumb handler: format not supported by unpack_thumb
            return LIBRAW_UNSUPPORTED_THUMBNAIL;
          }
      }
    // last resort
    return LIBRAW_UNSUPPORTED_THUMBNAIL;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

// Writes the previously unpacked thumbnail to 'fname': JPEG is copied
// verbatim, bitmap is written as binary PPM (P6). Returns 0 on success,
// errno or a LibRaw error code otherwise.
int LibRaw::dcraw_thumb_writer(const char *fname)
{
  //    CHECK_ORDER_LOW(LIBRAW_PROGRESS_THUMB_LOAD);

  if(!fname)
    return ENOENT;

  FILE *tfp = fopen(fname,"wb");

  if(!tfp)
    return errno;

  if(!T.thumb)
    {
      // unpack_thumb() was not called (or failed)
      fclose(tfp);
      return LIBRAW_OUT_OF_ORDER_CALL;
    }

  try {
    switch (T.tformat)
      {
      case LIBRAW_THUMBNAIL_JPEG:
        jpeg_thumb_writer (tfp,T.thumb,T.tlength);
        break;
      case LIBRAW_THUMBNAIL_BITMAP:
        // 8-bit/sample PPM header; assumes 3-color RGB thumbnail data
        fprintf (tfp, "P6\n%d %d\n255\n", T.twidth, T.theight);
        fwrite (T.thumb, 1, T.tlength, tfp);
        break;
      default:
        fclose(tfp);
        return LIBRAW_UNSUPPORTED_THUMBNAIL;
      }
    fclose(tfp);
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    fclose(tfp);
    EXCEPTION_HANDLER(err);
  }
}

// Recomputes output dimensions (Fuji 45-degree rotation, pixel aspect
// correction, 90-degree flip) without touching pixel data.
int LibRaw::adjust_sizes_info_only(void)
{
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_IDENTIFY);

  raw2image_start();
  if (O.use_fuji_rotate)
    {
      if (IO.fuji_width)
        {
          // Diagonal Fuji sensor: rotated dimensions scale by 1/sqrt(0.5)
          IO.fuji_width = (IO.fuji_width - 1 + IO.shrink) >> IO.shrink;
          S.iwidth = (ushort)(IO.fuji_width / sqrt(0.5));
          S.iheight = (ushort)( (S.iheight - IO.fuji_width) / sqrt(0.5));
        }
      else
        {
          // Non-square pixels: stretch the shorter axis
          if (S.pixel_aspect < 1) S.iheight = (ushort)( S.iheight / S.pixel_aspect + 0.5);
          if (S.pixel_aspect > 1) S.iwidth = (ushort) (S.iwidth * S.pixel_aspect + 0.5);
        }
    }
  SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE);
  if ( S.flip & 4)
    {
      // Orientation requires a 90-degree rotation: swap width/height
      unsigned short t = S.iheight;
      S.iheight=S.iwidth;
      S.iwidth = t;
      SET_PROC_FLAG(LIBRAW_PROGRESS_FLIP);
    }
  return 0;
}

// Subtracts per-channel black levels (C.cblack) from imgdata.image in place,
// clamping to [0,65535], and tracks the post-subtraction data maximum.
// After subtraction the black levels are folded into C.maximum and zeroed.
int LibRaw::subtract_black()
{
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_RAW2_IMAGE);

  try {
    if(!is_phaseone_compressed() && (C.cblack[0] || C.cblack[1] || C.cblack[2] || C.cblack[3]))
      {
#define BAYERC(row,col,c) imgdata.image[((row) >> IO.shrink)*S.iwidth + ((col) >> IO.shrink)][c]
        int cblk[4],i;
        for(i=0;i<4;i++)
          cblk[i] = C.cblack[i];
        int size = S.iheight * S.iwidth;
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define CLIP(x) LIM(x,0,65535)
        // Treat the image as a flat array of 4-component pixels; i & 3
        // selects the channel's black level.
        for(i=0; i< size*4; i++)
          {
            int val = imgdata.image[0][i];
            val -= cblk[i & 3];
            imgdata.image[0][i] = CLIP(val);
            if(C.data_maximum < val) C.data_maximum = val;
          }
#undef MIN
#undef MAX
#undef LIM
#undef CLIP
        C.maximum -= C.black;
        ZERO(C.cblack);
        C.black = 0;
#undef BAYERC
      }
    else
      {
        // Nothing to Do, maximum is already calculated, black level is 0, so no change
        // only calculate channel maximum;
        int idx;
        ushort *p = (ushort*)imgdata.image;
        C.data_maximum = 0;
        for(idx=0;idx<S.iheight*S.iwidth*4;idx++)
          if(C.data_maximum < p[idx]) C.data_maximum = p[idx];
      }
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

#define TBLN 65535

// Pre-demosaic exposure correction: builds a 16-bit LUT from 'shift'
// (linear gain, clamped to 0.25..8) with an optional highlight rolloff
// controlled by 'smooth' (0..1), then applies it to all four channels.
void LibRaw::exp_bef(float shift, float smooth)
{
  // params limits
  if(shift>8) shift = 8;
  if(shift<0.25) shift = 0.25;
  if(smooth < 0.0) smooth = 0.0;
  if(smooth > 1.0) smooth = 1.0;

  unsigned short *lut = (ushort*)malloc((TBLN+1)*sizeof(unsigned short));

  if(shift <=1.0)
    {
      // Gain <= 1: plain linear scaling, no rolloff needed
      for(int i=0;i<=TBLN;i++)
        lut[i] = (unsigned short)((float)i*shift);
    }
  else
    {
      // Gain > 1: linear up to x1, cube-root shoulder (A*x^(1/3)+B*x+CC)
      // above it so highlights compress instead of clipping hard.
      float x1,x2,y1,y2;

      float cstops = log(shift)/log(2.0f);
      float room = cstops*2;
      float roomlin = powf(2.0f,room);
      x2 = (float)TBLN;
      x1 = (x2+1)/roomlin-1;
      y1 = x1*shift;
      y2 = x2*(1+(1-smooth)*(shift-1));
      float sq3x=powf(x1*x1*x2,1.0f/3.0f);
      float B = (y2-y1+shift*(3*x1-3.0f*sq3x)) / (x2+2.0f*x1-3.0f*sq3x);
      float A = (shift - B)*3.0f*powf(x1*x1,1.0f/3.0f);
      float CC = y2 - A*powf(x2,1.0f/3.0f)-B*x2;

      for(int i=0;i<=TBLN;i++)
        {
          float X = (float)i;
          float Y = A*powf(X,1.0f/3.0f)+B*X+CC;
          if(i<x1)
            lut[i] = (unsigned short)((float)i*shift);
          else
            lut[i] = Y<0?0:(Y>TBLN?TBLN:(unsigned short)(Y));
        }
    }
  for(int i=0; i< S.height*S.width; i++)
    {
      imgdata.image[i][0] = lut[imgdata.image[i][0]];
      imgdata.image[i][1] = lut[imgdata.image[i][1]];
      imgdata.image[i][2] = lut[imgdata.image[i][2]];
      imgdata.image[i][3] = lut[imgdata.image[i][3]];
    }
  C.data_maximum = lut[C.data_maximum];
  C.maximum = lut[C.maximum];
  // no need to adjust the minimum, black is already subtracted
  free(lut);
}

#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y))
#define CLIP(x) LIM(x,0,65535)

// Inner loop of camera->output color conversion: multiplies each pixel by
// out_cam (unless raw_color) and accumulates the per-channel histogram.
void LibRaw::convert_to_rgb_loop(float out_cam[3][4])
{
  int row,col,c;
  float out[3];
  ushort *img;
  memset(libraw_internal_data.output_data.histogram,0,sizeof(int)*LIBRAW_HISTOGRAM_SIZE*4);
  for (img=imgdata.image[0], row=0; row < S.height; row++)
    for (col=0; col < S.width; col++, img+=4)
      {
        if (!libraw_internal_data.internal_output_params.raw_color)
          {
            out[0] = out[1] = out[2] = 0;
            for(c=0; c< imgdata.idata.colors; c++)
              {
                out[0] += out_cam[0][c] * img[c];
                out[1] += out_cam[1][c] * img[c];
                out[2] += out_cam[2][c] * img[c];
              }
            for(c=0;c<3;c++) img[c] = CLIP((int) out[c]);
          }
        // Histogram bins are 8x coarser than the 16-bit sample range
        for(c=0; c< imgdata.idata.colors; c++)
          libraw_internal_data.output_data.histogram[c][img[c] >> 3]++;
      }
}

// Inner loop of white-balance scaling; the first variant also subtracts
// per-channel black levels (skipping zero samples).
void LibRaw::scale_colors_loop(float scale_mul[4])
{
  unsigned size = S.iheight*S.iwidth;
  if(C.cblack[0]||C.cblack[1]||C.cblack[2]||C.cblack[3])
    {
      for (unsigned i=0; i < size*4; i++)
        {
          int val = imgdata.image[0][i];
          if (!val) continue;
          val -= C.cblack[i & 3];
          val *= scale_mul[i & 3];
          imgdata.image[0][i] = CLIP(val);
        }
    }
  else // BL is zero
    {
      for (unsigned i=0; i < size*4; i++)
        {
          int val = imgdata.image[0][i];
          val *= scale_mul[i & 3];
          imgdata.image[0][i] = CLIP(val);
        }
    }
}

// Applies user black-level overrides, then normalizes C.cblack[] so the
// part common to all channels is carried in C.black.
void LibRaw::adjust_bl()
{
  if (O.user_black >= 0)
    C.black = O.user_black;
  for(int i=0; i<4; i++)
    if(O.user_cblack[i]>-1000000)  // sentinel: -1000001 means "not set"
      C.cblack[i] = O.user_cblack[i];

  // remove common part from C.cblack[]
  int i = C.cblack[3];
  int c;
  for(c=0;c<3;c++) if (i > C.cblack[c]) i = C.cblack[c];
  for(c=0;c<4;c++) C.cblack[c] -= i;
  C.black += i;
  for(c=0;c<4;c++) C.cblack[c] += C.black;
}

// Full dcraw-style postprocessing pipeline: black subtraction, WB scaling,
// demosaic, highlight handling, color conversion. Returns 0 or a LibRaw
// error code.
int LibRaw::dcraw_process(void)
{
  int quality,i;

  int iterations=-1, dcb_enhance=1, noiserd=0;
  int eeci_refine_fl=0, es_med_passes_fl=0;
  float cared=0,cablue=0;
  float linenoise=0;
  float lclean=0,cclean=0;
  float thresh=0;
  float preser=0;
  float expos=1.0;

  CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW);
  //    CHECK_ORDER_HIGH(LIBRAW_PROGRESS_PRE_INTERPOLATE);

  try {

    int no_crop = 1;

    // cropbox defaults to UINT_MAX; ~x is 0 only for that sentinel
    if (~O.cropbox[2] && ~O.cropbox[3])
      no_crop=0;

    libraw_decoder_info_t di;
    get_decoder_info(&di);

    // Black subtraction can be fused into the raw->image copy only when
    // no preprocessing step needs the unmodified raw values.
    int subtract_inline = !O.bad_pixels && !O.dark_frame && !O.wf_debanding && !(di.decoder_flags & LIBRAW_DECODER_LEGACY) && !IO.zero_is_bad;

    raw2image_ex(subtract_inline); // allocate imgdata.image and copy data!

    int save_4color = O.four_color_rgb;

    if (IO.zero_is_bad)
      {
        remove_zeroes();
        SET_PROC_FLAG(LIBRAW_PROGRESS_REMOVE_ZEROES);
      }

    if(O.half_size)
      O.four_color_rgb = 1;

    if(O.bad_pixels && no_crop)
      {
        bad_pixels(O.bad_pixels);
        SET_PROC_FLAG(LIBRAW_PROGRESS_BAD_PIXELS);
      }

    if (O.dark_frame && no_crop)
      {
        subtract (O.dark_frame);
        SET_PROC_FLAG(LIBRAW_PROGRESS_DARK_FRAME);
      }

    if (O.wf_debanding)
      {
        wf_remove_banding();
      }

    // Default demosaic quality: 3 (AHD) for regular sensors, 2 for Fuji
    quality = 2 + !IO.fuji_width;

    if (O.user_qual >= 0) quality = O.user_qual;

    // If black was not subtracted inline (or nothing was subtracted),
    // do it now.
    if(!subtract_inline || !C.data_maximum)
      {
        adjust_bl();
        subtract_black();
      }

    adjust_maximum();

    if (O.user_sat > 0) C.maximum = O.user_sat;

    if (P1.is_foveon)
      {
        if(load_raw == &LibRaw::foveon_dp_load_raw)
          {
            // Sigma DP: clamp negative samples to zero
            for (int i=0; i < S.height*S.width*4; i++)
              if ((short) imgdata.image[0][i] < 0) imgdata.image[0][i] = 0;
          }
        else
          foveon_interpolate();
        SET_PROC_FLAG(LIBRAW_PROGRESS_FOVEON_INTERPOLATE);
      }

    if (O.green_matching && !O.half_size)
      {
        green_matching();
      }

    if (!P1.is_foveon)
      {
        scale_colors();
        SET_PROC_FLAG(LIBRAW_PROGRESS_SCALE_COLORS);
      }

    pre_interpolate();
    SET_PROC_FLAG(LIBRAW_PROGRESS_PRE_INTERPOLATE);

    // User overrides for demosaic-pack tunables
    if (O.dcb_iterations >= 0) iterations = O.dcb_iterations;
    if (O.dcb_enhance_fl >=0 ) dcb_enhance = O.dcb_enhance_fl;
    if (O.fbdd_noiserd >=0 ) noiserd = O.fbdd_noiserd;
    if (O.eeci_refine >=0 ) eeci_refine_fl = O.eeci_refine;
    if (O.es_med_passes >0 ) es_med_passes_fl = O.es_med_passes;

    // LIBRAW_DEMOSAIC_PACK_GPL3
    if (!O.half_size && O.cfa_green >0) {thresh=O.green_thresh ;green_equilibrate(thresh);}
    if (O.exp_correc >0) {expos=O.exp_shift ; preser=O.exp_preser; exp_bef(expos,preser);}
    if (O.ca_correc >0 ) {cablue=O.cablue; cared=O.cared; CA_correct_RT(cablue, cared);}
    if (O.cfaline >0 ) {linenoise=O.linenoise; cfa_linedn(linenoise);}
    if (O.cfa_clean >0 ) {lclean=O.lclean; cclean=O.cclean; cfa_impulse_gauss(lclean,cclean);}

    if (P1.filters)
      {
        // Mosaic data: select demosaic algorithm by 'quality'
        if (noiserd>0 && P1.colors==3 && P1.filters) fbdd(noiserd);

        if (quality == 0)
          lin_interpolate();
        else if (quality == 1 || P1.colors > 3 || P1.filters < 1000)
          vng_interpolate();
        else if (quality == 2)
          ppg_interpolate();
        else if (quality == 3)
          ahd_interpolate(); // really don't need it here due to fallback op
        else if (quality == 4)
          dcb(iterations, dcb_enhance);
        //  LIBRAW_DEMOSAIC_PACK_GPL2
        else if (quality == 5)
          ahd_interpolate_mod();
        else if (quality == 6)
          afd_interpolate_pl(2,1);
        else if (quality == 7)
          vcd_interpolate(0);
        else if (quality == 8)
          vcd_interpolate(12);
        else if (quality == 9)
          lmmse_interpolate(1);
        // LIBRAW_DEMOSAIC_PACK_GPL3
        else if (quality == 10)
          amaze_demosaic_RT();
        // LGPL2
        else if (quality == 11)
          dht_interpolate();
        else if (quality == 12)
          aahd_interpolate();
        // fallback to AHD
        else
          ahd_interpolate();

        SET_PROC_FLAG(LIBRAW_PROGRESS_INTERPOLATE);
      }
    if (IO.mix_green)
      {
        // Average the two green channels and drop to 3 colors
        for (P1.colors=3, i=0; i < S.height * S.width; i++)
          imgdata.image[i][1] = (imgdata.image[i][1] + imgdata.image[i][3]) >> 1;
        SET_PROC_FLAG(LIBRAW_PROGRESS_MIX_GREEN);
      }

    if(!P1.is_foveon)
      {
        if (P1.colors == 3)
          {
            if (quality == 8)
              {
                // VCD(12) has its own refinement/median chain
                if (eeci_refine_fl == 1) refinement();
                if (O.med_passes > 0) median_filter_new();
                if (es_med_passes_fl > 0) es_median_filter();
              }
            else {
              median_filter();
            }
            SET_PROC_FLAG(LIBRAW_PROGRESS_MEDIAN_FILTER);
          }
      }

    if (O.highlight == 2)
      {
        blend_highlights();
        SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
      }

    if (O.highlight > 2)
      {
        recover_highlights();
        SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
      }

    if (O.use_fuji_rotate)
      {
        fuji_rotate();
        SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE);
      }

    if(!libraw_internal_data.output_data.histogram)
      {
libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4); merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_process()"); } #ifndef NO_LCMS if(O.camera_profile) { apply_profile(O.camera_profile,O.output_profile); SET_PROC_FLAG(LIBRAW_PROGRESS_APPLY_PROFILE); } #endif convert_to_rgb(); SET_PROC_FLAG(LIBRAW_PROGRESS_CONVERT_RGB); if (O.use_fuji_rotate) { stretch(); SET_PROC_FLAG(LIBRAW_PROGRESS_STRETCH); } O.four_color_rgb = save_4color; // also, restore return 0; } catch ( LibRaw_exceptions err) { EXCEPTION_HANDLER(err); } } // Supported cameras: static const char *static_camera_list[] = { "Adobe Digital Negative (DNG)", "AgfaPhoto DC-833m", "Apple QuickTake 100", "Apple QuickTake 150", "Apple QuickTake 200", "ARRIRAW format", "AVT F-080C", "AVT F-145C", "AVT F-201C", "AVT F-510C", "AVT F-810C", "Canon PowerShot 600", "Canon PowerShot A5", "Canon PowerShot A5 Zoom", "Canon PowerShot A50", "Canon PowerShot A460 (CHDK hack)", "Canon PowerShot A470 (CHDK hack)", "Canon PowerShot A530 (CHDK hack)", "Canon PowerShot A570 (CHDK hack)", "Canon PowerShot A590 (CHDK hack)", "Canon PowerShot A610 (CHDK hack)", "Canon PowerShot A620 (CHDK hack)", "Canon PowerShot A630 (CHDK hack)", "Canon PowerShot A640 (CHDK hack)", "Canon PowerShot A650 (CHDK hack)", "Canon PowerShot A710 IS (CHDK hack)", "Canon PowerShot A720 IS (CHDK hack)", "Canon PowerShot Pro70", "Canon PowerShot Pro90 IS", "Canon PowerShot Pro1", "Canon PowerShot G1", "Canon PowerShot G1 X", "Canon PowerShot G2", "Canon PowerShot G3", "Canon PowerShot G5", "Canon PowerShot G6", "Canon PowerShot G7 (CHDK hack)", "Canon PowerShot G9", "Canon PowerShot G10", "Canon PowerShot G11", "Canon PowerShot G12", "Canon PowerShot G15", "Canon PowerShot S2 IS (CHDK hack)", "Canon PowerShot S3 IS (CHDK hack)", "Canon PowerShot S5 IS (CHDK hack)", "Canon PowerShot SD300 (CHDK hack)", "Canon PowerShot S30", "Canon PowerShot S40", "Canon 
PowerShot S45", "Canon PowerShot S50", "Canon PowerShot S60", "Canon PowerShot S70", "Canon PowerShot S90", "Canon PowerShot S95", "Canon PowerShot S100", "Canon PowerShot S110", "Canon PowerShot SX1 IS", "Canon PowerShot SX50 HS", "Canon PowerShot SX110 IS (CHDK hack)", "Canon PowerShot SX120 IS (CHDK hack)", "Canon PowerShot SX220 HS (CHDK hack)", "Canon PowerShot SX20 IS (CHDK hack)", "Canon PowerShot SX30 IS (CHDK hack)", "Canon EOS D30", "Canon EOS D60", "Canon EOS 5D", "Canon EOS 5D Mark II", "Canon EOS 5D Mark III", "Canon EOS 6D", "Canon EOS 7D", "Canon EOS 10D", "Canon EOS 20D", "Canon EOS 30D", "Canon EOS 40D", "Canon EOS 50D", "Canon EOS 60D", "Canon EOS 100D/ Digital Rebel SL1", "Canon EOS 300D / Digital Rebel / Kiss Digital", "Canon EOS 350D / Digital Rebel XT / Kiss Digital N", "Canon EOS 400D / Digital Rebel XTi / Kiss Digital X", "Canon EOS 450D / Digital Rebel XSi / Kiss Digital X2", "Canon EOS 500D / Digital Rebel T1i / Kiss Digital X3", "Canon EOS 550D / Digital Rebel T2i / Kiss Digital X4", "Canon EOS 600D / Digital Rebel T3i / Kiss Digital X5", "Canon EOS 650D / Digital Rebel T4i / Kiss Digital X6i", "Canon EOS 700D / Digital Rebel T54i", "Canon EOS 1000D / Digital Rebel XS / Kiss Digital F", "Canon EOS 1100D / Digital Rebel T3 / Kiss Digital X50", "Canon EOS D2000C", "Canon EOS M", "Canon EOS-1D", "Canon EOS-1DS", "Canon EOS-1D X", "Canon EOS-1D Mark II", "Canon EOS-1D Mark II N", "Canon EOS-1D Mark III", "Canon EOS-1D Mark IV", "Canon EOS-1Ds Mark II", "Canon EOS-1Ds Mark III", "Casio QV-2000UX", "Casio QV-3000EX", "Casio QV-3500EX", "Casio QV-4000", "Casio QV-5700", "Casio QV-R41", "Casio QV-R51", "Casio QV-R61", "Casio EX-S20", "Casio EX-S100", "Casio EX-Z4", "Casio EX-Z50", "Casio EX-Z500", "Casio EX-Z55", "Casio EX-Z60", "Casio EX-Z75", "Casio EX-Z750", "Casio EX-Z8", "Casio EX-Z850", "Casio EX-Z1050", "Casio EX-Z1080", "Casio EX-ZR100", "Casio Exlim Pro 505", "Casio Exlim Pro 600", "Casio Exlim Pro 700", "Contax N Digital", "Creative 
PC-CAM 600", "Epson R-D1", "Foculus 531C", "Fuji E550", "Fuji E900", "Fuji F700", "Fuji F710", "Fuji F800", "Fuji F810", "Fuji S2Pro", "Fuji S3Pro", "Fuji S5Pro", "Fuji S20Pro", "Fuji S100FS", "Fuji S5000", "Fuji S5100/S5500", "Fuji S5200/S5600", "Fuji S6000fd", "Fuji S7000", "Fuji S9000/S9500", "Fuji S9100/S9600", "Fuji S200EXR", "Fuji SL1000", "Fuji HS10/HS11", "Fuji HS20EXR", "Fuji HS30EXR", "Fuji HS50EXR", "Fuji F550EXR", "Fuji F600EXR", "Fuji F770EXR", "Fuji F800EXR", "Fuji X-Pro1", "Fuji X-S1", "Fuji X100", "Fuji X100S", "Fuji X10", "Fuji X20", "Fuji X-E1", "Fuji XF1", "Fuji IS-1", "Hasselblad CFV", "Hasselblad H3D", "Hasselblad H4D", "Hasselblad V96C", "Imacon Ixpress 16-megapixel", "Imacon Ixpress 22-megapixel", "Imacon Ixpress 39-megapixel", "ISG 2020x1520", "Kodak DC20", "Kodak DC25", "Kodak DC40", "Kodak DC50", "Kodak DC120 (also try kdc2tiff)", "Kodak DCS200", "Kodak DCS315C", "Kodak DCS330C", "Kodak DCS420", "Kodak DCS460", "Kodak DCS460A", "Kodak DCS520C", "Kodak DCS560C", "Kodak DCS620C", "Kodak DCS620X", "Kodak DCS660C", "Kodak DCS660M", "Kodak DCS720X", "Kodak DCS760C", "Kodak DCS760M", "Kodak EOSDCS1", "Kodak EOSDCS3B", "Kodak NC2000F", "Kodak ProBack", "Kodak PB645C", "Kodak PB645H", "Kodak PB645M", "Kodak DCS Pro 14n", "Kodak DCS Pro 14nx", "Kodak DCS Pro SLR/c", "Kodak DCS Pro SLR/n", "Kodak C330", "Kodak C603", "Kodak P850", "Kodak P880", "Kodak Z980", "Kodak Z981", "Kodak Z990", "Kodak Z1015", "Kodak KAI-0340", "Konica KD-400Z", "Konica KD-510Z", "Leaf AFi 7", "Leaf AFi-II 5", "Leaf AFi-II 6", "Leaf AFi-II 7", "Leaf AFi-II 8", "Leaf AFi-II 10", "Leaf AFi-II 10R", "Leaf AFi-II 12", "Leaf AFi-II 12R", "Leaf Aptus 17", "Leaf Aptus 22", "Leaf Aptus 54S", "Leaf Aptus 65", "Leaf Aptus 75", "Leaf Aptus 75S", "Leaf Cantare", "Leaf CatchLight", "Leaf CMost", "Leaf DCB2", "Leaf Valeo 6", "Leaf Valeo 11", "Leaf Valeo 17", "Leaf Valeo 22", "Leaf Volare", "Leica Digilux 2", "Leica Digilux 3", "Leica D-LUX2", "Leica D-LUX3", "Leica D-LUX4", "Leica D-LUX5", 
"Leica D-LUX6", "Leica V-LUX1", "Leica V-LUX2", "Leica V-LUX3", "Leica V-LUX4", "Logitech Fotoman Pixtura", "Mamiya ZD", "Micron 2010", "Minolta RD175", "Minolta DiMAGE 5", "Minolta DiMAGE 7", "Minolta DiMAGE 7i", "Minolta DiMAGE 7Hi", "Minolta DiMAGE A1", "Minolta DiMAGE A2", "Minolta DiMAGE A200", "Minolta DiMAGE G400", "Minolta DiMAGE G500", "Minolta DiMAGE G530", "Minolta DiMAGE G600", "Minolta DiMAGE Z2", "Minolta Alpha/Dynax/Maxxum 5D", "Minolta Alpha/Dynax/Maxxum 7D", "Motorola PIXL", "Nikon D1", "Nikon D1H", "Nikon D1X", "Nikon D2H", "Nikon D2Hs", "Nikon D2X", "Nikon D2Xs", "Nikon D3", "Nikon D3s", "Nikon D3X", "Nikon D4", "Nikon D40", "Nikon D40X", "Nikon D50", "Nikon D60", "Nikon D600", "Nikon D70", "Nikon D70s", "Nikon D80", "Nikon D90", "Nikon D100", "Nikon D200", "Nikon D300", "Nikon D300s", "Nikon D700", "Nikon D3000", "Nikon D3100", "Nikon D3200", "Nikon D5000", "Nikon D5100", "Nikon D7000", "Nikon D800", "Nikon D800E", "Nikon 1 J1", "Nikon 1 S1", "Nikon 1 V1", "Nikon 1 J2", "Nikon 1 V2", "Nikon 1 J3", "Nikon E700 (\"DIAG RAW\" hack)", "Nikon E800 (\"DIAG RAW\" hack)", "Nikon E880 (\"DIAG RAW\" hack)", "Nikon E900 (\"DIAG RAW\" hack)", "Nikon E950 (\"DIAG RAW\" hack)", "Nikon E990 (\"DIAG RAW\" hack)", "Nikon E995 (\"DIAG RAW\" hack)", "Nikon E2100 (\"DIAG RAW\" hack)", "Nikon E2500 (\"DIAG RAW\" hack)", "Nikon E3200 (\"DIAG RAW\" hack)", "Nikon E3700 (\"DIAG RAW\" hack)", "Nikon E4300 (\"DIAG RAW\" hack)", "Nikon E4500 (\"DIAG RAW\" hack)", "Nikon E5000", "Nikon E5400", "Nikon E5700", "Nikon E8400", "Nikon E8700", "Nikon E8800", "Nikon Coolpix A", "Nikon Coolpix P330", "Nikon Coolpix P6000", "Nikon Coolpix P7000", "Nikon Coolpix P7100", "Nikon Coolpix P7700", "Nikon Coolpix S6 (\"DIAG RAW\" hack)", "Nokia N95", "Nokia X2", "Olympus C3030Z", "Olympus C5050Z", "Olympus C5060WZ", "Olympus C7070WZ", "Olympus C70Z,C7000Z", "Olympus C740UZ", "Olympus C770UZ", "Olympus C8080WZ", "Olympus X200,D560Z,C350Z", "Olympus E-1", "Olympus E-3", "Olympus E-5", 
"Olympus E-10", "Olympus E-20", "Olympus E-30", "Olympus E-300", "Olympus E-330", "Olympus E-400", "Olympus E-410", "Olympus E-420", "Olympus E-500", "Olympus E-510", "Olympus E-520", "Olympus E-620", "Olympus E-P1", "Olympus E-P2", "Olympus E-P3", "Olympus E-PL1", "Olympus E-PL1s", "Olympus E-PL2", "Olympus E-PL3", "Olympus E-PL5", "Olympus E-PM1", "Olympus E-PM2", "Olympus E-M5", "Olympus SP310", "Olympus SP320", "Olympus SP350", "Olympus SP500UZ", "Olympus SP510UZ", "Olympus SP550UZ", "Olympus SP560UZ", "Olympus SP570UZ", "Olympus XZ-1", "Olympus XZ-10", "Olympus XZ-2", "Panasonic DMC-FZ8", "Panasonic DMC-FZ18", "Panasonic DMC-FZ28", "Panasonic DMC-FZ30", "Panasonic DMC-FZ35/FZ38", "Panasonic DMC-FZ40", "Panasonic DMC-FZ50", "Panasonic DMC-FZ100", "Panasonic DMC-FZ150", "Panasonic DMC-FZ200", "Panasonic DMC-FX150", "Panasonic DMC-G1", "Panasonic DMC-G10", "Panasonic DMC-G2", "Panasonic DMC-G3", "Panasonic DMC-G5", "Panasonic DMC-G6", "Panasonic DMC-GF1", "Panasonic DMC-GF2", "Panasonic DMC-GF3", "Panasonic DMC-GF5", "Panasonic DMC-GH1", "Panasonic DMC-GH2", "Panasonic DMC-GH3", "Panasonic DMC-GX1", "Panasonic DMC-L1", "Panasonic DMC-L10", "Panasonic DMC-LC1", "Panasonic DMC-LX1", "Panasonic DMC-LX2", "Panasonic DMC-LX3", "Panasonic DMC-LX5", "Panasonic DMC-LX7", "Pentax *ist D", "Pentax *ist DL", "Pentax *ist DL2", "Pentax *ist DS", "Pentax *ist DS2", "Pentax K10D", "Pentax K20D", "Pentax K100D", "Pentax K100D Super", "Pentax K200D", "Pentax K2000/K-m", "Pentax K-x", "Pentax K-r", "Pentax K-30", "Pentax K-5", "Pentax K-5 II", "Pentax K-5 IIs", "Pentax K-7", "Pentax MX-1", "Pentax Q10", "Pentax Optio S", "Pentax Optio S4", "Pentax Optio 33WR", "Pentax Optio 750Z", "Pentax 645D", "Phase One LightPhase", "Phase One H 10", "Phase One H 20", "Phase One H 25", "Phase One P 20", "Phase One P 25", "Phase One P 30", "Phase One P 45", "Phase One P 45+", "Phase One P 65", "Pixelink A782", #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 "Polaroid x530", #endif #ifndef NO_JASPER "Redcode 
R3D format", #endif "Rollei d530flex", "RoverShot 3320af", "Samsung EX1", "Samsung EX2F", "Samsung GX-1S", "Samsung GX10", "Samsung GX20", "Samsung NX10", "Samsung NX11", "Samsung NX100", "Samsung NX20", "Samsung NX200", "Samsung NX210", "Samsung NX1000", "Samsung WB550", "Samsung WB2000", "Samsung S85 (hacked)", "Samsung S850 (hacked)", "Sarnoff 4096x5440", #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 "Sigma SD9", "Sigma SD10", "Sigma SD14", "Sigma SD15", "Sigma SD1", "Sigma SD1 Merill", "Sigma DP1", "Sigma DP1 Merill", "Sigma DP1S", "Sigma DP1X", "Sigma DP2", "Sigma DP2 Merill", "Sigma DP2S", "Sigma DP2X", #endif "Sinar 3072x2048", "Sinar 4080x4080", "Sinar 4080x5440", "Sinar STI format", "SMaL Ultra-Pocket 3", "SMaL Ultra-Pocket 4", "SMaL Ultra-Pocket 5", "Sony DSC-F828", "Sony DSC-R1", "Sony DSC-RX1", "Sony DSC-RX100", "Sony DSC-V3", "Sony DSLR-A100", "Sony DSLR-A200", "Sony DSLR-A230", "Sony DSLR-A290", "Sony DSLR-A300", "Sony DSLR-A330", "Sony DSLR-A350", "Sony DSLR-A380", "Sony DSLR-A390", "Sony DSLR-A450", "Sony DSLR-A500", "Sony DSLR-A550", "Sony DSLR-A580", "Sony DSLR-A700", "Sony DSLR-A850", "Sony DSLR-A900", "Sony NEX-3", "Sony NEX-5", "Sony NEX-5N", "Sony NEX-5R", "Sony NEX-6", "Sony NEX-7", "Sony NEX-C3", "Sony NEX-F3", "Sony SLT-A33", "Sony SLT-A35", "Sony SLT-A37", "Sony SLT-A55V", "Sony SLT-A57", "Sony SLT-A58", "Sony SLT-A65V", "Sony SLT-A77V", "Sony SLT-A99V", "Sony XCD-SX910CR", "STV680 VGA", "ptGrey GRAS-50S5C", "JaiPulnix BB-500CL", "JaiPulnix BB-500GE", "SVS SVS625CL", NULL }; const char** LibRaw::cameraList() { return static_camera_list;} int LibRaw::cameraCount() { return (sizeof(static_camera_list)/sizeof(static_camera_list[0]))-1; } const char * LibRaw::strprogress(enum LibRaw_progress p) { switch(p) { case LIBRAW_PROGRESS_START: return "Starting"; case LIBRAW_PROGRESS_OPEN : return "Opening file"; case LIBRAW_PROGRESS_IDENTIFY : return "Reading metadata"; case LIBRAW_PROGRESS_SIZE_ADJUST: return "Adjusting size"; case LIBRAW_PROGRESS_LOAD_RAW: 
      // Human-readable name for each pipeline stage (see LibRaw_progress)
      return "Reading RAW data";
    case LIBRAW_PROGRESS_REMOVE_ZEROES:
      return "Clearing zero values";
    case LIBRAW_PROGRESS_BAD_PIXELS :
      return "Removing dead pixels";
    case LIBRAW_PROGRESS_DARK_FRAME:
      return "Subtracting dark frame data";
    case LIBRAW_PROGRESS_FOVEON_INTERPOLATE:
      return "Interpolating Foveon sensor data";
    case LIBRAW_PROGRESS_SCALE_COLORS:
      return "Scaling colors";
    case LIBRAW_PROGRESS_PRE_INTERPOLATE:
      return "Pre-interpolating";
    case LIBRAW_PROGRESS_INTERPOLATE:
      return "Interpolating";
    case LIBRAW_PROGRESS_MIX_GREEN :
      return "Mixing green channels";
    case LIBRAW_PROGRESS_MEDIAN_FILTER :
      return "Median filter";
    case LIBRAW_PROGRESS_HIGHLIGHTS:
      return "Highlight recovery";
    case LIBRAW_PROGRESS_FUJI_ROTATE :
      return "Rotating Fuji diagonal data";
    case LIBRAW_PROGRESS_FLIP :
      return "Flipping image";
    case LIBRAW_PROGRESS_APPLY_PROFILE:
      return "ICC conversion";
    case LIBRAW_PROGRESS_CONVERT_RGB:
      return "Converting to RGB";
    case LIBRAW_PROGRESS_STRETCH:
      return "Stretching image";
    case LIBRAW_PROGRESS_THUMB_LOAD:
      return "Loading thumbnail";
    default:
      return "Some strange things";
    }
}
/* -*- C++ -*- * File: libraw_cxx.cpp * Copyright 2008-2013 LibRaw LLC (info@libraw.org) * Created: Sat Mar 8 , 2008 * * LibRaw C++ interface (implementation) LibRaw is free software; you can redistribute it and/or modify it under the terms of the one of three licenses as you choose: 1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1 (See file LICENSE.LGPL provided in LibRaw distribution archive for details). 2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 (See file LICENSE.CDDL provided in LibRaw distribution archive for details). 3. LibRaw Software License 27032010 (See file LICENSE.LibRaw.pdf provided in LibRaw distribution archive for details). */ #include <math.h> #include <errno.h> #include <float.h> #include <new> #include <exception> #include <sys/types.h> #include <sys/stat.h> #ifndef WIN32 #include <netinet/in.h> #else #include <winsock2.h> #endif #define LIBRAW_LIBRARY_BUILD #include "libraw/libraw.h" #include "internal/defines.h" #ifdef USE_RAWSPEED #include "../RawSpeed/rawspeed_xmldata.cpp" #include <RawSpeed/StdAfx.h> #include <RawSpeed/FileMap.h> #include <RawSpeed/RawParser.h> #include <RawSpeed/RawDecoder.h> #include <RawSpeed/CameraMetaData.h> #include <RawSpeed/ColorFilterArray.h> #endif #ifdef __cplusplus extern "C" { #endif void default_memory_callback(void *,const char *file,const char *where) { fprintf (stderr,"%s: Out of memory in %s\n", file?file:"unknown file", where); } void default_data_callback(void*,const char *file, const int offset) { if(offset < 0) fprintf (stderr,"%s: Unexpected end of file\n", file?file:"unknown file"); else fprintf (stderr,"%s: data corrupted at %d\n",file?file:"unknown file",offset); } const char *libraw_strerror(int e) { enum LibRaw_errors errorcode = (LibRaw_errors)e; switch(errorcode) { case LIBRAW_SUCCESS: return "No error"; case LIBRAW_UNSPECIFIED_ERROR: return "Unspecified error"; case LIBRAW_FILE_UNSUPPORTED: return "Unsupported file format or not RAW file"; case 
LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE: return "Request for nonexisting image number"; case LIBRAW_OUT_OF_ORDER_CALL: return "Out of order call of libraw function"; case LIBRAW_NO_THUMBNAIL: return "No thumbnail in file"; case LIBRAW_UNSUPPORTED_THUMBNAIL: return "Unsupported thumbnail format"; case LIBRAW_INPUT_CLOSED: return "No input stream, or input stream closed"; case LIBRAW_UNSUFFICIENT_MEMORY: return "Unsufficient memory"; case LIBRAW_DATA_ERROR: return "Corrupted data or unexpected EOF"; case LIBRAW_IO_ERROR: return "Input/output error"; case LIBRAW_CANCELLED_BY_CALLBACK: return "Cancelled by user callback"; case LIBRAW_BAD_CROP: return "Bad crop box"; default: return "Unknown error code"; } } #ifdef __cplusplus } #endif const double LibRaw_constants::xyz_rgb[3][3] = { { 0.412453, 0.357580, 0.180423 }, { 0.212671, 0.715160, 0.072169 }, { 0.019334, 0.119193, 0.950227 } }; const float LibRaw_constants::d65_white[3] = { 0.950456f, 1.0f, 1.088754f }; #define P1 imgdata.idata #define S imgdata.sizes #define O imgdata.params #define C imgdata.color #define T imgdata.thumbnail #define IO libraw_internal_data.internal_output_params #define ID libraw_internal_data.internal_data #define EXCEPTION_HANDLER(e) do{ \ /* fprintf(stderr,"Exception %d caught\n",e);*/ \ switch(e) \ { \ case LIBRAW_EXCEPTION_ALLOC: \ recycle(); \ return LIBRAW_UNSUFFICIENT_MEMORY; \ case LIBRAW_EXCEPTION_DECODE_RAW: \ case LIBRAW_EXCEPTION_DECODE_JPEG: \ recycle(); \ return LIBRAW_DATA_ERROR; \ case LIBRAW_EXCEPTION_DECODE_JPEG2000: \ recycle(); \ return LIBRAW_DATA_ERROR; \ case LIBRAW_EXCEPTION_IO_EOF: \ case LIBRAW_EXCEPTION_IO_CORRUPT: \ recycle(); \ return LIBRAW_IO_ERROR; \ case LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK:\ recycle(); \ return LIBRAW_CANCELLED_BY_CALLBACK; \ case LIBRAW_EXCEPTION_BAD_CROP: \ recycle(); \ return LIBRAW_BAD_CROP; \ default: \ return LIBRAW_UNSPECIFIED_ERROR; \ } \ }while(0) const char* LibRaw::version() { return LIBRAW_VERSION_STR;} int LibRaw::versionNumber() 
{ return LIBRAW_VERSION; }

const char* LibRaw::strerror(int p) { return libraw_strerror(p);}

// Reports a data error via the data callback, then throws:
// EOF -> LIBRAW_EXCEPTION_IO_EOF, otherwise LIBRAW_EXCEPTION_IO_CORRUPT.
// Only the first error per unpack triggers the throw (data_error counter).
void LibRaw::derror()
{
  if (!libraw_internal_data.unpacker_data.data_error &&
      libraw_internal_data.internal_data.input)
    {
      if (libraw_internal_data.internal_data.input->eof())
        {
          if(callbacks.data_cb)(*callbacks.data_cb)(callbacks.datacb_data,
                                                    libraw_internal_data.internal_data.input->fname(),-1);
          throw LIBRAW_EXCEPTION_IO_EOF;
        }
      else
        {
          if(callbacks.data_cb)(*callbacks.data_cb)(callbacks.datacb_data,
                                                    libraw_internal_data.internal_data.input->fname(),
                                                    libraw_internal_data.internal_data.input->tell());
          throw LIBRAW_EXCEPTION_IO_CORRUPT;
        }
    }
  libraw_internal_data.unpacker_data.data_error++;
}

// Frees a processed-image buffer returned by the dcraw_make_mem_* calls.
void LibRaw::dcraw_clear_mem(libraw_processed_image_t* p)
{
  if(p) ::free(p);
}

#ifdef USE_RAWSPEED
using namespace RawSpeed;
// Thin wrapper over RawSpeed's CameraMetaData that adds construction from
// an in-memory XML buffer (in addition to a file name).
class CameraMetaDataLR : public CameraMetaData
{
public:
  CameraMetaDataLR() : CameraMetaData() {}
  CameraMetaDataLR(char *filename) : CameraMetaData(filename){}
  CameraMetaDataLR(char *data, int sz);
};

// Parses camera metadata XML from memory via libxml2; throws (ThrowCME)
// on parse/validation errors, tolerating only the "DTD not found" case.
CameraMetaDataLR::CameraMetaDataLR(char *data, int sz) : CameraMetaData()
{
  ctxt = xmlNewParserCtxt();
  if (ctxt == NULL) {
    ThrowCME("CameraMetaData:Could not initialize context.");
  }

  xmlResetLastError();
  doc = xmlCtxtReadMemory(ctxt, data,sz, "", NULL, XML_PARSE_DTDVALID);

  if (doc == NULL) {
    ThrowCME("CameraMetaData: XML Document could not be parsed successfully. Error was: %s", ctxt->lastError.message);
  }

  if (ctxt->valid == 0) {
    if (ctxt->lastError.code == 0x5e) {
      // ignore: DTD could not be located
      // printf("CameraMetaData: Unable to locate DTD, attempting to ignore.");
    } else {
      ThrowCME("CameraMetaData: XML file does not validate. DTD Error was: %s", ctxt->lastError.message);
    }
  }

  xmlNodePtr cur;
  cur = xmlDocGetRootElement(doc);
  if (xmlStrcmp(cur->name, (const xmlChar *) "Cameras")) {
    ThrowCME("CameraMetaData: XML document of the wrong type, root node is not cameras.");
    return;
  }

  cur = cur->xmlChildrenNode;
  while (cur != NULL) {
    if ((!xmlStrcmp(cur->name, (const xmlChar *)"Camera"))) {
      Camera *camera = new Camera(doc, cur);
      addCamera(camera);

      // Create cameras for aliases.
      for (uint32 i = 0; i < camera->aliases.size(); i++) {
        addCamera(new Camera(camera, i));
      }
    }
    cur = cur->next;
  }
  if (doc)
    xmlFreeDoc(doc);
  doc = 0;
  if (ctxt)
    xmlFreeParserCtxt(ctxt);
  ctxt = 0;
}

#define RAWSPEED_DATA_COUNT (sizeof(_rawspeed_data_xml)/sizeof(_rawspeed_data_xml[0]))
// Concatenates the compiled-in RawSpeed XML fragments into one buffer and
// builds a CameraMetaDataLR from it; returns NULL on allocation failure
// or if construction throws.
static CameraMetaDataLR* make_camera_metadata()
{
  int len = 0,i;
  for(i=0;i<RAWSPEED_DATA_COUNT;i++)
    if(_rawspeed_data_xml[i])
      {
        len+=strlen(_rawspeed_data_xml[i]);
      }
  char *rawspeed_xml = (char*)calloc(len+1,sizeof(_rawspeed_data_xml[0][0]));
  if(!rawspeed_xml) return NULL;
  int offt = 0;
  for(i=0;i<RAWSPEED_DATA_COUNT;i++)
    if(_rawspeed_data_xml[i])
      {
        int ll = strlen(_rawspeed_data_xml[i]);
        if(offt+ll>len) break;  // paranoia: never overrun the buffer
        memmove(rawspeed_xml+offt,_rawspeed_data_xml[i],ll);
        offt+=ll;
      }
  rawspeed_xml[offt]=0;
  CameraMetaDataLR *ret=NULL;
  try {
    ret = new CameraMetaDataLR(rawspeed_xml,offt);
  }
  catch (...)
  {
    // Mask all exceptions
  }
  free(rawspeed_xml);
  return ret;
}
#endif

#define ZERO(a) memset(&a,0,sizeof(a))

// Constructor: zeroes all state and installs dcraw-compatible default
// processing parameters. 'flags' can suppress the default memory/data
// error callbacks.
LibRaw:: LibRaw(unsigned int flags)
{
  double aber[4] = {1,1,1,1};
  double gamm[6] = { 0.45,4.5,0,0,0,0 };  // BT.709 gamma by default
  unsigned greybox[4] =  { 0, 0, UINT_MAX, UINT_MAX };
  unsigned cropbox[4] =  { 0, 0, UINT_MAX, UINT_MAX };
#ifdef DCRAW_VERBOSE
  verbose = 1;
#else
  verbose = 0;
#endif
  ZERO(imgdata);
  ZERO(libraw_internal_data);
  ZERO(callbacks);
  _rawspeed_camerameta = _rawspeed_decoder = NULL;
#ifdef USE_RAWSPEED
  CameraMetaDataLR *camerameta = make_camera_metadata(); // May be NULL in case of exception in make_camera_metadata()
  _rawspeed_camerameta = static_cast<void*>(camerameta);
#endif
  callbacks.mem_cb = (flags & LIBRAW_OPIONS_NO_MEMERR_CALLBACK) ? NULL:  &default_memory_callback;
  callbacks.data_cb = (flags & LIBRAW_OPIONS_NO_DATAERR_CALLBACK)? NULL : &default_data_callback;
  memmove(&imgdata.params.aber,&aber,sizeof(aber));
  memmove(&imgdata.params.gamm,&gamm,sizeof(gamm));
  memmove(&imgdata.params.greybox,&greybox,sizeof(greybox));
  memmove(&imgdata.params.cropbox,&cropbox,sizeof(cropbox));

  imgdata.params.bright=1;
  imgdata.params.use_camera_matrix=-1;
  imgdata.params.user_flip=-1;
  imgdata.params.user_black=-1;
  // -1000001 is the "not set" sentinel checked by adjust_bl()
  imgdata.params.user_cblack[0]=imgdata.params.user_cblack[1]=imgdata.params.user_cblack[2]=imgdata.params.user_cblack[3]=-1000001;
  imgdata.params.user_sat=-1;
  imgdata.params.user_qual=-1;
  imgdata.params.output_color=1;
  imgdata.params.output_bps=8;
  imgdata.params.use_fuji_rotate=1;
  imgdata.params.exp_shift = 1.0;
  imgdata.params.auto_bright_thr = LIBRAW_DEFAULT_AUTO_BRIGHTNESS_THRESHOLD;
  imgdata.params.adjust_maximum_thr= LIBRAW_DEFAULT_ADJUST_MAXIMUM_THRESHOLD;
  imgdata.params.use_rawspeed = 1;
  imgdata.params.green_matching = 0;
  imgdata.parent_class = this;
  imgdata.progress_flags = 0;
  tls = new LibRaw_TLS;
  tls->init();
}

// Replaces the RawSpeed camera-definition set with one loaded from
// 'filename'; returns 0 on success, -1 on any parse error.
int LibRaw::set_rawspeed_camerafile(char *filename)
{
#ifdef USE_RAWSPEED
  try
    {
      CameraMetaDataLR *camerameta = new CameraMetaDataLR(filename);
      if(_rawspeed_camerameta)
        {
          // Drop the previously installed definitions
          CameraMetaDataLR *d = static_cast<CameraMetaDataLR*>(_rawspeed_camerameta);
          delete d;
        }
      _rawspeed_camerameta = static_cast<void*>(camerameta);
    }
  catch (...)
    {
      //just return error code
      return -1;
    }
#endif
  return 0;
}

LibRaw::~LibRaw()
{
  recycle();
  delete tls;
#ifdef USE_RAWSPEED
  if(_rawspeed_camerameta)
    {
      CameraMetaDataLR *cmeta = static_cast<CameraMetaDataLR*>(_rawspeed_camerameta);
      delete cmeta;
      _rawspeed_camerameta = NULL;
    }
#endif
}

// Allocation wrappers: route through the internal memory manager and
// convert failure into LIBRAW_EXCEPTION_ALLOC.
void* LibRaw:: malloc(size_t t)
{
  void *p = memmgr.malloc(t);
  if(!p)
    throw LIBRAW_EXCEPTION_ALLOC;
  return p;
}
void* LibRaw:: realloc(void *q,size_t t)
{
  void *p = memmgr.realloc(q,t);
  if(!p)
    throw LIBRAW_EXCEPTION_ALLOC;
  return p;
}
void* LibRaw:: calloc(size_t n,size_t t)
{
  void *p = memmgr.calloc(n,t);
  if(!p)
    throw LIBRAW_EXCEPTION_ALLOC;
  return p;
}
void  LibRaw:: free(void *p)
{
  memmgr.free(p);
}

// Deletes the input datastream only if LibRaw owns it (input_internal).
void LibRaw:: recycle_datastream()
{
  if(libraw_internal_data.internal_data.input && libraw_internal_data.internal_data.input_internal)
    {
      delete libraw_internal_data.internal_data.input;
      libraw_internal_data.internal_data.input = NULL;
    }
  libraw_internal_data.internal_data.input_internal = 0;
}

// Frees all per-file buffers and resets state so this object can process
// another file.
void LibRaw:: recycle()
{
  recycle_datastream();
#define FREE(a) do { if(a) { free(a); a = NULL;} }while(0)
  FREE(imgdata.image);
  FREE(imgdata.thumbnail.thumb);
  FREE(libraw_internal_data.internal_data.meta_data);
  FREE(libraw_internal_data.output_data.histogram);
  FREE(libraw_internal_data.output_data.oprof);
  FREE(imgdata.color.profile);
  FREE(imgdata.rawdata.ph1_black);
  FREE(imgdata.rawdata.raw_alloc);
#undef FREE
  ZERO(imgdata.rawdata);
  ZERO(imgdata.sizes);
  ZERO(imgdata.color);
  ZERO(libraw_internal_data);
#ifdef USE_RAWSPEED
  if(_rawspeed_decoder)
    {
      RawDecoder *d = static_cast<RawDecoder*>(_rawspeed_decoder);
      delete d;
    }
  _rawspeed_decoder = 0;
#endif
  memmgr.cleanup();
  imgdata.thumbnail.tformat = LIBRAW_THUMBNAIL_UNKNOWN;
  imgdata.progress_flags = 0;
  tls->init();
}

const char * LibRaw::unpack_function_name()
{
  libraw_decoder_info_t decoder_info;
  get_decoder_info(&decoder_info);
  return decoder_info.decoder_name;
}

// Map the current load_raw member-function pointer to a decoder name and a
// set of capability flags (FLATFIELD vs LEGACY layout, HASCURVE,
// TRYRAWSPEED, ...). Returns LIBRAW_UNSPECIFIED_ERROR for a NULL d_info and
// LIBRAW_OUT_OF_ORDER_CALL when no decoder has been selected yet.
int LibRaw::get_decoder_info(libraw_decoder_info_t* d_info)
{
  if(!d_info)
    return LIBRAW_UNSPECIFIED_ERROR;
  if(!load_raw)
    return LIBRAW_OUT_OF_ORDER_CALL;
  d_info->decoder_flags = LIBRAW_DECODER_NOTSET;
  // rawdata != 0 when the image is mosaiced (or single-color), i.e. can be
  // stored as a flat field rather than a 4-component legacy bitmap.
  int rawdata = (imgdata.idata.filters || P1.colors == 1);
  // dcraw.c names order
  if (load_raw == &LibRaw::canon_600_load_raw)
    {
      d_info->decoder_name = "canon_600_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD; // WB set within decoder, no need to load raw
    }
  else if (load_raw == &LibRaw::canon_load_raw)
    {
      d_info->decoder_name = "canon_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::lossless_jpeg_load_raw)
    {
      // Check rbayer
      d_info->decoder_name = "lossless_jpeg_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_HASCURVE | LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::canon_sraw_load_raw)
    {
      d_info->decoder_name = "canon_sraw_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY | LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::lossless_dng_load_raw)
    {
      // Check rbayer
      d_info->decoder_name = "lossless_dng_load_raw()";
      d_info->decoder_flags = rawdata? LIBRAW_DECODER_FLATFIELD : LIBRAW_DECODER_LEGACY ;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
      d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::packed_dng_load_raw)
    {
      // Check rbayer
      d_info->decoder_name = "packed_dng_load_raw()";
      d_info->decoder_flags = rawdata ?
        LIBRAW_DECODER_FLATFIELD : LIBRAW_DECODER_LEGACY;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
      d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::pentax_load_raw )
    {
      d_info->decoder_name = "pentax_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::nikon_load_raw)
    {
      // Check rbayer
      d_info->decoder_name = "nikon_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::rollei_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "rollei_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::phase_one_load_raw )
    {
      d_info->decoder_name = "phase_one_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::phase_one_load_raw_c )
    {
      d_info->decoder_name = "phase_one_load_raw_c()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::hasselblad_load_raw )
    {
      d_info->decoder_name = "hasselblad_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::leaf_hdr_load_raw )
    {
      d_info->decoder_name = "leaf_hdr_load_raw()";
      d_info->decoder_flags = imgdata.idata.filters?
        LIBRAW_DECODER_FLATFIELD:LIBRAW_DECODER_LEGACY;
    }
  else if (load_raw == &LibRaw::unpacked_load_raw )
    {
      d_info->decoder_name = "unpacked_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_USEBAYER2;
    }
  else if (load_raw == &LibRaw::sinar_4shot_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "sinar_4shot_load_raw()";
      d_info->decoder_flags = (O.shot_select|| O.half_size)?LIBRAW_DECODER_FLATFIELD:LIBRAW_DECODER_LEGACY;
    }
  else if (load_raw == &LibRaw::imacon_full_load_raw )
    {
      d_info->decoder_name = "imacon_full_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY;
    }
  else if (load_raw == &LibRaw::hasselblad_full_load_raw )
    {
      d_info->decoder_name = "hasselblad_full_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY;
    }
  else if (load_raw == &LibRaw::packed_load_raw )
    {
      d_info->decoder_name = "packed_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
      d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::nokia_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "nokia_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::panasonic_load_raw )
    {
      d_info->decoder_name = "panasonic_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::olympus_load_raw )
    {
      d_info->decoder_name = "olympus_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD | LIBRAW_DECODER_TRYRAWSPEED;
    }
  else if (load_raw == &LibRaw::minolta_rd175_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "minolta_rd175_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::quicktake_100_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "quicktake_100_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::kodak_radc_load_raw )
    {
      d_info->decoder_name = "kodak_radc_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::kodak_jpeg_load_raw )
    {
      // UNTESTED + RBAYER
      d_info->decoder_name = "kodak_jpeg_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::lossy_dng_load_raw)
    {
      // Check rbayer
      d_info->decoder_name = "lossy_dng_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY | LIBRAW_DECODER_TRYRAWSPEED;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
    }
  else if (load_raw == &LibRaw::kodak_dc120_load_raw )
    {
      d_info->decoder_name = "kodak_dc120_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::eight_bit_load_raw )
    {
      d_info->decoder_name = "eight_bit_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
    }
  else if (load_raw == &LibRaw::kodak_yrgb_load_raw )
    {
      d_info->decoder_name = "kodak_yrgb_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
    }
  else if (load_raw == &LibRaw::kodak_262_load_raw )
    {
      d_info->decoder_name = "kodak_262_load_raw()"; // UNTESTED!
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
    }
  else if (load_raw == &LibRaw::kodak_65000_load_raw )
    {
      d_info->decoder_name = "kodak_65000_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
    }
  else if (load_raw == &LibRaw::kodak_ycbcr_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "kodak_ycbcr_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
    }
  else if (load_raw == &LibRaw::kodak_rgb_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "kodak_rgb_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY;
    }
  else if (load_raw == &LibRaw::sony_load_raw )
    {
      d_info->decoder_name = "sony_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::sony_arw_load_raw )
    {
      d_info->decoder_name = "sony_arw_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
#ifndef NOSONY_RAWSPEED
      d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED;
#endif
    }
  else if (load_raw == &LibRaw::sony_arw2_load_raw )
    {
      d_info->decoder_name = "sony_arw2_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
#ifndef NOSONY_RAWSPEED
      d_info->decoder_flags |= LIBRAW_DECODER_TRYRAWSPEED;
#endif
      d_info->decoder_flags |= LIBRAW_DECODER_ITSASONY;
    }
  else if (load_raw == &LibRaw::smal_v6_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "smal_v6_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::smal_v9_load_raw )
    {
      // UNTESTED
      d_info->decoder_name = "smal_v9_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
    }
  else if (load_raw == &LibRaw::redcine_load_raw)
    {
      d_info->decoder_name = "redcine_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_FLATFIELD;
      d_info->decoder_flags |= LIBRAW_DECODER_HASCURVE;
    }
  else if (load_raw == &LibRaw::foveon_sd_load_raw )
    {
      d_info->decoder_name = "foveon_sd_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY;
    }
  else if (load_raw == &LibRaw::foveon_dp_load_raw )
    {
      d_info->decoder_name = "foveon_dp_load_raw()";
      d_info->decoder_flags = LIBRAW_DECODER_LEGACY;
    }
  else
    {
      d_info->decoder_name = "Unknown unpack function";
      d_info->decoder_flags = LIBRAW_DECODER_NOTSET;
    }
  return LIBRAW_SUCCESS;
}

// Lower the recorded white level (C.maximum) to the measured per-image data
// maximum when the latter is plausible — i.e. above the configured fraction
// (adjust_maximum_thr) of the current maximum. Thresholds outside (1e-5,
// 0.99999) disable or reset the adjustment respectively.
int LibRaw::adjust_maximum()
{
  ushort real_max;
  float auto_threshold;
  if(O.adjust_maximum_thr < 0.00001)
    return LIBRAW_SUCCESS;
  else if (O.adjust_maximum_thr > 0.99999)
    auto_threshold = LIBRAW_DEFAULT_ADJUST_MAXIMUM_THRESHOLD;
  else
    auto_threshold = O.adjust_maximum_thr;
  real_max = C.data_maximum;
  if (real_max > 0 && real_max < C.maximum && real_max > C.maximum* auto_threshold)
    {
      C.maximum = real_max;
    }
  return LIBRAW_SUCCESS;
}

// Allocation-failure check: no-op for a non-NULL pointer; otherwise invoke
// the memory-error callback (if any) and throw LIBRAW_EXCEPTION_ALLOC.
void LibRaw:: merror (void *ptr, const char *where)
{
  if (ptr) return;
  if(callbacks.mem_cb)(*callbacks.mem_cb)(callbacks.memcb_data,
                                          libraw_internal_data.internal_data.input
                                          ?libraw_internal_data.internal_data.input->fname()
                                          :NULL,
                                          where);
  throw LIBRAW_EXCEPTION_ALLOC;
}

// Open a raw file by (narrow) path. Files larger than max_buf_size use the
// bigfile datastream; smaller ones use the (buffered) file datastream.
// On success the stream is owned by this object (freed in recycle()).
int LibRaw::open_file(const char *fname, INT64 max_buf_size)
{
#ifndef WIN32
  struct stat st;
  if(stat(fname,&st))
    return LIBRAW_IO_ERROR;
  int big = (st.st_size > max_buf_size)?1:0;
#else
  struct _stati64 st;
  if(_stati64(fname,&st))
    return LIBRAW_IO_ERROR;
  int big = (st.st_size > max_buf_size)?1:0;
#endif
  LibRaw_abstract_datastream *stream;
  try {
    if(big)
      stream = new LibRaw_bigfile_datastream(fname);
    else
      stream = new LibRaw_file_datastream(fname);
  }
  catch (std::bad_alloc)
    {
      recycle();
      return LIBRAW_UNSUFFICIENT_MEMORY;
    }
  if(!stream->valid())
    {
      delete stream;
      return LIBRAW_IO_ERROR;
    }
  ID.input_internal = 0; // preserve from deletion on error
  int ret = open_datastream(stream);
  if (ret == LIBRAW_SUCCESS)
    {
      ID.input_internal =1 ; // flag to delete datastream on recycle
    }
  else
    {
      delete stream;
      ID.input_internal = 0;
    }
  return ret;
}

#ifdef WIN32
// Wide-character variant of open_file() for Windows paths; identical logic.
int LibRaw::open_file(const wchar_t *fname, INT64 max_buf_size)
{
  struct _stati64 st;
  if(_wstati64(fname,&st))
    return LIBRAW_IO_ERROR;
  int big = (st.st_size > max_buf_size)?1:0;
  LibRaw_abstract_datastream *stream;
  try {
    if(big)
      stream = new LibRaw_bigfile_datastream(fname);
    else
      stream = new LibRaw_file_datastream(fname);
  }
  catch (std::bad_alloc)
    {
      recycle();
      return LIBRAW_UNSUFFICIENT_MEMORY;
    }
  if(!stream->valid())
    {
      delete stream;
      return LIBRAW_IO_ERROR;
    }
  ID.input_internal = 0; // preserve from deletion on error
  int ret = open_datastream(stream);
  if (ret == LIBRAW_SUCCESS)
    {
      ID.input_internal =1 ; // flag to delete datastream on recycle
    }
  else
    {
      delete stream;
      ID.input_internal = 0;
    }
  return ret;
}
#endif

// Open raw data from a caller-supplied memory buffer (not copied; caller
// must keep it alive until recycle()). The wrapping datastream itself is
// owned by this object on success.
int LibRaw::open_buffer(void *buffer, size_t size)
{
  // this stream will close on recycle()
  if(!buffer || buffer==(void*)-1)
    return LIBRAW_IO_ERROR;
  LibRaw_buffer_datastream *stream;
  try {
    stream = new LibRaw_buffer_datastream(buffer,size);
  }
  catch (std::bad_alloc)
    {
      recycle();
      return LIBRAW_UNSUFFICIENT_MEMORY;
    }
  if(!stream->valid())
    {
      delete stream;
      return LIBRAW_IO_ERROR;
    }
  ID.input_internal = 0; // preserve from deletion on error
  int ret = open_datastream(stream);
  if (ret == LIBRAW_SUCCESS)
    {
      ID.input_internal =1 ; // flag to delete datastream on recycle
    }
  else
    {
      delete stream;
      ID.input_internal = 0;
    }
  return ret;
}

// Decoder for uncompressed 3-channel 16-bit Hasselblad data: reads
// interleaved B,G,R shorts directly into imgdata.image.
void LibRaw::hasselblad_full_load_raw()
{
  int row, col;
  for (row=0; row < S.height; row++)
    for (col=0; col < S.width; col++)
      {
        read_shorts (&imgdata.image[row*S.width+col][2], 1); // B
        read_shorts (&imgdata.image[row*S.width+col][1], 1); // G
        read_shorts (&imgdata.image[row*S.width+col][0], 1); // R
      }
}

// Core open path shared by open_file()/open_buffer(): identify the camera,
// load an embedded ICC profile if present, apply per-model size/black-level
// quirks, and snapshot color/size/params into imgdata.rawdata.
// Does NOT take ownership of `stream` (callers manage that via
// ID.input_internal).
int LibRaw::open_datastream(LibRaw_abstract_datastream *stream)
{
  if(!stream)
    return ENOENT;
  if(!stream->valid())
    return LIBRAW_IO_ERROR;
  recycle();
  try {
    ID.input = stream;
    SET_PROC_FLAG(LIBRAW_PROGRESS_OPEN);
    if (O.use_camera_matrix < 0)
      O.use_camera_matrix = O.use_camera_wb;
    identify();
#if 0
    // disabled experiment: detect 3-channel Hasselblad files by bytes/pixel
    size_t bytes = ID.input->size()-libraw_internal_data.unpacker_data.data_offset;
    float bpp = float(bytes)/float(S.raw_width)/float(S.raw_height);
    float bpp2 = float(bytes)/float(S.width)/float(S.height);
    printf("RawSize: %dx%d data offset: %d data size:%d bpp: %g bpp2: %g\n",S.raw_width,S.raw_height,libraw_internal_data.unpacker_data.data_offset,bytes,bpp,bpp2);
    if(!strcasecmp(imgdata.idata.make,"Hasselblad") && bpp == 6.0f)
      {
        load_raw = &LibRaw::hasselblad_full_load_raw;
        S.width = S.raw_width;
        S.height = S.raw_height;
        P1.filters = 0;
        P1.colors=3;
        P1.raw_count=1;
        C.maximum=0xffff;
        printf("3 channel hassy found\n");
      }
#endif
    // Load embedded color profile, replacing any previous one.
    if(C.profile_length)
      {
        if(C.profile) free(C.profile);
        C.profile = malloc(C.profile_length);
        merror(C.profile,"LibRaw::open_file()");
        ID.input->seek(ID.profile_offset,SEEK_SET);
        ID.input->read(C.profile,C.profile_length,1);
      }
    SET_PROC_FLAG(LIBRAW_PROGRESS_IDENTIFY);
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
  catch (std::exception ee) {
    EXCEPTION_HANDLER(LIBRAW_EXCEPTION_IO_CORRUPT);
  }
  if(P1.raw_count < 1)
    return LIBRAW_FILE_UNSUPPORTED;
  write_fun = &LibRaw::write_ppm_tiff;
  if (load_raw == &LibRaw::kodak_ycbcr_load_raw)
    {
      // Kodak YCbCr wants even dimensions.
      S.height += S.height & 1;
      S.width += S.width & 1;
    }
  libraw_decoder_info_t dinfo;
  get_decoder_info(&dinfo);
  if(dinfo.decoder_flags & LIBRAW_DECODER_LEGACY)
    {
      // Adjust sizes according to image buffer size
      S.raw_width = S.width;
      S.left_margin = 0;
      S.raw_height = S.height;
      S.top_margin = 0;
    }
  // Half-size output halves the working (i*) dimensions.
  IO.shrink = P1.filters && (O.half_size || ((O.threshold || O.aber[0] != 1 || O.aber[2] != 1) ));
  S.iheight = (S.height + IO.shrink) >> IO.shrink;
  S.iwidth = (S.width + IO.shrink) >> IO.shrink;
  // Per-model quirks keyed by the filters pattern value:
  if(imgdata.idata.filters == 303979333U)
    {
      //printf("BL=%d [%d,%d,%d,%d]\n",C.black,C.cblack[0],C.cblack[1],C.cblack[2],C.cblack[3]);
      C.black = C.cblack[0];
      C.cblack[0]=C.cblack[1]=C.cblack[2]=C.cblack[3]=0;
      imgdata.idata.filters = 2;
    }
  // X20
  if(imgdata.idata.filters == 0x5bb8445b)
    {
      C.black = 257;
      C.cblack[0]=C.cblack[1]=C.cblack[2]=C.cblack[3]=0;
      imgdata.idata.filters = 2;
      S.width = 4030;
      S.height = 3010;
      S.top_margin = 2;
      S.left_margin = 2;
    }
  // X100S
  if(imgdata.idata.filters == 0x5145bb84)
    {
      C.black = 1024;
      C.cblack[0]=C.cblack[1]=C.cblack[2]=C.cblack[3]=0;
      S.left_margin = 2;
      S.top_margin = 1;
      S.width = 4934;
      S.height = 3290;
      imgdata.idata.filters = 2;
    }
  // Save color,sizes and internal data into raw_image fields
  memmove(&imgdata.rawdata.color,&imgdata.color,sizeof(imgdata.color));
  memmove(&imgdata.rawdata.sizes,&imgdata.sizes,sizeof(imgdata.sizes));
  memmove(&imgdata.rawdata.iparams,&imgdata.idata,sizeof(imgdata.idata));
  memmove(&imgdata.rawdata.ioparams,&libraw_internal_data.internal_output_params,sizeof(libraw_internal_data.internal_output_params));
  SET_PROC_FLAG(LIBRAW_PROGRESS_SIZE_ADJUST);
  return LIBRAW_SUCCESS;
}

#ifdef USE_RAWSPEED
// Post-process levels after a successful RawSpeed decode: some decoders
// need their white point forced, and Sony ARW2/packed data is rescaled x4
// when RawSpeed's black level indicates the 14-bit representation.
void LibRaw::fix_after_rawspeed(int bl)
{
  if (load_raw == &LibRaw::lossy_dng_load_raw)
    C.maximum = 0xffff;
  else if (load_raw == &LibRaw::sony_load_raw)
    C.maximum = 0x3ff0;
  else if ( (load_raw == &LibRaw::sony_arw2_load_raw
             || (load_raw == &LibRaw::packed_load_raw && !strcasecmp(imgdata.idata.make,"Sony")))
            && bl >= (C.black+C.cblack[0])*2 )
    {
      C.maximum *=4;
      C.black *=4;
      for(int c=0; c< 4; c++)
        C.cblack[c]*=4;
    }
}
#else
// Stub when built without RawSpeed support.
void LibRaw::fix_after_rawspeed(int)
{
}
#endif

// Decode the raw bitmap: try RawSpeed first when enabled/supported, fall
// back to the built-in load_raw decoder. On success the decoded data lives
// in imgdata.rawdata (raw_image / color4_image / color3_image) and black
// levels are normalized. Returns 0 or a LibRaw error code.
int LibRaw::unpack(void)
{
  CHECK_ORDER_HIGH(LIBRAW_PROGRESS_LOAD_RAW);
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_IDENTIFY);
  try {
    if(!libraw_internal_data.internal_data.input)
      return LIBRAW_INPUT_CLOSED;
    RUN_CALLBACK(LIBRAW_PROGRESS_LOAD_RAW,0,2);
    if (O.shot_select >= P1.raw_count)
      return LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE;
    if(!load_raw)
      return LIBRAW_UNSPECIFIED_ERROR;
    // Adopt the camera color matrix when requested and plausible.
    if (O.use_camera_matrix && C.cmatrix[0][0] > 0.25)
      {
        memcpy (C.rgb_cam, C.cmatrix, sizeof (C.cmatrix));
        IO.raw_color = 0;
      }
    // already allocated ?
    if(imgdata.image)
      {
        free(imgdata.image);
        imgdata.image = 0;
      }
    if(imgdata.rawdata.raw_alloc)
      {
        free(imgdata.rawdata.raw_alloc);
        imgdata.rawdata.raw_alloc = 0;
      }
    if (libraw_internal_data.unpacker_data.meta_length)
      {
        libraw_internal_data.internal_data.meta_data =
          (char *) malloc (libraw_internal_data.unpacker_data.meta_length);
        merror (libraw_internal_data.internal_data.meta_data, "LibRaw::unpack()");
      }
    libraw_decoder_info_t decoder_info;
    get_decoder_info(&decoder_info);
    // Working sizes are temporarily overridden below; save for restore.
    int save_iwidth = S.iwidth, save_iheight = S.iheight, save_shrink = IO.shrink;
    int rwidth = S.raw_width, rheight = S.raw_height;
    if( !IO.fuji_width)
      {
        // adjust non-Fuji allocation
        if(rwidth < S.width + S.left_margin)
          rwidth = S.width + S.left_margin;
        if(rheight < S.height + S.top_margin)
          rheight = S.height + S.top_margin;
      }
    S.raw_pitch = S.raw_width*2;
    imgdata.rawdata.raw_image = 0;
    imgdata.rawdata.color4_image = 0;
    imgdata.rawdata.color3_image = 0;
#ifdef USE_RAWSPEED
    // RawSpeed Supported,
    if(O.use_rawspeed && (decoder_info.decoder_flags & LIBRAW_DECODER_TRYRAWSPEED) && _rawspeed_camerameta)
      {
        INT64 spos = ID.input->tell();
        try
          {
            // printf("Using rawspeed\n");
            // Read the whole input into a temp buffer for RawSpeed's FileMap.
            ID.input->seek(0,SEEK_SET);
            INT64 _rawspeed_buffer_sz = ID.input->size()+32;
            void *_rawspeed_buffer = malloc(_rawspeed_buffer_sz);
            if(!_rawspeed_buffer) throw LIBRAW_EXCEPTION_ALLOC;
            ID.input->read(_rawspeed_buffer,_rawspeed_buffer_sz,1);
            FileMap map((uchar8*)_rawspeed_buffer,_rawspeed_buffer_sz);
            RawParser t(&map);
            RawDecoder *d = 0;
            CameraMetaDataLR *meta = static_cast<CameraMetaDataLR*>(_rawspeed_camerameta);
            d = t.getDecoder();
            try {
              d->checkSupport(meta);
            }
            catch (const RawDecoderException& e)
              {
                imgdata.process_warnings |= LIBRAW_WARN_RAWSPEED_UNSUPPORTED;
                throw e;
              }
            d->decodeRaw();
            d->decodeMetaData(meta);
            RawImage r = d->mRaw;
            // The decoder object owns the decoded pixels, so it is kept
            // alive (in _rawspeed_decoder) for as long as we use them.
            if (r->isCFA)
              {
                // Save pointer to decoder
                _rawspeed_decoder = static_cast<void*>(d);
                imgdata.rawdata.raw_image = (ushort*) r->getDataUncropped(0,0);
                S.raw_pitch = r->pitch;
                fix_after_rawspeed(r->blackLevel);
              }
            else if(r->getCpp()==4)
              {
                _rawspeed_decoder = static_cast<void*>(d);
                imgdata.rawdata.color4_image = (ushort(*)[4]) r->getDataUncropped(0,0);
                S.raw_pitch = r->pitch;
                C.maximum = r->whitePoint;
                fix_after_rawspeed(r->blackLevel);
              }
            else if(r->getCpp() == 3)
              {
                _rawspeed_decoder = static_cast<void*>(d);
                imgdata.rawdata.color3_image = (ushort(*)[3]) r->getDataUncropped(0,0);
                S.raw_pitch = r->pitch;
                C.maximum = r->whitePoint;
                fix_after_rawspeed(r->blackLevel);
              }
            else
              {
                // Unsupported component count: discard the decoder.
                delete d;
              }
            free(_rawspeed_buffer);
            imgdata.process_warnings |= LIBRAW_WARN_RAWSPEED_PROCESSED;
          }
        catch (...)
          {
            imgdata.process_warnings |= LIBRAW_WARN_RAWSPEED_PROBLEM;
            // no other actions: if raw_image is not set we'll try usual load_raw call
          }
        ID.input->seek(spos,SEEK_SET);
      }
#endif
    if(!imgdata.rawdata.raw_image && !imgdata.rawdata.color4_image && !imgdata.rawdata.color3_image) // RawSpeed failed!
      {
        // Not allocated on RawSpeed call, try call LibRaw
        if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)
          {
            // NOTE(review): +7 rows of slack for decoders that overrun;
            // rwidth*(rheight+7) is an int product — overflow unchecked here.
            imgdata.rawdata.raw_alloc = malloc(rwidth*(rheight+7)*sizeof(imgdata.rawdata.raw_image[0]));
            imgdata.rawdata.raw_image = (ushort*) imgdata.rawdata.raw_alloc;
          }
        else if (decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY)
          {
            // sRAW and Foveon only, so extra buffer size is just 1/4
            // Legacy converters does not supports half mode!
            S.iwidth = S.width;
            S.iheight= S.height;
            IO.shrink = 0;
            S.raw_pitch = S.width*8;
            // allocate image as temporary buffer, size
            imgdata.rawdata.raw_alloc = calloc(S.iwidth*S.iheight,sizeof(*imgdata.image));
            imgdata.image = (ushort (*)[4]) imgdata.rawdata.raw_alloc;
          }
        ID.input->seek(libraw_internal_data.unpacker_data.data_offset, SEEK_SET);
        // Nikon unpacked files are decoded with a forced 16-bit maximum.
        unsigned m_save = C.maximum;
        if(load_raw == &LibRaw::unpacked_load_raw && !strcasecmp(imgdata.idata.make,"Nikon"))
          C.maximum=65535;
        (this->*load_raw)();
        if(load_raw == &LibRaw::unpacked_load_raw && !strcasecmp(imgdata.idata.make,"Nikon"))
          C.maximum = m_save;
      }
    if(imgdata.rawdata.raw_image)
      crop_masked_pixels(); // calculate black levels
    // recover saved
    if( (decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY) && !imgdata.rawdata.color4_image)
      {
        imgdata.image = 0;
        imgdata.rawdata.color4_image = (ushort (*)[4]) imgdata.rawdata.raw_alloc;
      }
    // recover image sizes
    S.iwidth = save_iwidth;
    S.iheight = save_iheight;
    IO.shrink = save_shrink;
    // adjust black to possible maximum: fold the common minimum of the
    // per-channel blacks into the global black level.
    unsigned int i = C.cblack[3];
    unsigned int c;
    for(c=0;c<3;c++)
      if (i > C.cblack[c]) i = C.cblack[c];
    for (c=0;c<4;c++)
      C.cblack[c] -= i;
    C.black += i;
    // Save color,sizes and internal data into raw_image fields
    memmove(&imgdata.rawdata.color,&imgdata.color,sizeof(imgdata.color));
    memmove(&imgdata.rawdata.sizes,&imgdata.sizes,sizeof(imgdata.sizes));
    memmove(&imgdata.rawdata.iparams,&imgdata.idata,sizeof(imgdata.idata));
    memmove(&imgdata.rawdata.ioparams,&libraw_internal_data.internal_output_params,sizeof(libraw_internal_data.internal_output_params));
    SET_PROC_FLAG(LIBRAW_PROGRESS_LOAD_RAW);
    RUN_CALLBACK(LIBRAW_PROGRESS_LOAD_RAW,1,2);
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
  catch (std::exception ee) {
    EXCEPTION_HANDLER(LIBRAW_EXCEPTION_IO_CORRUPT);
  }
}

// Release the postprocessed image bitmap and roll progress flags back to
// the post-load_raw state so processing can be re-run.
void LibRaw::free_image(void)
{
  if(imgdata.image)
    {
      free(imgdata.image);
      imgdata.image = 0;
      imgdata.progress_flags = LIBRAW_PROGRESS_START|LIBRAW_PROGRESS_OPEN
        |LIBRAW_PROGRESS_IDENTIFY|LIBRAW_PROGRESS_SIZE_ADJUST|LIBRAW_PROGRESS_LOAD_RAW;
    }
}

// Restore color/size/idata/params snapshots taken at unpack() time back into
// the working structures, resolve the effective flip, and recompute the
// shrink/working dimensions for half-size mode.
void LibRaw::raw2image_start()
{
  // restore color,sizes and internal data into raw_image fields
  memmove(&imgdata.color,&imgdata.rawdata.color,sizeof(imgdata.color));
  memmove(&imgdata.sizes,&imgdata.rawdata.sizes,sizeof(imgdata.sizes));
  memmove(&imgdata.idata,&imgdata.rawdata.iparams,sizeof(imgdata.idata));
  memmove(&libraw_internal_data.internal_output_params,&imgdata.rawdata.ioparams,sizeof(libraw_internal_data.internal_output_params));
  if (O.user_flip >= 0)
    S.flip = O.user_flip;
  // Normalize a degree value (possibly negative) into dcraw flip codes.
  switch ((S.flip+3600) % 360)
    {
    case 270: S.flip = 5; break;
    case 180: S.flip = 3; break;
    case 90: S.flip = 6; break;
    }
  // adjust for half mode!
  IO.shrink = P1.filters && (O.half_size || ((O.threshold || O.aber[0] != 1 || O.aber[2] != 1) ));
  S.iheight = (S.height + IO.shrink) >> IO.shrink;
  S.iwidth = (S.width + IO.shrink) >> IO.shrink;
}

// True when the image is a compressed Phase One file with per-row black
// reference data (needs the temp-buffer subtract/correct path).
int LibRaw::is_phaseone_compressed()
{
  return (load_raw == &LibRaw::phase_one_load_raw_c && imgdata.rawdata.ph1_black);
}

// Rebuild imgdata.image (the 4-component working bitmap) from the decoded
// raw data, honoring Fuji rotation layout, margins and half-size shrink.
// Returns 0 or a LibRaw error code.
int LibRaw::raw2image(void)
{
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW);
  try {
    raw2image_start();
    // Phase One compressed data is black-subtracted into a temp copy first.
    if (is_phaseone_compressed())
      {
        phase_one_allocate_tempbuffer();
        phase_one_subtract_black((ushort*)imgdata.rawdata.raw_alloc,imgdata.rawdata.raw_image);
        phase_one_correct();
      }
    // free and re-allocate image bitmap
    if(imgdata.image)
      {
        imgdata.image = (ushort (*)[4]) realloc (imgdata.image,S.iheight*S.iwidth *sizeof (*imgdata.image));
        memset(imgdata.image,0,S.iheight*S.iwidth *sizeof (*imgdata.image));
      }
    else
      imgdata.image = (ushort (*)[4]) calloc (S.iheight*S.iwidth, sizeof (*imgdata.image));
    merror (imgdata.image, "raw2image()");
    libraw_decoder_info_t decoder_info;
    get_decoder_info(&decoder_info);
    // Move saved bitmap to imgdata.image
    if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)
      {
        if (IO.fuji_width)
          {
            // Fuji SuperCCD: raw rows/cols map to rotated output coords.
            unsigned r,c;
            int row,col;
            for (row=0; row < S.raw_height-S.top_margin*2; row++)
              {
                for (col=0; col < IO.fuji_width << !libraw_internal_data.unpacker_data.fuji_layout; col++)
                  {
                    if (libraw_internal_data.unpacker_data.fuji_layout)
                      {
                        r = IO.fuji_width - 1 - col + (row >> 1);
                        c = col + ((row+1) >> 1);
                      }
                    else
                      {
                        r = IO.fuji_width - 1 + row - (col >> 1);
                        c = row + ((col+1) >> 1);
                      }
                    if (r < S.height && c < S.width)
                      imgdata.image[((r)>>IO.shrink)*S.iwidth+((c)>>IO.shrink)][FC(r,c)]
                        = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)];
                  }
              }
          }
        else
          {
            // Plain Bayer: copy visible area, skipping margins.
            int row,col;
            for (row=0; row < S.height; row++)
              for (col=0; col < S.width; col++)
                imgdata.image[((row) >> IO.shrink)*S.iwidth + ((col) >> IO.shrink)][fcol(row,col)]
                  = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)];
          }
      }
    else if(decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY)
      {
        if(imgdata.rawdata.color4_image)
          {
            if(S.width*8 == S.raw_pitch)
              memmove(imgdata.image,imgdata.rawdata.color4_image,S.width*S.height*sizeof(*imgdata.image));
            else
              {
                for(int row = 0; row < S.height; row++)
                  memmove(&imgdata.image[row*S.width],
                          &imgdata.rawdata.color4_image[(row+S.top_margin)*S.raw_pitch/8+S.left_margin],
                          S.width*sizeof(*imgdata.image));
              }
          }
        else if(imgdata.rawdata.color3_image)
          {
            // Expand 3-component rows into the 4-component bitmap (4th = 0).
            unsigned char *c3image = (unsigned char*) imgdata.rawdata.color3_image;
            for(int row = 0; row < S.height; row++)
              {
                ushort (*srcrow)[3] = (ushort (*)[3]) &c3image[(row+S.top_margin)*S.raw_pitch];
                ushort (*dstrow)[4] = (ushort (*)[4]) &imgdata.image[row*S.width];
                for(int col=0; col < S.width; col++)
                  {
                    for(int c=0; c< 3; c++) dstrow[col][c] = srcrow[S.left_margin+col][c];
                    dstrow[col][3]=0;
                  }
              }
          }
        else
          {
            // legacy decoder, but no data?
            throw LIBRAW_EXCEPTION_DECODE_RAW;
          }
      }
    // Free PhaseOne separate copy allocated at function start
    if (is_phaseone_compressed())
      {
        phase_one_free_tempbuffer();
      }
    // hack - clear later flags!
    if (load_raw == &CLASS canon_600_load_raw && S.width < S.raw_width)
      {
        canon_600_correct();
      }
    imgdata.progress_flags = LIBRAW_PROGRESS_START|LIBRAW_PROGRESS_OPEN | LIBRAW_PROGRESS_RAW2_IMAGE
      |LIBRAW_PROGRESS_IDENTIFY|LIBRAW_PROGRESS_SIZE_ADJUST|LIBRAW_PROGRESS_LOAD_RAW;
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

// Allocate a scratch raw_image buffer for the Phase One black-subtract /
// correct pass (original data stays in raw_alloc).
void LibRaw::phase_one_allocate_tempbuffer()
{
  // Allocate temp raw_image buffer
  imgdata.rawdata.raw_image = (ushort*)malloc(S.raw_pitch*S.raw_height);
  merror (imgdata.rawdata.raw_image, "phase_one_prepare_to_correct()");
}

// Release the scratch buffer and point raw_image back at the original data.
void LibRaw::phase_one_free_tempbuffer()
{
  free(imgdata.rawdata.raw_image);
  imgdata.rawdata.raw_image = (ushort*) imgdata.rawdata.raw_alloc;
}

// Subtract black levels from Phase One raw data (src -> dest, clamped at 0).
// Uses the per-row reference blacks (ph1_black, split at split_col) unless
// the user supplied explicit black levels, in which case C.cblack is used
// via the 16-entry CFA pattern lookup.
void LibRaw::phase_one_subtract_black(ushort *src, ushort *dest)
{
  // ushort *src = (ushort*)imgdata.rawdata.raw_alloc;
  if(O.user_black<0 && O.user_cblack[0] <= -1000000 && O.user_cblack[1] <= -1000000 && O.user_cblack[2] <= -1000000 && O.user_cblack[3] <= -1000000)
    {
      for(int row = 0; row < S.raw_height; row++)
        {
          ushort bl = imgdata.color.phase_one_data.t_black - imgdata.rawdata.ph1_black[row][0];
          for(int col=0; col < imgdata.color.phase_one_data.split_col && col < S.raw_width; col++)
            {
              int idx = row*S.raw_width + col;
              ushort val = src[idx];
              dest[idx] = val>bl?val-bl:0;
            }
          bl = imgdata.color.phase_one_data.t_black - imgdata.rawdata.ph1_black[row][1];
          for(int col=imgdata.color.phase_one_data.split_col; col < S.raw_width; col++)
            {
              int idx = row*S.raw_width + col;
              ushort val = src[idx];
              dest[idx] = val>bl?val-bl:0;
            }
        }
    }
  else // black set by user interaction
    {
      // Black level in cblack!
      for(int row = 0; row < S.raw_height; row++)
        {
          unsigned short cblk[16];
          for(int cc=0; cc<16;cc++)
            cblk[cc]=C.cblack[fcol(row,cc)];
          for(int col = 0; col < S.raw_width; col++)
            {
              int idx = row*S.raw_width + col;
              ushort val = src[idx];
              ushort bl = cblk[col&0xf];
              dest[idx] = val>bl?val-bl:0;
            }
        }
    }
}

// Copy uncropped Fuji SuperCCD raw data into imgdata.image with black
// subtraction; tracks the per-call data maximum in *dmaxp. Parallelized
// per-row under OpenMP, with the max merged in a critical section.
void LibRaw::copy_fuji_uncropped(unsigned short cblack[4],unsigned short *dmaxp)
{
  int row;
#if defined(LIBRAW_USE_OPENMP)
#pragma omp parallel for default(shared)
#endif
  for (row=0; row < S.raw_height-S.top_margin*2; row++)
    {
      int col;
      unsigned short ldmax = 0;
      for (col=0; col < IO.fuji_width << !libraw_internal_data.unpacker_data.fuji_layout; col++)
        {
          unsigned r,c;
          if (libraw_internal_data.unpacker_data.fuji_layout)
            {
              r = IO.fuji_width - 1 - col + (row >> 1);
              c = col + ((row+1) >> 1);
            }
          else
            {
              r = IO.fuji_width - 1 + row - (col >> 1);
              c = row + ((col+1) >> 1);
            }
          if (r < S.height && c < S.width)
            {
              unsigned short val = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)];
              int cc = FC(r,c);
              if(val>cblack[cc])
                {
                  val-=cblack[cc];
                  if(val>ldmax)ldmax = val;
                }
              else
                val = 0;
              imgdata.image[((r)>>IO.shrink)*S.iwidth+((c)>>IO.shrink)][cc] = val;
            }
        }
#if defined(LIBRAW_USE_OPENMP)
#pragma omp critical(dataupdate)
#endif
      {
        if(*dmaxp < ldmax)
          *dmaxp = ldmax;
      }
    }
}

// Copy Bayer raw data (cropped or uncropped) into imgdata.image with black
// subtraction; same dmax tracking/OpenMP structure as copy_fuji_uncropped.
void LibRaw::copy_bayer(unsigned short cblack[4],unsigned short *dmaxp)
{
  // Both cropped and uncropped
  int row;
#if defined(LIBRAW_USE_OPENMP)
#pragma omp parallel for default(shared)
#endif
  for (row=0; row < S.height; row++)
    {
      int col;
      unsigned short ldmax = 0;
      for (col=0; col < S.width; col++)
        {
          unsigned short val = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2+(col+S.left_margin)];
          int cc = fcol(row,col);
          if(val>cblack[cc])
            {
              val-=cblack[cc];
              if(val>ldmax)ldmax = val;
            }
          else
            val = 0;
          imgdata.image[((row) >> IO.shrink)*S.iwidth + ((col) >> IO.shrink)][cc] = val;
        }
#if defined(LIBRAW_USE_OPENMP)
#pragma omp critical(dataupdate)
#endif
      {
        if(*dmaxp < ldmax)
          *dmaxp = ldmax;
      }
    }
}

int
// Extended raw->image conversion: like raw2image() but additionally applies
// the user crop box (aligned to the CFA pattern) and, when
// do_subtract_black is set, subtracts black levels during the copy and
// records the measured data maximum.
LibRaw::raw2image_ex(int do_subtract_black)
{
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW);
  try {
    raw2image_start();
    // Compressed P1 files with bl data!
    if (is_phaseone_compressed())
      {
        phase_one_allocate_tempbuffer();
        phase_one_subtract_black((ushort*)imgdata.rawdata.raw_alloc,imgdata.rawdata.raw_image);
        phase_one_correct();
      }
    // process cropping
    int do_crop = 0;
    unsigned save_width = S.width;
    // ~UINT_MAX == 0, so ~cropbox[2]/[3] are false for the "not set" default.
    if (~O.cropbox[2] && ~O.cropbox[3] && load_raw != &LibRaw::foveon_sd_load_raw) // Foveon SD to be cropped later
      {
        int crop[4],c,filt;
        // NOTE(review): the loop's `int c` shadows the outer `c`;
        // crop[] is int copied from unsigned cropbox[] — the <0 check
        // guards the (huge-value) wraparound case.
        for(int c=0;c<4;c++)
          {
            crop[c] = O.cropbox[c];
            if(crop[c]<0)
              crop[c]=0;
          }
        // Align crop origin/size to the CFA repeat of the sensor type.
        if(IO.fuji_width && imgdata.idata.filters >= 1000)
          {
            crop[0] = (crop[0]/4)*4;
            crop[1] = (crop[1]/4)*4;
            if(!libraw_internal_data.unpacker_data.fuji_layout)
              {
                crop[2]*=sqrt(2.0);
                crop[3]/=sqrt(2.0);
              }
            crop[2] = (crop[2]/4+1)*4;
            crop[3] = (crop[3]/4+1)*4;
          }
        else if (imgdata.idata.filters == 1)
          {
            crop[0] = (crop[0]/16)*16;
            crop[1] = (crop[1]/16)*16;
          }
        else if(imgdata.idata.filters == 2)
          {
            crop[0] = (crop[0]/6)*6;
            crop[1] = (crop[1]/6)*6;
          }
        do_crop = 1;
        crop[2] = MIN (crop[2], (signed) S.width-crop[0]);
        crop[3] = MIN (crop[3], (signed) S.height-crop[1]);
        if (crop[2] <= 0 || crop[3] <= 0)
          throw LIBRAW_EXCEPTION_BAD_CROP;
        // adjust sizes!
        S.left_margin+=crop[0];
        S.top_margin+=crop[1];
        S.width=crop[2];
        S.height=crop[3];
        S.iheight = (S.height + IO.shrink) >> IO.shrink;
        S.iwidth = (S.width + IO.shrink) >> IO.shrink;
        // Re-derive the filters pattern for the shifted origin.
        if(!IO.fuji_width && imgdata.idata.filters && imgdata.idata.filters >= 1000)
          {
            for (filt=c=0; c < 16; c++)
              filt |= FC((c >> 1)+(crop[1]), (c & 1)+(crop[0])) << c*2;
            imgdata.idata.filters = filt;
          }
      }
    int alloc_width = S.iwidth;
    int alloc_height = S.iheight;
    if(IO.fuji_width && do_crop)
      {
        // Cropped Fuji output is rotated, so the allocation is sized for
        // the rotated geometry.
        int IO_fw = S.width >> !libraw_internal_data.unpacker_data.fuji_layout;
        int t_alloc_width = (S.height >> libraw_internal_data.unpacker_data.fuji_layout) + IO_fw;
        int t_alloc_height = t_alloc_width - 1;
        alloc_height = (t_alloc_height + IO.shrink) >> IO.shrink;
        alloc_width = (t_alloc_width + IO.shrink) >> IO.shrink;
      }
    int alloc_sz = alloc_width*alloc_height;
    if(imgdata.image)
      {
        imgdata.image = (ushort (*)[4]) realloc (imgdata.image,alloc_sz *sizeof (*imgdata.image));
        memset(imgdata.image,0,alloc_sz *sizeof (*imgdata.image));
      }
    else
      imgdata.image = (ushort (*)[4]) calloc (alloc_sz, sizeof (*imgdata.image));
    merror (imgdata.image, "raw2image_ex()");
    libraw_decoder_info_t decoder_info;
    get_decoder_info(&decoder_info);
    // Adjust black levels
    unsigned short cblack[4]={0,0,0,0};
    unsigned short dmax = 0;
    if(do_subtract_black)
      {
        adjust_bl();
        for(int i=0; i< 4; i++)
          cblack[i] = (unsigned short)C.cblack[i];
      }
    // Move saved bitmap to imgdata.image
    if(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)
      {
        if (IO.fuji_width)
          {
            if(do_crop)
              {
                // Cropped Fuji: inline rotate-and-copy, then adopt the
                // rotated dimensions as the new image size.
                IO.fuji_width = S.width >> !libraw_internal_data.unpacker_data.fuji_layout;
                int IO_fwidth = (S.height >> libraw_internal_data.unpacker_data.fuji_layout) + IO.fuji_width;
                int IO_fheight = IO_fwidth - 1;
                int row,col;
                for(row=0;row<S.height;row++)
                  {
                    for(col=0;col<S.width;col++)
                      {
                        int r,c;
                        if (libraw_internal_data.unpacker_data.fuji_layout)
                          {
                            r = IO.fuji_width - 1 - col + (row >> 1);
                            c = col + ((row+1) >> 1);
                          }
                        else
                          {
                            r = IO.fuji_width - 1 + row - (col >> 1);
                            c = row + ((col+1) >> 1);
                          }
                        unsigned short val = imgdata.rawdata.raw_image[(row+S.top_margin)*S.raw_pitch/2 +(col+S.left_margin)];
                        int cc = FCF(row,col);
                        if(val > cblack[cc])
                          {
                            val-=cblack[cc];
                            if(dmax < val) dmax = val;
                          }
                        else
                          val = 0;
                        imgdata.image[((r) >> IO.shrink)*alloc_width + ((c) >> IO.shrink)][cc] = val;
                      }
                  }
                S.height = IO_fheight;
                S.width = IO_fwidth;
                S.iheight = (S.height + IO.shrink) >> IO.shrink;
                S.iwidth = (S.width + IO.shrink) >> IO.shrink;
                S.raw_height -= 2*S.top_margin;
              }
            else
              {
                copy_fuji_uncropped(cblack,&dmax);
              }
          } // end Fuji
        else
          {
            copy_bayer(cblack,&dmax);
          }
      }
    else if(decoder_info.decoder_flags & LIBRAW_DECODER_LEGACY)
      {
        if(imgdata.rawdata.color4_image)
          {
            if(S.raw_pitch != S.width*8)
              {
                for(int row = 0; row < S.height; row++)
                  memmove(&imgdata.image[row*S.width],
                          &imgdata.rawdata.color4_image[(row+S.top_margin)*S.raw_pitch/8+S.left_margin],
                          S.width*sizeof(*imgdata.image));
              }
            else
              {
                // legacy is always 4channel and not shrinked!
                memmove(imgdata.image,imgdata.rawdata.color4_image,S.width*S.height*sizeof(*imgdata.image));
              }
          }
        else if(imgdata.rawdata.color3_image)
          {
            // Expand 3-component rows into the 4-component bitmap (4th = 0).
            unsigned char *c3image = (unsigned char*) imgdata.rawdata.color3_image;
            for(int row = 0; row < S.height; row++)
              {
                ushort (*srcrow)[3] = (ushort (*)[3]) &c3image[(row+S.top_margin)*S.raw_pitch];
                ushort (*dstrow)[4] = (ushort (*)[4]) &imgdata.image[row*S.width];
                for(int col=0; col < S.width; col++)
                  {
                    for(int c=0; c< 3; c++) dstrow[col][c] = srcrow[S.left_margin+col][c];
                    dstrow[col][3]=0;
                  }
              }
          }
        else
          {
            // legacy decoder, but no data?
            throw LIBRAW_EXCEPTION_DECODE_RAW;
          }
      }
    // Free PhaseOne separate copy allocated at function start
    if (is_phaseone_compressed())
      {
        phase_one_free_tempbuffer();
      }
    if (load_raw == &CLASS canon_600_load_raw && S.width < S.raw_width)
      {
        canon_600_correct();
      }
    if(do_subtract_black)
      {
        // Black has been folded into the pixel values; zero the levels so
        // later stages do not subtract twice.
        C.data_maximum = (int)dmax;
        C.maximum -= C.black;
        ZERO(C.cblack);
        C.black = 0;
      }
    // hack - clear later flags!
    imgdata.progress_flags = LIBRAW_PROGRESS_START|LIBRAW_PROGRESS_OPEN | LIBRAW_PROGRESS_RAW2_IMAGE
      |LIBRAW_PROGRESS_IDENTIFY|LIBRAW_PROGRESS_SIZE_ADJUST|LIBRAW_PROGRESS_LOAD_RAW;
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

#if 1
// Build an in-memory thumbnail object from the loaded thumbnail data:
// bitmap thumbnails are wrapped as LIBRAW_IMAGE_BITMAP; JPEG thumbnails are
// re-emitted with an SOI marker and, if the original lacks an Exif APP1
// segment, a minimal one is synthesized. Caller frees the result; *errcode
// (if non-NULL) receives 0 or a LibRaw/errno code.
libraw_processed_image_t * LibRaw::dcraw_make_mem_thumb(int *errcode)
{
  if(!T.thumb)
    {
      if ( !ID.toffset)
        {
          if(errcode) *errcode= LIBRAW_NO_THUMBNAIL;
        }
      else
        {
          if(errcode) *errcode= LIBRAW_OUT_OF_ORDER_CALL;
        }
      return NULL;
    }
  if (T.tformat == LIBRAW_THUMBNAIL_BITMAP)
    {
      // ::malloc (global) — result is owned by the caller, not by memmgr.
      libraw_processed_image_t * ret =
        (libraw_processed_image_t *)::malloc(sizeof(libraw_processed_image_t)+T.tlength);
      if(!ret)
        {
          if(errcode) *errcode= ENOMEM;
          return NULL;
        }
      memset(ret,0,sizeof(libraw_processed_image_t));
      ret->type = LIBRAW_IMAGE_BITMAP;
      ret->height = T.theight;
      ret->width = T.twidth;
      ret->colors = 3;
      ret->bits = 8;
      ret->data_size = T.tlength;
      memmove(ret->data,T.thumb,T.tlength);
      if(errcode) *errcode= 0;
      return ret;
    }
  else if (T.tformat == LIBRAW_THUMBNAIL_JPEG)
    {
      ushort exif[5];
      int mk_exif = 0;
      // Synthesize an Exif APP1 header when the stored JPEG lacks one.
      if(strcmp(T.thumb+6,"Exif")) mk_exif = 1;
      int dsize = T.tlength + mk_exif * (sizeof(exif)+sizeof(tiff_hdr));
      libraw_processed_image_t * ret =
        (libraw_processed_image_t *)::malloc(sizeof(libraw_processed_image_t)+dsize);
      if(!ret)
        {
          if(errcode) *errcode= ENOMEM;
          return NULL;
        }
      memset(ret,0,sizeof(libraw_processed_image_t));
      ret->type = LIBRAW_IMAGE_JPEG;
      ret->data_size = dsize;
      // JPEG SOI marker.
      ret->data[0] = 0xff;
      ret->data[1] = 0xd8;
      if(mk_exif)
        {
          struct tiff_hdr th;
          memcpy (exif, "\xff\xe1 Exif\0\0", 10);
          exif[1] = htons (8 + sizeof th);
          memmove(ret->data+2,exif,sizeof(exif));
          tiff_head (&th, 0);
          memmove(ret->data+(2+sizeof(exif)),&th,sizeof(th));
          memmove(ret->data+(2+sizeof(exif)+sizeof(th)),T.thumb+2,T.tlength-2);
        }
      else
        {
          memmove(ret->data+2,T.thumb+2,T.tlength-2);
        }
      if(errcode) *errcode= 0;
      return ret;
    }
  else
    {
      if(errcode) *errcode= LIBRAW_UNSUPPORTED_THUMBNAIL;
      return NULL;
    }
}

// jlb
// macros for copying pixels to either BGR or RGB formats
#define
FORBGR for(c=P1.colors-1; c >=0 ; c--) #define FORRGB for(c=0; c < P1.colors ; c++) void LibRaw::get_mem_image_format(int* width, int* height, int* colors, int* bps) const { if (S.flip & 4) { *width = S.height; *height = S.width; } else { *width = S.width; *height = S.height; } *colors = P1.colors; *bps = O.output_bps; } int LibRaw::copy_mem_image(void* scan0, int stride, int bgr) { // the image memory pointed to by scan0 is assumed to be in the format returned by get_mem_image_format if((imgdata.progress_flags & LIBRAW_PROGRESS_THUMB_MASK) < LIBRAW_PROGRESS_PRE_INTERPOLATE) return LIBRAW_OUT_OF_ORDER_CALL; if(libraw_internal_data.output_data.histogram) { int perc, val, total, t_white=0x2000,c; perc = S.width * S.height * 0.01; /* 99th percentile white level */ if (IO.fuji_width) perc /= 2; if (!((O.highlight & ~2) || O.no_auto_bright)) for (t_white=c=0; c < P1.colors; c++) { for (val=0x2000, total=0; --val > 32; ) if ((total += libraw_internal_data.output_data.histogram[c][val]) > perc) break; if (t_white < val) t_white = val; } gamma_curve (O.gamm[0], O.gamm[1], 2, (t_white << 3)/O.bright); } int s_iheight = S.iheight; int s_iwidth = S.iwidth; int s_width = S.width; int s_hwight = S.height; S.iheight = S.height; S.iwidth = S.width; if (S.flip & 4) SWAP(S.height,S.width); uchar *ppm; ushort *ppm2; int c, row, col, soff, rstep, cstep; soff = flip_index (0, 0); cstep = flip_index (0, 1) - soff; rstep = flip_index (1, 0) - flip_index (0, S.width); for (row=0; row < S.height; row++, soff += rstep) { uchar *bufp = ((uchar*)scan0)+row*stride; ppm2 = (ushort*) (ppm = bufp); // keep trivial decisions in the outer loop for speed if (bgr) { if (O.output_bps == 8) { for (col=0; col < S.width; col++, soff += cstep) FORBGR *ppm++ = imgdata.color.curve[imgdata.image[soff][c]]>>8; } else { for (col=0; col < S.width; col++, soff += cstep) FORBGR *ppm2++ = imgdata.color.curve[imgdata.image[soff][c]]; } } else { if (O.output_bps == 8) { for (col=0; col < S.width; col++, soff += 
cstep) FORRGB *ppm++ = imgdata.color.curve[imgdata.image[soff][c]]>>8; } else { for (col=0; col < S.width; col++, soff += cstep) FORRGB *ppm2++ = imgdata.color.curve[imgdata.image[soff][c]]; } } // bufp += stride; // go to the next line } S.iheight = s_iheight; S.iwidth = s_iwidth; S.width = s_width; S.height = s_hwight; return 0; } #undef FORBGR #undef FORRGB libraw_processed_image_t *LibRaw::dcraw_make_mem_image(int *errcode) { int width, height, colors, bps; get_mem_image_format(&width, &height, &colors, &bps); int stride = width * (bps/8) * colors; unsigned ds = height * stride; libraw_processed_image_t *ret = (libraw_processed_image_t*)::malloc(sizeof(libraw_processed_image_t)+ds); if(!ret) { if(errcode) *errcode= ENOMEM; return NULL; } memset(ret,0,sizeof(libraw_processed_image_t)); // metadata init ret->type = LIBRAW_IMAGE_BITMAP; ret->height = height; ret->width = width; ret->colors = colors; ret->bits = bps; ret->data_size = ds; copy_mem_image(ret->data, stride, 0); return ret; } #undef FORC #undef FORCC #undef SWAP #endif int LibRaw::dcraw_ppm_tiff_writer(const char *filename) { CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW); if(!imgdata.image) return LIBRAW_OUT_OF_ORDER_CALL; if(!filename) return ENOENT; FILE *f = fopen(filename,"wb"); if(!f) return errno; try { if(!libraw_internal_data.output_data.histogram) { libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4); merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_ppm_tiff_writer()"); } libraw_internal_data.internal_data.output = f; write_ppm_tiff(); SET_PROC_FLAG(LIBRAW_PROGRESS_FLIP); libraw_internal_data.internal_data.output = NULL; fclose(f); return 0; } catch ( LibRaw_exceptions err) { fclose(f); EXCEPTION_HANDLER(err); } } void LibRaw::kodak_thumb_loader() { // some kodak cameras ushort s_height = S.height, s_width = S.width,s_iwidth = S.iwidth,s_iheight=S.iheight; int s_colors = P1.colors; unsigned 
  s_filters = P1.filters; // declarator of the "unsigned" on the previous line
  ushort (*s_image)[4] = imgdata.image; // saved; a temporary buffer replaces it below
  S.height = T.theight;
  S.width = T.twidth;
  P1.filters = 0;
  // YCbCr thumbs are decoded in 2x1 blocks; round dimensions up to even
  if (thumb_load_raw == &CLASS kodak_ycbcr_load_raw)
    {
      S.height += S.height & 1;
      S.width += S.width & 1;
    }
  imgdata.image = (ushort (*)[4]) calloc (S.iheight*S.iwidth, sizeof (*imgdata.image));
  merror (imgdata.image, "LibRaw::kodak_thumb_loader()");
  ID.input->seek(ID.toffset, SEEK_SET);
  // read kodak thumbnail into T.image[]
  (this->*thumb_load_raw)();
  // copy-n-paste from image pipe
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define CLIP(x) LIM(x,0,65535)
#define SWAP(a,b) { a ^= b; a ^= (b ^= a); }
  // from scale_colors
  {
    double dmax;
    float scale_mul[4];
    int c,val;
    // Normalize channel multipliers against the smallest pre_mul
    for (dmax=DBL_MAX, c=0; c < 3; c++)
      if (dmax > C.pre_mul[c])
        dmax = C.pre_mul[c];
    for( c=0; c< 3; c++)
      scale_mul[c] = (C.pre_mul[c] / dmax) * 65535.0 / C.maximum;
    scale_mul[3] = scale_mul[1];
    size_t size = S.height * S.width;
    for (unsigned i=0; i < size*4 ; i++)
      {
        val = imgdata.image[0][i];
        if(!val) continue;
        val *= scale_mul[i & 3];
        imgdata.image[0][i] = CLIP(val);
      }
  }
  // from convert_to_rgb
  ushort *img;
  int row,col;
  int (*t_hist)[LIBRAW_HISTOGRAM_SIZE] = (int (*)[LIBRAW_HISTOGRAM_SIZE]) calloc(sizeof(*t_hist),4);
  merror (t_hist, "LibRaw::kodak_thumb_loader()");
  // Fixed camera-to-sRGB matrix used for all Kodak thumbnails
  float out[3], out_cam[3][4] =
    {
      {2.81761312, -1.98369181, 0.166078627, 0},
      {-0.111855984, 1.73688626, -0.625030339, 0},
      {-0.0379119813, -0.891268849, 1.92918086, 0}
    };
  for (img=imgdata.image[0], row=0; row < S.height; row++)
    for (col=0; col < S.width; col++, img+=4)
      {
        out[0] = out[1] = out[2] = 0;
        int c;
        for(c=0;c<3;c++)
          {
            out[0] += out_cam[0][c] * img[c];
            out[1] += out_cam[1][c] * img[c];
            out[2] += out_cam[2][c] * img[c];
          }
        for(c=0; c<3; c++)
          img[c] = CLIP((int) out[c]);
        for(c=0; c<P1.colors;c++)
          t_hist[c][img[c] >> 3]++;
      }
  // from gamma_lut
  // Swap in the thumbnail histogram and a scratch curve so gamma_curve()
  // does not destroy the main image's state
  int (*save_hist)[LIBRAW_HISTOGRAM_SIZE] = libraw_internal_data.output_data.histogram;
  libraw_internal_data.output_data.histogram = t_hist;
  // make curve output curve!
  ushort (*t_curve) = (ushort*) calloc(sizeof(C.curve),1);
  merror (t_curve, "LibRaw::kodak_thumb_loader()");
  memmove(t_curve,C.curve,sizeof(C.curve));
  memset(C.curve,0,sizeof(C.curve));
  {
    int perc, val, total, t_white=0x2000,c;
    perc = S.width * S.height * 0.01; /* 99th percentile white level */
    if (IO.fuji_width) perc /= 2;
    if (!((O.highlight & ~2) || O.no_auto_bright))
      for (t_white=c=0; c < P1.colors; c++)
        {
          for (val=0x2000, total=0; --val > 32; )
            if ((total += libraw_internal_data.output_data.histogram[c][val]) > perc)
              break;
          if (t_white < val) t_white = val;
        }
    gamma_curve (O.gamm[0], O.gamm[1], 2, (t_white << 3)/O.bright);
  }
  libraw_internal_data.output_data.histogram = save_hist;
  free(t_hist);
  // from write_ppm_tiff - copy pixels into bitmap
  S.iheight = S.height;
  S.iwidth = S.width;
  if (S.flip & 4) SWAP(S.height,S.width);
  if(T.thumb) free(T.thumb);
  T.thumb = (char*) calloc (S.width * S.height, P1.colors);
  merror (T.thumb, "LibRaw::kodak_thumb_loader()");
  T.tlength = S.width * S.height * P1.colors;
  // from write_tiff_ppm
  {
    int soff = flip_index (0, 0);
    int cstep = flip_index (0, 1) - soff;
    int rstep = flip_index (1, 0) - flip_index (0, S.width);
    for (int row=0; row < S.height; row++, soff += rstep)
      {
        char *ppm = T.thumb + row*S.width*P1.colors;
        for (int col=0; col < S.width; col++, soff += cstep)
          for(int c = 0; c < P1.colors; c++)
            ppm [col*P1.colors+c] = imgdata.color.curve[imgdata.image[soff][c]]>>8;
      }
  }
  memmove(C.curve,t_curve,sizeof(C.curve));
  free(t_curve);
  // restore variables
  free(imgdata.image);
  imgdata.image = s_image;
  T.twidth = S.width;
  S.width = s_width;
  S.iwidth = s_iwidth;
  S.iheight = s_iheight;
  T.theight = S.height;
  S.height = s_height;
  T.tcolors = P1.colors;
  P1.colors = s_colors;
  P1.filters = s_filters;
}
#undef MIN
#undef MAX
#undef LIM
#undef CLIP
#undef SWAP

// thumbnail , thumb_format
// Load the embedded thumbnail into T.thumb, dispatching on the writer/loader
// the identify step selected. Returns 0 or a LibRaw error code.
int LibRaw::unpack_thumb(void)
{
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_IDENTIFY);
  CHECK_ORDER_BIT(LIBRAW_PROGRESS_THUMB_LOAD);
  try {
    if(!libraw_internal_data.internal_data.input)
      return LIBRAW_INPUT_CLOSED;
    if ( !ID.toffset)
      {
        return LIBRAW_NO_THUMBNAIL;
      }
    else if (thumb_load_raw)
      {
        // Kodak-style thumbs need the mini pipeline in kodak_thumb_loader()
        kodak_thumb_loader();
        T.tformat = LIBRAW_THUMBNAIL_BITMAP;
        SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD);
        return 0;
      }
    else
      {
        ID.input->seek(ID.toffset, SEEK_SET);
        if ( write_thumb == &LibRaw::jpeg_thumb)
          {
            // NOTE(review): T.tlength comes from file metadata and is used
            // unvalidated for the allocation/read below — confirm bounds are
            // checked earlier in the pipeline
            if(T.thumb) free(T.thumb);
            T.thumb = (char *) malloc (T.tlength);
            merror (T.thumb, "jpeg_thumb()");
            ID.input->read (T.thumb, 1, T.tlength);
            T.tcolors = 3;
            T.tformat = LIBRAW_THUMBNAIL_JPEG;
            SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD);
            return 0;
          }
        else if (write_thumb == &LibRaw::ppm_thumb)
          {
            T.tlength = T.twidth * T.theight*3;
            if(T.thumb) free(T.thumb);
            T.thumb = (char *) malloc (T.tlength);
            merror (T.thumb, "ppm_thumb()");
            ID.input->read(T.thumb, 1, T.tlength);
            T.tformat = LIBRAW_THUMBNAIL_BITMAP;
            SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD);
            return 0;
          }
        else if (write_thumb == &LibRaw::ppm16_thumb)
          {
            // 16-bit PPM thumb: read shorts, byte-swap if needed, keep high bytes
            T.tlength = T.twidth * T.theight*3;
            ushort *t_thumb = (ushort*)calloc(T.tlength,2);
            ID.input->read(t_thumb,2,T.tlength);
            // NOTE(review): "order= 0x4949" is an assignment inside the
            // condition, then compared against a boolean endianness test —
            // looks intentional (dcraw heritage) but verify against upstream
            if ((libraw_internal_data.unpacker_data.order= 0x4949) == (ntohs(0x1234) == 0x1234))
              swab ((char*)t_thumb, (char*)t_thumb, T.tlength*2);
            if(T.thumb) free(T.thumb);
            T.thumb = (char *) malloc (T.tlength);
            merror (T.thumb, "ppm_thumb()");
            for (int i=0; i < T.tlength; i++)
              T.thumb[i] = t_thumb[i] >> 8;
            free(t_thumb);
            T.tformat = LIBRAW_THUMBNAIL_BITMAP;
            SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD);
            return 0;
          }
        else if (write_thumb == &LibRaw::foveon_thumb)
          {
            foveon_thumb_loader();
            // may return with error, so format is set in
            // foveon thumb loader itself
            SET_PROC_FLAG(LIBRAW_PROGRESS_THUMB_LOAD);
            return 0;
          }
        // else if -- all other write_thumb cases!
        else
          {
            return LIBRAW_UNSUPPORTED_THUMBNAIL;
          }
      }
    // last resort
    return LIBRAW_UNSUPPORTED_THUMBNAIL;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

// Write the already-loaded thumbnail to a file: raw JPEG stream or binary PPM.
// Returns 0, an errno value, or a LibRaw error code.
int LibRaw::dcraw_thumb_writer(const char *fname)
{
  //    CHECK_ORDER_LOW(LIBRAW_PROGRESS_THUMB_LOAD);
  if(!fname)
    return ENOENT;
  FILE *tfp = fopen(fname,"wb");
  if(!tfp)
    return errno;
  if(!T.thumb)
    {
      fclose(tfp);
      return LIBRAW_OUT_OF_ORDER_CALL;
    }
  try {
    switch (T.tformat)
      {
      case LIBRAW_THUMBNAIL_JPEG:
        jpeg_thumb_writer (tfp,T.thumb,T.tlength);
        break;
      case LIBRAW_THUMBNAIL_BITMAP:
        fprintf (tfp, "P6\n%d %d\n255\n", T.twidth, T.theight);
        fwrite (T.thumb, 1, T.tlength, tfp);
        break;
      default:
        fclose(tfp);
        return LIBRAW_UNSUPPORTED_THUMBNAIL;
      }
    fclose(tfp);
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    fclose(tfp);
    EXCEPTION_HANDLER(err);
  }
}

// Recompute output dimensions (Fuji rotate, pixel aspect, flip) without
// touching pixel data — lets callers size buffers before processing.
int LibRaw::adjust_sizes_info_only(void)
{
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_IDENTIFY);
  raw2image_start();
  if (O.use_fuji_rotate)
    {
      if (IO.fuji_width)
        {
          // 45-degree rotation scales dimensions by sqrt(2)
          IO.fuji_width = (IO.fuji_width - 1 + IO.shrink) >> IO.shrink;
          S.iwidth = (ushort)(IO.fuji_width / sqrt(0.5));
          S.iheight = (ushort)( (S.iheight - IO.fuji_width) / sqrt(0.5));
        }
      else
        {
          if (S.pixel_aspect < 1) S.iheight = (ushort)( S.iheight / S.pixel_aspect + 0.5);
          if (S.pixel_aspect > 1) S.iwidth = (ushort) (S.iwidth * S.pixel_aspect + 0.5);
        }
    }
  SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE);
  if ( S.flip & 4)
    {
      unsigned short t = S.iheight;
      S.iheight=S.iwidth;
      S.iwidth = t;
      SET_PROC_FLAG(LIBRAW_PROGRESS_FLIP);
    }
  return 0;
}

// Subtract per-channel black levels from imgdata.image and track the data
// maximum. No-op (maximum scan only) when black is already zero or for
// compressed PhaseOne files handled earlier.
int LibRaw::subtract_black()
{
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_RAW2_IMAGE);
  try {
    if(!is_phaseone_compressed() && (C.cblack[0] || C.cblack[1] || C.cblack[2] || C.cblack[3]))
      {
#define BAYERC(row,col,c) imgdata.image[((row) >> IO.shrink)*S.iwidth + ((col) >> IO.shrink)][c]
        int cblk[4],i;
        for(i=0;i<4;i++)
          cblk[i] = C.cblack[i];
        int size = S.iheight * S.iwidth;
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define CLIP(x) LIM(x,0,65535)
        int dmax = 0;
        // i & 3 selects the channel of the flattened [size][4] buffer
        for(i=0; i< size*4; i++)
          {
            int val = imgdata.image[0][i];
            val -= cblk[i & 3];
            imgdata.image[0][i] = CLIP(val);
            if(dmax < val) dmax = val;
          }
        C.data_maximum = dmax & 0xffff;
#undef MIN
#undef MAX
#undef LIM
#undef CLIP
        C.maximum -= C.black;
        ZERO(C.cblack);
        C.black = 0;
#undef BAYERC
      }
    else
      {
        // Nothing to Do, maximum is already calculated, black level is 0, so no change
        // only calculate channel maximum;
        int idx;
        ushort *p = (ushort*)imgdata.image;
        int dmax = 0;
        for(idx=0;idx<S.iheight*S.iwidth*4;idx++)
          if(dmax < p[idx]) dmax = p[idx];
        C.data_maximum = dmax;
      }
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

#define TBLN 65535

// Exposure correction before interpolation: build a LUT mapping 16-bit values
// through a linear (shift<=1) or cube-root-smoothed highlight-preserving
// (shift>1) curve, then apply it to all four channels.
void LibRaw::exp_bef(float shift, float smooth)
{
  // params limits
  if(shift>8) shift = 8;
  if(shift<0.25) shift = 0.25;
  if(smooth < 0.0) smooth = 0.0;
  if(smooth > 1.0) smooth = 1.0;
  unsigned short *lut = (ushort*)malloc((TBLN+1)*sizeof(unsigned short));
  if(shift <=1.0)
    {
      for(int i=0;i<=TBLN;i++)
        lut[i] = (unsigned short)((float)i*shift);
    }
  else
    {
      // Fit Y = A*X^(1/3) + B*X + CC through the linear segment end (x1,y1)
      // and the compressed top end (x2,y2)
      float x1,x2,y1,y2;
      float cstops = log(shift)/log(2.0f);
      float room = cstops*2;
      float roomlin = powf(2.0f,room);
      x2 = (float)TBLN;
      x1 = (x2+1)/roomlin-1;
      y1 = x1*shift;
      y2 = x2*(1+(1-smooth)*(shift-1));
      float sq3x=powf(x1*x1*x2,1.0f/3.0f);
      float B = (y2-y1+shift*(3*x1-3.0f*sq3x)) / (x2+2.0f*x1-3.0f*sq3x);
      float A = (shift - B)*3.0f*powf(x1*x1,1.0f/3.0f);
      float CC = y2 - A*powf(x2,1.0f/3.0f)-B*x2;
      for(int i=0;i<=TBLN;i++)
        {
          float X = (float)i;
          float Y = A*powf(X,1.0f/3.0f)+B*X+CC;
          if(i<x1)
            lut[i] = (unsigned short)((float)i*shift);
          else
            lut[i] = Y<0?0:(Y>TBLN?TBLN:(unsigned short)(Y));
        }
    }
  for(int i=0; i< S.height*S.width; i++)
    {
      imgdata.image[i][0] = lut[imgdata.image[i][0]];
      imgdata.image[i][1] = lut[imgdata.image[i][1]];
      imgdata.image[i][2] = lut[imgdata.image[i][2]];
      imgdata.image[i][3] = lut[imgdata.image[i][3]];
    }
  if(C.data_maximum <=TBLN)
    C.data_maximum = lut[C.data_maximum];
  if(C.maximum <= TBLN)
    C.maximum = lut[C.maximum];
  // no need to adjust the minumum, black is already subtracted
  free(lut);
}

#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y))
#define CLIP(x) LIM(x,0,65535)

// Apply the camera-to-output color matrix to every pixel (unless raw_color)
// and accumulate the per-channel output histogram.
void LibRaw::convert_to_rgb_loop(float out_cam[3][4])
{
  int row,col,c;
  float out[3];
  ushort *img;
  memset(libraw_internal_data.output_data.histogram,0,sizeof(int)*LIBRAW_HISTOGRAM_SIZE*4);
  for (img=imgdata.image[0], row=0; row < S.height; row++)
    for (col=0; col < S.width; col++, img+=4)
      {
        if (!libraw_internal_data.internal_output_params.raw_color)
          {
            out[0] = out[1] = out[2] = 0;
            for(c=0; c< imgdata.idata.colors; c++)
              {
                out[0] += out_cam[0][c] * img[c];
                out[1] += out_cam[1][c] * img[c];
                out[2] += out_cam[2][c] * img[c];
              }
            for(c=0;c<3;c++)
              img[c] = CLIP((int) out[c]);
          }
        for(c=0; c< imgdata.idata.colors; c++)
          libraw_internal_data.output_data.histogram[c][img[c] >> 3]++;
      }
}

// Multiply every sample by its channel's scale factor, subtracting the
// channel black level first when any is non-zero.
void LibRaw::scale_colors_loop(float scale_mul[4])
{
  unsigned size = S.iheight*S.iwidth;
  if(C.cblack[0]||C.cblack[1]||C.cblack[2]||C.cblack[3])
    {
      for (unsigned i=0; i < size*4; i++)
        {
          int val = imgdata.image[0][i];
          if (!val) continue;
          val -= C.cblack[i & 3];
          val *= scale_mul[i & 3];
          imgdata.image[0][i] = CLIP(val);
        }
    }
  else // BL is zero
    {
      for (unsigned i=0; i < size*4; i++)
        {
          int val = imgdata.image[0][i];
          val *= scale_mul[i & 3];
          imgdata.image[0][i] = CLIP(val);
        }
    }
}

// Fold user overrides into the black levels, then move the part common to all
// four channels from C.cblack[] into C.black, and finally express cblack[] as
// absolute (common + per-channel) values.
void LibRaw::adjust_bl()
{
  if (O.user_black >= 0)
    C.black = O.user_black;
  for(int i=0; i<4; i++)
    if(O.user_cblack[i]>-1000000)
      C.cblack[i] = O.user_cblack[i];
  // remove common part from C.cblack[]
  int i = C.cblack[3];
  int c;
  for(c=0;c<3;c++)
    if (i > C.cblack[c]) i = C.cblack[c];
  for(c=0;c<4;c++)
    C.cblack[c] -= i;
  C.black += i;
  for(c=0;c<4;c++)
    C.cblack[c] += C.black;
}

// Run the full dcraw-style processing pipeline on the unpacked RAW data:
// black subtraction, scaling, demosaic, filtering, color conversion.
// Returns 0 or a LibRaw error code.
int LibRaw::dcraw_process(void)
{
  int quality,i;
  int iterations=-1, dcb_enhance=1, noiserd=0;
  int eeci_refine_fl=0,
    es_med_passes_fl=0; // declarator of the "int" list on the previous line
  float cared=0,cablue=0;
  float linenoise=0;
  float lclean=0,cclean=0;
  float thresh=0;
  float preser=0;
  float expos=1.0;
  CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW);
  //    CHECK_ORDER_HIGH(LIBRAW_PROGRESS_PRE_INTERPOLATE);
  try {
    int no_crop = 1;
    if (~O.cropbox[2] && ~O.cropbox[3])
      no_crop=0;
    libraw_decoder_info_t di;
    get_decoder_info(&di);
    // Black can be subtracted during raw2image copy only when no step that
    // needs the original values (bad pixels, dark frame, debanding, legacy
    // decoder, zero-is-bad) will run
    int subtract_inline = !O.bad_pixels && !O.dark_frame && !O.wf_debanding
      && !(di.decoder_flags & LIBRAW_DECODER_LEGACY) && !IO.zero_is_bad;
    raw2image_ex(subtract_inline); // allocate imgdata.image and copy data!
    int save_4color = O.four_color_rgb;
    if (IO.zero_is_bad)
      {
        remove_zeroes();
        SET_PROC_FLAG(LIBRAW_PROGRESS_REMOVE_ZEROES);
      }
    if(O.half_size)
      O.four_color_rgb = 1;
    if(O.bad_pixels && no_crop)
      {
        bad_pixels(O.bad_pixels);
        SET_PROC_FLAG(LIBRAW_PROGRESS_BAD_PIXELS);
      }
    if (O.dark_frame && no_crop)
      {
        subtract (O.dark_frame);
        SET_PROC_FLAG(LIBRAW_PROGRESS_DARK_FRAME);
      }
    if (O.wf_debanding)
      {
        wf_remove_banding();
      }
    quality = 2 + !IO.fuji_width;
    if (O.user_qual >= 0) quality = O.user_qual;
    if(!subtract_inline || !C.data_maximum)
      {
        adjust_bl();
        subtract_black();
      }
    adjust_maximum();
    if (O.user_sat > 0) C.maximum = O.user_sat;
    if (P1.is_foveon)
      {
        if(load_raw == &LibRaw::foveon_dp_load_raw)
          {
            // DP Merrill: clamp negative samples to zero instead of the full
            // Foveon interpolation
            for (int i=0; i < S.height*S.width*4; i++)
              if ((short) imgdata.image[0][i] < 0)
                imgdata.image[0][i] = 0;
          }
        else
          foveon_interpolate();
        SET_PROC_FLAG(LIBRAW_PROGRESS_FOVEON_INTERPOLATE);
      }
    if (O.green_matching && !O.half_size)
      {
        green_matching();
      }
    if (!P1.is_foveon)
      {
        scale_colors();
        SET_PROC_FLAG(LIBRAW_PROGRESS_SCALE_COLORS);
      }
    pre_interpolate();
    SET_PROC_FLAG(LIBRAW_PROGRESS_PRE_INTERPOLATE);
    if (O.dcb_iterations >= 0) iterations = O.dcb_iterations;
    if (O.dcb_enhance_fl >=0 ) dcb_enhance = O.dcb_enhance_fl;
    if (O.fbdd_noiserd >=0 ) noiserd = O.fbdd_noiserd;
    if (O.eeci_refine >=0 ) eeci_refine_fl = O.eeci_refine;
    if (O.es_med_passes >0 ) es_med_passes_fl = O.es_med_passes;
    // LIBRAW_DEMOSAIC_PACK_GPL3
    if (!O.half_size && O.cfa_green >0)
      {thresh=O.green_thresh ;green_equilibrate(thresh);}
    if (O.exp_correc >0)
      {expos=O.exp_shift ; preser=O.exp_preser; exp_bef(expos,preser);}
    if (O.ca_correc >0 )
      {cablue=O.cablue; cared=O.cared; CA_correct_RT(cablue, cared);}
    if (O.cfaline >0 )
      {linenoise=O.linenoise; cfa_linedn(linenoise);}
    if (O.cfa_clean >0 )
      {lclean=O.lclean; cclean=O.cclean; cfa_impulse_gauss(lclean,cclean);}
    // Demosaic: dispatch on the quality setting (0..12)
    if (P1.filters)
      {
        if (noiserd>0 && P1.colors==3 && P1.filters)
          fbdd(noiserd);
        if (quality == 0)
          lin_interpolate();
        else if (quality == 1 || P1.colors > 3 || P1.filters < 1000)
          vng_interpolate();
        else if (quality == 2)
          ppg_interpolate();
        else if (quality == 3)
          ahd_interpolate();
        // really don't need it here due to fallback op
        else if (quality == 4)
          dcb(iterations, dcb_enhance);
        // LIBRAW_DEMOSAIC_PACK_GPL2
        else if (quality == 5)
          ahd_interpolate_mod();
        else if (quality == 6)
          afd_interpolate_pl(2,1);
        else if (quality == 7)
          vcd_interpolate(0);
        else if (quality == 8)
          vcd_interpolate(12);
        else if (quality == 9)
          lmmse_interpolate(1);
        // LIBRAW_DEMOSAIC_PACK_GPL3
        else if (quality == 10)
          amaze_demosaic_RT();
        // LGPL2
        else if (quality == 11)
          dht_interpolate();
        else if (quality == 12)
          aahd_interpolate();
        // fallback to AHD
        else
          ahd_interpolate();
        SET_PROC_FLAG(LIBRAW_PROGRESS_INTERPOLATE);
      }
    if (IO.mix_green)
      {
        // Average the two green channels and drop to 3 colors
        for (P1.colors=3, i=0; i < S.height * S.width; i++)
          imgdata.image[i][1] = (imgdata.image[i][1] + imgdata.image[i][3]) >> 1;
        SET_PROC_FLAG(LIBRAW_PROGRESS_MIX_GREEN);
      }
    if(!P1.is_foveon)
      {
        if (P1.colors == 3)
          {
            if (quality == 8)
              {
                if (eeci_refine_fl == 1) refinement();
                if (O.med_passes > 0) median_filter_new();
                if (es_med_passes_fl > 0) es_median_filter();
              }
            else
              {
                median_filter();
              }
            SET_PROC_FLAG(LIBRAW_PROGRESS_MEDIAN_FILTER);
          }
      }
    if (O.highlight == 2)
      {
        blend_highlights();
        SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
      }
    if (O.highlight > 2)
      {
        recover_highlights();
        SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS);
      }
    if (O.use_fuji_rotate)
      {
        fuji_rotate();
        SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE);
      }
    if(!libraw_internal_data.output_data.histogram)
      {
        libraw_internal_data.output_data.histogram =
          (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4);
        merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_process()");
      }
#ifndef NO_LCMS
    if(O.camera_profile)
      {
        apply_profile(O.camera_profile,O.output_profile);
        SET_PROC_FLAG(LIBRAW_PROGRESS_APPLY_PROFILE);
      }
#endif
    convert_to_rgb();
    SET_PROC_FLAG(LIBRAW_PROGRESS_CONVERT_RGB);
    if (O.use_fuji_rotate)
      {
        stretch();
        SET_PROC_FLAG(LIBRAW_PROGRESS_STRETCH);
      }
    O.four_color_rgb = save_4color; // also, restore
    return 0;
  }
  catch ( LibRaw_exceptions err) {
    EXCEPTION_HANDLER(err);
  }
}

// Supported cameras:
// NULL-terminated list of camera model names exposed via cameraList().
static const char *static_camera_list[] =
{
"Adobe Digital Negative (DNG)", "AgfaPhoto DC-833m",
"Apple QuickTake 100", "Apple QuickTake 150", "Apple QuickTake 200",
"ARRIRAW format",
"AVT F-080C", "AVT F-145C", "AVT F-201C", "AVT F-510C", "AVT F-810C",
"Canon PowerShot 600", "Canon PowerShot A5", "Canon PowerShot A5 Zoom",
"Canon PowerShot A50", "Canon PowerShot A460 (CHDK hack)",
"Canon PowerShot A470 (CHDK hack)", "Canon PowerShot A530 (CHDK hack)",
"Canon PowerShot A570 (CHDK hack)", "Canon PowerShot A590 (CHDK hack)",
"Canon PowerShot A610 (CHDK hack)", "Canon PowerShot A620 (CHDK hack)",
"Canon PowerShot A630 (CHDK hack)", "Canon PowerShot A640 (CHDK hack)",
"Canon PowerShot A650 (CHDK hack)", "Canon PowerShot A710 IS (CHDK hack)",
"Canon PowerShot A720 IS (CHDK hack)", "Canon PowerShot Pro70",
"Canon PowerShot Pro90 IS", "Canon PowerShot Pro1",
"Canon PowerShot G1", "Canon PowerShot G1 X", "Canon PowerShot G2",
"Canon PowerShot G3", "Canon PowerShot G5", "Canon PowerShot G6",
"Canon PowerShot G7 (CHDK hack)", "Canon PowerShot G9", "Canon PowerShot G10",
"Canon PowerShot G11", "Canon PowerShot G12", "Canon PowerShot G15",
"Canon PowerShot S2 IS (CHDK hack)", "Canon PowerShot S3 IS (CHDK hack)",
"Canon PowerShot S5 IS (CHDK hack)", "Canon PowerShot SD300 (CHDK hack)",
"Canon PowerShot S30", "Canon PowerShot S40", "Canon PowerShot S45",
"Canon PowerShot S50", "Canon PowerShot S60", "Canon PowerShot S70",
"Canon PowerShot S90", "Canon PowerShot S95", "Canon PowerShot S100",
"Canon PowerShot S110", "Canon PowerShot SX1 IS", "Canon PowerShot SX50 HS",
"Canon PowerShot SX110 IS (CHDK hack)", "Canon PowerShot SX120 IS (CHDK hack)",
"Canon PowerShot SX220 HS (CHDK hack)", "Canon PowerShot SX20 IS (CHDK hack)",
"Canon PowerShot SX30 IS (CHDK hack)",
"Canon EOS D30", "Canon EOS D60", "Canon EOS 5D", "Canon EOS 5D Mark II",
"Canon EOS 5D Mark III", "Canon EOS 6D", "Canon EOS 7D", "Canon EOS 10D",
"Canon EOS 20D", "Canon EOS 30D", "Canon EOS 40D", "Canon EOS 50D",
"Canon EOS 60D", "Canon EOS 100D/ Digital Rebel SL1",
"Canon EOS 300D / Digital Rebel / Kiss Digital",
"Canon EOS 350D / Digital Rebel XT / Kiss Digital N",
"Canon EOS 400D / Digital Rebel XTi / Kiss Digital X",
"Canon EOS 450D / Digital Rebel XSi / Kiss Digital X2",
"Canon EOS 500D / Digital Rebel T1i / Kiss Digital X3",
"Canon EOS 550D / Digital Rebel T2i / Kiss Digital X4",
"Canon EOS 600D / Digital Rebel T3i / Kiss Digital X5",
"Canon EOS 650D / Digital Rebel T4i / Kiss Digital X6i",
"Canon EOS 700D / Digital Rebel T54i",
"Canon EOS 1000D / Digital Rebel XS / Kiss Digital F",
"Canon EOS 1100D / Digital Rebel T3 / Kiss Digital X50",
"Canon EOS D2000C", "Canon EOS M",
"Canon EOS-1D", "Canon EOS-1DS", "Canon EOS-1D X",
"Canon EOS-1D Mark II", "Canon EOS-1D Mark II N", "Canon EOS-1D Mark III",
"Canon EOS-1D Mark IV", "Canon EOS-1Ds Mark II", "Canon EOS-1Ds Mark III",
"Casio QV-2000UX", "Casio QV-3000EX", "Casio QV-3500EX", "Casio QV-4000",
"Casio QV-5700", "Casio QV-R41", "Casio QV-R51", "Casio QV-R61",
"Casio EX-S20", "Casio EX-S100", "Casio EX-Z4", "Casio EX-Z50",
"Casio EX-Z500", "Casio EX-Z55", "Casio EX-Z60", "Casio EX-Z75",
"Casio EX-Z750", "Casio EX-Z8", "Casio EX-Z850", "Casio EX-Z1050",
"Casio EX-Z1080", "Casio EX-ZR100",
"Casio Exlim Pro 505", "Casio Exlim Pro 600", "Casio Exlim Pro 700",
"Contax N Digital", "Creative PC-CAM 600", "Epson R-D1", "Foculus 531C",
"Fuji E550", "Fuji E900", "Fuji F700", "Fuji F710", "Fuji F800", "Fuji F810",
"Fuji S2Pro", "Fuji S3Pro", "Fuji S5Pro", "Fuji S20Pro", "Fuji S100FS",
"Fuji S5000", "Fuji S5100/S5500", "Fuji S5200/S5600", "Fuji S6000fd",
"Fuji S7000", "Fuji S9000/S9500", "Fuji S9100/S9600", "Fuji S200EXR",
"Fuji SL1000", "Fuji HS10/HS11", "Fuji HS20EXR", "Fuji HS30EXR",
"Fuji HS50EXR", "Fuji F550EXR", "Fuji F600EXR", "Fuji F770EXR",
"Fuji F800EXR", "Fuji X-Pro1", "Fuji X-S1", "Fuji X100", "Fuji X100S",
"Fuji X10", "Fuji X20", "Fuji X-E1", "Fuji XF1", "Fuji IS-1",
"Hasselblad CFV", "Hasselblad H3D", "Hasselblad H4D", "Hasselblad V96C",
"Imacon Ixpress 16-megapixel", "Imacon Ixpress 22-megapixel",
"Imacon Ixpress 39-megapixel", "ISG 2020x1520",
"Kodak DC20", "Kodak DC25", "Kodak DC40", "Kodak DC50",
"Kodak DC120 (also try kdc2tiff)", "Kodak DCS200", "Kodak DCS315C",
"Kodak DCS330C", "Kodak DCS420", "Kodak DCS460", "Kodak DCS460A",
"Kodak DCS520C", "Kodak DCS560C", "Kodak DCS620C", "Kodak DCS620X",
"Kodak DCS660C", "Kodak DCS660M", "Kodak DCS720X", "Kodak DCS760C",
"Kodak DCS760M", "Kodak EOSDCS1", "Kodak EOSDCS3B", "Kodak NC2000F",
"Kodak ProBack", "Kodak PB645C", "Kodak PB645H", "Kodak PB645M",
"Kodak DCS Pro 14n", "Kodak DCS Pro 14nx", "Kodak DCS Pro SLR/c",
"Kodak DCS Pro SLR/n", "Kodak C330", "Kodak C603", "Kodak P850",
"Kodak P880", "Kodak Z980", "Kodak Z981", "Kodak Z990", "Kodak Z1015",
"Kodak KAI-0340", "Konica KD-400Z", "Konica KD-510Z",
"Leaf AFi 7", "Leaf AFi-II 5", "Leaf AFi-II 6", "Leaf AFi-II 7",
"Leaf AFi-II 8", "Leaf AFi-II 10", "Leaf AFi-II 10R", "Leaf AFi-II 12",
"Leaf AFi-II 12R", "Leaf Aptus 17", "Leaf Aptus 22", "Leaf Aptus 54S",
"Leaf Aptus 65", "Leaf Aptus 75", "Leaf Aptus 75S", "Leaf Cantare",
"Leaf CatchLight", "Leaf CMost", "Leaf DCB2", "Leaf Valeo 6",
"Leaf Valeo 11", "Leaf Valeo 17", "Leaf Valeo 22", "Leaf Volare",
"Leica Digilux 2", "Leica Digilux 3", "Leica D-LUX2", "Leica D-LUX3",
"Leica D-LUX4", "Leica D-LUX5", "Leica D-LUX6",
"Leica V-LUX1", "Leica V-LUX2", "Leica V-LUX3", "Leica V-LUX4",
"Logitech Fotoman Pixtura", "Mamiya ZD", "Micron 2010", "Minolta RD175",
"Minolta DiMAGE 5", "Minolta DiMAGE 7", "Minolta DiMAGE 7i",
"Minolta DiMAGE 7Hi", "Minolta DiMAGE A1", "Minolta DiMAGE A2",
"Minolta DiMAGE A200", "Minolta DiMAGE G400", "Minolta DiMAGE G500",
"Minolta DiMAGE G530", "Minolta DiMAGE G600", "Minolta DiMAGE Z2",
"Minolta Alpha/Dynax/Maxxum 5D", "Minolta Alpha/Dynax/Maxxum 7D",
"Motorola PIXL",
"Nikon D1", "Nikon D1H", "Nikon D1X", "Nikon D2H", "Nikon D2Hs",
"Nikon D2X", "Nikon D2Xs", "Nikon D3", "Nikon D3s", "Nikon D3X",
"Nikon D4", "Nikon D40", "Nikon D40X", "Nikon D50", "Nikon D60",
"Nikon D600", "Nikon D70", "Nikon D70s", "Nikon D80", "Nikon D90",
"Nikon D100", "Nikon D200", "Nikon D300", "Nikon D300s", "Nikon D700",
"Nikon D3000", "Nikon D3100", "Nikon D3200", "Nikon D5000", "Nikon D5100",
"Nikon D7000", "Nikon D800", "Nikon D800E",
"Nikon 1 J1", "Nikon 1 S1", "Nikon 1 V1", "Nikon 1 J2", "Nikon 1 V2",
"Nikon 1 J3",
"Nikon E700 (\"DIAG RAW\" hack)", "Nikon E800 (\"DIAG RAW\" hack)",
"Nikon E880 (\"DIAG RAW\" hack)", "Nikon E900 (\"DIAG RAW\" hack)",
"Nikon E950 (\"DIAG RAW\" hack)", "Nikon E990 (\"DIAG RAW\" hack)",
"Nikon E995 (\"DIAG RAW\" hack)", "Nikon E2100 (\"DIAG RAW\" hack)",
"Nikon E2500 (\"DIAG RAW\" hack)", "Nikon E3200 (\"DIAG RAW\" hack)",
"Nikon E3700 (\"DIAG RAW\" hack)", "Nikon E4300 (\"DIAG RAW\" hack)",
"Nikon E4500 (\"DIAG RAW\" hack)", "Nikon E5000", "Nikon E5400",
"Nikon E5700", "Nikon E8400", "Nikon E8700", "Nikon E8800",
"Nikon Coolpix A", "Nikon Coolpix P330", "Nikon Coolpix P6000",
"Nikon Coolpix P7000", "Nikon Coolpix P7100", "Nikon Coolpix P7700",
"Nikon Coolpix S6 (\"DIAG RAW\" hack)",
"Nokia N95", "Nokia X2",
"Olympus C3030Z", "Olympus C5050Z", "Olympus C5060WZ", "Olympus C7070WZ",
"Olympus C70Z,C7000Z", "Olympus C740UZ", "Olympus C770UZ", "Olympus C8080WZ",
"Olympus X200,D560Z,C350Z",
"Olympus E-1", "Olympus E-3", "Olympus E-5", "Olympus E-10",
"Olympus E-20", "Olympus E-30", "Olympus E-300", "Olympus E-330",
"Olympus E-400", "Olympus E-410", "Olympus E-420", "Olympus E-500",
"Olympus E-510", "Olympus E-520", "Olympus E-620", "Olympus E-P1",
"Olympus E-P2", "Olympus E-P3", "Olympus E-PL1", "Olympus E-PL1s",
"Olympus E-PL2", "Olympus E-PL3", "Olympus E-PL5", "Olympus E-PM1",
"Olympus E-PM2", "Olympus E-M5",
"Olympus SP310", "Olympus SP320", "Olympus SP350", "Olympus SP500UZ",
"Olympus SP510UZ", "Olympus SP550UZ", "Olympus SP560UZ", "Olympus SP570UZ",
"Olympus XZ-1", "Olympus XZ-10", "Olympus XZ-2",
"Panasonic DMC-FZ8", "Panasonic DMC-FZ18", "Panasonic DMC-FZ28",
"Panasonic DMC-FZ30", "Panasonic DMC-FZ35/FZ38", "Panasonic DMC-FZ40",
"Panasonic DMC-FZ50", "Panasonic DMC-FZ100", "Panasonic DMC-FZ150",
"Panasonic DMC-FZ200", "Panasonic DMC-FX150",
"Panasonic DMC-G1", "Panasonic DMC-G10", "Panasonic DMC-G2",
"Panasonic DMC-G3", "Panasonic DMC-G5", "Panasonic DMC-G6",
"Panasonic DMC-GF1", "Panasonic DMC-GF2", "Panasonic DMC-GF3",
"Panasonic DMC-GF5", "Panasonic DMC-GH1", "Panasonic DMC-GH2",
"Panasonic DMC-GH3", "Panasonic DMC-GX1",
"Panasonic DMC-L1", "Panasonic DMC-L10", "Panasonic DMC-LC1",
"Panasonic DMC-LX1", "Panasonic DMC-LX2", "Panasonic DMC-LX3",
"Panasonic DMC-LX5", "Panasonic DMC-LX7",
"Pentax *ist D", "Pentax *ist DL", "Pentax *ist DL2", "Pentax *ist DS",
"Pentax *ist DS2", "Pentax K10D", "Pentax K20D", "Pentax K100D",
"Pentax K100D Super", "Pentax K200D", "Pentax K2000/K-m", "Pentax K-x",
"Pentax K-r", "Pentax K-30", "Pentax K-5", "Pentax K-5 II",
"Pentax K-5 IIs", "Pentax K-7", "Pentax MX-1", "Pentax Q10",
"Pentax Optio S", "Pentax Optio S4", "Pentax Optio 33WR",
"Pentax Optio 750Z", "Pentax 645D",
"Phase One LightPhase", "Phase One H 10", "Phase One H 20",
"Phase One H 25", "Phase One P 20", "Phase One P 25", "Phase One P 30",
"Phase One P 45", "Phase One P 45+", "Phase One P 65",
"Pixelink A782",
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
"Polaroid x530",
#endif
#ifndef NO_JASPER
"Redcode R3D format",
#endif
"Rollei d530flex", "RoverShot 3320af",
"Samsung EX1", "Samsung EX2F", "Samsung GX-1S", "Samsung GX10",
"Samsung GX20", "Samsung NX10", "Samsung NX11", "Samsung NX100",
"Samsung NX20", "Samsung NX200", "Samsung NX210", "Samsung NX1000",
"Samsung WB550", "Samsung WB2000", "Samsung S85 (hacked)",
"Samsung S850 (hacked)",
"Sarnoff 4096x5440",
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
"Sigma SD9", "Sigma SD10", "Sigma SD14", "Sigma SD15", "Sigma SD1",
"Sigma SD1 Merill", "Sigma DP1", "Sigma DP1 Merill", "Sigma DP1S",
"Sigma DP1X", "Sigma DP2", "Sigma DP2 Merill", "Sigma DP2S", "Sigma DP2X",
#endif
"Sinar 3072x2048", "Sinar 4080x4080", "Sinar 4080x5440", "Sinar STI format",
"SMaL Ultra-Pocket 3", "SMaL Ultra-Pocket 4", "SMaL Ultra-Pocket 5",
"Sony DSC-F828", "Sony DSC-R1", "Sony DSC-RX1", "Sony DSC-RX100",
"Sony DSC-V3", "Sony DSLR-A100", "Sony DSLR-A200", "Sony DSLR-A230",
"Sony DSLR-A290", "Sony DSLR-A300", "Sony DSLR-A330", "Sony DSLR-A350",
"Sony DSLR-A380", "Sony DSLR-A390", "Sony DSLR-A450", "Sony DSLR-A500",
"Sony DSLR-A550", "Sony DSLR-A580", "Sony DSLR-A700", "Sony DSLR-A850",
"Sony DSLR-A900", "Sony NEX-3", "Sony NEX-5", "Sony NEX-5N", "Sony NEX-5R",
"Sony NEX-6", "Sony NEX-7", "Sony NEX-C3", "Sony NEX-F3",
"Sony SLT-A33", "Sony SLT-A35", "Sony SLT-A37", "Sony SLT-A55V",
"Sony SLT-A57", "Sony SLT-A58", "Sony SLT-A65V", "Sony SLT-A77V",
"Sony SLT-A99V", "Sony XCD-SX910CR",
"STV680 VGA", "ptGrey GRAS-50S5C", "JaiPulnix BB-500CL",
"JaiPulnix BB-500GE", "SVS SVS625CL",
NULL
};

// Accessor for the static camera list (NULL-terminated).
const char** LibRaw::cameraList() { return static_camera_list;}
// Number of entries, excluding the terminating NULL.
int LibRaw::cameraCount() { return (sizeof(static_camera_list)/sizeof(static_camera_list[0]))-1; }

// Human-readable name for a pipeline progress stage.
const char * LibRaw::strprogress(enum LibRaw_progress p)
{
  switch(p)
    {
    case LIBRAW_PROGRESS_START:
      return "Starting";
    case LIBRAW_PROGRESS_OPEN :
      return "Opening file";
    case LIBRAW_PROGRESS_IDENTIFY :
      return "Reading metadata";
    case LIBRAW_PROGRESS_SIZE_ADJUST:
      return "Adjusting size";
    case LIBRAW_PROGRESS_LOAD_RAW:
      return "Reading RAW data";
    case LIBRAW_PROGRESS_REMOVE_ZEROES:
      return "Clearing zero values";
    case LIBRAW_PROGRESS_BAD_PIXELS :
      return "Removing dead pixels";
    case LIBRAW_PROGRESS_DARK_FRAME:
      return "Subtracting dark frame data";
    case LIBRAW_PROGRESS_FOVEON_INTERPOLATE:
      return "Interpolating Foveon sensor data";
    case LIBRAW_PROGRESS_SCALE_COLORS:
      return "Scaling colors";
    case LIBRAW_PROGRESS_PRE_INTERPOLATE:
      return "Pre-interpolating";
    case LIBRAW_PROGRESS_INTERPOLATE:
      return "Interpolating";
    case LIBRAW_PROGRESS_MIX_GREEN :
      return "Mixing green channels";
    case LIBRAW_PROGRESS_MEDIAN_FILTER :
      return "Median filter";
    case LIBRAW_PROGRESS_HIGHLIGHTS:
      return "Highlight recovery";
    case LIBRAW_PROGRESS_FUJI_ROTATE :
      return "Rotating Fuji diagonal data";
    case LIBRAW_PROGRESS_FLIP :
      return "Flipping image";
    case LIBRAW_PROGRESS_APPLY_PROFILE:
      return "ICC conversion";
    case LIBRAW_PROGRESS_CONVERT_RGB:
      return "Converting to RGB";
    case LIBRAW_PROGRESS_STRETCH:
      return "Stretching image";
    case LIBRAW_PROGRESS_THUMB_LOAD:
      return "Loading thumbnail";
    default:
      return "Some strange things";
    }
}
int LibRaw::dcraw_process(void) { int quality,i; int iterations=-1, dcb_enhance=1, noiserd=0; int eeci_refine_fl=0, es_med_passes_fl=0; float cared=0,cablue=0; float linenoise=0; float lclean=0,cclean=0; float thresh=0; float preser=0; float expos=1.0; CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW); // CHECK_ORDER_HIGH(LIBRAW_PROGRESS_PRE_INTERPOLATE); try { int no_crop = 1; if (~O.cropbox[2] && ~O.cropbox[3]) no_crop=0; libraw_decoder_info_t di; get_decoder_info(&di); int subtract_inline = !O.bad_pixels && !O.dark_frame && !O.wf_debanding && !(di.decoder_flags & LIBRAW_DECODER_LEGACY) && !IO.zero_is_bad; raw2image_ex(subtract_inline); // allocate imgdata.image and copy data! int save_4color = O.four_color_rgb; if (IO.zero_is_bad) { remove_zeroes(); SET_PROC_FLAG(LIBRAW_PROGRESS_REMOVE_ZEROES); } if(O.half_size) O.four_color_rgb = 1; if(O.bad_pixels && no_crop) { bad_pixels(O.bad_pixels); SET_PROC_FLAG(LIBRAW_PROGRESS_BAD_PIXELS); } if (O.dark_frame && no_crop) { subtract (O.dark_frame); SET_PROC_FLAG(LIBRAW_PROGRESS_DARK_FRAME); } if (O.wf_debanding) { wf_remove_banding(); } quality = 2 + !IO.fuji_width; if (O.user_qual >= 0) quality = O.user_qual; if(!subtract_inline || !C.data_maximum) { adjust_bl(); subtract_black(); } adjust_maximum(); if (O.user_sat > 0) C.maximum = O.user_sat; if (P1.is_foveon) { if(load_raw == &LibRaw::foveon_dp_load_raw) { for (int i=0; i < S.height*S.width*4; i++) if ((short) imgdata.image[0][i] < 0) imgdata.image[0][i] = 0; } else foveon_interpolate(); SET_PROC_FLAG(LIBRAW_PROGRESS_FOVEON_INTERPOLATE); } if (O.green_matching && !O.half_size) { green_matching(); } if (!P1.is_foveon) { scale_colors(); SET_PROC_FLAG(LIBRAW_PROGRESS_SCALE_COLORS); } pre_interpolate(); SET_PROC_FLAG(LIBRAW_PROGRESS_PRE_INTERPOLATE); if (O.dcb_iterations >= 0) iterations = O.dcb_iterations; if (O.dcb_enhance_fl >=0 ) dcb_enhance = O.dcb_enhance_fl; if (O.fbdd_noiserd >=0 ) noiserd = O.fbdd_noiserd; if (O.eeci_refine >=0 ) eeci_refine_fl = O.eeci_refine; if 
(O.es_med_passes >0 ) es_med_passes_fl = O.es_med_passes; // LIBRAW_DEMOSAIC_PACK_GPL3 if (!O.half_size && O.cfa_green >0) {thresh=O.green_thresh ;green_equilibrate(thresh);} if (O.exp_correc >0) {expos=O.exp_shift ; preser=O.exp_preser; exp_bef(expos,preser);} if (O.ca_correc >0 ) {cablue=O.cablue; cared=O.cared; CA_correct_RT(cablue, cared);} if (O.cfaline >0 ) {linenoise=O.linenoise; cfa_linedn(linenoise);} if (O.cfa_clean >0 ) {lclean=O.lclean; cclean=O.cclean; cfa_impulse_gauss(lclean,cclean);} if (P1.filters) { if (noiserd>0 && P1.colors==3 && P1.filters) fbdd(noiserd); if (quality == 0) lin_interpolate(); else if (quality == 1 || P1.colors > 3 || P1.filters < 1000) vng_interpolate(); else if (quality == 2) ppg_interpolate(); else if (quality == 3) ahd_interpolate(); // really don't need it here due to fallback op else if (quality == 4) dcb(iterations, dcb_enhance); // LIBRAW_DEMOSAIC_PACK_GPL2 else if (quality == 5) ahd_interpolate_mod(); else if (quality == 6) afd_interpolate_pl(2,1); else if (quality == 7) vcd_interpolate(0); else if (quality == 8) vcd_interpolate(12); else if (quality == 9) lmmse_interpolate(1); // LIBRAW_DEMOSAIC_PACK_GPL3 else if (quality == 10) amaze_demosaic_RT(); // LGPL2 else if (quality == 11) dht_interpolate(); else if (quality == 12) aahd_interpolate(); // fallback to AHD else ahd_interpolate(); SET_PROC_FLAG(LIBRAW_PROGRESS_INTERPOLATE); } if (IO.mix_green) { for (P1.colors=3, i=0; i < S.height * S.width; i++) imgdata.image[i][1] = (imgdata.image[i][1] + imgdata.image[i][3]) >> 1; SET_PROC_FLAG(LIBRAW_PROGRESS_MIX_GREEN); } if(!P1.is_foveon) { if (P1.colors == 3) { if (quality == 8) { if (eeci_refine_fl == 1) refinement(); if (O.med_passes > 0) median_filter_new(); if (es_med_passes_fl > 0) es_median_filter(); } else { median_filter(); } SET_PROC_FLAG(LIBRAW_PROGRESS_MEDIAN_FILTER); } } if (O.highlight == 2) { blend_highlights(); SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS); } if (O.highlight > 2) { recover_highlights(); 
SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS); } if (O.use_fuji_rotate) { fuji_rotate(); SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE); } if(!libraw_internal_data.output_data.histogram) { libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4); merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_process()"); } #ifndef NO_LCMS if(O.camera_profile) { apply_profile(O.camera_profile,O.output_profile); SET_PROC_FLAG(LIBRAW_PROGRESS_APPLY_PROFILE); } #endif convert_to_rgb(); SET_PROC_FLAG(LIBRAW_PROGRESS_CONVERT_RGB); if (O.use_fuji_rotate) { stretch(); SET_PROC_FLAG(LIBRAW_PROGRESS_STRETCH); } O.four_color_rgb = save_4color; // also, restore return 0; } catch ( LibRaw_exceptions err) { EXCEPTION_HANDLER(err); } }
int LibRaw::dcraw_process(void) { int quality,i; int iterations=-1, dcb_enhance=1, noiserd=0; int eeci_refine_fl=0, es_med_passes_fl=0; float cared=0,cablue=0; float linenoise=0; float lclean=0,cclean=0; float thresh=0; float preser=0; float expos=1.0; CHECK_ORDER_LOW(LIBRAW_PROGRESS_LOAD_RAW); // CHECK_ORDER_HIGH(LIBRAW_PROGRESS_PRE_INTERPOLATE); try { int no_crop = 1; if (~O.cropbox[2] && ~O.cropbox[3]) no_crop=0; libraw_decoder_info_t di; get_decoder_info(&di); int subtract_inline = !O.bad_pixels && !O.dark_frame && !O.wf_debanding && !(di.decoder_flags & LIBRAW_DECODER_LEGACY) && !IO.zero_is_bad; raw2image_ex(subtract_inline); // allocate imgdata.image and copy data! int save_4color = O.four_color_rgb; if (IO.zero_is_bad) { remove_zeroes(); SET_PROC_FLAG(LIBRAW_PROGRESS_REMOVE_ZEROES); } if(O.half_size) O.four_color_rgb = 1; if(O.bad_pixels && no_crop) { bad_pixels(O.bad_pixels); SET_PROC_FLAG(LIBRAW_PROGRESS_BAD_PIXELS); } if (O.dark_frame && no_crop) { subtract (O.dark_frame); SET_PROC_FLAG(LIBRAW_PROGRESS_DARK_FRAME); } if (O.wf_debanding) { wf_remove_banding(); } quality = 2 + !IO.fuji_width; if (O.user_qual >= 0) quality = O.user_qual; if(!subtract_inline || !C.data_maximum) { adjust_bl(); subtract_black(); } adjust_maximum(); if (O.user_sat > 0) C.maximum = O.user_sat; if (P1.is_foveon) { if(load_raw == &LibRaw::foveon_dp_load_raw) { for (int i=0; i < S.height*S.width*4; i++) if ((short) imgdata.image[0][i] < 0) imgdata.image[0][i] = 0; } else foveon_interpolate(); SET_PROC_FLAG(LIBRAW_PROGRESS_FOVEON_INTERPOLATE); } if (O.green_matching && !O.half_size) { green_matching(); } if (!P1.is_foveon) { scale_colors(); SET_PROC_FLAG(LIBRAW_PROGRESS_SCALE_COLORS); } pre_interpolate(); SET_PROC_FLAG(LIBRAW_PROGRESS_PRE_INTERPOLATE); if (O.dcb_iterations >= 0) iterations = O.dcb_iterations; if (O.dcb_enhance_fl >=0 ) dcb_enhance = O.dcb_enhance_fl; if (O.fbdd_noiserd >=0 ) noiserd = O.fbdd_noiserd; if (O.eeci_refine >=0 ) eeci_refine_fl = O.eeci_refine; if 
(O.es_med_passes >0 ) es_med_passes_fl = O.es_med_passes; // LIBRAW_DEMOSAIC_PACK_GPL3 if (!O.half_size && O.cfa_green >0) {thresh=O.green_thresh ;green_equilibrate(thresh);} if (O.exp_correc >0) {expos=O.exp_shift ; preser=O.exp_preser; exp_bef(expos,preser);} if (O.ca_correc >0 ) {cablue=O.cablue; cared=O.cared; CA_correct_RT(cablue, cared);} if (O.cfaline >0 ) {linenoise=O.linenoise; cfa_linedn(linenoise);} if (O.cfa_clean >0 ) {lclean=O.lclean; cclean=O.cclean; cfa_impulse_gauss(lclean,cclean);} if (P1.filters) { if (noiserd>0 && P1.colors==3 && P1.filters) fbdd(noiserd); if (quality == 0) lin_interpolate(); else if (quality == 1 || P1.colors > 3 || P1.filters < 1000) vng_interpolate(); else if (quality == 2) ppg_interpolate(); else if (quality == 3) ahd_interpolate(); // really don't need it here due to fallback op else if (quality == 4) dcb(iterations, dcb_enhance); // LIBRAW_DEMOSAIC_PACK_GPL2 else if (quality == 5) ahd_interpolate_mod(); else if (quality == 6) afd_interpolate_pl(2,1); else if (quality == 7) vcd_interpolate(0); else if (quality == 8) vcd_interpolate(12); else if (quality == 9) lmmse_interpolate(1); // LIBRAW_DEMOSAIC_PACK_GPL3 else if (quality == 10) amaze_demosaic_RT(); // LGPL2 else if (quality == 11) dht_interpolate(); else if (quality == 12) aahd_interpolate(); // fallback to AHD else ahd_interpolate(); SET_PROC_FLAG(LIBRAW_PROGRESS_INTERPOLATE); } if (IO.mix_green) { for (P1.colors=3, i=0; i < S.height * S.width; i++) imgdata.image[i][1] = (imgdata.image[i][1] + imgdata.image[i][3]) >> 1; SET_PROC_FLAG(LIBRAW_PROGRESS_MIX_GREEN); } if(!P1.is_foveon) { if (P1.colors == 3) { if (quality == 8) { if (eeci_refine_fl == 1) refinement(); if (O.med_passes > 0) median_filter_new(); if (es_med_passes_fl > 0) es_median_filter(); } else { median_filter(); } SET_PROC_FLAG(LIBRAW_PROGRESS_MEDIAN_FILTER); } } if (O.highlight == 2) { blend_highlights(); SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS); } if (O.highlight > 2) { recover_highlights(); 
SET_PROC_FLAG(LIBRAW_PROGRESS_HIGHLIGHTS); } if (O.use_fuji_rotate) { fuji_rotate(); SET_PROC_FLAG(LIBRAW_PROGRESS_FUJI_ROTATE); } if(!libraw_internal_data.output_data.histogram) { libraw_internal_data.output_data.histogram = (int (*)[LIBRAW_HISTOGRAM_SIZE]) malloc(sizeof(*libraw_internal_data.output_data.histogram)*4); merror(libraw_internal_data.output_data.histogram,"LibRaw::dcraw_process()"); } #ifndef NO_LCMS if(O.camera_profile) { apply_profile(O.camera_profile,O.output_profile); SET_PROC_FLAG(LIBRAW_PROGRESS_APPLY_PROFILE); } #endif convert_to_rgb(); SET_PROC_FLAG(LIBRAW_PROGRESS_CONVERT_RGB); if (O.use_fuji_rotate) { stretch(); SET_PROC_FLAG(LIBRAW_PROGRESS_STRETCH); } O.four_color_rgb = save_4color; // also, restore return 0; } catch ( LibRaw_exceptions err) { EXCEPTION_HANDLER(err); } }
{'added': [(2339, '\t\t\tint dmax = 0;'), (2340, '\t\t\tfor(i=0; i< size*4; i++)'), (2345, ' if(dmax < val) dmax = val;'), (2347, '\t\t\tC.data_maximum = dmax & 0xffff;'), (2363, '\t\t int dmax = 0;'), (2365, ' if(dmax < p[idx]) dmax = p[idx];'), (2366, '\t\t C.data_maximum = dmax;'), (2426, '\tif(C.data_maximum <=TBLN)'), (2427, '\t\tC.data_maximum = lut[C.data_maximum];'), (2428, '\tif(C.maximum <= TBLN)'), (2429, '\t\tC.maximum = lut[C.maximum];'), (2537, '\t\tint save_4color = O.four_color_rgb;')], 'deleted': [(2339, ''), (2340, ' for(i=0; i< size*4; i++)'), (2345, ' if(C.data_maximum < val) C.data_maximum = val;'), (2362, ' C.data_maximum = 0;'), (2364, ' if(C.data_maximum < p[idx]) C.data_maximum = p[idx];'), (2424, ' C.data_maximum = lut[C.data_maximum];'), (2425, ' C.maximum = lut[C.maximum];'), (2533, ' int save_4color = O.four_color_rgb;')]}
12
8
2,789
17,314
177
1,162
70
https://github.com/LibRaw/LibRaw
CVE-2013-2127
CWE-119
1,505
segment_sum.cc
C++
tflite::ops::builtin::segment_sum::ResizeOutputTensor
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace segment_sum { static const int kInputDataTensor = 0; static const int kInputSegmentIdsTensor = 1; static const int kOutputTensor = 0; TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const TfLiteTensor* data, const TfLiteTensor* segment_ids, TfLiteTensor* output) { int max_index = -1; const int segment_id_size = segment_ids->dims->data[0]; if (segment_id_size > 0) { max_index = segment_ids->data.i32[segment_id_size - 1]; } const int data_rank = NumDimensions(data); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data)); output_shape->data[0] = max_index + 1; for (int i = 1; i < data_rank; ++i) { output_shape->data[i] = data->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* data = GetInput(context, node, kInputDataTensor); const 
TfLiteTensor* segment_ids = GetInput(context, node, kInputSegmentIdsTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, data->type == kTfLiteInt32 || data->type == kTfLiteFloat32); TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32); if (!IsConstantTensor(data) || !IsConstantTensor(segment_ids)) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputTensor(context, data, segment_ids, output); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* data = GetInput(context, node, kInputDataTensor); const TfLiteTensor* segment_ids = GetInput(context, node, kInputSegmentIdsTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, data, segment_ids, output)); } #define TF_LITE_SEGMENT_SUM(dtype) \ reference_ops::SegmentSum<dtype>( \ GetTensorShape(data), GetTensorData<dtype>(data), \ GetTensorShape(segment_ids), GetTensorData<int32_t>(segment_ids), \ GetTensorShape(output), GetTensorData<dtype>(output)); switch (data->type) { case kTfLiteInt32: TF_LITE_SEGMENT_SUM(int32_t); break; case kTfLiteFloat32: TF_LITE_SEGMENT_SUM(float); break; default: context->ReportError(context, "Currently SegmentSum doesn't support type: %s", TfLiteTypeGetName(data->type)); return kTfLiteError; } #undef TF_LITE_SEGMENT_SUM return kTfLiteOk; } } // namespace segment_sum TfLiteRegistration* Register_SEGMENT_SUM() { static TfLiteRegistration r = {nullptr, nullptr, segment_sum::Prepare, segment_sum::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace segment_sum { static const int kInputDataTensor = 0; static const int kInputSegmentIdsTensor = 1; static const int kOutputTensor = 0; TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const TfLiteTensor* data, const TfLiteTensor* segment_ids, TfLiteTensor* output) { // Segment ids should be of same cardinality as first input dimension and they // should be increasing by at most 1, from 0 (e.g., [0, 0, 1, 2, 3] is valid) const int segment_id_size = segment_ids->dims->data[0]; TF_LITE_ENSURE_EQ(context, segment_id_size, data->dims->data[0]); int previous_segment_id = -1; for (int i = 0; i < segment_id_size; i++) { const int current_segment_id = GetTensorData<int32_t>(segment_ids)[i]; if (i == 0) { TF_LITE_ENSURE_EQ(context, current_segment_id, 0); } else { int delta = current_segment_id - previous_segment_id; TF_LITE_ENSURE(context, delta == 0 || delta == 1); } previous_segment_id = current_segment_id; } const int max_index = previous_segment_id; const int 
data_rank = NumDimensions(data); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data)); output_shape->data[0] = max_index + 1; for (int i = 1; i < data_rank; ++i) { output_shape->data[i] = data->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* data = GetInput(context, node, kInputDataTensor); const TfLiteTensor* segment_ids = GetInput(context, node, kInputSegmentIdsTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, data->type == kTfLiteInt32 || data->type == kTfLiteFloat32); TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32); if (!IsConstantTensor(data) || !IsConstantTensor(segment_ids)) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputTensor(context, data, segment_ids, output); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* data = GetInput(context, node, kInputDataTensor); const TfLiteTensor* segment_ids = GetInput(context, node, kInputSegmentIdsTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, data, segment_ids, output)); } #define TF_LITE_SEGMENT_SUM(dtype) \ reference_ops::SegmentSum<dtype>( \ GetTensorShape(data), GetTensorData<dtype>(data), \ GetTensorShape(segment_ids), GetTensorData<int32_t>(segment_ids), \ GetTensorShape(output), GetTensorData<dtype>(output)); switch (data->type) { case kTfLiteInt32: TF_LITE_SEGMENT_SUM(int32_t); break; case kTfLiteFloat32: TF_LITE_SEGMENT_SUM(float); break; default: context->ReportError(context, "Currently SegmentSum doesn't support type: %s", TfLiteTypeGetName(data->type)); return kTfLiteError; } #undef TF_LITE_SEGMENT_SUM return kTfLiteOk; } } // namespace segment_sum 
TfLiteRegistration* Register_SEGMENT_SUM() { static TfLiteRegistration r = {nullptr, nullptr, segment_sum::Prepare, segment_sum::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const TfLiteTensor* data, const TfLiteTensor* segment_ids, TfLiteTensor* output) { int max_index = -1; const int segment_id_size = segment_ids->dims->data[0]; if (segment_id_size > 0) { max_index = segment_ids->data.i32[segment_id_size - 1]; } const int data_rank = NumDimensions(data); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data)); output_shape->data[0] = max_index + 1; for (int i = 1; i < data_rank; ++i) { output_shape->data[i] = data->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); }
TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const TfLiteTensor* data, const TfLiteTensor* segment_ids, TfLiteTensor* output) { // Segment ids should be of same cardinality as first input dimension and they // should be increasing by at most 1, from 0 (e.g., [0, 0, 1, 2, 3] is valid) const int segment_id_size = segment_ids->dims->data[0]; TF_LITE_ENSURE_EQ(context, segment_id_size, data->dims->data[0]); int previous_segment_id = -1; for (int i = 0; i < segment_id_size; i++) { const int current_segment_id = GetTensorData<int32_t>(segment_ids)[i]; if (i == 0) { TF_LITE_ENSURE_EQ(context, current_segment_id, 0); } else { int delta = current_segment_id - previous_segment_id; TF_LITE_ENSURE(context, delta == 0 || delta == 1); } previous_segment_id = current_segment_id; } const int max_index = previous_segment_id; const int data_rank = NumDimensions(data); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data)); output_shape->data[0] = max_index + 1; for (int i = 1; i < data_rank; ++i) { output_shape->data[i] = data->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); }
{'added': [(37, ' // Segment ids should be of same cardinality as first input dimension and they'), (38, ' // should be increasing by at most 1, from 0 (e.g., [0, 0, 1, 2, 3] is valid)'), (40, ' TF_LITE_ENSURE_EQ(context, segment_id_size, data->dims->data[0]);'), (41, ' int previous_segment_id = -1;'), (42, ' for (int i = 0; i < segment_id_size; i++) {'), (43, ' const int current_segment_id = GetTensorData<int32_t>(segment_ids)[i];'), (44, ' if (i == 0) {'), (45, ' TF_LITE_ENSURE_EQ(context, current_segment_id, 0);'), (46, ' } else {'), (47, ' int delta = current_segment_id - previous_segment_id;'), (48, ' TF_LITE_ENSURE(context, delta == 0 || delta == 1);'), (49, ' }'), (50, ' previous_segment_id = current_segment_id;'), (52, ''), (53, ' const int max_index = previous_segment_id;'), (54, '')], 'deleted': [(37, ' int max_index = -1;'), (39, ' if (segment_id_size > 0) {'), (40, ' max_index = segment_ids->data.i32[segment_id_size - 1];')]}
16
3
88
566
17
138
3
https://github.com/tensorflow/tensorflow
CVE-2020-15212
CWE-787
1,222
cx24116.c
C
cx24116_send_diseqc_msg
/* Conexant cx24116/cx24118 - DVBS/S2 Satellite demod/tuner driver Copyright (C) 2006-2008 Steven Toth <stoth@hauppauge.com> Copyright (C) 2006-2007 Georg Acher Copyright (C) 2007-2008 Darron Broad March 2007 Fixed some bugs. Added diseqc support. Added corrected signal strength support. August 2007 Sync with legacy version. Some clean ups. Copyright (C) 2008 Igor Liplianin September, 9th 2008 Fixed locking on high symbol rates (>30000). Implement MPEG initialization parameter. January, 17th 2009 Fill set_voltage with actually control voltage code. Correct set tone to not affect voltage. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/firmware.h> #include "dvb_frontend.h" #include "cx24116.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); #define dprintk(args...) \ do { \ if (debug) \ printk(KERN_INFO "cx24116: " args); \ } while (0) #define CX24116_DEFAULT_FIRMWARE "dvb-fe-cx24116.fw" #define CX24116_SEARCH_RANGE_KHZ 5000 /* known registers */ #define CX24116_REG_COMMAND (0x00) /* command args 0x00..0x1e */ #define CX24116_REG_EXECUTE (0x1f) /* execute command */ #define CX24116_REG_MAILBOX (0x96) /* FW or multipurpose mailbox? 
*/ #define CX24116_REG_RESET (0x20) /* reset status > 0 */ #define CX24116_REG_SIGNAL (0x9e) /* signal low */ #define CX24116_REG_SSTATUS (0x9d) /* signal high / status */ #define CX24116_REG_QUALITY8 (0xa3) #define CX24116_REG_QSTATUS (0xbc) #define CX24116_REG_QUALITY0 (0xd5) #define CX24116_REG_BER0 (0xc9) #define CX24116_REG_BER8 (0xc8) #define CX24116_REG_BER16 (0xc7) #define CX24116_REG_BER24 (0xc6) #define CX24116_REG_UCB0 (0xcb) #define CX24116_REG_UCB8 (0xca) #define CX24116_REG_CLKDIV (0xf3) #define CX24116_REG_RATEDIV (0xf9) /* configured fec (not tuned) or actual FEC (tuned) 1=1/2 2=2/3 etc */ #define CX24116_REG_FECSTATUS (0x9c) /* FECSTATUS bits */ /* mask to determine configured fec (not tuned) or actual fec (tuned) */ #define CX24116_FEC_FECMASK (0x1f) /* Select DVB-S demodulator, else DVB-S2 */ #define CX24116_FEC_DVBS (0x20) #define CX24116_FEC_UNKNOWN (0x40) /* Unknown/unused */ /* Pilot mode requested when tuning else always reset when tuned */ #define CX24116_FEC_PILOT (0x80) /* arg buffer size */ #define CX24116_ARGLEN (0x1e) /* rolloff */ #define CX24116_ROLLOFF_020 (0x00) #define CX24116_ROLLOFF_025 (0x01) #define CX24116_ROLLOFF_035 (0x02) /* pilot bit */ #define CX24116_PILOT_OFF (0x00) #define CX24116_PILOT_ON (0x40) /* signal status */ #define CX24116_HAS_SIGNAL (0x01) #define CX24116_HAS_CARRIER (0x02) #define CX24116_HAS_VITERBI (0x04) #define CX24116_HAS_SYNCLOCK (0x08) #define CX24116_HAS_UNKNOWN1 (0x10) #define CX24116_HAS_UNKNOWN2 (0x20) #define CX24116_STATUS_MASK (0x0f) #define CX24116_SIGNAL_MASK (0xc0) #define CX24116_DISEQC_TONEOFF (0) /* toneburst never sent */ #define CX24116_DISEQC_TONECACHE (1) /* toneburst cached */ #define CX24116_DISEQC_MESGCACHE (2) /* message cached */ /* arg offset for DiSEqC */ #define CX24116_DISEQC_BURST (1) #define CX24116_DISEQC_ARG2_2 (2) /* unknown value=2 */ #define CX24116_DISEQC_ARG3_0 (3) /* unknown value=0 */ #define CX24116_DISEQC_ARG4_0 (4) /* unknown value=0 */ #define 
CX24116_DISEQC_MSGLEN (5) #define CX24116_DISEQC_MSGOFS (6) /* DiSEqC burst */ #define CX24116_DISEQC_MINI_A (0) #define CX24116_DISEQC_MINI_B (1) /* DiSEqC tone burst */ static int toneburst = 1; module_param(toneburst, int, 0644); MODULE_PARM_DESC(toneburst, "DiSEqC toneburst 0=OFF, 1=TONE CACHE, "\ "2=MESSAGE CACHE (default:1)"); /* SNR measurements */ static int esno_snr; module_param(esno_snr, int, 0644); MODULE_PARM_DESC(esno_snr, "SNR return units, 0=PERCENTAGE 0-100, "\ "1=ESNO(db * 10) (default:0)"); enum cmds { CMD_SET_VCO = 0x10, CMD_TUNEREQUEST = 0x11, CMD_MPEGCONFIG = 0x13, CMD_TUNERINIT = 0x14, CMD_BANDWIDTH = 0x15, CMD_GETAGC = 0x19, CMD_LNBCONFIG = 0x20, CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */ CMD_LNBDCLEVEL = 0x22, CMD_SET_TONE = 0x23, CMD_UPDFWVERS = 0x35, CMD_TUNERSLEEP = 0x36, CMD_AGCCONTROL = 0x3b, /* Unknown */ }; /* The Demod/Tuner can't easily provide these, we cache them */ struct cx24116_tuning { u32 frequency; u32 symbol_rate; fe_spectral_inversion_t inversion; fe_code_rate_t fec; fe_delivery_system_t delsys; fe_modulation_t modulation; fe_pilot_t pilot; fe_rolloff_t rolloff; /* Demod values */ u8 fec_val; u8 fec_mask; u8 inversion_val; u8 pilot_val; u8 rolloff_val; }; /* Basic commands that are sent to the firmware */ struct cx24116_cmd { u8 len; u8 args[CX24116_ARGLEN]; }; struct cx24116_state { struct i2c_adapter *i2c; const struct cx24116_config *config; struct dvb_frontend frontend; struct cx24116_tuning dcur; struct cx24116_tuning dnxt; u8 skip_fw_load; u8 burst; struct cx24116_cmd dsec_cmd; }; static int cx24116_writereg(struct cx24116_state *state, int reg, int data) { u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; int err; if (debug > 1) printk("cx24116: %s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data); err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) { printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x," " value == 
0x%02x)\n", __func__, err, reg, data); return -EREMOTEIO; } return 0; } /* Bulk byte writes to a single I2C address, for 32k firmware load */ static int cx24116_writeregN(struct cx24116_state *state, int reg, const u8 *data, u16 len) { int ret = -EREMOTEIO; struct i2c_msg msg; u8 *buf; buf = kmalloc(len + 1, GFP_KERNEL); if (buf == NULL) { printk("Unable to kmalloc\n"); ret = -ENOMEM; goto error; } *(buf) = reg; memcpy(buf + 1, data, len); msg.addr = state->config->demod_address; msg.flags = 0; msg.buf = buf; msg.len = len + 1; if (debug > 1) printk(KERN_INFO "cx24116: %s: write regN 0x%02x, len = %d\n", __func__, reg, len); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) { printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x\n", __func__, ret, reg); ret = -EREMOTEIO; } error: kfree(buf); return ret; } static int cx24116_readreg(struct cx24116_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_ERR "%s: reg=0x%x (error=%d)\n", __func__, reg, ret); return ret; } if (debug > 1) printk(KERN_INFO "cx24116: read reg 0x%02x, value 0x%02x\n", reg, b1[0]); return b1[0]; } static int cx24116_set_inversion(struct cx24116_state *state, fe_spectral_inversion_t inversion) { dprintk("%s(%d)\n", __func__, inversion); switch (inversion) { case INVERSION_OFF: state->dnxt.inversion_val = 0x00; break; case INVERSION_ON: state->dnxt.inversion_val = 0x04; break; case INVERSION_AUTO: state->dnxt.inversion_val = 0x0C; break; default: return -EINVAL; } state->dnxt.inversion = inversion; return 0; } /* * modfec (modulation and FEC) * =========================== * * MOD FEC mask/val standard * ---- -------- ----------- -------- * QPSK FEC_1_2 0x02 0x02+X DVB-S * QPSK FEC_2_3 0x04 0x02+X DVB-S * QPSK FEC_3_4 0x08 
0x02+X DVB-S * QPSK FEC_4_5 0x10 0x02+X DVB-S (?) * QPSK FEC_5_6 0x20 0x02+X DVB-S * QPSK FEC_6_7 0x40 0x02+X DVB-S * QPSK FEC_7_8 0x80 0x02+X DVB-S * QPSK FEC_8_9 0x01 0x02+X DVB-S (?) (NOT SUPPORTED?) * QPSK AUTO 0xff 0x02+X DVB-S * * For DVB-S high byte probably represents FEC * and low byte selects the modulator. The high * byte is search range mask. Bit 5 may turn * on DVB-S and remaining bits represent some * kind of calibration (how/what i do not know). * * Eg.(2/3) szap "Zone Horror" * * mask/val = 0x04, 0x20 * status 1f | signal c3c0 | snr a333 | ber 00000098 | unc 0 | FE_HAS_LOCK * * mask/val = 0x04, 0x30 * status 1f | signal c3c0 | snr a333 | ber 00000000 | unc 0 | FE_HAS_LOCK * * After tuning FECSTATUS contains actual FEC * in use numbered 1 through to 8 for 1/2 .. 2/3 etc * * NBC=NOT/NON BACKWARD COMPATIBLE WITH DVB-S (DVB-S2 only) * * NBC-QPSK FEC_1_2 0x00, 0x04 DVB-S2 * NBC-QPSK FEC_3_5 0x00, 0x05 DVB-S2 * NBC-QPSK FEC_2_3 0x00, 0x06 DVB-S2 * NBC-QPSK FEC_3_4 0x00, 0x07 DVB-S2 * NBC-QPSK FEC_4_5 0x00, 0x08 DVB-S2 * NBC-QPSK FEC_5_6 0x00, 0x09 DVB-S2 * NBC-QPSK FEC_8_9 0x00, 0x0a DVB-S2 * NBC-QPSK FEC_9_10 0x00, 0x0b DVB-S2 * * NBC-8PSK FEC_3_5 0x00, 0x0c DVB-S2 * NBC-8PSK FEC_2_3 0x00, 0x0d DVB-S2 * NBC-8PSK FEC_3_4 0x00, 0x0e DVB-S2 * NBC-8PSK FEC_5_6 0x00, 0x0f DVB-S2 * NBC-8PSK FEC_8_9 0x00, 0x10 DVB-S2 * NBC-8PSK FEC_9_10 0x00, 0x11 DVB-S2 * * For DVB-S2 low bytes selects both modulator * and FEC. High byte is meaningless here. To * set pilot, bit 6 (0x40) is set. When inspecting * FECSTATUS bit 7 (0x80) represents the pilot * selection whilst not tuned. When tuned, actual FEC * in use is found in FECSTATUS as per above. Pilot * value is reset. */ /* A table of modulation, fec and configuration bytes for the demod. * Not all S2 mmodulation schemes are support and not all rates with * a scheme are support. Especially, no auto detect when in S2 mode. 
*/ static struct cx24116_modfec { fe_delivery_system_t delivery_system; fe_modulation_t modulation; fe_code_rate_t fec; u8 mask; /* In DVBS mode this is used to autodetect */ u8 val; /* Passed to the firmware to indicate mode selection */ } CX24116_MODFEC_MODES[] = { /* QPSK. For unknown rates we set hardware to auto detect 0xfe 0x30 */ /*mod fec mask val */ { SYS_DVBS, QPSK, FEC_NONE, 0xfe, 0x30 }, { SYS_DVBS, QPSK, FEC_1_2, 0x02, 0x2e }, /* 00000010 00101110 */ { SYS_DVBS, QPSK, FEC_2_3, 0x04, 0x2f }, /* 00000100 00101111 */ { SYS_DVBS, QPSK, FEC_3_4, 0x08, 0x30 }, /* 00001000 00110000 */ { SYS_DVBS, QPSK, FEC_4_5, 0xfe, 0x30 }, /* 000?0000 ? */ { SYS_DVBS, QPSK, FEC_5_6, 0x20, 0x31 }, /* 00100000 00110001 */ { SYS_DVBS, QPSK, FEC_6_7, 0xfe, 0x30 }, /* 0?000000 ? */ { SYS_DVBS, QPSK, FEC_7_8, 0x80, 0x32 }, /* 10000000 00110010 */ { SYS_DVBS, QPSK, FEC_8_9, 0xfe, 0x30 }, /* 0000000? ? */ { SYS_DVBS, QPSK, FEC_AUTO, 0xfe, 0x30 }, /* NBC-QPSK */ { SYS_DVBS2, QPSK, FEC_1_2, 0x00, 0x04 }, { SYS_DVBS2, QPSK, FEC_3_5, 0x00, 0x05 }, { SYS_DVBS2, QPSK, FEC_2_3, 0x00, 0x06 }, { SYS_DVBS2, QPSK, FEC_3_4, 0x00, 0x07 }, { SYS_DVBS2, QPSK, FEC_4_5, 0x00, 0x08 }, { SYS_DVBS2, QPSK, FEC_5_6, 0x00, 0x09 }, { SYS_DVBS2, QPSK, FEC_8_9, 0x00, 0x0a }, { SYS_DVBS2, QPSK, FEC_9_10, 0x00, 0x0b }, /* 8PSK */ { SYS_DVBS2, PSK_8, FEC_3_5, 0x00, 0x0c }, { SYS_DVBS2, PSK_8, FEC_2_3, 0x00, 0x0d }, { SYS_DVBS2, PSK_8, FEC_3_4, 0x00, 0x0e }, { SYS_DVBS2, PSK_8, FEC_5_6, 0x00, 0x0f }, { SYS_DVBS2, PSK_8, FEC_8_9, 0x00, 0x10 }, { SYS_DVBS2, PSK_8, FEC_9_10, 0x00, 0x11 }, /* * `val' can be found in the FECSTATUS register when tuning. * FECSTATUS will give the actual FEC in use if tuning was successful. 
*/ }; static int cx24116_lookup_fecmod(struct cx24116_state *state, fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f) { int i, ret = -EOPNOTSUPP; dprintk("%s(0x%02x,0x%02x)\n", __func__, m, f); for (i = 0; i < ARRAY_SIZE(CX24116_MODFEC_MODES); i++) { if ((d == CX24116_MODFEC_MODES[i].delivery_system) && (m == CX24116_MODFEC_MODES[i].modulation) && (f == CX24116_MODFEC_MODES[i].fec)) { ret = i; break; } } return ret; } static int cx24116_set_fec(struct cx24116_state *state, fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec) { int ret = 0; dprintk("%s(0x%02x,0x%02x)\n", __func__, mod, fec); ret = cx24116_lookup_fecmod(state, delsys, mod, fec); if (ret < 0) return ret; state->dnxt.fec = fec; state->dnxt.fec_val = CX24116_MODFEC_MODES[ret].val; state->dnxt.fec_mask = CX24116_MODFEC_MODES[ret].mask; dprintk("%s() mask/val = 0x%02x/0x%02x\n", __func__, state->dnxt.fec_mask, state->dnxt.fec_val); return 0; } static int cx24116_set_symbolrate(struct cx24116_state *state, u32 rate) { dprintk("%s(%d)\n", __func__, rate); /* check if symbol rate is within limits */ if ((rate > state->frontend.ops.info.symbol_rate_max) || (rate < state->frontend.ops.info.symbol_rate_min)) { dprintk("%s() unsupported symbol_rate = %d\n", __func__, rate); return -EOPNOTSUPP; } state->dnxt.symbol_rate = rate; dprintk("%s() symbol_rate = %d\n", __func__, rate); return 0; } static int cx24116_load_firmware(struct dvb_frontend *fe, const struct firmware *fw); static int cx24116_firmware_ondemand(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; const struct firmware *fw; int ret = 0; dprintk("%s()\n", __func__); if (cx24116_readreg(state, 0x20) > 0) { if (state->skip_fw_load) return 0; /* Load firmware */ /* request the firmware, this will block until loaded */ printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__, CX24116_DEFAULT_FIRMWARE); ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE, state->i2c->dev.parent); 
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__); if (ret) { printk(KERN_ERR "%s: No firmware uploaded " "(timeout or file not found?)\n", __func__); return ret; } /* Make sure we don't recurse back through here * during loading */ state->skip_fw_load = 1; ret = cx24116_load_firmware(fe, fw); if (ret) printk(KERN_ERR "%s: Writing firmware to device failed\n", __func__); release_firmware(fw); printk(KERN_INFO "%s: Firmware upload %s\n", __func__, ret == 0 ? "complete" : "failed"); /* Ensure firmware is always loaded if required */ state->skip_fw_load = 0; } return ret; } /* Take a basic firmware command structure, format it * and forward it for processing */ static int cx24116_cmd_execute(struct dvb_frontend *fe, struct cx24116_cmd *cmd) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; dprintk("%s()\n", __func__); /* Load the firmware if required */ ret = cx24116_firmware_ondemand(fe); if (ret != 0) { printk(KERN_ERR "%s(): Unable initialise the firmware\n", __func__); return ret; } /* Write the command */ for (i = 0; i < cmd->len ; i++) { dprintk("%s: 0x%02x == 0x%02x\n", __func__, i, cmd->args[i]); cx24116_writereg(state, i, cmd->args[i]); } /* Start execution and wait for cmd to terminate */ cx24116_writereg(state, CX24116_REG_EXECUTE, 0x01); while (cx24116_readreg(state, CX24116_REG_EXECUTE)) { msleep(10); if (i++ > 64) { /* Avoid looping forever if the firmware does not respond */ printk(KERN_WARNING "%s() Firmware not responding\n", __func__); return -EREMOTEIO; } } return 0; } static int cx24116_load_firmware(struct dvb_frontend *fe, const struct firmware *fw) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int i, ret, len, max, remaining; unsigned char vers[4]; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes (%02x %02x .. 
%02x %02x)\n", fw->size, fw->data[0], fw->data[1], fw->data[fw->size-2], fw->data[fw->size-1]); /* Toggle 88x SRST pin to reset demod */ if (state->config->reset_device) state->config->reset_device(fe); /* Begin the firmware load process */ /* Prepare the demod, load the firmware, cleanup after load */ /* Init PLL */ cx24116_writereg(state, 0xE5, 0x00); cx24116_writereg(state, 0xF1, 0x08); cx24116_writereg(state, 0xF2, 0x13); /* Start PLL */ cx24116_writereg(state, 0xe0, 0x03); cx24116_writereg(state, 0xe0, 0x00); /* Unknown */ cx24116_writereg(state, CX24116_REG_CLKDIV, 0x46); cx24116_writereg(state, CX24116_REG_RATEDIV, 0x00); /* Unknown */ cx24116_writereg(state, 0xF0, 0x03); cx24116_writereg(state, 0xF4, 0x81); cx24116_writereg(state, 0xF5, 0x00); cx24116_writereg(state, 0xF6, 0x00); /* Split firmware to the max I2C write len and write. * Writes whole firmware as one write when i2c_wr_max is set to 0. */ if (state->config->i2c_wr_max) max = state->config->i2c_wr_max; else max = INT_MAX; /* enough for 32k firmware */ for (remaining = fw->size; remaining > 0; remaining -= max - 1) { len = remaining; if (len > max - 1) len = max - 1; cx24116_writeregN(state, 0xF7, &fw->data[fw->size - remaining], len); } cx24116_writereg(state, 0xF4, 0x10); cx24116_writereg(state, 0xF0, 0x00); cx24116_writereg(state, 0xF8, 0x06); /* Firmware CMD 10: VCO config */ cmd.args[0x00] = CMD_SET_VCO; cmd.args[0x01] = 0x05; cmd.args[0x02] = 0xdc; cmd.args[0x03] = 0xda; cmd.args[0x04] = 0xae; cmd.args[0x05] = 0xaa; cmd.args[0x06] = 0x04; cmd.args[0x07] = 0x9d; cmd.args[0x08] = 0xfc; cmd.args[0x09] = 0x06; cmd.len = 0x0a; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; cx24116_writereg(state, CX24116_REG_SSTATUS, 0x00); /* Firmware CMD 14: Tuner config */ cmd.args[0x00] = CMD_TUNERINIT; cmd.args[0x01] = 0x00; cmd.args[0x02] = 0x00; cmd.len = 0x03; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; cx24116_writereg(state, 0xe5, 0x00); /* Firmware CMD 13: MPEG config 
*/ cmd.args[0x00] = CMD_MPEGCONFIG; cmd.args[0x01] = 0x01; cmd.args[0x02] = 0x75; cmd.args[0x03] = 0x00; if (state->config->mpg_clk_pos_pol) cmd.args[0x04] = state->config->mpg_clk_pos_pol; else cmd.args[0x04] = 0x02; cmd.args[0x05] = 0x00; cmd.len = 0x06; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Firmware CMD 35: Get firmware version */ cmd.args[0x00] = CMD_UPDFWVERS; cmd.len = 0x02; for (i = 0; i < 4; i++) { cmd.args[0x01] = i; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; vers[i] = cx24116_readreg(state, CX24116_REG_MAILBOX); } printk(KERN_INFO "%s: FW version %i.%i.%i.%i\n", __func__, vers[0], vers[1], vers[2], vers[3]); return 0; } static int cx24116_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct cx24116_state *state = fe->demodulator_priv; int lock = cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_STATUS_MASK; dprintk("%s: status = 0x%02x\n", __func__, lock); *status = 0; if (lock & CX24116_HAS_SIGNAL) *status |= FE_HAS_SIGNAL; if (lock & CX24116_HAS_CARRIER) *status |= FE_HAS_CARRIER; if (lock & CX24116_HAS_VITERBI) *status |= FE_HAS_VITERBI; if (lock & CX24116_HAS_SYNCLOCK) *status |= FE_HAS_SYNC | FE_HAS_LOCK; return 0; } static int cx24116_read_ber(struct dvb_frontend *fe, u32 *ber) { struct cx24116_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); *ber = (cx24116_readreg(state, CX24116_REG_BER24) << 24) | (cx24116_readreg(state, CX24116_REG_BER16) << 16) | (cx24116_readreg(state, CX24116_REG_BER8) << 8) | cx24116_readreg(state, CX24116_REG_BER0); return 0; } /* TODO Determine function and scale appropriately */ static int cx24116_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; u16 sig_reading; dprintk("%s()\n", __func__); /* Firmware CMD 19: Get AGC */ cmd.args[0x00] = CMD_GETAGC; cmd.len = 0x01; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; 
sig_reading = (cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK) | (cx24116_readreg(state, CX24116_REG_SIGNAL) << 6); *signal_strength = 0 - sig_reading; dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n", __func__, sig_reading, *signal_strength); return 0; } /* SNR (0..100)% = (sig & 0xf0) * 10 + (sig & 0x0f) * 10 / 16 */ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr) { struct cx24116_state *state = fe->demodulator_priv; u8 snr_reading; static const u32 snr_tab[] = { /* 10 x Table (rounded up) */ 0x00000, 0x0199A, 0x03333, 0x04ccD, 0x06667, 0x08000, 0x0999A, 0x0b333, 0x0cccD, 0x0e667, 0x10000, 0x1199A, 0x13333, 0x14ccD, 0x16667, 0x18000 }; dprintk("%s()\n", __func__); snr_reading = cx24116_readreg(state, CX24116_REG_QUALITY0); if (snr_reading >= 0xa0 /* 100% */) *snr = 0xffff; else *snr = snr_tab[(snr_reading & 0xf0) >> 4] + (snr_tab[(snr_reading & 0x0f)] >> 4); dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__, snr_reading, *snr); return 0; } /* The reelbox patches show the value in the registers represents * ESNO, from 0->30db (values 0->300). We provide this value by * default. 
*/ static int cx24116_read_snr_esno(struct dvb_frontend *fe, u16 *snr) { struct cx24116_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); *snr = cx24116_readreg(state, CX24116_REG_QUALITY8) << 8 | cx24116_readreg(state, CX24116_REG_QUALITY0); dprintk("%s: raw 0x%04x\n", __func__, *snr); return 0; } static int cx24116_read_snr(struct dvb_frontend *fe, u16 *snr) { if (esno_snr == 1) return cx24116_read_snr_esno(fe, snr); else return cx24116_read_snr_pct(fe, snr); } static int cx24116_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct cx24116_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); *ucblocks = (cx24116_readreg(state, CX24116_REG_UCB8) << 8) | cx24116_readreg(state, CX24116_REG_UCB0); return 0; } /* Overwrite the current tuning params, we are about to tune */ static void cx24116_clone_params(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; state->dcur = state->dnxt; } /* Wait for LNB */ static int cx24116_wait_for_lnb(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; int i; dprintk("%s() qstatus = 0x%02x\n", __func__, cx24116_readreg(state, CX24116_REG_QSTATUS)); /* Wait for up to 300 ms */ for (i = 0; i < 30 ; i++) { if (cx24116_readreg(state, CX24116_REG_QSTATUS) & 0x20) return 0; msleep(10); } dprintk("%s(): LNB not ready\n", __func__); return -ETIMEDOUT; /* -EBUSY ? */ } static int cx24116_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct cx24116_cmd cmd; int ret; dprintk("%s: %s\n", __func__, voltage == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" : voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??"); /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Wait for voltage/min repeat delay */ msleep(100); cmd.args[0x00] = CMD_LNBDCLEVEL; cmd.args[0x01] = (voltage == SEC_VOLTAGE_18 ? 
0x01 : 0x00); cmd.len = 0x02; /* Min delay time before DiSEqC send */ msleep(15); return cx24116_cmd_execute(fe, &cmd); } static int cx24116_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct cx24116_cmd cmd; int ret; dprintk("%s(%d)\n", __func__, tone); if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) { printk(KERN_ERR "%s: Invalid, tone=%d\n", __func__, tone); return -EINVAL; } /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Min delay time after DiSEqC send */ msleep(15); /* XXX determine is FW does this, see send_diseqc/burst */ /* Now we set the tone */ cmd.args[0x00] = CMD_SET_TONE; cmd.args[0x01] = 0x00; cmd.args[0x02] = 0x00; switch (tone) { case SEC_TONE_ON: dprintk("%s: setting tone on\n", __func__); cmd.args[0x03] = 0x01; break; case SEC_TONE_OFF: dprintk("%s: setting tone off\n", __func__); cmd.args[0x03] = 0x00; break; } cmd.len = 0x04; /* Min delay time before DiSEqC send */ msleep(15); /* XXX determine is FW does this, see send_diseqc/burst */ return cx24116_cmd_execute(fe, &cmd); } /* Initialise DiSEqC */ static int cx24116_diseqc_init(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; /* Firmware CMD 20: LNB/DiSEqC config */ cmd.args[0x00] = CMD_LNBCONFIG; cmd.args[0x01] = 0x00; cmd.args[0x02] = 0x10; cmd.args[0x03] = 0x00; cmd.args[0x04] = 0x8f; cmd.args[0x05] = 0x28; cmd.args[0x06] = (toneburst == CX24116_DISEQC_TONEOFF) ? 0x00 : 0x01; cmd.args[0x07] = 0x01; cmd.len = 0x08; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Prepare a DiSEqC command */ state->dsec_cmd.args[0x00] = CMD_LNBSEND; /* DiSEqC burst */ state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_A; /* Unknown */ state->dsec_cmd.args[CX24116_DISEQC_ARG2_2] = 0x02; state->dsec_cmd.args[CX24116_DISEQC_ARG3_0] = 0x00; /* Continuation flag? 
*/ state->dsec_cmd.args[CX24116_DISEQC_ARG4_0] = 0x00; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = 0x00; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS; return 0; } /* Send DiSEqC message with derived burst (hack) || previous burst */ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *d) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; /* Dump DiSEqC message */ if (debug) { printk(KERN_INFO "cx24116: %s(", __func__); for (i = 0 ; i < d->msg_len ;) { printk(KERN_INFO "0x%02x", d->msg[i]); if (++i < d->msg_len) printk(KERN_INFO ", "); } printk(") toneburst=%d\n", toneburst); } /* Validate length */ if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS)) return -EINVAL; /* DiSEqC message */ for (i = 0; i < d->msg_len; i++) state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i]; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS + state->dsec_cmd.args[CX24116_DISEQC_MSGLEN]; /* DiSEqC toneburst */ if (toneburst == CX24116_DISEQC_MESGCACHE) /* Message is cached */ return 0; else if (toneburst == CX24116_DISEQC_TONEOFF) /* Message is sent without burst */ state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0; else if (toneburst == CX24116_DISEQC_TONECACHE) { /* * Message is sent with derived else cached burst * * WRITE PORT GROUP COMMAND 38 * * 0/A/A: E0 10 38 F0..F3 * 1/B/B: E0 10 38 F4..F7 * 2/C/A: E0 10 38 F8..FB * 3/D/B: E0 10 38 FC..FF * * databyte[3]= 8421:8421 * ABCD:WXYZ * CLR :SET * * WX= PORT SELECT 0..3 (X=TONEBURST) * Y = VOLTAGE (0=13V, 1=18V) * Z = BAND (0=LOW, 1=HIGH(22K)) */ if (d->msg_len >= 4 && d->msg[2] == 0x38) state->dsec_cmd.args[CX24116_DISEQC_BURST] = ((d->msg[3] & 4) >> 2); if (debug) dprintk("%s burst=%d\n", __func__, state->dsec_cmd.args[CX24116_DISEQC_BURST]); } /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return 
ret; /* Wait for voltage/min repeat delay */ msleep(100); /* Command */ ret = cx24116_cmd_execute(fe, &state->dsec_cmd); if (ret != 0) return ret; /* * Wait for send * * Eutelsat spec: * >15ms delay + (XXX determine if FW does this, see set_tone) * 13.5ms per byte + * >15ms delay + * 12.5ms burst + * >15ms delay (XXX determine if FW does this, see set_tone) */ msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + ((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60)); return 0; } /* Send DiSEqC burst */ static int cx24116_diseqc_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst) { struct cx24116_state *state = fe->demodulator_priv; int ret; dprintk("%s(%d) toneburst=%d\n", __func__, burst, toneburst); /* DiSEqC burst */ if (burst == SEC_MINI_A) state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_A; else if (burst == SEC_MINI_B) state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_B; else return -EINVAL; /* DiSEqC toneburst */ if (toneburst != CX24116_DISEQC_MESGCACHE) /* Burst is cached */ return 0; /* Burst is to be sent with cached message */ /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Wait for voltage/min repeat delay */ msleep(100); /* Command */ ret = cx24116_cmd_execute(fe, &state->dsec_cmd); if (ret != 0) return ret; /* * Wait for send * * Eutelsat spec: * >15ms delay + (XXX determine if FW does this, see set_tone) * 13.5ms per byte + * >15ms delay + * 12.5ms burst + * >15ms delay (XXX determine if FW does this, see set_tone) */ msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + 60); return 0; } static void cx24116_release(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); kfree(state); } static struct dvb_frontend_ops cx24116_ops; struct dvb_frontend *cx24116_attach(const struct cx24116_config *config, struct i2c_adapter *i2c) { struct cx24116_state *state = NULL; int ret; dprintk("%s\n", __func__); /* allocate 
memory for the internal state */ state = kzalloc(sizeof(struct cx24116_state), GFP_KERNEL); if (state == NULL) goto error1; state->config = config; state->i2c = i2c; /* check if the demod is present */ ret = (cx24116_readreg(state, 0xFF) << 8) | cx24116_readreg(state, 0xFE); if (ret != 0x0501) { printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n"); goto error2; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &cx24116_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error2: kfree(state); error1: return NULL; } EXPORT_SYMBOL(cx24116_attach); /* * Initialise or wake up device * * Power config will reset and load initial firmware if required */ static int cx24116_initfe(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; dprintk("%s()\n", __func__); /* Power on */ cx24116_writereg(state, 0xe0, 0); cx24116_writereg(state, 0xe1, 0); cx24116_writereg(state, 0xea, 0); /* Firmware CMD 36: Power config */ cmd.args[0x00] = CMD_TUNERSLEEP; cmd.args[0x01] = 0; cmd.len = 0x02; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; ret = cx24116_diseqc_init(fe); if (ret != 0) return ret; /* HVR-4000 needs this */ return cx24116_set_voltage(fe, SEC_VOLTAGE_13); } /* * Put device to sleep */ static int cx24116_sleep(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; dprintk("%s()\n", __func__); /* Firmware CMD 36: Power config */ cmd.args[0x00] = CMD_TUNERSLEEP; cmd.args[0x01] = 1; cmd.len = 0x02; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Power off (Shutdown clocks) */ cx24116_writereg(state, 0xea, 0xff); cx24116_writereg(state, 0xe1, 1); cx24116_writereg(state, 0xe0, 1); return 0; } /* dvb-core told us to tune, the tv property cache will be complete, * it's safe for is to pull values and use them for tuning purposes. 
*/ static int cx24116_set_frontend(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx24116_cmd cmd; fe_status_t tunerstat; int i, status, ret, retune = 1; dprintk("%s()\n", __func__); switch (c->delivery_system) { case SYS_DVBS: dprintk("%s: DVB-S delivery system selected\n", __func__); /* Only QPSK is supported for DVB-S */ if (c->modulation != QPSK) { dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } /* Pilot doesn't exist in DVB-S, turn bit off */ state->dnxt.pilot_val = CX24116_PILOT_OFF; /* DVB-S only supports 0.35 */ if (c->rolloff != ROLLOFF_35) { dprintk("%s: unsupported rolloff selected (%d)\n", __func__, c->rolloff); return -EOPNOTSUPP; } state->dnxt.rolloff_val = CX24116_ROLLOFF_035; break; case SYS_DVBS2: dprintk("%s: DVB-S2 delivery system selected\n", __func__); /* * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2, * but not hardware auto detection */ if (c->modulation != PSK_8 && c->modulation != QPSK) { dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } switch (c->pilot) { case PILOT_AUTO: /* Not supported but emulated */ state->dnxt.pilot_val = (c->modulation == QPSK) ? 
CX24116_PILOT_OFF : CX24116_PILOT_ON; retune++; break; case PILOT_OFF: state->dnxt.pilot_val = CX24116_PILOT_OFF; break; case PILOT_ON: state->dnxt.pilot_val = CX24116_PILOT_ON; break; default: dprintk("%s: unsupported pilot mode selected (%d)\n", __func__, c->pilot); return -EOPNOTSUPP; } switch (c->rolloff) { case ROLLOFF_20: state->dnxt.rolloff_val = CX24116_ROLLOFF_020; break; case ROLLOFF_25: state->dnxt.rolloff_val = CX24116_ROLLOFF_025; break; case ROLLOFF_35: state->dnxt.rolloff_val = CX24116_ROLLOFF_035; break; case ROLLOFF_AUTO: /* Rolloff must be explicit */ default: dprintk("%s: unsupported rolloff selected (%d)\n", __func__, c->rolloff); return -EOPNOTSUPP; } break; default: dprintk("%s: unsupported delivery system selected (%d)\n", __func__, c->delivery_system); return -EOPNOTSUPP; } state->dnxt.delsys = c->delivery_system; state->dnxt.modulation = c->modulation; state->dnxt.frequency = c->frequency; state->dnxt.pilot = c->pilot; state->dnxt.rolloff = c->rolloff; ret = cx24116_set_inversion(state, c->inversion); if (ret != 0) return ret; /* FEC_NONE/AUTO for DVB-S2 is not supported and detected here */ ret = cx24116_set_fec(state, c->delivery_system, c->modulation, c->fec_inner); if (ret != 0) return ret; ret = cx24116_set_symbolrate(state, c->symbol_rate); if (ret != 0) return ret; /* discard the 'current' tuning parameters and prepare to tune */ cx24116_clone_params(fe); dprintk("%s: delsys = %d\n", __func__, state->dcur.delsys); dprintk("%s: modulation = %d\n", __func__, state->dcur.modulation); dprintk("%s: frequency = %d\n", __func__, state->dcur.frequency); dprintk("%s: pilot = %d (val = 0x%02x)\n", __func__, state->dcur.pilot, state->dcur.pilot_val); dprintk("%s: retune = %d\n", __func__, retune); dprintk("%s: rolloff = %d (val = 0x%02x)\n", __func__, state->dcur.rolloff, state->dcur.rolloff_val); dprintk("%s: symbol_rate = %d\n", __func__, state->dcur.symbol_rate); dprintk("%s: FEC = %d (mask/val = 0x%02x/0x%02x)\n", __func__, state->dcur.fec, 
state->dcur.fec_mask, state->dcur.fec_val); dprintk("%s: Inversion = %d (val = 0x%02x)\n", __func__, state->dcur.inversion, state->dcur.inversion_val); /* This is also done in advise/acquire on HVR4000 but not on LITE */ if (state->config->set_ts_params) state->config->set_ts_params(fe, 0); /* Set/Reset B/W */ cmd.args[0x00] = CMD_BANDWIDTH; cmd.args[0x01] = 0x01; cmd.len = 0x02; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Prepare a tune request */ cmd.args[0x00] = CMD_TUNEREQUEST; /* Frequency */ cmd.args[0x01] = (state->dcur.frequency & 0xff0000) >> 16; cmd.args[0x02] = (state->dcur.frequency & 0x00ff00) >> 8; cmd.args[0x03] = (state->dcur.frequency & 0x0000ff); /* Symbol Rate */ cmd.args[0x04] = ((state->dcur.symbol_rate / 1000) & 0xff00) >> 8; cmd.args[0x05] = ((state->dcur.symbol_rate / 1000) & 0x00ff); /* Automatic Inversion */ cmd.args[0x06] = state->dcur.inversion_val; /* Modulation / FEC / Pilot */ cmd.args[0x07] = state->dcur.fec_val | state->dcur.pilot_val; cmd.args[0x08] = CX24116_SEARCH_RANGE_KHZ >> 8; cmd.args[0x09] = CX24116_SEARCH_RANGE_KHZ & 0xff; cmd.args[0x0a] = 0x00; cmd.args[0x0b] = 0x00; cmd.args[0x0c] = state->dcur.rolloff_val; cmd.args[0x0d] = state->dcur.fec_mask; if (state->dcur.symbol_rate > 30000000) { cmd.args[0x0e] = 0x04; cmd.args[0x0f] = 0x00; cmd.args[0x10] = 0x01; cmd.args[0x11] = 0x77; cmd.args[0x12] = 0x36; cx24116_writereg(state, CX24116_REG_CLKDIV, 0x44); cx24116_writereg(state, CX24116_REG_RATEDIV, 0x01); } else { cmd.args[0x0e] = 0x06; cmd.args[0x0f] = 0x00; cmd.args[0x10] = 0x00; cmd.args[0x11] = 0xFA; cmd.args[0x12] = 0x24; cx24116_writereg(state, CX24116_REG_CLKDIV, 0x46); cx24116_writereg(state, CX24116_REG_RATEDIV, 0x00); } cmd.len = 0x13; /* We need to support pilot and non-pilot tuning in the * driver automatically. This is a workaround for because * the demod does not support autodetect. 
*/ do { /* Reset status register */ status = cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK; cx24116_writereg(state, CX24116_REG_SSTATUS, status); /* Tune */ ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) break; /* * Wait for up to 500 ms before retrying * * If we are able to tune then generally it occurs within 100ms. * If it takes longer, try a different toneburst setting. */ for (i = 0; i < 50 ; i++) { cx24116_read_status(fe, &tunerstat); status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC); if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) { dprintk("%s: Tuned\n", __func__); goto tuned; } msleep(10); } dprintk("%s: Not tuned\n", __func__); /* Toggle pilot bit when in auto-pilot */ if (state->dcur.pilot == PILOT_AUTO) cmd.args[0x07] ^= CX24116_PILOT_ON; } while (--retune); tuned: /* Set/Reset B/W */ cmd.args[0x00] = CMD_BANDWIDTH; cmd.args[0x01] = 0x00; cmd.len = 0x02; return cx24116_cmd_execute(fe, &cmd); } static int cx24116_tune(struct dvb_frontend *fe, bool re_tune, unsigned int mode_flags, unsigned int *delay, fe_status_t *status) { /* * It is safe to discard "params" here, as the DVB core will sync * fe->dtv_property_cache with fepriv->parameters_in, where the * DVBv3 params are stored. The only practical usage for it indicate * that re-tuning is needed, e. g. (fepriv->state & FESTATE_RETUNE) is * true. 
*/ *delay = HZ / 5; if (re_tune) { int ret = cx24116_set_frontend(fe); if (ret) return ret; } return cx24116_read_status(fe, status); } static int cx24116_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } static struct dvb_frontend_ops cx24116_ops = { .delsys = { SYS_DVBS, SYS_DVBS2 }, .info = { .name = "Conexant CX24116/CX24118", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 1011, /* kHz for QPSK frontends */ .frequency_tolerance = 5000, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_2G_MODULATION | FE_CAN_QPSK | FE_CAN_RECOVER }, .release = cx24116_release, .init = cx24116_initfe, .sleep = cx24116_sleep, .read_status = cx24116_read_status, .read_ber = cx24116_read_ber, .read_signal_strength = cx24116_read_signal_strength, .read_snr = cx24116_read_snr, .read_ucblocks = cx24116_read_ucblocks, .set_tone = cx24116_set_tone, .set_voltage = cx24116_set_voltage, .diseqc_send_master_cmd = cx24116_send_diseqc_msg, .diseqc_send_burst = cx24116_diseqc_send_burst, .get_frontend_algo = cx24116_get_algo, .tune = cx24116_tune, .set_frontend = cx24116_set_frontend, }; MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24116/cx24118 hardware"); MODULE_AUTHOR("Steven Toth"); MODULE_LICENSE("GPL");
/* Conexant cx24116/cx24118 - DVBS/S2 Satellite demod/tuner driver Copyright (C) 2006-2008 Steven Toth <stoth@hauppauge.com> Copyright (C) 2006-2007 Georg Acher Copyright (C) 2007-2008 Darron Broad March 2007 Fixed some bugs. Added diseqc support. Added corrected signal strength support. August 2007 Sync with legacy version. Some clean ups. Copyright (C) 2008 Igor Liplianin September, 9th 2008 Fixed locking on high symbol rates (>30000). Implement MPEG initialization parameter. January, 17th 2009 Fill set_voltage with actually control voltage code. Correct set tone to not affect voltage. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/firmware.h> #include "dvb_frontend.h" #include "cx24116.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)"); #define dprintk(args...) \ do { \ if (debug) \ printk(KERN_INFO "cx24116: " args); \ } while (0) #define CX24116_DEFAULT_FIRMWARE "dvb-fe-cx24116.fw" #define CX24116_SEARCH_RANGE_KHZ 5000 /* known registers */ #define CX24116_REG_COMMAND (0x00) /* command args 0x00..0x1e */ #define CX24116_REG_EXECUTE (0x1f) /* execute command */ #define CX24116_REG_MAILBOX (0x96) /* FW or multipurpose mailbox? 
*/ #define CX24116_REG_RESET (0x20) /* reset status > 0 */ #define CX24116_REG_SIGNAL (0x9e) /* signal low */ #define CX24116_REG_SSTATUS (0x9d) /* signal high / status */ #define CX24116_REG_QUALITY8 (0xa3) #define CX24116_REG_QSTATUS (0xbc) #define CX24116_REG_QUALITY0 (0xd5) #define CX24116_REG_BER0 (0xc9) #define CX24116_REG_BER8 (0xc8) #define CX24116_REG_BER16 (0xc7) #define CX24116_REG_BER24 (0xc6) #define CX24116_REG_UCB0 (0xcb) #define CX24116_REG_UCB8 (0xca) #define CX24116_REG_CLKDIV (0xf3) #define CX24116_REG_RATEDIV (0xf9) /* configured fec (not tuned) or actual FEC (tuned) 1=1/2 2=2/3 etc */ #define CX24116_REG_FECSTATUS (0x9c) /* FECSTATUS bits */ /* mask to determine configured fec (not tuned) or actual fec (tuned) */ #define CX24116_FEC_FECMASK (0x1f) /* Select DVB-S demodulator, else DVB-S2 */ #define CX24116_FEC_DVBS (0x20) #define CX24116_FEC_UNKNOWN (0x40) /* Unknown/unused */ /* Pilot mode requested when tuning else always reset when tuned */ #define CX24116_FEC_PILOT (0x80) /* arg buffer size */ #define CX24116_ARGLEN (0x1e) /* rolloff */ #define CX24116_ROLLOFF_020 (0x00) #define CX24116_ROLLOFF_025 (0x01) #define CX24116_ROLLOFF_035 (0x02) /* pilot bit */ #define CX24116_PILOT_OFF (0x00) #define CX24116_PILOT_ON (0x40) /* signal status */ #define CX24116_HAS_SIGNAL (0x01) #define CX24116_HAS_CARRIER (0x02) #define CX24116_HAS_VITERBI (0x04) #define CX24116_HAS_SYNCLOCK (0x08) #define CX24116_HAS_UNKNOWN1 (0x10) #define CX24116_HAS_UNKNOWN2 (0x20) #define CX24116_STATUS_MASK (0x0f) #define CX24116_SIGNAL_MASK (0xc0) #define CX24116_DISEQC_TONEOFF (0) /* toneburst never sent */ #define CX24116_DISEQC_TONECACHE (1) /* toneburst cached */ #define CX24116_DISEQC_MESGCACHE (2) /* message cached */ /* arg offset for DiSEqC */ #define CX24116_DISEQC_BURST (1) #define CX24116_DISEQC_ARG2_2 (2) /* unknown value=2 */ #define CX24116_DISEQC_ARG3_0 (3) /* unknown value=0 */ #define CX24116_DISEQC_ARG4_0 (4) /* unknown value=0 */ #define 
CX24116_DISEQC_MSGLEN (5) #define CX24116_DISEQC_MSGOFS (6) /* DiSEqC burst */ #define CX24116_DISEQC_MINI_A (0) #define CX24116_DISEQC_MINI_B (1) /* DiSEqC tone burst */ static int toneburst = 1; module_param(toneburst, int, 0644); MODULE_PARM_DESC(toneburst, "DiSEqC toneburst 0=OFF, 1=TONE CACHE, "\ "2=MESSAGE CACHE (default:1)"); /* SNR measurements */ static int esno_snr; module_param(esno_snr, int, 0644); MODULE_PARM_DESC(esno_snr, "SNR return units, 0=PERCENTAGE 0-100, "\ "1=ESNO(db * 10) (default:0)"); enum cmds { CMD_SET_VCO = 0x10, CMD_TUNEREQUEST = 0x11, CMD_MPEGCONFIG = 0x13, CMD_TUNERINIT = 0x14, CMD_BANDWIDTH = 0x15, CMD_GETAGC = 0x19, CMD_LNBCONFIG = 0x20, CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */ CMD_LNBDCLEVEL = 0x22, CMD_SET_TONE = 0x23, CMD_UPDFWVERS = 0x35, CMD_TUNERSLEEP = 0x36, CMD_AGCCONTROL = 0x3b, /* Unknown */ }; /* The Demod/Tuner can't easily provide these, we cache them */ struct cx24116_tuning { u32 frequency; u32 symbol_rate; fe_spectral_inversion_t inversion; fe_code_rate_t fec; fe_delivery_system_t delsys; fe_modulation_t modulation; fe_pilot_t pilot; fe_rolloff_t rolloff; /* Demod values */ u8 fec_val; u8 fec_mask; u8 inversion_val; u8 pilot_val; u8 rolloff_val; }; /* Basic commands that are sent to the firmware */ struct cx24116_cmd { u8 len; u8 args[CX24116_ARGLEN]; }; struct cx24116_state { struct i2c_adapter *i2c; const struct cx24116_config *config; struct dvb_frontend frontend; struct cx24116_tuning dcur; struct cx24116_tuning dnxt; u8 skip_fw_load; u8 burst; struct cx24116_cmd dsec_cmd; }; static int cx24116_writereg(struct cx24116_state *state, int reg, int data) { u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; int err; if (debug > 1) printk("cx24116: %s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data); err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) { printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x," " value == 
0x%02x)\n", __func__, err, reg, data); return -EREMOTEIO; } return 0; } /* Bulk byte writes to a single I2C address, for 32k firmware load */ static int cx24116_writeregN(struct cx24116_state *state, int reg, const u8 *data, u16 len) { int ret = -EREMOTEIO; struct i2c_msg msg; u8 *buf; buf = kmalloc(len + 1, GFP_KERNEL); if (buf == NULL) { printk("Unable to kmalloc\n"); ret = -ENOMEM; goto error; } *(buf) = reg; memcpy(buf + 1, data, len); msg.addr = state->config->demod_address; msg.flags = 0; msg.buf = buf; msg.len = len + 1; if (debug > 1) printk(KERN_INFO "cx24116: %s: write regN 0x%02x, len = %d\n", __func__, reg, len); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) { printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x\n", __func__, ret, reg); ret = -EREMOTEIO; } error: kfree(buf); return ret; } static int cx24116_readreg(struct cx24116_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_ERR "%s: reg=0x%x (error=%d)\n", __func__, reg, ret); return ret; } if (debug > 1) printk(KERN_INFO "cx24116: read reg 0x%02x, value 0x%02x\n", reg, b1[0]); return b1[0]; } static int cx24116_set_inversion(struct cx24116_state *state, fe_spectral_inversion_t inversion) { dprintk("%s(%d)\n", __func__, inversion); switch (inversion) { case INVERSION_OFF: state->dnxt.inversion_val = 0x00; break; case INVERSION_ON: state->dnxt.inversion_val = 0x04; break; case INVERSION_AUTO: state->dnxt.inversion_val = 0x0C; break; default: return -EINVAL; } state->dnxt.inversion = inversion; return 0; } /* * modfec (modulation and FEC) * =========================== * * MOD FEC mask/val standard * ---- -------- ----------- -------- * QPSK FEC_1_2 0x02 0x02+X DVB-S * QPSK FEC_2_3 0x04 0x02+X DVB-S * QPSK FEC_3_4 0x08 
0x02+X DVB-S * QPSK FEC_4_5 0x10 0x02+X DVB-S (?) * QPSK FEC_5_6 0x20 0x02+X DVB-S * QPSK FEC_6_7 0x40 0x02+X DVB-S * QPSK FEC_7_8 0x80 0x02+X DVB-S * QPSK FEC_8_9 0x01 0x02+X DVB-S (?) (NOT SUPPORTED?) * QPSK AUTO 0xff 0x02+X DVB-S * * For DVB-S high byte probably represents FEC * and low byte selects the modulator. The high * byte is search range mask. Bit 5 may turn * on DVB-S and remaining bits represent some * kind of calibration (how/what i do not know). * * Eg.(2/3) szap "Zone Horror" * * mask/val = 0x04, 0x20 * status 1f | signal c3c0 | snr a333 | ber 00000098 | unc 0 | FE_HAS_LOCK * * mask/val = 0x04, 0x30 * status 1f | signal c3c0 | snr a333 | ber 00000000 | unc 0 | FE_HAS_LOCK * * After tuning FECSTATUS contains actual FEC * in use numbered 1 through to 8 for 1/2 .. 2/3 etc * * NBC=NOT/NON BACKWARD COMPATIBLE WITH DVB-S (DVB-S2 only) * * NBC-QPSK FEC_1_2 0x00, 0x04 DVB-S2 * NBC-QPSK FEC_3_5 0x00, 0x05 DVB-S2 * NBC-QPSK FEC_2_3 0x00, 0x06 DVB-S2 * NBC-QPSK FEC_3_4 0x00, 0x07 DVB-S2 * NBC-QPSK FEC_4_5 0x00, 0x08 DVB-S2 * NBC-QPSK FEC_5_6 0x00, 0x09 DVB-S2 * NBC-QPSK FEC_8_9 0x00, 0x0a DVB-S2 * NBC-QPSK FEC_9_10 0x00, 0x0b DVB-S2 * * NBC-8PSK FEC_3_5 0x00, 0x0c DVB-S2 * NBC-8PSK FEC_2_3 0x00, 0x0d DVB-S2 * NBC-8PSK FEC_3_4 0x00, 0x0e DVB-S2 * NBC-8PSK FEC_5_6 0x00, 0x0f DVB-S2 * NBC-8PSK FEC_8_9 0x00, 0x10 DVB-S2 * NBC-8PSK FEC_9_10 0x00, 0x11 DVB-S2 * * For DVB-S2 low bytes selects both modulator * and FEC. High byte is meaningless here. To * set pilot, bit 6 (0x40) is set. When inspecting * FECSTATUS bit 7 (0x80) represents the pilot * selection whilst not tuned. When tuned, actual FEC * in use is found in FECSTATUS as per above. Pilot * value is reset. */ /* A table of modulation, fec and configuration bytes for the demod. * Not all S2 mmodulation schemes are support and not all rates with * a scheme are support. Especially, no auto detect when in S2 mode. 
*/ static struct cx24116_modfec { fe_delivery_system_t delivery_system; fe_modulation_t modulation; fe_code_rate_t fec; u8 mask; /* In DVBS mode this is used to autodetect */ u8 val; /* Passed to the firmware to indicate mode selection */ } CX24116_MODFEC_MODES[] = { /* QPSK. For unknown rates we set hardware to auto detect 0xfe 0x30 */ /*mod fec mask val */ { SYS_DVBS, QPSK, FEC_NONE, 0xfe, 0x30 }, { SYS_DVBS, QPSK, FEC_1_2, 0x02, 0x2e }, /* 00000010 00101110 */ { SYS_DVBS, QPSK, FEC_2_3, 0x04, 0x2f }, /* 00000100 00101111 */ { SYS_DVBS, QPSK, FEC_3_4, 0x08, 0x30 }, /* 00001000 00110000 */ { SYS_DVBS, QPSK, FEC_4_5, 0xfe, 0x30 }, /* 000?0000 ? */ { SYS_DVBS, QPSK, FEC_5_6, 0x20, 0x31 }, /* 00100000 00110001 */ { SYS_DVBS, QPSK, FEC_6_7, 0xfe, 0x30 }, /* 0?000000 ? */ { SYS_DVBS, QPSK, FEC_7_8, 0x80, 0x32 }, /* 10000000 00110010 */ { SYS_DVBS, QPSK, FEC_8_9, 0xfe, 0x30 }, /* 0000000? ? */ { SYS_DVBS, QPSK, FEC_AUTO, 0xfe, 0x30 }, /* NBC-QPSK */ { SYS_DVBS2, QPSK, FEC_1_2, 0x00, 0x04 }, { SYS_DVBS2, QPSK, FEC_3_5, 0x00, 0x05 }, { SYS_DVBS2, QPSK, FEC_2_3, 0x00, 0x06 }, { SYS_DVBS2, QPSK, FEC_3_4, 0x00, 0x07 }, { SYS_DVBS2, QPSK, FEC_4_5, 0x00, 0x08 }, { SYS_DVBS2, QPSK, FEC_5_6, 0x00, 0x09 }, { SYS_DVBS2, QPSK, FEC_8_9, 0x00, 0x0a }, { SYS_DVBS2, QPSK, FEC_9_10, 0x00, 0x0b }, /* 8PSK */ { SYS_DVBS2, PSK_8, FEC_3_5, 0x00, 0x0c }, { SYS_DVBS2, PSK_8, FEC_2_3, 0x00, 0x0d }, { SYS_DVBS2, PSK_8, FEC_3_4, 0x00, 0x0e }, { SYS_DVBS2, PSK_8, FEC_5_6, 0x00, 0x0f }, { SYS_DVBS2, PSK_8, FEC_8_9, 0x00, 0x10 }, { SYS_DVBS2, PSK_8, FEC_9_10, 0x00, 0x11 }, /* * `val' can be found in the FECSTATUS register when tuning. * FECSTATUS will give the actual FEC in use if tuning was successful. 
*/ }; static int cx24116_lookup_fecmod(struct cx24116_state *state, fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f) { int i, ret = -EOPNOTSUPP; dprintk("%s(0x%02x,0x%02x)\n", __func__, m, f); for (i = 0; i < ARRAY_SIZE(CX24116_MODFEC_MODES); i++) { if ((d == CX24116_MODFEC_MODES[i].delivery_system) && (m == CX24116_MODFEC_MODES[i].modulation) && (f == CX24116_MODFEC_MODES[i].fec)) { ret = i; break; } } return ret; } static int cx24116_set_fec(struct cx24116_state *state, fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec) { int ret = 0; dprintk("%s(0x%02x,0x%02x)\n", __func__, mod, fec); ret = cx24116_lookup_fecmod(state, delsys, mod, fec); if (ret < 0) return ret; state->dnxt.fec = fec; state->dnxt.fec_val = CX24116_MODFEC_MODES[ret].val; state->dnxt.fec_mask = CX24116_MODFEC_MODES[ret].mask; dprintk("%s() mask/val = 0x%02x/0x%02x\n", __func__, state->dnxt.fec_mask, state->dnxt.fec_val); return 0; } static int cx24116_set_symbolrate(struct cx24116_state *state, u32 rate) { dprintk("%s(%d)\n", __func__, rate); /* check if symbol rate is within limits */ if ((rate > state->frontend.ops.info.symbol_rate_max) || (rate < state->frontend.ops.info.symbol_rate_min)) { dprintk("%s() unsupported symbol_rate = %d\n", __func__, rate); return -EOPNOTSUPP; } state->dnxt.symbol_rate = rate; dprintk("%s() symbol_rate = %d\n", __func__, rate); return 0; } static int cx24116_load_firmware(struct dvb_frontend *fe, const struct firmware *fw); static int cx24116_firmware_ondemand(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; const struct firmware *fw; int ret = 0; dprintk("%s()\n", __func__); if (cx24116_readreg(state, 0x20) > 0) { if (state->skip_fw_load) return 0; /* Load firmware */ /* request the firmware, this will block until loaded */ printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__, CX24116_DEFAULT_FIRMWARE); ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE, state->i2c->dev.parent); 
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__); if (ret) { printk(KERN_ERR "%s: No firmware uploaded " "(timeout or file not found?)\n", __func__); return ret; } /* Make sure we don't recurse back through here * during loading */ state->skip_fw_load = 1; ret = cx24116_load_firmware(fe, fw); if (ret) printk(KERN_ERR "%s: Writing firmware to device failed\n", __func__); release_firmware(fw); printk(KERN_INFO "%s: Firmware upload %s\n", __func__, ret == 0 ? "complete" : "failed"); /* Ensure firmware is always loaded if required */ state->skip_fw_load = 0; } return ret; } /* Take a basic firmware command structure, format it * and forward it for processing */ static int cx24116_cmd_execute(struct dvb_frontend *fe, struct cx24116_cmd *cmd) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; dprintk("%s()\n", __func__); /* Load the firmware if required */ ret = cx24116_firmware_ondemand(fe); if (ret != 0) { printk(KERN_ERR "%s(): Unable initialise the firmware\n", __func__); return ret; } /* Write the command */ for (i = 0; i < cmd->len ; i++) { dprintk("%s: 0x%02x == 0x%02x\n", __func__, i, cmd->args[i]); cx24116_writereg(state, i, cmd->args[i]); } /* Start execution and wait for cmd to terminate */ cx24116_writereg(state, CX24116_REG_EXECUTE, 0x01); while (cx24116_readreg(state, CX24116_REG_EXECUTE)) { msleep(10); if (i++ > 64) { /* Avoid looping forever if the firmware does not respond */ printk(KERN_WARNING "%s() Firmware not responding\n", __func__); return -EREMOTEIO; } } return 0; } static int cx24116_load_firmware(struct dvb_frontend *fe, const struct firmware *fw) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int i, ret, len, max, remaining; unsigned char vers[4]; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes (%02x %02x .. 
%02x %02x)\n", fw->size, fw->data[0], fw->data[1], fw->data[fw->size-2], fw->data[fw->size-1]); /* Toggle 88x SRST pin to reset demod */ if (state->config->reset_device) state->config->reset_device(fe); /* Begin the firmware load process */ /* Prepare the demod, load the firmware, cleanup after load */ /* Init PLL */ cx24116_writereg(state, 0xE5, 0x00); cx24116_writereg(state, 0xF1, 0x08); cx24116_writereg(state, 0xF2, 0x13); /* Start PLL */ cx24116_writereg(state, 0xe0, 0x03); cx24116_writereg(state, 0xe0, 0x00); /* Unknown */ cx24116_writereg(state, CX24116_REG_CLKDIV, 0x46); cx24116_writereg(state, CX24116_REG_RATEDIV, 0x00); /* Unknown */ cx24116_writereg(state, 0xF0, 0x03); cx24116_writereg(state, 0xF4, 0x81); cx24116_writereg(state, 0xF5, 0x00); cx24116_writereg(state, 0xF6, 0x00); /* Split firmware to the max I2C write len and write. * Writes whole firmware as one write when i2c_wr_max is set to 0. */ if (state->config->i2c_wr_max) max = state->config->i2c_wr_max; else max = INT_MAX; /* enough for 32k firmware */ for (remaining = fw->size; remaining > 0; remaining -= max - 1) { len = remaining; if (len > max - 1) len = max - 1; cx24116_writeregN(state, 0xF7, &fw->data[fw->size - remaining], len); } cx24116_writereg(state, 0xF4, 0x10); cx24116_writereg(state, 0xF0, 0x00); cx24116_writereg(state, 0xF8, 0x06); /* Firmware CMD 10: VCO config */ cmd.args[0x00] = CMD_SET_VCO; cmd.args[0x01] = 0x05; cmd.args[0x02] = 0xdc; cmd.args[0x03] = 0xda; cmd.args[0x04] = 0xae; cmd.args[0x05] = 0xaa; cmd.args[0x06] = 0x04; cmd.args[0x07] = 0x9d; cmd.args[0x08] = 0xfc; cmd.args[0x09] = 0x06; cmd.len = 0x0a; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; cx24116_writereg(state, CX24116_REG_SSTATUS, 0x00); /* Firmware CMD 14: Tuner config */ cmd.args[0x00] = CMD_TUNERINIT; cmd.args[0x01] = 0x00; cmd.args[0x02] = 0x00; cmd.len = 0x03; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; cx24116_writereg(state, 0xe5, 0x00); /* Firmware CMD 13: MPEG config 
*/ cmd.args[0x00] = CMD_MPEGCONFIG; cmd.args[0x01] = 0x01; cmd.args[0x02] = 0x75; cmd.args[0x03] = 0x00; if (state->config->mpg_clk_pos_pol) cmd.args[0x04] = state->config->mpg_clk_pos_pol; else cmd.args[0x04] = 0x02; cmd.args[0x05] = 0x00; cmd.len = 0x06; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Firmware CMD 35: Get firmware version */ cmd.args[0x00] = CMD_UPDFWVERS; cmd.len = 0x02; for (i = 0; i < 4; i++) { cmd.args[0x01] = i; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; vers[i] = cx24116_readreg(state, CX24116_REG_MAILBOX); } printk(KERN_INFO "%s: FW version %i.%i.%i.%i\n", __func__, vers[0], vers[1], vers[2], vers[3]); return 0; } static int cx24116_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct cx24116_state *state = fe->demodulator_priv; int lock = cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_STATUS_MASK; dprintk("%s: status = 0x%02x\n", __func__, lock); *status = 0; if (lock & CX24116_HAS_SIGNAL) *status |= FE_HAS_SIGNAL; if (lock & CX24116_HAS_CARRIER) *status |= FE_HAS_CARRIER; if (lock & CX24116_HAS_VITERBI) *status |= FE_HAS_VITERBI; if (lock & CX24116_HAS_SYNCLOCK) *status |= FE_HAS_SYNC | FE_HAS_LOCK; return 0; } static int cx24116_read_ber(struct dvb_frontend *fe, u32 *ber) { struct cx24116_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); *ber = (cx24116_readreg(state, CX24116_REG_BER24) << 24) | (cx24116_readreg(state, CX24116_REG_BER16) << 16) | (cx24116_readreg(state, CX24116_REG_BER8) << 8) | cx24116_readreg(state, CX24116_REG_BER0); return 0; } /* TODO Determine function and scale appropriately */ static int cx24116_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; u16 sig_reading; dprintk("%s()\n", __func__); /* Firmware CMD 19: Get AGC */ cmd.args[0x00] = CMD_GETAGC; cmd.len = 0x01; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; 
sig_reading = (cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK) | (cx24116_readreg(state, CX24116_REG_SIGNAL) << 6); *signal_strength = 0 - sig_reading; dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n", __func__, sig_reading, *signal_strength); return 0; } /* SNR (0..100)% = (sig & 0xf0) * 10 + (sig & 0x0f) * 10 / 16 */ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr) { struct cx24116_state *state = fe->demodulator_priv; u8 snr_reading; static const u32 snr_tab[] = { /* 10 x Table (rounded up) */ 0x00000, 0x0199A, 0x03333, 0x04ccD, 0x06667, 0x08000, 0x0999A, 0x0b333, 0x0cccD, 0x0e667, 0x10000, 0x1199A, 0x13333, 0x14ccD, 0x16667, 0x18000 }; dprintk("%s()\n", __func__); snr_reading = cx24116_readreg(state, CX24116_REG_QUALITY0); if (snr_reading >= 0xa0 /* 100% */) *snr = 0xffff; else *snr = snr_tab[(snr_reading & 0xf0) >> 4] + (snr_tab[(snr_reading & 0x0f)] >> 4); dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__, snr_reading, *snr); return 0; } /* The reelbox patches show the value in the registers represents * ESNO, from 0->30db (values 0->300). We provide this value by * default. 
 */
/* Read the raw ESNO quality registers (0..300 => 0..30 dB, per the
 * reelbox patches referenced above); no scaling is applied here. */
static int cx24116_read_snr_esno(struct dvb_frontend *fe, u16 *snr)
{
	struct cx24116_state *state = fe->demodulator_priv;

	dprintk("%s()\n", __func__);

	*snr = cx24116_readreg(state, CX24116_REG_QUALITY8) << 8 |
		cx24116_readreg(state, CX24116_REG_QUALITY0);

	dprintk("%s: raw 0x%04x\n", __func__, *snr);

	return 0;
}

/* Dispatch SNR readout according to the esno_snr module parameter:
 * 1 selects raw ESNO units, anything else the percentage mapping. */
static int cx24116_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	if (esno_snr == 1)
		return cx24116_read_snr_esno(fe, snr);
	else
		return cx24116_read_snr_pct(fe, snr);
}

/* Uncorrected block count, assembled from the two 8-bit UCB registers. */
static int cx24116_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct cx24116_state *state = fe->demodulator_priv;

	dprintk("%s()\n", __func__);

	*ucblocks = (cx24116_readreg(state, CX24116_REG_UCB8) << 8) |
		cx24116_readreg(state, CX24116_REG_UCB0);

	return 0;
}

/* Overwrite the current tuning params, we are about to tune */
static void cx24116_clone_params(struct dvb_frontend *fe)
{
	struct cx24116_state *state = fe->demodulator_priv;
	state->dcur = state->dnxt;
}

/* Wait for LNB */
/* Poll QSTATUS bit 0x20 (LNB ready) every 10 ms, up to 300 ms. */
static int cx24116_wait_for_lnb(struct dvb_frontend *fe)
{
	struct cx24116_state *state = fe->demodulator_priv;
	int i;

	dprintk("%s() qstatus = 0x%02x\n", __func__,
		cx24116_readreg(state, CX24116_REG_QSTATUS));

	/* Wait for up to 300 ms */
	for (i = 0; i < 30 ; i++) {
		if (cx24116_readreg(state, CX24116_REG_QSTATUS) & 0x20)
			return 0;
		msleep(10);
	}

	dprintk("%s(): LNB not ready\n", __func__);

	return -ETIMEDOUT; /* -EBUSY ? */
}

/* Select 13V/18V LNB supply via firmware CMD 0x22 (LNB DC level).
 * The msleep() calls space this out from any preceding DiSEqC traffic. */
static int cx24116_set_voltage(struct dvb_frontend *fe,
	fe_sec_voltage_t voltage)
{
	struct cx24116_cmd cmd;
	int ret;

	dprintk("%s: %s\n", __func__,
		voltage == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
		voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");

	/* Wait for LNB ready */
	ret = cx24116_wait_for_lnb(fe);
	if (ret != 0)
		return ret;

	/* Wait for voltage/min repeat delay */
	msleep(100);

	cmd.args[0x00] = CMD_LNBDCLEVEL;
	cmd.args[0x01] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00);
	cmd.len = 0x02;

	/* Min delay time before DiSEqC send */
	msleep(15);

	return cx24116_cmd_execute(fe, &cmd);
}

/* Enable/disable the 22 kHz continuous tone via firmware CMD 0x23.
 * Only SEC_TONE_ON/SEC_TONE_OFF are valid; anything else is -EINVAL. */
static int cx24116_set_tone(struct dvb_frontend *fe,
	fe_sec_tone_mode_t tone)
{
	struct cx24116_cmd cmd;
	int ret;

	dprintk("%s(%d)\n", __func__, tone);
	if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) {
		printk(KERN_ERR "%s: Invalid, tone=%d\n", __func__, tone);
		return -EINVAL;
	}

	/* Wait for LNB ready */
	ret = cx24116_wait_for_lnb(fe);
	if (ret != 0)
		return ret;

	/* Min delay time after DiSEqC send */
	msleep(15); /* XXX determine is FW does this, see send_diseqc/burst */

	/* Now we set the tone */
	cmd.args[0x00] = CMD_SET_TONE;
	cmd.args[0x01] = 0x00;
	cmd.args[0x02] = 0x00;

	switch (tone) {
	case SEC_TONE_ON:
		dprintk("%s: setting tone on\n", __func__);
		cmd.args[0x03] = 0x01;
		break;
	case SEC_TONE_OFF:
		dprintk("%s: setting tone off\n", __func__);
		cmd.args[0x03] = 0x00;
		break;
	}
	cmd.len = 0x04;

	/* Min delay time before DiSEqC send */
	msleep(15); /* XXX determine is FW does this, see send_diseqc/burst */

	return cx24116_cmd_execute(fe, &cmd);
}

/* Initialise DiSEqC */
/* Sends firmware CMD 0x20 (LNB/DiSEqC config) and pre-fills the cached
 * dsec_cmd template that send_diseqc_msg/diseqc_send_burst later reuse. */
static int cx24116_diseqc_init(struct dvb_frontend *fe)
{
	struct cx24116_state *state = fe->demodulator_priv;
	struct cx24116_cmd cmd;
	int ret;

	/* Firmware CMD 20: LNB/DiSEqC config */
	cmd.args[0x00] = CMD_LNBCONFIG;
	cmd.args[0x01] = 0x00;
	cmd.args[0x02] = 0x10;
	cmd.args[0x03] = 0x00;
	cmd.args[0x04] = 0x8f;
	cmd.args[0x05] = 0x28;
	cmd.args[0x06] = (toneburst == CX24116_DISEQC_TONEOFF) ? 0x00 : 0x01;
	cmd.args[0x07] = 0x01;
	cmd.len = 0x08;
	ret = cx24116_cmd_execute(fe, &cmd);
	if (ret != 0)
		return ret;

	/* Prepare a DiSEqC command */
	state->dsec_cmd.args[0x00] = CMD_LNBSEND;

	/* DiSEqC burst */
	state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_A;

	/* Unknown */
	state->dsec_cmd.args[CX24116_DISEQC_ARG2_2] = 0x02;
	state->dsec_cmd.args[CX24116_DISEQC_ARG3_0] = 0x00;

	/* Continuation flag?
*/ state->dsec_cmd.args[CX24116_DISEQC_ARG4_0] = 0x00; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = 0x00; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS; return 0; } /* Send DiSEqC message with derived burst (hack) || previous burst */ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *d) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; /* Validate length */ if (d->msg_len > sizeof(d->msg)) return -EINVAL; /* Dump DiSEqC message */ if (debug) { printk(KERN_INFO "cx24116: %s(", __func__); for (i = 0 ; i < d->msg_len ;) { printk(KERN_INFO "0x%02x", d->msg[i]); if (++i < d->msg_len) printk(KERN_INFO ", "); } printk(") toneburst=%d\n", toneburst); } /* DiSEqC message */ for (i = 0; i < d->msg_len; i++) state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i]; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS + state->dsec_cmd.args[CX24116_DISEQC_MSGLEN]; /* DiSEqC toneburst */ if (toneburst == CX24116_DISEQC_MESGCACHE) /* Message is cached */ return 0; else if (toneburst == CX24116_DISEQC_TONEOFF) /* Message is sent without burst */ state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0; else if (toneburst == CX24116_DISEQC_TONECACHE) { /* * Message is sent with derived else cached burst * * WRITE PORT GROUP COMMAND 38 * * 0/A/A: E0 10 38 F0..F3 * 1/B/B: E0 10 38 F4..F7 * 2/C/A: E0 10 38 F8..FB * 3/D/B: E0 10 38 FC..FF * * databyte[3]= 8421:8421 * ABCD:WXYZ * CLR :SET * * WX= PORT SELECT 0..3 (X=TONEBURST) * Y = VOLTAGE (0=13V, 1=18V) * Z = BAND (0=LOW, 1=HIGH(22K)) */ if (d->msg_len >= 4 && d->msg[2] == 0x38) state->dsec_cmd.args[CX24116_DISEQC_BURST] = ((d->msg[3] & 4) >> 2); if (debug) dprintk("%s burst=%d\n", __func__, state->dsec_cmd.args[CX24116_DISEQC_BURST]); } /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Wait for voltage/min 
repeat delay */ msleep(100); /* Command */ ret = cx24116_cmd_execute(fe, &state->dsec_cmd); if (ret != 0) return ret; /* * Wait for send * * Eutelsat spec: * >15ms delay + (XXX determine if FW does this, see set_tone) * 13.5ms per byte + * >15ms delay + * 12.5ms burst + * >15ms delay (XXX determine if FW does this, see set_tone) */ msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + ((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60)); return 0; } /* Send DiSEqC burst */ static int cx24116_diseqc_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst) { struct cx24116_state *state = fe->demodulator_priv; int ret; dprintk("%s(%d) toneburst=%d\n", __func__, burst, toneburst); /* DiSEqC burst */ if (burst == SEC_MINI_A) state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_A; else if (burst == SEC_MINI_B) state->dsec_cmd.args[CX24116_DISEQC_BURST] = CX24116_DISEQC_MINI_B; else return -EINVAL; /* DiSEqC toneburst */ if (toneburst != CX24116_DISEQC_MESGCACHE) /* Burst is cached */ return 0; /* Burst is to be sent with cached message */ /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Wait for voltage/min repeat delay */ msleep(100); /* Command */ ret = cx24116_cmd_execute(fe, &state->dsec_cmd); if (ret != 0) return ret; /* * Wait for send * * Eutelsat spec: * >15ms delay + (XXX determine if FW does this, see set_tone) * 13.5ms per byte + * >15ms delay + * 12.5ms burst + * >15ms delay (XXX determine if FW does this, see set_tone) */ msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + 60); return 0; } static void cx24116_release(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); kfree(state); } static struct dvb_frontend_ops cx24116_ops; struct dvb_frontend *cx24116_attach(const struct cx24116_config *config, struct i2c_adapter *i2c) { struct cx24116_state *state = NULL; int ret; dprintk("%s\n", __func__); /* allocate memory for the internal state */ 
state = kzalloc(sizeof(struct cx24116_state), GFP_KERNEL); if (state == NULL) goto error1; state->config = config; state->i2c = i2c; /* check if the demod is present */ ret = (cx24116_readreg(state, 0xFF) << 8) | cx24116_readreg(state, 0xFE); if (ret != 0x0501) { printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n"); goto error2; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &cx24116_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error2: kfree(state); error1: return NULL; } EXPORT_SYMBOL(cx24116_attach); /* * Initialise or wake up device * * Power config will reset and load initial firmware if required */ static int cx24116_initfe(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; dprintk("%s()\n", __func__); /* Power on */ cx24116_writereg(state, 0xe0, 0); cx24116_writereg(state, 0xe1, 0); cx24116_writereg(state, 0xea, 0); /* Firmware CMD 36: Power config */ cmd.args[0x00] = CMD_TUNERSLEEP; cmd.args[0x01] = 0; cmd.len = 0x02; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; ret = cx24116_diseqc_init(fe); if (ret != 0) return ret; /* HVR-4000 needs this */ return cx24116_set_voltage(fe, SEC_VOLTAGE_13); } /* * Put device to sleep */ static int cx24116_sleep(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct cx24116_cmd cmd; int ret; dprintk("%s()\n", __func__); /* Firmware CMD 36: Power config */ cmd.args[0x00] = CMD_TUNERSLEEP; cmd.args[0x01] = 1; cmd.len = 0x02; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Power off (Shutdown clocks) */ cx24116_writereg(state, 0xea, 0xff); cx24116_writereg(state, 0xe1, 1); cx24116_writereg(state, 0xe0, 1); return 0; } /* dvb-core told us to tune, the tv property cache will be complete, * it's safe for is to pull values and use them for tuning purposes. 
*/ static int cx24116_set_frontend(struct dvb_frontend *fe) { struct cx24116_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx24116_cmd cmd; fe_status_t tunerstat; int i, status, ret, retune = 1; dprintk("%s()\n", __func__); switch (c->delivery_system) { case SYS_DVBS: dprintk("%s: DVB-S delivery system selected\n", __func__); /* Only QPSK is supported for DVB-S */ if (c->modulation != QPSK) { dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } /* Pilot doesn't exist in DVB-S, turn bit off */ state->dnxt.pilot_val = CX24116_PILOT_OFF; /* DVB-S only supports 0.35 */ if (c->rolloff != ROLLOFF_35) { dprintk("%s: unsupported rolloff selected (%d)\n", __func__, c->rolloff); return -EOPNOTSUPP; } state->dnxt.rolloff_val = CX24116_ROLLOFF_035; break; case SYS_DVBS2: dprintk("%s: DVB-S2 delivery system selected\n", __func__); /* * NBC 8PSK/QPSK with DVB-S is supported for DVB-S2, * but not hardware auto detection */ if (c->modulation != PSK_8 && c->modulation != QPSK) { dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } switch (c->pilot) { case PILOT_AUTO: /* Not supported but emulated */ state->dnxt.pilot_val = (c->modulation == QPSK) ? 
CX24116_PILOT_OFF : CX24116_PILOT_ON; retune++; break; case PILOT_OFF: state->dnxt.pilot_val = CX24116_PILOT_OFF; break; case PILOT_ON: state->dnxt.pilot_val = CX24116_PILOT_ON; break; default: dprintk("%s: unsupported pilot mode selected (%d)\n", __func__, c->pilot); return -EOPNOTSUPP; } switch (c->rolloff) { case ROLLOFF_20: state->dnxt.rolloff_val = CX24116_ROLLOFF_020; break; case ROLLOFF_25: state->dnxt.rolloff_val = CX24116_ROLLOFF_025; break; case ROLLOFF_35: state->dnxt.rolloff_val = CX24116_ROLLOFF_035; break; case ROLLOFF_AUTO: /* Rolloff must be explicit */ default: dprintk("%s: unsupported rolloff selected (%d)\n", __func__, c->rolloff); return -EOPNOTSUPP; } break; default: dprintk("%s: unsupported delivery system selected (%d)\n", __func__, c->delivery_system); return -EOPNOTSUPP; } state->dnxt.delsys = c->delivery_system; state->dnxt.modulation = c->modulation; state->dnxt.frequency = c->frequency; state->dnxt.pilot = c->pilot; state->dnxt.rolloff = c->rolloff; ret = cx24116_set_inversion(state, c->inversion); if (ret != 0) return ret; /* FEC_NONE/AUTO for DVB-S2 is not supported and detected here */ ret = cx24116_set_fec(state, c->delivery_system, c->modulation, c->fec_inner); if (ret != 0) return ret; ret = cx24116_set_symbolrate(state, c->symbol_rate); if (ret != 0) return ret; /* discard the 'current' tuning parameters and prepare to tune */ cx24116_clone_params(fe); dprintk("%s: delsys = %d\n", __func__, state->dcur.delsys); dprintk("%s: modulation = %d\n", __func__, state->dcur.modulation); dprintk("%s: frequency = %d\n", __func__, state->dcur.frequency); dprintk("%s: pilot = %d (val = 0x%02x)\n", __func__, state->dcur.pilot, state->dcur.pilot_val); dprintk("%s: retune = %d\n", __func__, retune); dprintk("%s: rolloff = %d (val = 0x%02x)\n", __func__, state->dcur.rolloff, state->dcur.rolloff_val); dprintk("%s: symbol_rate = %d\n", __func__, state->dcur.symbol_rate); dprintk("%s: FEC = %d (mask/val = 0x%02x/0x%02x)\n", __func__, state->dcur.fec, 
state->dcur.fec_mask, state->dcur.fec_val); dprintk("%s: Inversion = %d (val = 0x%02x)\n", __func__, state->dcur.inversion, state->dcur.inversion_val); /* This is also done in advise/acquire on HVR4000 but not on LITE */ if (state->config->set_ts_params) state->config->set_ts_params(fe, 0); /* Set/Reset B/W */ cmd.args[0x00] = CMD_BANDWIDTH; cmd.args[0x01] = 0x01; cmd.len = 0x02; ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) return ret; /* Prepare a tune request */ cmd.args[0x00] = CMD_TUNEREQUEST; /* Frequency */ cmd.args[0x01] = (state->dcur.frequency & 0xff0000) >> 16; cmd.args[0x02] = (state->dcur.frequency & 0x00ff00) >> 8; cmd.args[0x03] = (state->dcur.frequency & 0x0000ff); /* Symbol Rate */ cmd.args[0x04] = ((state->dcur.symbol_rate / 1000) & 0xff00) >> 8; cmd.args[0x05] = ((state->dcur.symbol_rate / 1000) & 0x00ff); /* Automatic Inversion */ cmd.args[0x06] = state->dcur.inversion_val; /* Modulation / FEC / Pilot */ cmd.args[0x07] = state->dcur.fec_val | state->dcur.pilot_val; cmd.args[0x08] = CX24116_SEARCH_RANGE_KHZ >> 8; cmd.args[0x09] = CX24116_SEARCH_RANGE_KHZ & 0xff; cmd.args[0x0a] = 0x00; cmd.args[0x0b] = 0x00; cmd.args[0x0c] = state->dcur.rolloff_val; cmd.args[0x0d] = state->dcur.fec_mask; if (state->dcur.symbol_rate > 30000000) { cmd.args[0x0e] = 0x04; cmd.args[0x0f] = 0x00; cmd.args[0x10] = 0x01; cmd.args[0x11] = 0x77; cmd.args[0x12] = 0x36; cx24116_writereg(state, CX24116_REG_CLKDIV, 0x44); cx24116_writereg(state, CX24116_REG_RATEDIV, 0x01); } else { cmd.args[0x0e] = 0x06; cmd.args[0x0f] = 0x00; cmd.args[0x10] = 0x00; cmd.args[0x11] = 0xFA; cmd.args[0x12] = 0x24; cx24116_writereg(state, CX24116_REG_CLKDIV, 0x46); cx24116_writereg(state, CX24116_REG_RATEDIV, 0x00); } cmd.len = 0x13; /* We need to support pilot and non-pilot tuning in the * driver automatically. This is a workaround for because * the demod does not support autodetect. 
*/ do { /* Reset status register */ status = cx24116_readreg(state, CX24116_REG_SSTATUS) & CX24116_SIGNAL_MASK; cx24116_writereg(state, CX24116_REG_SSTATUS, status); /* Tune */ ret = cx24116_cmd_execute(fe, &cmd); if (ret != 0) break; /* * Wait for up to 500 ms before retrying * * If we are able to tune then generally it occurs within 100ms. * If it takes longer, try a different toneburst setting. */ for (i = 0; i < 50 ; i++) { cx24116_read_status(fe, &tunerstat); status = tunerstat & (FE_HAS_SIGNAL | FE_HAS_SYNC); if (status == (FE_HAS_SIGNAL | FE_HAS_SYNC)) { dprintk("%s: Tuned\n", __func__); goto tuned; } msleep(10); } dprintk("%s: Not tuned\n", __func__); /* Toggle pilot bit when in auto-pilot */ if (state->dcur.pilot == PILOT_AUTO) cmd.args[0x07] ^= CX24116_PILOT_ON; } while (--retune); tuned: /* Set/Reset B/W */ cmd.args[0x00] = CMD_BANDWIDTH; cmd.args[0x01] = 0x00; cmd.len = 0x02; return cx24116_cmd_execute(fe, &cmd); } static int cx24116_tune(struct dvb_frontend *fe, bool re_tune, unsigned int mode_flags, unsigned int *delay, fe_status_t *status) { /* * It is safe to discard "params" here, as the DVB core will sync * fe->dtv_property_cache with fepriv->parameters_in, where the * DVBv3 params are stored. The only practical usage for it indicate * that re-tuning is needed, e. g. (fepriv->state & FESTATE_RETUNE) is * true. 
*/ *delay = HZ / 5; if (re_tune) { int ret = cx24116_set_frontend(fe); if (ret) return ret; } return cx24116_read_status(fe, status); } static int cx24116_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } static struct dvb_frontend_ops cx24116_ops = { .delsys = { SYS_DVBS, SYS_DVBS2 }, .info = { .name = "Conexant CX24116/CX24118", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 1011, /* kHz for QPSK frontends */ .frequency_tolerance = 5000, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_2G_MODULATION | FE_CAN_QPSK | FE_CAN_RECOVER }, .release = cx24116_release, .init = cx24116_initfe, .sleep = cx24116_sleep, .read_status = cx24116_read_status, .read_ber = cx24116_read_ber, .read_signal_strength = cx24116_read_signal_strength, .read_snr = cx24116_read_snr, .read_ucblocks = cx24116_read_ucblocks, .set_tone = cx24116_set_tone, .set_voltage = cx24116_set_voltage, .diseqc_send_master_cmd = cx24116_send_diseqc_msg, .diseqc_send_burst = cx24116_diseqc_send_burst, .get_frontend_algo = cx24116_get_algo, .tune = cx24116_tune, .set_frontend = cx24116_set_frontend, }; MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24116/cx24118 hardware"); MODULE_AUTHOR("Steven Toth"); MODULE_LICENSE("GPL");
static int cx24116_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *d) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; /* Dump DiSEqC message */ if (debug) { printk(KERN_INFO "cx24116: %s(", __func__); for (i = 0 ; i < d->msg_len ;) { printk(KERN_INFO "0x%02x", d->msg[i]); if (++i < d->msg_len) printk(KERN_INFO ", "); } printk(") toneburst=%d\n", toneburst); } /* Validate length */ if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS)) return -EINVAL; /* DiSEqC message */ for (i = 0; i < d->msg_len; i++) state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i]; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS + state->dsec_cmd.args[CX24116_DISEQC_MSGLEN]; /* DiSEqC toneburst */ if (toneburst == CX24116_DISEQC_MESGCACHE) /* Message is cached */ return 0; else if (toneburst == CX24116_DISEQC_TONEOFF) /* Message is sent without burst */ state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0; else if (toneburst == CX24116_DISEQC_TONECACHE) { /* * Message is sent with derived else cached burst * * WRITE PORT GROUP COMMAND 38 * * 0/A/A: E0 10 38 F0..F3 * 1/B/B: E0 10 38 F4..F7 * 2/C/A: E0 10 38 F8..FB * 3/D/B: E0 10 38 FC..FF * * databyte[3]= 8421:8421 * ABCD:WXYZ * CLR :SET * * WX= PORT SELECT 0..3 (X=TONEBURST) * Y = VOLTAGE (0=13V, 1=18V) * Z = BAND (0=LOW, 1=HIGH(22K)) */ if (d->msg_len >= 4 && d->msg[2] == 0x38) state->dsec_cmd.args[CX24116_DISEQC_BURST] = ((d->msg[3] & 4) >> 2); if (debug) dprintk("%s burst=%d\n", __func__, state->dsec_cmd.args[CX24116_DISEQC_BURST]); } /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Wait for voltage/min repeat delay */ msleep(100); /* Command */ ret = cx24116_cmd_execute(fe, &state->dsec_cmd); if (ret != 0) return ret; /* * Wait for send * * Eutelsat spec: * >15ms delay + (XXX determine if FW does this, see set_tone) * 13.5ms per byte + * >15ms delay + * 
12.5ms burst + * >15ms delay (XXX determine if FW does this, see set_tone) */ msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + ((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60)); return 0; }
static int cx24116_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *d) { struct cx24116_state *state = fe->demodulator_priv; int i, ret; /* Validate length */ if (d->msg_len > sizeof(d->msg)) return -EINVAL; /* Dump DiSEqC message */ if (debug) { printk(KERN_INFO "cx24116: %s(", __func__); for (i = 0 ; i < d->msg_len ;) { printk(KERN_INFO "0x%02x", d->msg[i]); if (++i < d->msg_len) printk(KERN_INFO ", "); } printk(") toneburst=%d\n", toneburst); } /* DiSEqC message */ for (i = 0; i < d->msg_len; i++) state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i]; /* DiSEqC message length */ state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] = d->msg_len; /* Command length */ state->dsec_cmd.len = CX24116_DISEQC_MSGOFS + state->dsec_cmd.args[CX24116_DISEQC_MSGLEN]; /* DiSEqC toneburst */ if (toneburst == CX24116_DISEQC_MESGCACHE) /* Message is cached */ return 0; else if (toneburst == CX24116_DISEQC_TONEOFF) /* Message is sent without burst */ state->dsec_cmd.args[CX24116_DISEQC_BURST] = 0; else if (toneburst == CX24116_DISEQC_TONECACHE) { /* * Message is sent with derived else cached burst * * WRITE PORT GROUP COMMAND 38 * * 0/A/A: E0 10 38 F0..F3 * 1/B/B: E0 10 38 F4..F7 * 2/C/A: E0 10 38 F8..FB * 3/D/B: E0 10 38 FC..FF * * databyte[3]= 8421:8421 * ABCD:WXYZ * CLR :SET * * WX= PORT SELECT 0..3 (X=TONEBURST) * Y = VOLTAGE (0=13V, 1=18V) * Z = BAND (0=LOW, 1=HIGH(22K)) */ if (d->msg_len >= 4 && d->msg[2] == 0x38) state->dsec_cmd.args[CX24116_DISEQC_BURST] = ((d->msg[3] & 4) >> 2); if (debug) dprintk("%s burst=%d\n", __func__, state->dsec_cmd.args[CX24116_DISEQC_BURST]); } /* Wait for LNB ready */ ret = cx24116_wait_for_lnb(fe); if (ret != 0) return ret; /* Wait for voltage/min repeat delay */ msleep(100); /* Command */ ret = cx24116_cmd_execute(fe, &state->dsec_cmd); if (ret != 0) return ret; /* * Wait for send * * Eutelsat spec: * >15ms delay + (XXX determine if FW does this, see set_tone) * 13.5ms per byte + * >15ms delay + * 12.5ms burst + * >15ms 
delay (XXX determine if FW does this, see set_tone) */ msleep((state->dsec_cmd.args[CX24116_DISEQC_MSGLEN] << 4) + ((toneburst == CX24116_DISEQC_TONEOFF) ? 30 : 60)); return 0; }
{'added': [(966, '\t/* Validate length */'), (967, '\tif (d->msg_len > sizeof(d->msg))'), (968, ' return -EINVAL;'), (969, '')], 'deleted': [(977, '\t/* Validate length */'), (978, '\tif (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))'), (979, '\t\treturn -EINVAL;'), (980, '')]}
4
4
918
6,034
44
343
15
https://github.com/torvalds/linux
CVE-2015-9289
CWE-125
1,637
wasm.cc
C++
Envoy::Extensions::Common::Wasm::Context::onRequestBody
#include "extensions/common/wasm/wasm.h" #include <stdio.h> #include <limits> #include <memory> #include <string> #include "envoy/common/exception.h" #include "envoy/config/wasm/v2/wasm.pb.validate.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/local_info/local_info.h" #include "envoy/server/wasm.h" #include "envoy/thread_local/thread_local.h" #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" #include "common/common/base64.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/logger.h" #include "common/config/datasource.h" #include "common/http/header_map_impl.h" #include "common/http/message_impl.h" #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/common/wasm/wasm_state.h" #include "extensions/common/wasm/well_known_names.h" #include "extensions/filters/common/expr/context.h" #include "absl/base/casts.h" #include "absl/container/flat_hash_map.h" #include "absl/container/node_hash_map.h" #include "absl/synchronization/mutex.h" #include "eval/eval/field_access.h" #include "eval/eval/field_backed_list_impl.h" #include "eval/eval/field_backed_map_impl.h" #include "eval/public/cel_value.h" #include "openssl/bytestring.h" #include "openssl/hmac.h" #include "openssl/sha.h" namespace Envoy { namespace Extensions { namespace Common { namespace Wasm { // Any currently executing Wasm call context. #define WASM_CONTEXT(_c) \ (ContextOrEffectiveContext(static_cast<Context*>((void)_c, current_context_))) // The id of the context which should be used for calls out of the VM in place of current_context_ // above. namespace { // TODO: move to utils during upstreaming. 
std::string base64Sha256(absl::string_view data) { std::vector<uint8_t> digest(SHA256_DIGEST_LENGTH); EVP_MD_CTX* ctx(EVP_MD_CTX_new()); auto rc = EVP_DigestInit(ctx, EVP_sha256()); RELEASE_ASSERT(rc == 1, "Failed to init digest context"); rc = EVP_DigestUpdate(ctx, data.data(), data.size()); RELEASE_ASSERT(rc == 1, "Failed to update digest"); rc = EVP_DigestFinal(ctx, digest.data(), nullptr); RELEASE_ASSERT(rc == 1, "Failed to finalize digest"); EVP_MD_CTX_free(ctx); return Base64::encode(reinterpret_cast<const char*>(&digest[0]), digest.size()); } inline Word wasmResultToWord(WasmResult r) { return Word(static_cast<uint64_t>(r)); } inline uint32_t convertWordToUint32(Word w) { return static_cast<uint32_t>(w.u64_); } // Convert a function of the form Word(Word...) to one of the form uint32_t(uint32_t...). template <typename F, F* fn> struct ConvertFunctionWordToUint32 { static void convertFunctionWordToUint32() {} }; template <typename R, typename... Args, auto (*F)(Args...)->R> struct ConvertFunctionWordToUint32<R(Args...), F> { static auto convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... args) { return convertWordToUint32(F(std::forward<Args>(args)...)); } }; template <typename... Args, auto (*F)(Args...)->void> struct ConvertFunctionWordToUint32<void(Args...), F> { static void convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... 
args) { F(std::forward<Args>(args)...); } }; class SharedData { public: WasmResult get(absl::string_view vm_id, const absl::string_view key, std::pair<std::string, uint32_t>* result) { absl::ReaderMutexLock l(&mutex); auto map = data.find(vm_id); if (map == data.end()) { return WasmResult::NotFound; } auto it = map->second.find(key); if (it != map->second.end()) { *result = it->second; return WasmResult::Ok; } return WasmResult::NotFound; } WasmResult set(absl::string_view vm_id, absl::string_view key, absl::string_view value, uint32_t cas) { absl::WriterMutexLock l(&mutex); absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>* map; auto map_it = data.find(vm_id); if (map_it == data.end()) { map = &data[vm_id]; } else { map = &map_it->second; } auto it = map->find(key); if (it != map->end()) { if (cas && cas != it->second.second) { return WasmResult::CasMismatch; } it->second = std::make_pair(std::string(value), nextCas()); } else { map->emplace(key, std::make_pair(std::string(value), nextCas())); } return WasmResult::Ok; } uint32_t registerQueue(absl::string_view vm_id, absl::string_view queue_name, uint32_t context_id, Event::Dispatcher& dispatcher) { absl::WriterMutexLock l(&mutex); auto key = std::make_pair(std::string(vm_id), std::string(queue_name)); auto it = queue_tokens.insert(std::make_pair(key, static_cast<uint32_t>(0))); if (it.second) { it.first->second = nextQueueToken(); queue_token_set.insert(it.first->second); } uint32_t token = it.first->second; auto& q = queues[token]; q.vm_id = std::string(vm_id); q.context_id = context_id; q.dispatcher = &dispatcher; // Preserve any existing data. return token; } uint32_t resolveQueue(absl::string_view vm_id, absl::string_view queue_name) { absl::WriterMutexLock l(&mutex); auto key = std::make_pair(std::string(vm_id), std::string(queue_name)); auto it = queue_tokens.find(key); if (it != queue_tokens.end()) { return it->second; } return 0; // N.B. zero indicates that the queue was not found. 
} WasmResult dequeue(uint32_t token, std::string* data) { absl::ReaderMutexLock l(&mutex); auto it = queues.find(token); if (it == queues.end()) { return WasmResult::NotFound; } if (it->second.queue.empty()) { return WasmResult::Empty; } *data = it->second.queue.front(); it->second.queue.pop_front(); return WasmResult::Ok; } WasmResult enqueue(uint32_t token, absl::string_view value) { absl::WriterMutexLock l(&mutex); auto it = queues.find(token); if (it == queues.end()) { return WasmResult::NotFound; } it->second.queue.push_back(std::string(value)); auto vm_id = it->second.vm_id; auto context_id = it->second.context_id; it->second.dispatcher->post([vm_id, context_id, token] { auto wasm = getThreadLocalWasmPtr(vm_id); if (wasm) { wasm->queueReady(context_id, token); } }); return WasmResult::Ok; } uint32_t nextCas() { auto result = cas; cas++; if (!cas) { // 0 is not a valid CAS value. cas++; } return result; } private: uint32_t nextQueueToken() { while (true) { uint32_t token = next_queue_token++; if (token == 0) { continue; // 0 is an illegal token. } if (queue_token_set.find(token) == queue_token_set.end()) { return token; } } } struct Queue { std::string vm_id; uint32_t context_id; Event::Dispatcher* dispatcher; std::deque<std::string> queue; }; absl::Mutex mutex; uint32_t cas = 1; uint32_t next_queue_token = 1; absl::node_hash_map<std::string, absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>> data; absl::node_hash_map<uint32_t, Queue> queues; struct pair_hash { template <class T1, class T2> std::size_t operator()(const std::pair<T1, T2>& pair) const { return std::hash<T1>()(pair.first) ^ std::hash<T2>()(pair.second); } }; absl::flat_hash_map<std::pair<std::string, std::string>, uint32_t, pair_hash> queue_tokens; absl::flat_hash_set<uint32_t> queue_token_set; }; SharedData global_shared_data; // Map from Wasm ID to the local Wasm instance. 
thread_local absl::flat_hash_map<std::string, std::weak_ptr<Wasm>> local_wasms; const std::string INLINE_STRING = "<inline>"; template <typename Pairs> size_t pairsSize(const Pairs& result) { size_t size = 4; // number of headers for (auto& p : result) { size += 8; // size of key, size of value size += p.first.size() + 1; // null terminated key size += p.second.size() + 1; // null terminated value } return size; } template <typename Pairs> void marshalPairs(const Pairs& result, char* buffer) { char* b = buffer; *reinterpret_cast<uint32_t*>(b) = result.size(); b += sizeof(uint32_t); for (auto& p : result) { *reinterpret_cast<uint32_t*>(b) = p.first.size(); b += sizeof(uint32_t); *reinterpret_cast<uint32_t*>(b) = p.second.size(); b += sizeof(uint32_t); } for (auto& p : result) { memcpy(b, p.first.data(), p.first.size()); b += p.first.size(); *b++ = 0; memcpy(b, p.second.data(), p.second.size()); b += p.second.size(); *b++ = 0; } } Pairs toPairs(absl::string_view buffer) { Pairs result; const char* b = buffer.data(); if (buffer.size() < sizeof(uint32_t)) { return {}; } auto size = *reinterpret_cast<const uint32_t*>(b); b += sizeof(uint32_t); if (sizeof(uint32_t) + size * 2 * sizeof(uint32_t) > buffer.size()) { return {}; } result.resize(size); for (uint32_t i = 0; i < size; i++) { result[i].first = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); result[i].second = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); } for (auto& p : result) { p.first = absl::string_view(b, p.first.size()); b += p.first.size() + 1; p.second = absl::string_view(b, p.second.size()); b += p.second.size() + 1; } return result; } template <typename Pairs> bool getPairs(Context* context, const Pairs& result, uint64_t ptr_ptr, uint64_t size_ptr) { if (result.empty()) { return context->wasm()->copyToPointerSize("", ptr_ptr, size_ptr); } uint64_t size = pairsSize(result); uint64_t ptr; char* buffer = 
static_cast<char*>(context->wasm()->allocMemory(size, &ptr)); marshalPairs(result, buffer); if (!context->wasmVm()->setWord(ptr_ptr, Word(ptr))) { return false; } if (!context->wasmVm()->setWord(size_ptr, Word(size))) { return false; } return true; } void exportPairs(Context* context, const Pairs& pairs, uint64_t* ptr_ptr, uint64_t* size_ptr) { if (pairs.empty()) { *ptr_ptr = 0; *size_ptr = 0; return; } uint64_t size = pairsSize(pairs); char* buffer = static_cast<char*>(context->wasm()->allocMemory(size, ptr_ptr)); marshalPairs(pairs, buffer); *size_ptr = size; } Http::HeaderMapPtr buildHeaderMapFromPairs(const Pairs& pairs) { auto map = std::make_unique<Http::HeaderMapImpl>(); for (auto& p : pairs) { // Note: because of the lack of a string_view interface for addCopy and // the lack of an interface to add an entry with an empty value and return // the entry, there is no efficient way to prevent either a double copy // of the valueor a double lookup of the entry. map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); } return map; } const uint8_t* decodeVarint(const uint8_t* pos, const uint8_t* end, uint32_t* out) { uint32_t ret = 0; int shift = 0; while (pos < end && (*pos & 0x80)) { ret |= (*pos & 0x7f) << shift; shift += 7; pos++; } if (pos < end) { ret |= *pos << shift; pos++; } *out = ret; return pos; } Context* ContextOrEffectiveContext(Context* context) { if (effective_context_id_ == 0) { return context; } auto effective_context = context->wasm()->getContext(effective_context_id_); if (effective_context) { return effective_context; } // The effective_context_id_ no longer exists, revert to the true context. return context; } } // namespace // Test support. 
uint32_t resolveQueueForTest(absl::string_view vm_id, absl::string_view queue_name) { return global_shared_data.resolveQueue(vm_id, queue_name); } // // HTTP Handlers // Word setPropertyHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->setProperty(key.value(), value.value())); } // Generic selector Word getPropertyHandler(void* raw_context, Word path_ptr, Word path_size, Word value_ptr_ptr, Word value_size_ptr) { auto context = WASM_CONTEXT(raw_context); auto path = context->wasmVm()->getMemory(path_ptr.u64_, path_size.u64_); if (!path.has_value()) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } std::string value; auto result = context->getProperty(path.value(), &value); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(value, value_ptr_ptr.u64_, value_size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } // Continue/Reply/Route Word continueRequestHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->continueRequest(); return wasmResultToWord(WasmResult::Ok); } Word continueResponseHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->continueResponse(); return wasmResultToWord(WasmResult::Ok); } Word sendLocalResponseHandler(void* raw_context, Word response_code, Word response_code_details_ptr, Word response_code_details_size, Word body_ptr, Word body_size, Word additional_response_header_pairs_ptr, Word additional_response_header_pairs_size, Word grpc_code) { auto context = WASM_CONTEXT(raw_context); auto details = 
context->wasmVm()->getMemory(response_code_details_ptr.u64_, response_code_details_size.u64_); auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_); auto additional_response_header_pairs = context->wasmVm()->getMemory( additional_response_header_pairs_ptr.u64_, additional_response_header_pairs_size.u64_); if (!details || !body || !additional_response_header_pairs) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto additional_headers = toPairs(additional_response_header_pairs.value()); auto modify_headers = [additional_headers](Http::HeaderMap& headers) { for (auto& p : additional_headers) { const Http::LowerCaseString lower_key(std::move(std::string(p.first))); headers.addCopy(lower_key, std::string(p.second)); } }; auto grpc_status = static_cast<Grpc::Status::GrpcStatus>(grpc_code.u64_); auto grpc_status_opt = (grpc_status != Grpc::Status::GrpcStatus::InvalidCode) ? absl::optional<Grpc::Status::GrpcStatus>(grpc_status) : absl::optional<Grpc::Status::GrpcStatus>(); context->sendLocalResponse(static_cast<Envoy::Http::Code>(response_code.u64_), body.value(), modify_headers, grpc_status_opt, details.value()); return wasmResultToWord(WasmResult::Ok); } Word setEffectiveContextHandler(void* raw_context, Word context_id) { auto context = WASM_CONTEXT(raw_context); uint32_t cid = static_cast<uint32_t>(context_id.u64_); auto c = context->wasm()->getContext(cid); if (!c) { return wasmResultToWord(WasmResult::BadArgument); } effective_context_id_ = cid; return wasmResultToWord(WasmResult::Ok); } Word clearRouteCacheHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->clearRouteCache(); return wasmResultToWord(WasmResult::Ok); } // SharedData Word getSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr_ptr, Word value_size_ptr, Word cas_ptr) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } std::pair<std::string, uint32_t> data; WasmResult result = context->getSharedData(key.value(), &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(data.first, value_ptr_ptr.u64_, value_size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } if (!context->wasmVm()->setMemory(cas_ptr.u64_, sizeof(uint32_t), &data.second)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word setSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, Word value_size, Word cas) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->setSharedData(key.value(), value.value(), cas.u64_)); } Word registerSharedQueueHandler(void* raw_context, Word queue_name_ptr, Word queue_name_size, Word token_ptr) { auto context = WASM_CONTEXT(raw_context); auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_); if (!queue_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t token = context->registerSharedQueue(queue_name.value()); if (!context->wasm()->setDatatype(token_ptr.u64_, token)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word dequeueSharedQueueHandler(void* raw_context, Word token, Word data_ptr_ptr, Word data_size_ptr) { auto context = WASM_CONTEXT(raw_context); std::string data; WasmResult result = context->dequeueSharedQueue(token.u32(), &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(data, data_ptr_ptr.u64_, data_size_ptr.u64_)) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word resolveSharedQueueHandler(void* raw_context, Word vm_id_ptr, Word vm_id_size, Word queue_name_ptr, Word queue_name_size, Word token_ptr) { auto context = WASM_CONTEXT(raw_context); auto vm_id = context->wasmVm()->getMemory(vm_id_ptr.u64_, vm_id_size.u64_); auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_); if (!vm_id || !queue_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t token = 0; auto result = context->resolveSharedQueue(vm_id.value(), queue_name.value(), &token); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(token_ptr.u64_, token)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word enqueueSharedQueueHandler(void* raw_context, Word token, Word data_ptr, Word data_size) { auto context = WASM_CONTEXT(raw_context); auto data = context->wasmVm()->getMemory(data_ptr.u64_, data_size.u64_); if (!data) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->enqueueSharedQueue(token.u32(), data.value())); } // Network Word getDownstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); absl::string_view data; auto result = context->getDownstreamDataBufferBytes(start.u64_, length.u64_, &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word getUpstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); absl::string_view data; auto result = context->getUpstreamDataBufferBytes(start.u64_, length.u64_, &data); if (result != WasmResult::Ok) { 
return wasmResultToWord(result); } context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } // Header/Trailer/Metadata Maps Word addHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->addHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value()); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr_ptr, Word value_size_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto result = context->getHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value()); context->wasm()->copyToPointerSize(result, value_ptr_ptr.u64_, value_size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word replaceHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } 
context->replaceHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value()); return wasmResultToWord(WasmResult::Ok); } Word removeHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->removeHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value()); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto result = context->getHeaderMapPairs(static_cast<HeaderMapType>(type.u64_)); if (!getPairs(context, result, ptr_ptr.u64_, size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word setHeaderMapPairsHandler(void* raw_context, Word type, Word ptr, Word size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto data = context->wasmVm()->getMemory(ptr.u64_, size.u64_); if (!data) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->setHeaderMapPairs(static_cast<HeaderMapType>(type.u64_), toPairs(data.value())); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapSizeHandler(void* raw_context, Word type, Word result_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); size_t result = context->getHeaderMapSize(static_cast<HeaderMapType>(type.u64_)); if 
(!context->wasmVm()->setWord(result_ptr.u64_, Word(result))) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } // Body Buffer Word getRequestBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getRequestBodyBufferBytes(start.u64_, length.u64_); context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word getResponseBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getResponseBodyBufferBytes(start.u64_, length.u64_); context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word httpCallHandler(void* raw_context, Word uri_ptr, Word uri_size, Word header_pairs_ptr, Word header_pairs_size, Word body_ptr, Word body_size, Word trailer_pairs_ptr, Word trailer_pairs_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto uri = context->wasmVm()->getMemory(uri_ptr.u64_, uri_size.u64_); auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_); auto header_pairs = context->wasmVm()->getMemory(header_pairs_ptr.u64_, header_pairs_size.u64_); auto trailer_pairs = context->wasmVm()->getMemory(trailer_pairs_ptr.u64_, trailer_pairs_size.u64_); if (!uri || !body || !header_pairs || !trailer_pairs) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto headers = toPairs(header_pairs.value()); auto trailers = toPairs(trailer_pairs.value()); return context->httpCall(uri.value(), headers, body.value(), trailers, timeout_milliseconds.u64_); } Word defineMetricHandler(void* raw_context, Word metric_type, Word name_ptr, Word name_size, Word metric_id_ptr) { if (metric_type.u64_ > static_cast<uint64_t>(Context::MetricType::Max)) { 
return 0; } auto context = WASM_CONTEXT(raw_context); auto name = context->wasmVm()->getMemory(name_ptr.u64_, name_size.u64_); if (!name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t metric_id = 0; auto result = context->defineMetric(static_cast<Context::MetricType>(metric_type.u64_), name.value(), &metric_id); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(metric_id_ptr.u64_, metric_id)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word incrementMetricHandler(void* raw_context, Word metric_id, int64_t offset) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->incrementMetric(metric_id.u64_, offset)); } Word recordMetricHandler(void* raw_context, Word metric_id, uint64_t value) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->recordMetric(metric_id.u64_, value)); } Word getMetricHandler(void* raw_context, Word metric_id, Word result_uint64_ptr) { auto context = WASM_CONTEXT(raw_context); uint64_t value = 0; auto result = context->getMetric(metric_id.u64_, &value); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, value)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word grpcCallHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size, Word request_ptr, Word request_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); auto request = 
context->wasmVm()->getMemory(request_ptr.u64_, request_size.u64_); if (!service || !service_name || !method_name || !request) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcCall(service_proto, service_name.value(), method_name.value(), request.value(), std::chrono::milliseconds(timeout_milliseconds.u64_)); } Word grpcStreamHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); if (!service || !service_name || !method_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcStream(service_proto, service_name.value(), method_name.value()); } Word grpcCancelHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcCancel(token.u64_)); } Word grpcCloseHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcClose(token.u64_)); } Word grpcSendHandler(void* raw_context, Word token, Word message_ptr, Word message_size, Word end_stream) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto message = context->wasmVm()->getMemory(message_ptr.u64_, message_size.u64_); if (!message) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->grpcSend(token.u64_, message.value(), end_stream.u64_)); } // Implementation of writev-like() syscall that redirects stdout/stderr to Envoy logs. Word writevImpl(void* raw_context, Word fd, Word iovs, Word iovs_len, Word* nwritten_ptr) { auto context = WASM_CONTEXT(raw_context); // Read syscall args. spdlog::level::level_enum log_level; switch (fd.u64_) { case 1 /* stdout */: log_level = spdlog::level::info; break; case 2 /* stderr */: log_level = spdlog::level::err; break; default: return 8; // __WASI_EBADF } std::string s; for (size_t i = 0; i < iovs_len.u64_; i++) { auto memslice = context->wasmVm()->getMemory(iovs.u64_ + i * 2 * sizeof(uint32_t), 2 * sizeof(uint32_t)); if (!memslice) { return 21; // __WASI_EFAULT } const uint32_t* iovec = reinterpret_cast<const uint32_t*>(memslice.value().data()); if (iovec[1] /* buf_len */) { memslice = context->wasmVm()->getMemory(iovec[0] /* buf */, iovec[1] /* buf_len */); if (!memslice) { return 21; // __WASI_EFAULT } s.append(memslice.value().data(), memslice.value().size()); } } size_t written = s.size(); if (written) { // Remove trailing newline from the logs, if any. 
if (s[written - 1] == '\n') { s.erase(written - 1); } context->scriptLog(log_level, s); } *nwritten_ptr = Word(written); return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_fd_write(_wasi_fd_t fd, const _wasi_ciovec_t *iov, size_t iovs_len, size_t* // nwritten); Word wasi_unstable_fd_writeHandler(void* raw_context, Word fd, Word iovs, Word iovs_len, Word nwritten_ptr) { auto context = WASM_CONTEXT(raw_context); Word nwritten(0); auto result = writevImpl(raw_context, fd, iovs, iovs_len, &nwritten); if (result.u64_ != 0) { // __WASI_ESUCCESS return result; } if (!context->wasmVm()->setWord(nwritten_ptr.u64_, Word(nwritten))) { return 21; // __WASI_EFAULT } return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_fd_seek(__wasi_fd_t fd, __wasi_filedelta_t offset, __wasi_whence_t // whence,__wasi_filesize_t *newoffset); Word wasi_unstable_fd_seekHandler(void*, Word, int64_t, Word, Word) { throw WasmException("wasi_unstable fd_seek"); } // __wasi_errno_t __wasi_fd_close(__wasi_fd_t fd); Word wasi_unstable_fd_closeHandler(void*, Word) { throw WasmException("wasi_unstable fd_close"); } // __wasi_errno_t __wasi_environ_get(char **environ, char *environ_buf); Word wasi_unstable_environ_getHandler(void*, Word, Word) { return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_environ_sizes_get(size_t *environ_count, size_t *environ_buf_size); Word wasi_unstable_environ_sizes_getHandler(void* raw_context, Word count_ptr, Word buf_size_ptr) { auto context = WASM_CONTEXT(raw_context); if (!context->wasmVm()->setWord(count_ptr.u64_, Word(0))) { return 21; // __WASI_EFAULT } if (!context->wasmVm()->setWord(buf_size_ptr.u64_, Word(0))) { return 21; // __WASI_EFAULT } return 0; // __WASI_ESUCCESS } // void __wasi_proc_exit(__wasi_exitcode_t rval); void wasi_unstable_proc_exitHandler(void*, Word) { throw WasmException("wasi_unstable proc_exit"); } Word pthread_equalHandler(void*, Word left, Word right) { return left.u64_ == right.u64_; } Word setTickPeriodMillisecondsHandler(void* 
raw_context, Word tick_period_milliseconds) { return wasmResultToWord( WASM_CONTEXT(raw_context) ->setTickPeriod(std::chrono::milliseconds(tick_period_milliseconds.u64_))); } Word getCurrentTimeNanosecondsHandler(void* raw_context, Word result_uint64_ptr) { auto context = WASM_CONTEXT(raw_context); uint64_t result = context->getCurrentTimeNanoseconds(); if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, result)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word logHandler(void* raw_context, Word level, Word address, Word size) { auto context = WASM_CONTEXT(raw_context); auto message = context->wasmVm()->getMemory(address.u64_, size.u64_); if (!message) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->scriptLog(static_cast<spdlog::level::level_enum>(level.u64_), message.value()); return wasmResultToWord(WasmResult::Ok); } WasmResult Context::setTickPeriod(std::chrono::milliseconds tick_period) { wasm_->setTickPeriod(root_context_id_ ? 
root_context_id_ : id_, tick_period); return WasmResult::Ok; } uint64_t Context::getCurrentTimeNanoseconds() { return std::chrono::duration_cast<std::chrono::nanoseconds>( wasm_->time_source_.systemTime().time_since_epoch()) .count(); } // TODO(https://github.com/google/cel-cpp/issues/38) bool exportValue(const Filters::Common::Expr::CelValue& value, ProtobufWkt::Value* out) { using Filters::Common::Expr::CelValue; switch (value.type()) { case CelValue::Type::kBool: out->set_bool_value(value.BoolOrDie()); return true; case CelValue::Type::kInt64: out->set_number_value(static_cast<double>(value.Int64OrDie())); return true; case CelValue::Type::kUint64: out->set_number_value(static_cast<double>(value.Uint64OrDie())); return true; case CelValue::Type::kDouble: out->set_number_value(value.DoubleOrDie()); return true; case CelValue::Type::kString: *out->mutable_string_value() = std::string(value.StringOrDie().value()); return true; case CelValue::Type::kBytes: *out->mutable_string_value() = std::string(value.BytesOrDie().value()); return true; case CelValue::Type::kMessage: { if (value.IsNull()) { out->set_null_value(ProtobufWkt::NullValue::NULL_VALUE); } else { auto msg = value.MessageOrDie(); out->mutable_struct_value()->MergeFrom(*msg); } return true; } case CelValue::Type::kDuration: *out->mutable_string_value() = absl::FormatDuration(value.DurationOrDie()); return true; case CelValue::Type::kTimestamp: *out->mutable_string_value() = absl::FormatTime(value.TimestampOrDie()); return true; case CelValue::Type::kList: { auto list = value.ListOrDie(); auto values = out->mutable_list_value(); for (int i = 0; i < list->size(); i++) { if (!exportValue((*list)[i], values->add_values())) { return false; } } return true; } case CelValue::Type::kMap: { auto map = value.MapOrDie(); auto list = map->ListKeys(); auto struct_obj = out->mutable_struct_value(); for (int i = 0; i < list->size(); i++) { ProtobufWkt::Value field_key; if (!exportValue((*list)[i], &field_key)) { return 
false; } ProtobufWkt::Value field_value; if (!exportValue((*map)[(*list)[i]].value(), &field_value)) { return false; } (*struct_obj->mutable_fields())[field_key.string_value()] = field_value; } return true; } default: // do nothing for special values return false; } return false; } WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result) { using Filters::Common::Expr::CelValue; switch (value.type()) { case CelValue::Type::kMessage: if (value.MessageOrDie() != nullptr && value.MessageOrDie()->SerializeToString(result)) { return WasmResult::Ok; } return WasmResult::SerializationFailure; case CelValue::Type::kString: result->assign(value.StringOrDie().value().data(), value.StringOrDie().value().size()); return WasmResult::Ok; case CelValue::Type::kBytes: result->assign(value.BytesOrDie().value().data(), value.BytesOrDie().value().size()); return WasmResult::Ok; case CelValue::Type::kInt64: { auto out = value.Int64OrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(int64_t)); return WasmResult::Ok; } case CelValue::Type::kUint64: { auto out = value.Uint64OrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(uint64_t)); return WasmResult::Ok; } case CelValue::Type::kDouble: { auto out = value.DoubleOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(double)); return WasmResult::Ok; } case CelValue::Type::kBool: { auto out = value.BoolOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(bool)); return WasmResult::Ok; } case CelValue::Type::kDuration: { auto out = value.DurationOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Duration)); return WasmResult::Ok; } case CelValue::Type::kTimestamp: { auto out = value.TimestampOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Time)); return WasmResult::Ok; } case CelValue::Type::kMap: { ProtobufWkt::Value out; if (!exportValue(value, &out)) { return WasmResult::SerializationFailure; } if 
(!out.struct_value().SerializeToString(result)) { return WasmResult::SerializationFailure; } return WasmResult::Ok; } case CelValue::Type::kList: { ProtobufWkt::Value out; if (!exportValue(value, &out)) { return WasmResult::SerializationFailure; } if (!out.list_value().SerializeToString(result)) { return WasmResult::SerializationFailure; } return WasmResult::Ok; } default: return WasmResult::SerializationFailure; } return WasmResult::SerializationFailure; } // An expression wrapper for the WASM state class WasmStateWrapper : public google::api::expr::runtime::CelMap { public: WasmStateWrapper(const StreamInfo::FilterState& filter_state) : filter_state_(filter_state) {} absl::optional<google::api::expr::runtime::CelValue> operator[](google::api::expr::runtime::CelValue key) const override { if (!key.IsString()) { return {}; } auto value = key.StringOrDie().value(); try { const WasmState& result = filter_state_.getDataReadOnly<WasmState>(value); return google::api::expr::runtime::CelValue::CreateBytes(&result.value()); } catch (const EnvoyException& e) { return {}; } } int size() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } bool empty() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } const google::api::expr::runtime::CelList* ListKeys() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } private: const StreamInfo::FilterState& filter_state_; }; WasmResult Context::getProperty(absl::string_view path, std::string* result) { using google::api::expr::runtime::CelValue; using google::api::expr::runtime::FieldBackedListImpl; using google::api::expr::runtime::FieldBackedMapImpl; bool first = true; CelValue value; Protobuf::Arena arena; const StreamInfo::StreamInfo* info = getConstRequestStreamInfo(); const auto request_headers = request_headers_ ? request_headers_ : access_log_request_headers_; const auto response_headers = response_headers_ ? response_headers_ : access_log_response_headers_; const auto response_trailers = response_trailers_ ? 
response_trailers_ : access_log_response_trailers_; size_t start = 0; while (true) { if (start >= path.size()) { break; } size_t end = path.find('\0', start); if (end == absl::string_view::npos) { // this should not happen unless the input string is not null-terminated in the view return WasmResult::ParseFailure; } auto part = path.substr(start, end - start); start = end + 1; // top-level ident if (first) { first = false; if (part == "metadata") { value = CelValue::CreateMessage(&info->dynamicMetadata(), &arena); } else if (part == "filter_state") { value = CelValue::CreateMap( Protobuf::Arena::Create<WasmStateWrapper>(&arena, info->filterState())); } else if (part == "request") { value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::RequestWrapper>( &arena, request_headers, *info)); } else if (part == "response") { value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::ResponseWrapper>( &arena, response_headers, response_trailers, *info)); } else if (part == "connection") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::ConnectionWrapper>(&arena, *info)); } else if (part == "upstream") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::UpstreamWrapper>(&arena, *info)); } else if (part == "node") { value = CelValue::CreateMessage(&plugin_->local_info_.node(), &arena); } else if (part == "source") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, false)); } else if (part == "destination") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, true)); } else if (part == "request_protocol") { // TODO(kyessenov) move this upstream to CEL context if (info->protocol().has_value()) { value = CelValue::CreateString(&Http::Utility::getProtocolString(info->protocol().value())); } else { return WasmResult::NotFound; } // Reflective accessors } else if (part == 
"listener_direction") { value = CelValue::CreateInt64(plugin_->direction_); } else if (part == "listener_metadata") { value = CelValue::CreateMessage(plugin_->listener_metadata_, &arena); } else if (part == "cluster_name" && info->upstreamHost() != nullptr) { value = CelValue::CreateString(&info->upstreamHost()->cluster().name()); } else if (part == "cluster_metadata" && info->upstreamHost() != nullptr) { value = CelValue::CreateMessage(&info->upstreamHost()->cluster().metadata(), &arena); } else if (part == "route_name") { value = CelValue::CreateString(&info->getRouteName()); } else if (part == "route_metadata" && info->routeEntry() != nullptr) { value = CelValue::CreateMessage(&info->routeEntry()->metadata(), &arena); } else { return WasmResult::NotFound; } continue; } if (value.IsMap()) { auto& map = *value.MapOrDie(); auto field = map[CelValue::CreateString(part)]; if (field.has_value()) { value = field.value(); } else { return {}; } } else if (value.IsMessage()) { auto msg = value.MessageOrDie(); if (msg == nullptr) { return {}; } const Protobuf::Descriptor* desc = msg->GetDescriptor(); const Protobuf::FieldDescriptor* field_desc = desc->FindFieldByName(std::string(part)); if (field_desc == nullptr) { return {}; } else if (field_desc->is_map()) { value = CelValue::CreateMap( Protobuf::Arena::Create<FieldBackedMapImpl>(&arena, msg, field_desc, &arena)); } else if (field_desc->is_repeated()) { value = CelValue::CreateList( Protobuf::Arena::Create<FieldBackedListImpl>(&arena, msg, field_desc, &arena)); } else { auto status = google::api::expr::runtime::CreateValueFromSingleField(msg, field_desc, &arena, &value); if (!status.ok()) { return {}; } } } else { return {}; } } return serializeValue(value, result); } // Shared Data WasmResult Context::getSharedData(absl::string_view key, std::pair<std::string, uint32_t>* data) { return global_shared_data.get(wasm_->vm_id(), key, data); } WasmResult Context::setSharedData(absl::string_view key, absl::string_view value, 
uint32_t cas) { return global_shared_data.set(wasm_->vm_id(), key, value, cas); } // Shared Queue uint32_t Context::registerSharedQueue(absl::string_view queue_name) { // Get the id of the root context if this is a stream context because onQueueReady is on the root. return global_shared_data.registerQueue( wasm_->vm_id(), queue_name, isRootContext() ? id_ : root_context_id_, wasm_->dispatcher_); } WasmResult Context::resolveSharedQueue(absl::string_view vm_id, absl::string_view queue_name, uint32_t* token_ptr) { uint32_t token = global_shared_data.resolveQueue(vm_id, queue_name); if (!token) { return WasmResult::NotFound; } *token_ptr = token; return WasmResult::Ok; } WasmResult Context::dequeueSharedQueue(uint32_t token, std::string* data) { return global_shared_data.dequeue(token, data); } WasmResult Context::enqueueSharedQueue(uint32_t token, absl::string_view value) { return global_shared_data.enqueue(token, value); } // Network bytes. WasmResult Context::getDownstreamDataBufferBytes(uint32_t start, uint32_t length, absl::string_view* data) { if (!network_downstream_data_buffer_) return WasmResult::NotFound; if (network_downstream_data_buffer_->length() < static_cast<uint64_t>(start + length)) return WasmResult::InvalidMemoryAccess; *data = absl::string_view( static_cast<char*>(network_downstream_data_buffer_->linearize(start + length)) + start, length); return WasmResult::Ok; } WasmResult Context::getUpstreamDataBufferBytes(uint32_t start, uint32_t length, absl::string_view* data) { if (!network_upstream_data_buffer_) return WasmResult::NotFound; if (network_upstream_data_buffer_->length() < static_cast<uint64_t>(start + length)) return WasmResult::InvalidMemoryAccess; *data = absl::string_view( static_cast<char*>(network_upstream_data_buffer_->linearize(start + length)) + start, length); return WasmResult::Ok; } // Header/Trailer/Metadata Maps. 
Http::HeaderMap* Context::getMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: return request_headers_; case HeaderMapType::RequestTrailers: return request_trailers_; case HeaderMapType::ResponseHeaders: return response_headers_; case HeaderMapType::ResponseTrailers: return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; default: return nullptr; } } const Http::HeaderMap* Context::getConstMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: if (access_log_request_headers_) { return access_log_request_headers_; } return request_headers_; case HeaderMapType::RequestTrailers: if (access_log_request_trailers_) { return access_log_request_trailers_; } return request_trailers_; case HeaderMapType::ResponseHeaders: if (access_log_response_headers_) { return access_log_response_headers_; } return response_headers_; case HeaderMapType::ResponseTrailers: if (access_log_response_trailers_) { return access_log_response_trailers_; } return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; case HeaderMapType::GrpcReceiveInitialMetadata: return grpc_receive_initial_metadata_.get(); case HeaderMapType::GrpcReceiveTrailingMetadata: return grpc_receive_trailing_metadata_.get(); } return nullptr; } void Context::addHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->addCopy(lower_key, std::string(value)); } absl::string_view Context::getHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getConstMap(type); if (!map) { return ""; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (!entry) { return ""; } return entry->value().getStringView(); } Pairs headerMapToPairs(const Http::HeaderMap* map) { if 
(!map) { return {}; } Pairs pairs; pairs.reserve(map->size()); map->iterate( [](const Http::HeaderEntry& header, void* pairs) -> Http::HeaderMap::Iterate { (static_cast<Pairs*>(pairs)) ->push_back( std::make_pair(header.key().getStringView(), header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &pairs); return pairs; } Pairs Context::getHeaderMapPairs(HeaderMapType type) { return headerMapToPairs(getConstMap(type)); } void Context::setHeaderMapPairs(HeaderMapType type, const Pairs& pairs) { auto map = getMap(type); if (!map) { return; } std::vector<std::string> keys; map->iterate( [](const Http::HeaderEntry& header, void* keys) -> Http::HeaderMap::Iterate { (static_cast<std::vector<std::string>*>(keys)) ->push_back(std::string(header.key().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &keys); for (auto& k : keys) { const Http::LowerCaseString lower_key(std::move(k)); map->remove(lower_key); } for (auto& p : pairs) { const Http::LowerCaseString lower_key(std::move(std::string(p.first))); map->addCopy(lower_key, std::move(std::string(p.second))); } } void Context::removeHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->remove(lower_key); } void Context::replaceHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (entry != nullptr) { entry->value(value.data(), value.size()); } else { map->addCopy(lower_key, std::string(value)); } } uint32_t Context::getHeaderMapSize(HeaderMapType type) { auto map = getMap(type); if (!map) { return 0; } return map->refreshByteSize(); } // Body Buffer absl::string_view Context::getRequestBodyBufferBytes(uint32_t start, uint32_t length) { if (!requestBodyBuffer_) { return ""; } if 
(requestBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(requestBodyBuffer_->linearize(start + length)) + start, length); } absl::string_view Context::getResponseBodyBufferBytes(uint32_t start, uint32_t length) { if (!responseBodyBuffer_) { return ""; } if (responseBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(responseBodyBuffer_->linearize(start + length)) + start, length); } // Async call via HTTP uint32_t Context::httpCall(absl::string_view cluster, const Pairs& request_headers, absl::string_view request_body, const Pairs& request_trailers, int timeout_milliseconds) { if (timeout_milliseconds < 0) { return 0; } auto cluster_string = std::string(cluster); if (clusterManager().get(cluster_string) == nullptr) { return 0; } Http::MessagePtr message(new Http::RequestMessageImpl(buildHeaderMapFromPairs(request_headers))); // Check that we were provided certain headers. if (message->headers().Path() == nullptr || message->headers().Method() == nullptr || message->headers().Host() == nullptr) { return 0; } if (!request_body.empty()) { message->body().reset(new Buffer::OwnedImpl(request_body.data(), request_body.size())); message->headers().insertContentLength().value(request_body.size()); } if (request_trailers.size() > 0) { message->trailers(buildHeaderMapFromPairs(request_trailers)); } absl::optional<std::chrono::milliseconds> timeout; if (timeout_milliseconds > 0) { timeout = std::chrono::milliseconds(timeout_milliseconds); } auto token = next_http_call_token_++; // Handle rollover. 
for (;;) { if (token == 0) { token = next_http_call_token_++; } if (!http_request_.count(token)) { break; } token = next_http_call_token_++; } auto& handler = http_request_[token]; // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::RequestOptions options; options.setTimeout(timeout); Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); auto http_request = clusterManager() .httpAsyncClientForCluster(cluster_string) .send(std::move(message), handler, options); if (!http_request) { http_request_.erase(token); return 0; } handler.context = this; handler.token = token; handler.request = http_request; return token; } uint32_t Context::grpcCall(const envoy::api::v2::core::GrpcService& grpc_service, absl::string_view service_name, absl::string_view method_name, absl::string_view request, const absl::optional<std::chrono::milliseconds>& timeout) { auto token = next_grpc_token_++; if (IsGrpcStreamToken(token)) { token = next_grpc_token_++; } // Handle rollover. 
for (;;) { if (token == 0) { token = next_grpc_token_ += 2; } if (!grpc_call_request_.count(token)) { break; } token = next_grpc_token_ += 2; } auto& handler = grpc_call_request_[token]; handler.context = this; handler.token = token; auto grpc_client = clusterManager() .grpcAsyncClientManager() .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */) ->create(); // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::RequestOptions options; options.setTimeout(timeout); Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call // returns. Consequently the grpc_request is not available. Attempting to close or reset from that // callback will fail. auto grpc_request = grpc_client->sendRaw(service_name, method_name, std::make_unique<Buffer::OwnedImpl>(request), handler, Tracing::NullSpan::instance(), options); if (!grpc_request) { grpc_call_request_.erase(token); return 0; } handler.client = std::move(grpc_client); handler.request = grpc_request; return token; } uint32_t Context::grpcStream(const envoy::api::v2::core::GrpcService& grpc_service, absl::string_view service_name, absl::string_view method_name) { auto token = next_grpc_token_++; if (IsGrpcCallToken(token)) { token = next_grpc_token_++; } // Handle rollover. 
for (;;) { if (token == 0) { token = next_grpc_token_ += 2; } if (!grpc_stream_.count(token)) { break; } token = next_grpc_token_ += 2; } auto& handler = grpc_stream_[token]; handler.context = this; handler.token = token; auto grpc_client = clusterManager() .grpcAsyncClientManager() .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */) ->create(); // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::StreamOptions options; Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call // returns. Consequently the grpc_stream is not available. Attempting to close or reset from that // callback will fail. auto grpc_stream = grpc_client->startRaw(service_name, method_name, handler, options); if (!grpc_stream) { grpc_stream_.erase(token); return 0; } handler.client = std::move(grpc_client); handler.stream = grpc_stream; return token; } void Context::httpRespond(const Pairs& response_headers, absl::string_view body, const Pairs& response_trailers) { (void)response_headers; (void)body; (void)response_trailers; } // StreamInfo const StreamInfo::StreamInfo* Context::getConstRequestStreamInfo() const { if (encoder_callbacks_) { return &encoder_callbacks_->streamInfo(); } else if (decoder_callbacks_) { return &decoder_callbacks_->streamInfo(); } else if (access_log_stream_info_) { return access_log_stream_info_; } return nullptr; } StreamInfo::StreamInfo* Context::getRequestStreamInfo() const { if (encoder_callbacks_) { return &encoder_callbacks_->streamInfo(); } else if (decoder_callbacks_) { return &decoder_callbacks_->streamInfo(); } return nullptr; } WasmResult Context::setProperty(absl::string_view key, absl::string_view serialized_value) { auto* 
stream_info = getRequestStreamInfo(); if (!stream_info) { return WasmResult::NotFound; } stream_info->filterState().setData(key, std::make_unique<WasmState>(serialized_value), StreamInfo::FilterState::StateType::Mutable); return WasmResult::Ok; } void Context::scriptLog(spdlog::level::level_enum level, absl::string_view message) { switch (level) { case spdlog::level::trace: ENVOY_LOG(trace, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::debug: ENVOY_LOG(debug, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::info: ENVOY_LOG(info, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::warn: ENVOY_LOG(warn, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::err: ENVOY_LOG(error, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::critical: ENVOY_LOG(critical, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::off: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } } // Connection bool Context::isSsl() { return decoder_callbacks_->connection()->ssl() != nullptr; } // // Calls into the WASM code. 
//

// Called at VM start for a root context: copies the root id and the vm
// configuration into VM memory and invokes the VM's onStart export (if any).
void Context::onStart(absl::string_view root_id, absl::string_view vm_configuration) {
  if (wasm_->onStart_) {
    auto root_id_addr = wasm_->copyString(root_id);
    auto config_addr = wasm_->copyString(vm_configuration);
    wasm_->onStart_(this, id_, root_id_addr, root_id.size(), config_addr, vm_configuration.size());
  }
}

// Lets the VM validate a configuration blob. A VM that does not export a
// validator implicitly accepts any configuration.
bool Context::validateConfiguration(absl::string_view configuration) {
  if (!wasm_->validateConfiguration_) {
    return true;
  }
  auto address = wasm_->copyString(configuration);
  // Non-zero return from the VM means "valid".
  return wasm_->validateConfiguration_(this, id_, address, configuration.size()).u64_ != 0;
}

// Delivers a configuration blob to the VM; non-zero return means accepted.
bool Context::onConfigure(absl::string_view configuration) {
  if (!wasm_->onConfigure_) {
    return true;
  }
  auto address = wasm_->copyString(configuration);
  return wasm_->onConfigure_(this, id_, address, configuration.size()).u64_ != 0;
}

// Asks the VM to create the in-VM context object paired with this host context.
void Context::onCreate(uint32_t root_context_id) {
  if (wasm_->onCreate_) {
    wasm_->onCreate_(this, id_, root_context_id);
  }
}

// Network filter entry point for a new downstream connection. Creates the
// in-VM context first, then consults the VM's onNewConnection export.
Network::FilterStatus Context::onNetworkNewConnection() {
  onCreate(root_context_id_);
  if (!wasm_->onNewConnection_) {
    return Network::FilterStatus::Continue;
  }
  // Zero from the VM means Continue; anything else stops iteration.
  if (wasm_->onNewConnection_(this, id_).u64_ == 0) {
    return Network::FilterStatus::Continue;
  }
  return Network::FilterStatus::StopIteration;
}

// Forwards downstream (read path) data events to the VM.
Network::FilterStatus Context::onDownstreamData(int data_length, bool end_of_stream) {
  if (!wasm_->onDownstreamData_) {
    return Network::FilterStatus::Continue;
  }
  auto result = wasm_->onDownstreamData_(this, id_, static_cast<uint32_t>(data_length),
                                         static_cast<uint32_t>(end_of_stream));
  // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
  return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
}

// Forwards upstream (write path) data events to the VM.
Network::FilterStatus Context::onUpstreamData(int data_length, bool end_of_stream) {
  if (!wasm_->onUpstreamData_) {
    return Network::FilterStatus::Continue;
  }
  auto result = wasm_->onUpstreamData_(this, id_, static_cast<uint32_t>(data_length),
                                       static_cast<uint32_t>(end_of_stream));
  // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
  return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
}

// Notifies the VM which side (local/remote/unknown) closed the downstream
// connection.
void Context::onDownstreamConnectionClose(PeerType peer_type) {
  if (wasm_->onDownstreamConnectionClose_) {
    wasm_->onDownstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type));
  }
}

// Same as above for the upstream connection.
void Context::onUpstreamConnectionClose(PeerType peer_type) {
  if (wasm_->onUpstreamConnectionClose_) {
    wasm_->onUpstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type));
  }
}

// First HTTP callback on a stream: the in-VM context is created lazily here.
Http::FilterHeadersStatus Context::onRequestHeaders() {
  onCreate(root_context_id_);
  in_vm_context_created_ = true;
  // Store the stream id so that we can use it in log().
auto& stream_info = decoder_callbacks_->streamInfo(); auto& metadata = (*stream_info.dynamicMetadata() .mutable_filter_metadata())[HttpFilters::HttpFilterNames::get().Wasm]; (*metadata.mutable_fields())[std::string("_stream_id_" + std::string(root_id()))] .set_number_value(id_); if (!wasm_->onRequestHeaders_) { return Http::FilterHeadersStatus::Continue; } if (wasm_->onRequestHeaders_(this, id_).u64_ == 0) { return Http::FilterHeadersStatus::Continue; } return Http::FilterHeadersStatus::StopIteration; } Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) { if (!wasm_->onRequestBody_) { return Http::FilterDataStatus::Continue; } switch (wasm_ ->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length), static_cast<uint32_t>(end_of_stream)) .u64_) { case 0: return Http::FilterDataStatus::Continue; case 1: return Http::FilterDataStatus::StopIterationAndBuffer; case 2: return Http::FilterDataStatus::StopIterationAndWatermark; default: return Http::FilterDataStatus::StopIterationNoBuffer; } } Http::FilterTrailersStatus Context::onRequestTrailers() { if (!wasm_->onRequestTrailers_) { return Http::FilterTrailersStatus::Continue; } if (wasm_->onRequestTrailers_(this, id_).u64_ == 0) { return Http::FilterTrailersStatus::Continue; } return Http::FilterTrailersStatus::StopIteration; } Http::FilterMetadataStatus Context::onRequestMetadata() { if (!wasm_->onRequestMetadata_) { return Http::FilterMetadataStatus::Continue; } if (wasm_->onRequestMetadata_(this, id_).u64_ == 0) { return Http::FilterMetadataStatus::Continue; } return Http::FilterMetadataStatus::Continue; // This is currently the only return code. } Http::FilterHeadersStatus Context::onResponseHeaders() { if (!in_vm_context_created_) { // If the request is invalid then onRequestHeaders() will not be called and neither will // onCreate() then sendLocalReply be called which will call this function. 
In this case we // need to call onCreate() so that the Context inside the VM is created before the // onResponseHeaders() call. onCreate(root_context_id_); in_vm_context_created_ = true; } if (!wasm_->onResponseHeaders_) { return Http::FilterHeadersStatus::Continue; } if (wasm_->onResponseHeaders_(this, id_).u64_ == 0) { return Http::FilterHeadersStatus::Continue; } return Http::FilterHeadersStatus::StopIteration; } Http::FilterDataStatus Context::onResponseBody(int body_buffer_length, bool end_of_stream) { if (!wasm_->onResponseBody_) { return Http::FilterDataStatus::Continue; } switch (wasm_ ->onResponseBody_(this, id_, static_cast<uint32_t>(body_buffer_length), static_cast<uint32_t>(end_of_stream)) .u64_) { case 0: return Http::FilterDataStatus::Continue; case 1: return Http::FilterDataStatus::StopIterationAndBuffer; case 2: return Http::FilterDataStatus::StopIterationAndWatermark; default: return Http::FilterDataStatus::StopIterationNoBuffer; } } Http::FilterTrailersStatus Context::onResponseTrailers() { if (!wasm_->onResponseTrailers_) { return Http::FilterTrailersStatus::Continue; } if (wasm_->onResponseTrailers_(this, id_).u64_ == 0) { return Http::FilterTrailersStatus::Continue; } return Http::FilterTrailersStatus::StopIteration; } Http::FilterMetadataStatus Context::onResponseMetadata() { if (!wasm_->onResponseMetadata_) { return Http::FilterMetadataStatus::Continue; } if (wasm_->onResponseMetadata_(this, id_).u64_ == 0) { return Http::FilterMetadataStatus::Continue; } return Http::FilterMetadataStatus::Continue; // This is currently the only return code. 
} void Context::onHttpCallResponse(uint32_t token, const Pairs& response_headers, absl::string_view response_body, const Pairs& response_trailers) { if (!wasm_->onHttpCallResponse_) { return; } uint64_t headers_ptr, headers_size, trailers_ptr, trailers_size; exportPairs(this, response_headers, &headers_ptr, &headers_size); exportPairs(this, response_trailers, &trailers_ptr, &trailers_size); auto body_ptr = wasm_->copyString(response_body); auto body_size = response_body.size(); wasm_->onHttpCallResponse_(this, id_, token, headers_ptr, headers_size, body_ptr, body_size, trailers_ptr, trailers_size); } void Context::onQueueReady(uint32_t token) { if (wasm_->onQueueReady_) { wasm_->onQueueReady_(this, id_, token); } } void Context::onGrpcCreateInitialMetadata(uint32_t token, Http::HeaderMap& metadata) { if (!wasm_->onGrpcCreateInitialMetadata_) { return; } grpc_create_initial_metadata_ = &metadata; wasm_->onGrpcCreateInitialMetadata_(this, id_, token); grpc_create_initial_metadata_ = nullptr; } void Context::onGrpcReceiveInitialMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) { if (!wasm_->onGrpcReceiveInitialMetadata_) { return; } grpc_receive_initial_metadata_ = std::move(metadata); wasm_->onGrpcReceiveInitialMetadata_(this, id_, token); grpc_receive_initial_metadata_ = nullptr; } void Context::onGrpcReceiveTrailingMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) { if (!wasm_->onGrpcReceiveTrailingMetadata_) { return; } grpc_receive_trailing_metadata_ = std::move(metadata); wasm_->onGrpcReceiveTrailingMetadata_(this, id_, token); grpc_receive_trailing_metadata_ = nullptr; } WasmResult Context::defineMetric(MetricType type, absl::string_view name, uint32_t* metric_id_ptr) { auto stat_name = wasm_->stat_name_set_->getDynamic(name); if (type == MetricType::Counter) { auto id = wasm_->nextCounterMetricId(); auto c = &wasm_->scope_->counterFromStatName(stat_name); wasm_->counters_.emplace(id, c); *metric_id_ptr = id; return WasmResult::Ok; } else if (type 
== MetricType::Gauge) { auto id = wasm_->nextGaugeMetricId(); auto g = &wasm_->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate); wasm_->gauges_.emplace(id, g); *metric_id_ptr = id; return WasmResult::Ok; } else if (type == MetricType::Histogram) { auto id = wasm_->nextHistogramMetricId(); auto h = &wasm_->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified); wasm_->histograms_.emplace(id, h); *metric_id_ptr = id; return WasmResult::Ok; } return WasmResult::BadArgument; } WasmResult Context::incrementMetric(uint32_t metric_id, int64_t offset) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { if (offset > 0) { it->second->add(offset); return WasmResult::Ok; } else { return WasmResult::BadArgument; } return WasmResult::NotFound; } } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { if (offset > 0) { it->second->add(offset); return WasmResult::Ok; } else { it->second->sub(-offset); return WasmResult::Ok; } } return WasmResult::NotFound; } return WasmResult::BadArgument; } WasmResult Context::recordMetric(uint32_t metric_id, uint64_t value) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { it->second->add(value); return WasmResult::Ok; } } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { it->second->set(value); return WasmResult::Ok; } } else if (type == MetricType::Histogram) { auto it = wasm_->histograms_.find(metric_id); if (it != wasm_->histograms_.end()) { it->second->recordValue(value); return WasmResult::Ok; } } return WasmResult::NotFound; } WasmResult Context::getMetric(uint32_t metric_id, uint64_t* 
result_uint64_ptr) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { *result_uint64_ptr = it->second->value(); return WasmResult::Ok; } return WasmResult::NotFound; } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { *result_uint64_ptr = it->second->value(); return WasmResult::Ok; } return WasmResult::NotFound; } return WasmResult::BadArgument; } Wasm::Wasm(absl::string_view vm, absl::string_view vm_id, absl::string_view vm_configuration, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher) : vm_id_(std::string(vm_id)), wasm_vm_(Common::Wasm::createWasmVm(vm)), plugin_(plugin), scope_(scope), cluster_manager_(cluster_manager), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), vm_configuration_(vm_configuration), stat_name_set_(scope_->symbolTable().makeSet("Wasm").release()) {} std::string Plugin::makeLogPrefix() const { std::string prefix; if (!name_.empty()) { prefix = prefix + " " + name_; } if (!root_id_.empty()) { prefix = prefix + " " + std::string(root_id_); } if (vm_id_.empty()) { prefix = prefix + " " + std::string(vm_id_); } return prefix; } Context::~Context() { // Cancel any outstanding requests. for (auto& p : http_request_) { p.second.request->cancel(); } for (auto& p : grpc_call_request_) { p.second.request->cancel(); } for (auto& p : grpc_stream_) { p.second.stream->resetStream(); } // Do not remove vm or root contexts which have the same lifetime as wasm_. 
if (root_context_id_) { wasm_->contexts_.erase(id_); } } void Wasm::registerCallbacks() { #define _REGISTER(_fn) \ wasm_vm_->registerCallback( \ "env", #_fn, &_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(_fn##Handler), \ _fn##Handler>::convertFunctionWordToUint32) if (is_emscripten_) { _REGISTER(pthread_equal); } #undef _REGISTER #define _REGISTER_WASI(_fn) \ wasm_vm_->registerCallback( \ "wasi_unstable", #_fn, &wasi_unstable_##_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(wasi_unstable_##_fn##Handler), \ wasi_unstable_##_fn##Handler>::convertFunctionWordToUint32) if (is_emscripten_) { _REGISTER_WASI(fd_write); _REGISTER_WASI(fd_seek); _REGISTER_WASI(fd_close); _REGISTER_WASI(environ_get); _REGISTER_WASI(environ_sizes_get); _REGISTER_WASI(proc_exit); } #undef _REGISTER_WASI // Calls with the "proxy_" prefix. #define _REGISTER_PROXY(_fn) \ wasm_vm_->registerCallback( \ "env", "proxy_" #_fn, &_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(_fn##Handler), \ _fn##Handler>::convertFunctionWordToUint32); _REGISTER_PROXY(log); _REGISTER_PROXY(setProperty); _REGISTER_PROXY(getProperty); _REGISTER_PROXY(continueRequest); _REGISTER_PROXY(continueResponse); _REGISTER_PROXY(sendLocalResponse); _REGISTER_PROXY(clearRouteCache); _REGISTER_PROXY(getSharedData); _REGISTER_PROXY(setSharedData); _REGISTER_PROXY(registerSharedQueue); _REGISTER_PROXY(resolveSharedQueue); _REGISTER_PROXY(dequeueSharedQueue); _REGISTER_PROXY(enqueueSharedQueue); _REGISTER_PROXY(getDownstreamDataBufferBytes); _REGISTER_PROXY(getUpstreamDataBufferBytes); _REGISTER_PROXY(getHeaderMapValue); _REGISTER_PROXY(addHeaderMapValue); _REGISTER_PROXY(replaceHeaderMapValue); _REGISTER_PROXY(removeHeaderMapValue); _REGISTER_PROXY(getHeaderMapPairs); _REGISTER_PROXY(setHeaderMapPairs); _REGISTER_PROXY(getHeaderMapSize); _REGISTER_PROXY(getRequestBodyBufferBytes); _REGISTER_PROXY(getResponseBodyBufferBytes); _REGISTER_PROXY(httpCall); _REGISTER_PROXY(grpcCall); _REGISTER_PROXY(grpcStream); 
_REGISTER_PROXY(grpcClose); _REGISTER_PROXY(grpcCancel); _REGISTER_PROXY(grpcSend); _REGISTER_PROXY(setTickPeriodMilliseconds); _REGISTER_PROXY(getCurrentTimeNanoseconds); _REGISTER_PROXY(defineMetric); _REGISTER_PROXY(incrementMetric); _REGISTER_PROXY(recordMetric); _REGISTER_PROXY(getMetric); _REGISTER_PROXY(setEffectiveContext); #undef _REGISTER_PROXY } void Wasm::getFunctions() { #define _GET(_fn) wasm_vm_->getFunction(#_fn, &_fn##_); _GET(_start); _GET(__wasm_call_ctors); _GET(malloc); _GET(free); #undef _GET #define _GET_PROXY(_fn) wasm_vm_->getFunction("proxy_" #_fn, &_fn##_); _GET_PROXY(validateConfiguration); _GET_PROXY(onStart); _GET_PROXY(onConfigure); _GET_PROXY(onTick); _GET_PROXY(onCreate); _GET_PROXY(onNewConnection); _GET_PROXY(onDownstreamData); _GET_PROXY(onUpstreamData); _GET_PROXY(onDownstreamConnectionClose); _GET_PROXY(onUpstreamConnectionClose); _GET_PROXY(onRequestHeaders); _GET_PROXY(onRequestBody); _GET_PROXY(onRequestTrailers); _GET_PROXY(onRequestMetadata); _GET_PROXY(onResponseHeaders); _GET_PROXY(onResponseBody); _GET_PROXY(onResponseTrailers); _GET_PROXY(onResponseMetadata); _GET_PROXY(onHttpCallResponse); _GET_PROXY(onGrpcReceive); _GET_PROXY(onGrpcClose); _GET_PROXY(onGrpcCreateInitialMetadata); _GET_PROXY(onGrpcReceiveInitialMetadata); _GET_PROXY(onGrpcReceiveTrailingMetadata); _GET_PROXY(onQueueReady); _GET_PROXY(onDone); _GET_PROXY(onLog); _GET_PROXY(onDelete); #undef _GET_PROXY if (!malloc_ || !free_) { throw WasmException("WASM missing malloc/free"); } } Wasm::Wasm(const Wasm& wasm, Event::Dispatcher& dispatcher) : std::enable_shared_from_this<Wasm>(wasm), vm_id_(wasm.vm_id_), plugin_(wasm.plugin_), scope_(wasm.scope_), cluster_manager_(wasm.cluster_manager_), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), stat_name_set_(wasm.stat_name_set_) { if (wasm.wasmVm()->cloneable()) { wasm_vm_ = wasm.wasmVm()->clone(); vm_context_ = std::make_shared<Context>(this); getFunctions(); } else { wasm_vm_ = 
Common::Wasm::createWasmVm(wasm.wasmVm()->runtime()); if (!initialize(wasm.code(), wasm.allow_precompiled())) { throw WasmException("Failed to initialize WASM code"); } } } bool Wasm::initialize(const std::string& code, bool allow_precompiled) { if (!wasm_vm_) { return false; } // If the configured_vm_id is empty, then hash the code to create a unique vm_id. if (vm_id_.empty()) { vm_id_ = base64Sha256(code); } auto ok = wasm_vm_->load(code, allow_precompiled); if (!ok) { return false; } auto metadata = wasm_vm_->getCustomSection("emscripten_metadata"); if (!metadata.empty()) { // See https://github.com/emscripten-core/emscripten/blob/incoming/tools/shared.py#L3059 is_emscripten_ = true; auto start = reinterpret_cast<const uint8_t*>(metadata.data()); auto end = reinterpret_cast<const uint8_t*>(metadata.data() + metadata.size()); start = decodeVarint(start, end, &emscripten_metadata_major_version_); start = decodeVarint(start, end, &emscripten_metadata_minor_version_); start = decodeVarint(start, end, &emscripten_abi_major_version_); start = decodeVarint(start, end, &emscripten_abi_minor_version_); uint32_t temp; if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 1) { // metadata 0.2 - added: wasm_backend. start = decodeVarint(start, end, &temp); } start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 0) { // metadata 0.1 - added: global_base, dynamic_base, dynamictop_ptr and tempdouble_ptr. start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); decodeVarint(start, end, &temp); if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 2) { // metadata 0.3 - added: standalone_wasm. 
start = decodeVarint(start, end, &emscripten_standalone_wasm_); } } } registerCallbacks(); wasm_vm_->link(vm_id_); vm_context_ = std::make_shared<Context>(this); getFunctions(); startVm(vm_context_.get()); code_ = code; allow_precompiled_ = allow_precompiled; return true; } void Wasm::startVm(Context* root_context) { /* Call "_start" function, and fallback to "__wasm_call_ctors" if the former is not available. */ if (_start_) { _start_(root_context); } else if (__wasm_call_ctors_) { __wasm_call_ctors_(root_context); } } bool Wasm::configure(Context* root_context, absl::string_view configuration) { if (!onConfigure_) { return true; } auto address = copyString(configuration); return onConfigure_(root_context, root_context->id(), address, configuration.size()).u64_ != 0; } Context* Wasm::start() { auto root_id = plugin_->root_id_; auto it = root_contexts_.find(root_id); if (it != root_contexts_.end()) { it->second->onStart(root_id, vm_configuration()); return it->second.get(); } auto context = std::make_unique<Context>(this, root_id, plugin_); auto context_ptr = context.get(); root_contexts_[root_id] = std::move(context); context_ptr->onStart(root_id, vm_configuration()); return context_ptr; }; void Wasm::startForTesting(std::unique_ptr<Context> context) { auto context_ptr = context.get(); if (!context->wasm_) { // Initialization was delayed till the Wasm object was created. 
context->wasm_ = this; context->plugin_ = plugin_; context->id_ = allocContextId(); contexts_[context->id_] = context.get(); } root_contexts_[""] = std::move(context); context_ptr->onStart("", ""); } void Wasm::setTickPeriod(uint32_t context_id, std::chrono::milliseconds new_tick_period) { auto& tick_period = tick_period_[context_id]; auto& timer = timer_[context_id]; bool was_running = timer && tick_period.count() > 0; tick_period = new_tick_period; if (tick_period.count() > 0 && !was_running) { timer = dispatcher_.createTimer([weak = std::weak_ptr<Wasm>(shared_from_this()), context_id]() { auto shared = weak.lock(); if (shared) { shared->tickHandler(context_id); } }); timer->enableTimer(tick_period); } } void Wasm::tickHandler(uint32_t root_context_id) { auto& tick_period = tick_period_[root_context_id]; auto& timer = timer_[root_context_id]; if (onTick_) { onTick_(getContext(root_context_id), root_context_id); if (timer && tick_period.count() > 0) { timer->enableTimer(tick_period); } } } uint32_t Wasm::allocContextId() { while (true) { auto id = next_context_id_++; // Prevent reuse. 
if (contexts_.find(id) == contexts_.end()) { return id; } } } void Wasm::queueReady(uint32_t root_context_id, uint32_t token) { auto it = contexts_.find(root_context_id); if (it == contexts_.end() || !it->second->isRootContext()) { return; } it->second->onQueueReady(token); } Network::FilterStatus Context::onNewConnection() { return onNetworkNewConnection(); }; Network::FilterStatus Context::onData(Buffer::Instance& data, bool end_stream) { network_downstream_data_buffer_ = &data; auto result = onDownstreamData(data.length(), end_stream); network_downstream_data_buffer_ = nullptr; return result; } Network::FilterStatus Context::onWrite(Buffer::Instance& data, bool end_stream) { network_upstream_data_buffer_ = &data; auto result = onUpstreamData(data.length(), end_stream); network_upstream_data_buffer_ = nullptr; if (end_stream) { // This is called when seeing end_stream=true and not on an upstream connection event, // because registering for latter requires replicating the whole TCP proxy extension. onUpstreamConnectionClose(PeerType::Unknown); } return result; } void Context::onEvent(Network::ConnectionEvent event) { switch (event) { case Network::ConnectionEvent::LocalClose: onDownstreamConnectionClose(PeerType::Local); break; case Network::ConnectionEvent::RemoteClose: onDownstreamConnectionClose(PeerType::Remote); break; default: break; } } void Context::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { network_read_filter_callbacks_ = &callbacks; network_read_filter_callbacks_->connection().addConnectionCallbacks(*this); } void Context::initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) { network_write_filter_callbacks_ = &callbacks; } void Wasm::log(absl::string_view root_id, const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { // Check dynamic metadata for the id_ of the stream for this root_id. 
Context* context = nullptr; auto metadata_it = stream_info.dynamicMetadata().filter_metadata().find( HttpFilters::HttpFilterNames::get().Wasm); if (metadata_it != stream_info.dynamicMetadata().filter_metadata().end()) { auto find_id = metadata_it->second.fields().find(std::string("_stream_id_" + std::string(root_id))); if (find_id != metadata_it->second.fields().end()) { context = getContext(static_cast<uint32_t>(find_id->second.number_value())); } } if (!context) { context = getRootContext(root_id); } context->log(request_headers, response_headers, response_trailers, stream_info); } void Context::log(const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { access_log_request_headers_ = request_headers; // ? request_trailers ? access_log_response_headers_ = response_headers; access_log_response_trailers_ = response_trailers; access_log_stream_info_ = &stream_info; onLog(); access_log_request_headers_ = nullptr; // ? request_trailers ? 
access_log_response_headers_ = nullptr; access_log_response_trailers_ = nullptr; access_log_stream_info_ = nullptr; onDelete(); } void Context::onDestroy() { if (destroyed_) { return; } destroyed_ = true; onDone(); } void Context::onDone() { if (wasm_->onDone_) { wasm_->onDone_(this, id_); } } void Context::onLog() { if (wasm_->onLog_) { wasm_->onLog_(this, id_); } } void Context::onDelete() { if (wasm_->onDelete_) { wasm_->onDelete_(this, id_); } } Http::FilterHeadersStatus Context::decodeHeaders(Http::HeaderMap& headers, bool end_stream) { request_headers_ = &headers; request_end_of_stream_ = end_stream; auto result = onRequestHeaders(); request_headers_ = nullptr; return result; } Http::FilterDataStatus Context::decodeData(Buffer::Instance& data, bool end_stream) { requestBodyBuffer_ = &data; auto result = onRequestBody(data.length(), end_stream); requestBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::decodeTrailers(Http::HeaderMap& trailers) { request_trailers_ = &trailers; auto result = onRequestTrailers(); request_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::decodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onRequestMetadata(); response_metadata_ = nullptr; return result; } void Context::setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) { decoder_callbacks_ = &callbacks; } Http::FilterHeadersStatus Context::encode100ContinueHeaders(Http::HeaderMap&) { return Http::FilterHeadersStatus::Continue; } Http::FilterHeadersStatus Context::encodeHeaders(Http::HeaderMap& headers, bool end_stream) { response_headers_ = &headers; response_end_of_stream_ = end_stream; auto result = onResponseHeaders(); response_headers_ = nullptr; return result; } Http::FilterDataStatus Context::encodeData(Buffer::Instance& data, bool end_stream) { responseBodyBuffer_ = &data; auto result = onResponseBody(data.length(), end_stream); 
responseBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::encodeTrailers(Http::HeaderMap& trailers) { response_trailers_ = &trailers; auto result = onResponseTrailers(); response_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::encodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onResponseMetadata(); response_metadata_ = nullptr; return result; } // Http::FilterMetadataStatus::Continue; void Context::setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) { encoder_callbacks_ = &callbacks; } void Context::onHttpCallSuccess(uint32_t token, Envoy::Http::MessagePtr& response) { auto body = absl::string_view(static_cast<char*>(response->body()->linearize(response->body()->length())), response->body()->length()); onHttpCallResponse(token, headerMapToPairs(&response->headers()), body, headerMapToPairs(response->trailers())); http_request_.erase(token); } void Context::onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason /* reason */) { onHttpCallResponse(token, {}, "", {}); http_request_.erase(token); } void AsyncClientHandler::onSuccess(Envoy::Http::MessagePtr&& response) { context->onHttpCallSuccess(token, response); } void AsyncClientHandler::onFailure(Http::AsyncClient::FailureReason reason) { context->onHttpCallFailure(token, reason); } void GrpcCallClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onReceiveInitialMetadata(Http::HeaderMapPtr&& metadata) { context->onGrpcReceiveInitialMetadata(token, std::move(metadata)); } void GrpcStreamClientHandler::onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) { context->onGrpcReceiveTrailingMetadata(token, std::move(metadata)); } 
// Delivers a gRPC message body to the VM. For unary calls (call tokens) the
// bookkeeping entry is removed once the response is delivered.
void Context::onGrpcReceive(uint32_t token, Buffer::InstancePtr response) {
  if (wasm_->onGrpcReceive_) {
    auto response_size = response->length();
    // Copy the message into VM-owned memory before invoking the callback.
    auto response_ptr = wasm_->copyBuffer(*response);
    wasm_->onGrpcReceive_(this, id_, token, response_ptr, response_size);
  }
  if (IsGrpcCallToken(token)) {
    grpc_call_request_.erase(token);
  }
}

// Notifies the VM that a gRPC call/stream closed with `status` and `message`,
// then drops the corresponding bookkeeping entry.
void Context::onGrpcClose(uint32_t token, const Grpc::Status::GrpcStatus& status,
                          const absl::string_view message) {
  if (wasm_->onGrpcClose_) {
    auto message_ptr = wasm_->copyString(message);
    wasm_->onGrpcClose_(this, id_, token, static_cast<uint64_t>(status), message_ptr,
                        message.size());
  }
  if (IsGrpcCallToken(token)) {
    grpc_call_request_.erase(token);
  } else {
    grpc_stream_.erase(token);
  }
}

// Sends `message` on an open gRPC stream. Only valid for stream tokens;
// unary-call tokens are rejected with BadArgument.
WasmResult Context::grpcSend(uint32_t token, absl::string_view message, bool end_stream) {
  if (IsGrpcCallToken(token)) {
    return WasmResult::BadArgument;
  }
  auto it = grpc_stream_.find(token);
  if (it == grpc_stream_.end()) {
    return WasmResult::NotFound;
  }
  if (it != grpc_stream_.end() && it->second.stream) {
    it->second.stream->sendMessageRaw(
        Buffer::InstancePtr(new Buffer::OwnedImpl(message.data(), message.size())), end_stream);
  }
  return WasmResult::Ok;
}

// Closes a gRPC call (cancel) or stream (half-close) and removes it from the
// bookkeeping maps.
WasmResult Context::grpcClose(uint32_t token) {
  if (IsGrpcCallToken(token)) {
    auto it = grpc_call_request_.find(token);
    if (it == grpc_call_request_.end()) {
      return WasmResult::NotFound;
    }
    if (it != grpc_call_request_.end() && it->second.request) {
      it->second.request->cancel();
    }
    grpc_call_request_.erase(token);
  } else {
    auto it = grpc_stream_.find(token);
    if (it == grpc_stream_.end()) {
      return WasmResult::NotFound;
    }
    if (it != grpc_stream_.end() && it->second.stream) {
      it->second.stream->closeStream();
    }
    grpc_stream_.erase(token);
  }
  return WasmResult::Ok;
}

// Abruptly cancels a gRPC call or resets a gRPC stream, then removes it.
WasmResult Context::grpcCancel(uint32_t token) {
  if (IsGrpcCallToken(token)) {
    auto it = grpc_call_request_.find(token);
    if (it == grpc_call_request_.end()) {
      return WasmResult::NotFound;
    }
    if (it != grpc_call_request_.end() &&
it->second.request) { it->second.request->cancel(); } grpc_call_request_.erase(token); } else { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->resetStream(); } grpc_stream_.erase(token); } return WasmResult::Ok; } void GrpcCallClientHandler::onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span&) { context->onGrpcReceive(token, std::move(response)); } void GrpcCallClientHandler::onFailure(Grpc::Status::GrpcStatus status, const std::string& message, Tracing::Span&) { context->onGrpcClose(token, status, message); } bool GrpcStreamClientHandler::onReceiveMessageRaw(Buffer::InstancePtr&& response) { context->onGrpcReceive(token, std::move(response)); return true; } void GrpcStreamClientHandler::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { context->onGrpcClose(token, status, message); } static std::shared_ptr<Wasm> createWasmInternal(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { auto wasm = std::make_shared<Wasm>(vm_config.runtime(), vm_config.vm_id(), vm_config.configuration(), plugin, scope, cluster_manager, dispatcher); const auto& code = Config::DataSource::read(vm_config.code(), true, api); const auto& path = Config::DataSource::getPath(vm_config.code()) .value_or(code.empty() ? 
EMPTY_STRING : INLINE_STRING); if (code.empty()) { throw WasmException(fmt::format("Failed to load WASM code from {}", path)); } if (!wasm->initialize(code, vm_config.allow_precompiled())) { throw WasmException(fmt::format("Failed to initialize WASM code from {}", path)); } if (!root_context_for_testing) { wasm->start(); } else { wasm->startForTesting(std::move(root_context_for_testing)); } return wasm; } std::shared_ptr<Wasm> createWasm(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, nullptr /* root_context_for_testing */); } // namespace Wasm std::shared_ptr<Wasm> createWasmForTesting(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, std::move(root_context_for_testing)); } std::shared_ptr<Wasm> createThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto wasm = std::make_shared<Wasm>(base_wasm, dispatcher); Context* root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } if (!wasm->vm_id().empty()) { local_wasms[wasm->vm_id()] = wasm; } return wasm; } std::shared_ptr<Wasm> getThreadLocalWasmPtr(absl::string_view vm_id) { auto it = local_wasms.find(vm_id); if (it == local_wasms.end()) { return nullptr; } auto wasm = it->second.lock(); if (!wasm) { local_wasms.erase(vm_id); } return wasm; } std::shared_ptr<Wasm> getOrCreateThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto 
wasm = getThreadLocalWasmPtr(base_wasm.vm_id()); if (wasm) { auto root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } return wasm; } return createThreadLocalWasm(base_wasm, configuration, dispatcher); } } // namespace Wasm } // namespace Common } // namespace Extensions } // namespace Envoy
#include "extensions/common/wasm/wasm.h" #include <stdio.h> #include <limits> #include <memory> #include <string> #include "envoy/common/exception.h" #include "envoy/config/wasm/v2/wasm.pb.validate.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/local_info/local_info.h" #include "envoy/server/wasm.h" #include "envoy/thread_local/thread_local.h" #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" #include "common/common/base64.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/logger.h" #include "common/config/datasource.h" #include "common/http/header_map_impl.h" #include "common/http/message_impl.h" #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/common/wasm/wasm_state.h" #include "extensions/common/wasm/well_known_names.h" #include "extensions/filters/common/expr/context.h" #include "absl/base/casts.h" #include "absl/container/flat_hash_map.h" #include "absl/container/node_hash_map.h" #include "absl/synchronization/mutex.h" #include "eval/eval/field_access.h" #include "eval/eval/field_backed_list_impl.h" #include "eval/eval/field_backed_map_impl.h" #include "eval/public/cel_value.h" #include "openssl/bytestring.h" #include "openssl/hmac.h" #include "openssl/sha.h" namespace Envoy { namespace Extensions { namespace Common { namespace Wasm { // Any currently executing Wasm call context. #define WASM_CONTEXT(_c) \ (ContextOrEffectiveContext(static_cast<Context*>((void)_c, current_context_))) // The id of the context which should be used for calls out of the VM in place of current_context_ // above. namespace { // TODO: move to utils during upstreaming. 
// Returns the base64 encoding of the SHA-256 digest of 'data'.
// NOTE(review): EVP_MD_CTX_new() result is not null-checked before use —
// EVP_DigestInit would dereference a null ctx on allocation failure; confirm
// whether that is acceptable here.
std::string base64Sha256(absl::string_view data) {
  std::vector<uint8_t> digest(SHA256_DIGEST_LENGTH);
  EVP_MD_CTX* ctx(EVP_MD_CTX_new());
  auto rc = EVP_DigestInit(ctx, EVP_sha256());
  RELEASE_ASSERT(rc == 1, "Failed to init digest context");
  rc = EVP_DigestUpdate(ctx, data.data(), data.size());
  RELEASE_ASSERT(rc == 1, "Failed to update digest");
  rc = EVP_DigestFinal(ctx, digest.data(), nullptr);
  RELEASE_ASSERT(rc == 1, "Failed to finalize digest");
  EVP_MD_CTX_free(ctx);
  return Base64::encode(reinterpret_cast<const char*>(&digest[0]), digest.size());
}

// Converts a WasmResult enum into the Word type passed back into the VM.
inline Word wasmResultToWord(WasmResult r) { return Word(static_cast<uint64_t>(r)); }

// Truncates a VM Word to the 32 bits used by wasm32 guests.
inline uint32_t convertWordToUint32(Word w) { return static_cast<uint32_t>(w.u64_); }

// Convert a function of the form Word(Word...) to one of the form uint32_t(uint32_t...).
template <typename F, F* fn> struct ConvertFunctionWordToUint32 {
  static void convertFunctionWordToUint32() {}
};
// Specialization for non-void-returning handlers: forwards the converted
// arguments and narrows the Word result to uint32_t.
template <typename R, typename... Args, auto (*F)(Args...)->R>
struct ConvertFunctionWordToUint32<R(Args...), F> {
  static auto convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... args) {
    return convertWordToUint32(F(std::forward<Args>(args)...));
  }
};
// Specialization for void-returning handlers.
template <typename... Args, auto (*F)(Args...)->void>
struct ConvertFunctionWordToUint32<void(Args...), F> {
  static void convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... args) {
    F(std::forward<Args>(args)...);
  }
};

// Process-wide key/value store and message queues shared across all Wasm VMs,
// keyed by vm_id. All members are guarded by 'mutex'.
class SharedData {
public:
  // Fetches (value, cas) for 'key' under 'vm_id'. Returns NotFound when either
  // the vm_id map or the key is absent.
  WasmResult get(absl::string_view vm_id, const absl::string_view key,
                 std::pair<std::string, uint32_t>* result) {
    absl::ReaderMutexLock l(&mutex);
    auto map = data.find(vm_id);
    if (map == data.end()) {
      return WasmResult::NotFound;
    }
    auto it = map->second.find(key);
    if (it != map->second.end()) {
      *result = it->second;
      return WasmResult::Ok;
    }
    return WasmResult::NotFound;
  }

  // Stores 'value' under (vm_id, key). A non-zero 'cas' must match the stored
  // compare-and-swap value or CasMismatch is returned; each successful write
  // assigns a fresh cas via nextCas().
  WasmResult set(absl::string_view vm_id, absl::string_view key, absl::string_view value,
                 uint32_t cas) {
    absl::WriterMutexLock l(&mutex);
    absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>* map;
    auto map_it = data.find(vm_id);
    if (map_it == data.end()) {
      map = &data[vm_id];
    } else {
      map = &map_it->second;
    }
    auto it = map->find(key);
    if (it != map->end()) {
      if (cas && cas != it->second.second) {
        return WasmResult::CasMismatch;
      }
      it->second = std::make_pair(std::string(value), nextCas());
    } else {
      map->emplace(key, std::make_pair(std::string(value), nextCas()));
    }
    return WasmResult::Ok;
  }

  // Registers (or re-registers) the queue named (vm_id, queue_name), binding
  // it to 'context_id'/'dispatcher' for ready notifications. Returns the
  // queue's stable token.
  uint32_t registerQueue(absl::string_view vm_id, absl::string_view queue_name, uint32_t context_id,
                         Event::Dispatcher& dispatcher) {
    absl::WriterMutexLock l(&mutex);
    auto key = std::make_pair(std::string(vm_id), std::string(queue_name));
    auto it = queue_tokens.insert(std::make_pair(key, static_cast<uint32_t>(0)));
    if (it.second) {
      it.first->second = nextQueueToken();
      queue_token_set.insert(it.first->second);
    }
    uint32_t token = it.first->second;
    auto& q = queues[token];
    q.vm_id = std::string(vm_id);
    q.context_id = context_id;
    q.dispatcher = &dispatcher;
    // Preserve any existing data.
    return token;
  }

  // Returns the token for an already-registered queue, or 0 when unknown.
  uint32_t resolveQueue(absl::string_view vm_id, absl::string_view queue_name) {
    absl::WriterMutexLock l(&mutex);
    auto key = std::make_pair(std::string(vm_id), std::string(queue_name));
    auto it = queue_tokens.find(key);
    if (it != queue_tokens.end()) {
      return it->second;
    }
    return 0; // N.B. zero indicates that the queue was not found.
  }

  // Pops the front message of queue 'token' into *data.
  // NOTE(review): this mutates the queue while holding only a reader lock —
  // confirm whether a WriterMutexLock is intended here.
  WasmResult dequeue(uint32_t token, std::string* data) {
    absl::ReaderMutexLock l(&mutex);
    auto it = queues.find(token);
    if (it == queues.end()) {
      return WasmResult::NotFound;
    }
    if (it->second.queue.empty()) {
      return WasmResult::Empty;
    }
    *data = it->second.queue.front();
    it->second.queue.pop_front();
    return WasmResult::Ok;
  }

  // Appends 'value' to queue 'token' and posts an onQueueReady notification to
  // the registered consumer's dispatcher thread.
  WasmResult enqueue(uint32_t token, absl::string_view value) {
    absl::WriterMutexLock l(&mutex);
    auto it = queues.find(token);
    if (it == queues.end()) {
      return WasmResult::NotFound;
    }
    it->second.queue.push_back(std::string(value));
    auto vm_id = it->second.vm_id;
    auto context_id = it->second.context_id;
    it->second.dispatcher->post([vm_id, context_id, token] {
      auto wasm = getThreadLocalWasmPtr(vm_id);
      if (wasm) {
        wasm->queueReady(context_id, token);
      }
    });
    return WasmResult::Ok;
  }

  // Returns the current cas counter and advances it, skipping 0.
  uint32_t nextCas() {
    auto result = cas;
    cas++;
    if (!cas) { // 0 is not a valid CAS value.
      cas++;
    }
    return result;
  }

private:
  // Returns the next unused non-zero queue token, wrapping if necessary.
  uint32_t nextQueueToken() {
    while (true) {
      uint32_t token = next_queue_token++;
      if (token == 0) {
        continue; // 0 is an illegal token.
      }
      if (queue_token_set.find(token) == queue_token_set.end()) {
        return token;
      }
    }
  }

  // Per-queue state: owner identity plus the pending messages.
  struct Queue {
    std::string vm_id;
    uint32_t context_id;
    Event::Dispatcher* dispatcher;
    std::deque<std::string> queue;
  };

  absl::Mutex mutex;
  uint32_t cas = 1;
  uint32_t next_queue_token = 1;
  // vm_id -> (key -> (value, cas)).
  absl::node_hash_map<std::string,
                      absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>>
      data;
  absl::node_hash_map<uint32_t, Queue> queues;
  // Combines the two pair members' hashes; used for (vm_id, queue_name) keys.
  struct pair_hash {
    template <class T1, class T2> std::size_t operator()(const std::pair<T1, T2>& pair) const {
      return std::hash<T1>()(pair.first) ^ std::hash<T2>()(pair.second);
    }
  };
  absl::flat_hash_map<std::pair<std::string, std::string>, uint32_t, pair_hash> queue_tokens;
  absl::flat_hash_set<uint32_t> queue_token_set;
};

SharedData global_shared_data;

// Map from Wasm ID to the local Wasm instance.
thread_local absl::flat_hash_map<std::string, std::weak_ptr<Wasm>> local_wasms; const std::string INLINE_STRING = "<inline>"; template <typename Pairs> size_t pairsSize(const Pairs& result) { size_t size = 4; // number of headers for (auto& p : result) { size += 8; // size of key, size of value size += p.first.size() + 1; // null terminated key size += p.second.size() + 1; // null terminated value } return size; } template <typename Pairs> void marshalPairs(const Pairs& result, char* buffer) { char* b = buffer; *reinterpret_cast<uint32_t*>(b) = result.size(); b += sizeof(uint32_t); for (auto& p : result) { *reinterpret_cast<uint32_t*>(b) = p.first.size(); b += sizeof(uint32_t); *reinterpret_cast<uint32_t*>(b) = p.second.size(); b += sizeof(uint32_t); } for (auto& p : result) { memcpy(b, p.first.data(), p.first.size()); b += p.first.size(); *b++ = 0; memcpy(b, p.second.data(), p.second.size()); b += p.second.size(); *b++ = 0; } } Pairs toPairs(absl::string_view buffer) { Pairs result; const char* b = buffer.data(); if (buffer.size() < sizeof(uint32_t)) { return {}; } auto size = *reinterpret_cast<const uint32_t*>(b); b += sizeof(uint32_t); if (sizeof(uint32_t) + size * 2 * sizeof(uint32_t) > buffer.size()) { return {}; } result.resize(size); for (uint32_t i = 0; i < size; i++) { result[i].first = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); result[i].second = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); } for (auto& p : result) { p.first = absl::string_view(b, p.first.size()); b += p.first.size() + 1; p.second = absl::string_view(b, p.second.size()); b += p.second.size() + 1; } return result; } template <typename Pairs> bool getPairs(Context* context, const Pairs& result, uint64_t ptr_ptr, uint64_t size_ptr) { if (result.empty()) { return context->wasm()->copyToPointerSize("", ptr_ptr, size_ptr); } uint64_t size = pairsSize(result); uint64_t ptr; char* buffer = 
static_cast<char*>(context->wasm()->allocMemory(size, &ptr)); marshalPairs(result, buffer); if (!context->wasmVm()->setWord(ptr_ptr, Word(ptr))) { return false; } if (!context->wasmVm()->setWord(size_ptr, Word(size))) { return false; } return true; } void exportPairs(Context* context, const Pairs& pairs, uint64_t* ptr_ptr, uint64_t* size_ptr) { if (pairs.empty()) { *ptr_ptr = 0; *size_ptr = 0; return; } uint64_t size = pairsSize(pairs); char* buffer = static_cast<char*>(context->wasm()->allocMemory(size, ptr_ptr)); marshalPairs(pairs, buffer); *size_ptr = size; } Http::HeaderMapPtr buildHeaderMapFromPairs(const Pairs& pairs) { auto map = std::make_unique<Http::HeaderMapImpl>(); for (auto& p : pairs) { // Note: because of the lack of a string_view interface for addCopy and // the lack of an interface to add an entry with an empty value and return // the entry, there is no efficient way to prevent either a double copy // of the valueor a double lookup of the entry. map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); } return map; } const uint8_t* decodeVarint(const uint8_t* pos, const uint8_t* end, uint32_t* out) { uint32_t ret = 0; int shift = 0; while (pos < end && (*pos & 0x80)) { ret |= (*pos & 0x7f) << shift; shift += 7; pos++; } if (pos < end) { ret |= *pos << shift; pos++; } *out = ret; return pos; } Context* ContextOrEffectiveContext(Context* context) { if (effective_context_id_ == 0) { return context; } auto effective_context = context->wasm()->getContext(effective_context_id_); if (effective_context) { return effective_context; } // The effective_context_id_ no longer exists, revert to the true context. return context; } } // namespace // Test support. 
// Test hook: resolves a shared-queue token via the process-wide SharedData.
uint32_t resolveQueueForTest(absl::string_view vm_id, absl::string_view queue_name) {
  return global_shared_data.resolveQueue(vm_id, queue_name);
}

//
// HTTP Handlers
//

// Sets a named property; key/value are read out of guest memory.
Word setPropertyHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr,
                        Word value_size) {
  auto context = WASM_CONTEXT(raw_context);
  auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_);
  auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_);
  if (!key || !value) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(context->setProperty(key.value(), value.value()));
}

// Generic selector
// Evaluates a property path and copies the serialized value back into guest
// memory at value_ptr_ptr/value_size_ptr.
Word getPropertyHandler(void* raw_context, Word path_ptr, Word path_size, Word value_ptr_ptr,
                        Word value_size_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  auto path = context->wasmVm()->getMemory(path_ptr.u64_, path_size.u64_);
  if (!path.has_value()) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  std::string value;
  auto result = context->getProperty(path.value(), &value);
  if (result != WasmResult::Ok) {
    return wasmResultToWord(result);
  }
  if (!context->wasm()->copyToPointerSize(value, value_ptr_ptr.u64_, value_size_ptr.u64_)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Continue/Reply/Route
Word continueRequestHandler(void* raw_context) {
  auto context = WASM_CONTEXT(raw_context);
  context->continueRequest();
  return wasmResultToWord(WasmResult::Ok);
}

Word continueResponseHandler(void* raw_context) {
  auto context = WASM_CONTEXT(raw_context);
  context->continueResponse();
  return wasmResultToWord(WasmResult::Ok);
}

// Sends a local (host-generated) reply with guest-supplied status code,
// details, body, extra headers (marshalPairs format) and optional gRPC status.
Word sendLocalResponseHandler(void* raw_context, Word response_code, Word response_code_details_ptr,
                              Word response_code_details_size, Word body_ptr, Word body_size,
                              Word additional_response_header_pairs_ptr,
                              Word additional_response_header_pairs_size, Word grpc_code) {
  auto context = WASM_CONTEXT(raw_context);
  auto details =
      context->wasmVm()->getMemory(response_code_details_ptr.u64_, response_code_details_size.u64_);
  auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_);
  auto additional_response_header_pairs = context->wasmVm()->getMemory(
      additional_response_header_pairs_ptr.u64_, additional_response_header_pairs_size.u64_);
  if (!details || !body || !additional_response_header_pairs) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  auto additional_headers = toPairs(additional_response_header_pairs.value());
  // The lambda copies the decoded headers so they survive until the reply is
  // actually encoded.
  auto modify_headers = [additional_headers](Http::HeaderMap& headers) {
    for (auto& p : additional_headers) {
      const Http::LowerCaseString lower_key(std::move(std::string(p.first)));
      headers.addCopy(lower_key, std::string(p.second));
    }
  };
  auto grpc_status = static_cast<Grpc::Status::GrpcStatus>(grpc_code.u64_);
  auto grpc_status_opt = (grpc_status != Grpc::Status::GrpcStatus::InvalidCode)
                             ? absl::optional<Grpc::Status::GrpcStatus>(grpc_status)
                             : absl::optional<Grpc::Status::GrpcStatus>();
  context->sendLocalResponse(static_cast<Envoy::Http::Code>(response_code.u64_), body.value(),
                             modify_headers, grpc_status_opt, details.value());
  return wasmResultToWord(WasmResult::Ok);
}

// Redirects subsequent host calls to another live context of the same VM.
Word setEffectiveContextHandler(void* raw_context, Word context_id) {
  auto context = WASM_CONTEXT(raw_context);
  uint32_t cid = static_cast<uint32_t>(context_id.u64_);
  auto c = context->wasm()->getContext(cid);
  if (!c) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  effective_context_id_ = cid;
  return wasmResultToWord(WasmResult::Ok);
}

Word clearRouteCacheHandler(void* raw_context) {
  auto context = WASM_CONTEXT(raw_context);
  context->clearRouteCache();
  return wasmResultToWord(WasmResult::Ok);
}

// SharedData
// Reads (value, cas) for a key; value goes to value_ptr_ptr/value_size_ptr,
// the 32-bit cas to cas_ptr.
Word getSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr_ptr,
                          Word value_size_ptr, Word cas_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_);
  if (!key) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  std::pair<std::string, uint32_t> data;
  WasmResult result = context->getSharedData(key.value(), &data);
  if (result != WasmResult::Ok) {
    return wasmResultToWord(result);
  }
  if (!context->wasm()->copyToPointerSize(data.first, value_ptr_ptr.u64_, value_size_ptr.u64_)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  if (!context->wasmVm()->setMemory(cas_ptr.u64_, sizeof(uint32_t), &data.second)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Writes a shared-data key/value with optional compare-and-swap (cas != 0).
Word setSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr,
                          Word value_size, Word cas) {
  auto context = WASM_CONTEXT(raw_context);
  auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_);
  auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_);
  if (!key || !value) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(context->setSharedData(key.value(), value.value(), cas.u64_));
}

// Registers a shared queue for this VM and writes its token to token_ptr.
Word registerSharedQueueHandler(void* raw_context, Word queue_name_ptr, Word queue_name_size,
                                Word token_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_);
  if (!queue_name) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  uint32_t token = context->registerSharedQueue(queue_name.value());
  if (!context->wasm()->setDatatype(token_ptr.u64_, token)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Pops one message from queue 'token' and copies it into guest memory.
Word dequeueSharedQueueHandler(void* raw_context, Word token, Word data_ptr_ptr,
                               Word data_size_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  std::string data;
  WasmResult result = context->dequeueSharedQueue(token.u32(), &data);
  if (result != WasmResult::Ok) {
    return wasmResultToWord(result);
  }
  if (!context->wasm()->copyToPointerSize(data, data_ptr_ptr.u64_, data_size_ptr.u64_)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Looks up a queue token by (vm_id, queue_name) and writes it to token_ptr.
Word resolveSharedQueueHandler(void* raw_context, Word vm_id_ptr, Word vm_id_size,
                               Word queue_name_ptr, Word queue_name_size, Word token_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  auto vm_id = context->wasmVm()->getMemory(vm_id_ptr.u64_, vm_id_size.u64_);
  auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_);
  if (!vm_id || !queue_name) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  uint32_t token = 0;
  auto result = context->resolveSharedQueue(vm_id.value(), queue_name.value(), &token);
  if (result != WasmResult::Ok) {
    return wasmResultToWord(result);
  }
  if (!context->wasm()->setDatatype(token_ptr.u64_, token)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Appends a guest-supplied message to queue 'token'.
Word enqueueSharedQueueHandler(void* raw_context, Word token, Word data_ptr, Word data_size) {
  auto context = WASM_CONTEXT(raw_context);
  auto data = context->wasmVm()->getMemory(data_ptr.u64_, data_size.u64_);
  if (!data) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(context->enqueueSharedQueue(token.u32(), data.value()));
}

// Network
// Copies a slice of the downstream data buffer into guest memory.
Word getDownstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr,
                                         Word size_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  absl::string_view data;
  auto result = context->getDownstreamDataBufferBytes(start.u64_, length.u64_, &data);
  if (result != WasmResult::Ok) {
    return wasmResultToWord(result);
  }
  context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_);
  return wasmResultToWord(WasmResult::Ok);
}

// Copies a slice of the upstream data buffer into guest memory.
Word getUpstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr,
                                       Word size_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  absl::string_view data;
  auto result = context->getUpstreamDataBufferBytes(start.u64_, length.u64_, &data);
  if (result != WasmResult::Ok) {
    return wasmResultToWord(result);
  }
  context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_);
  return wasmResultToWord(WasmResult::Ok);
}

// Header/Trailer/Metadata Maps
// Adds a key/value to the selected header map.
Word addHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size,
                              Word value_ptr, Word value_size) {
  if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  auto context = WASM_CONTEXT(raw_context);
  auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_);
  auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_);
  if (!key || !value) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  context->addHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value());
  return wasmResultToWord(WasmResult::Ok);
}

// Reads one header value and copies it into guest memory.
Word getHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size,
                              Word value_ptr_ptr, Word value_size_ptr) {
  if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  auto context = WASM_CONTEXT(raw_context);
  auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_);
  if (!key) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  auto result = context->getHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value());
  context->wasm()->copyToPointerSize(result, value_ptr_ptr.u64_, value_size_ptr.u64_);
  return wasmResultToWord(WasmResult::Ok);
}

// Replaces (or sets) a header value in the selected map.
Word replaceHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size,
                                  Word value_ptr, Word value_size) {
  if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  auto context = WASM_CONTEXT(raw_context);
  auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_);
  auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_);
  if (!key || !value) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  context->replaceHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value());
  return wasmResultToWord(WasmResult::Ok);
}

// Removes a header from the selected map.
Word removeHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size) {
  if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  auto context = WASM_CONTEXT(raw_context);
  auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_);
  if (!key) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  context->removeHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value());
  return wasmResultToWord(WasmResult::Ok);
}

// Marshals the whole selected header map into guest memory (pairs format).
Word getHeaderMapPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr) {
  if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  auto context = WASM_CONTEXT(raw_context);
  auto result = context->getHeaderMapPairs(static_cast<HeaderMapType>(type.u64_));
  if (!getPairs(context, result, ptr_ptr.u64_, size_ptr.u64_)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Replaces the selected header map with guest-supplied pairs.
Word setHeaderMapPairsHandler(void* raw_context, Word type, Word ptr, Word size) {
  if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  auto context = WASM_CONTEXT(raw_context);
  auto data = context->wasmVm()->getMemory(ptr.u64_, size.u64_);
  if (!data) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  context->setHeaderMapPairs(static_cast<HeaderMapType>(type.u64_), toPairs(data.value()));
  return wasmResultToWord(WasmResult::Ok);
}

// Writes the entry count of the selected header map to result_ptr.
Word getHeaderMapSizeHandler(void* raw_context, Word type, Word result_ptr) {
  if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) {
    return wasmResultToWord(WasmResult::BadArgument);
  }
  auto context = WASM_CONTEXT(raw_context);
  size_t result = context->getHeaderMapSize(static_cast<HeaderMapType>(type.u64_));
  if (!context->wasmVm()->setWord(result_ptr.u64_, Word(result))) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Body Buffer
// Copies a slice of the request body into guest memory.
Word getRequestBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr,
                                      Word size_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  auto result = context->getRequestBodyBufferBytes(start.u64_, length.u64_);
  context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_);
  return wasmResultToWord(WasmResult::Ok);
}

// Copies a slice of the response body into guest memory.
Word getResponseBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr,
                                       Word size_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  auto result = context->getResponseBodyBufferBytes(start.u64_, length.u64_);
  context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_);
  return wasmResultToWord(WasmResult::Ok);
}

// Initiates an outbound HTTP call from the root context; headers/trailers are
// in pairs format. The handler's return value comes from Context::httpCall().
Word httpCallHandler(void* raw_context, Word uri_ptr, Word uri_size, Word header_pairs_ptr,
                     Word header_pairs_size, Word body_ptr, Word body_size, Word trailer_pairs_ptr,
                     Word trailer_pairs_size, Word timeout_milliseconds) {
  auto context = WASM_CONTEXT(raw_context)->root_context();
  auto uri = context->wasmVm()->getMemory(uri_ptr.u64_, uri_size.u64_);
  auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_);
  auto header_pairs = context->wasmVm()->getMemory(header_pairs_ptr.u64_, header_pairs_size.u64_);
  auto trailer_pairs =
      context->wasmVm()->getMemory(trailer_pairs_ptr.u64_, trailer_pairs_size.u64_);
  if (!uri || !body || !header_pairs || !trailer_pairs) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  auto headers = toPairs(header_pairs.value());
  auto trailers = toPairs(trailer_pairs.value());
  return context->httpCall(uri.value(), headers, body.value(), trailers,
                           timeout_milliseconds.u64_);
}

// Defines a metric of the given type and writes its id to metric_id_ptr.
// (Continues on the next line of this file.)
Word defineMetricHandler(void* raw_context, Word metric_type, Word name_ptr, Word name_size,
                         Word metric_id_ptr) {
  if (metric_type.u64_ > static_cast<uint64_t>(Context::MetricType::Max)) {
return 0; } auto context = WASM_CONTEXT(raw_context); auto name = context->wasmVm()->getMemory(name_ptr.u64_, name_size.u64_); if (!name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t metric_id = 0; auto result = context->defineMetric(static_cast<Context::MetricType>(metric_type.u64_), name.value(), &metric_id); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(metric_id_ptr.u64_, metric_id)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word incrementMetricHandler(void* raw_context, Word metric_id, int64_t offset) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->incrementMetric(metric_id.u64_, offset)); } Word recordMetricHandler(void* raw_context, Word metric_id, uint64_t value) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->recordMetric(metric_id.u64_, value)); } Word getMetricHandler(void* raw_context, Word metric_id, Word result_uint64_ptr) { auto context = WASM_CONTEXT(raw_context); uint64_t value = 0; auto result = context->getMetric(metric_id.u64_, &value); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, value)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word grpcCallHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size, Word request_ptr, Word request_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); auto request = 
context->wasmVm()->getMemory(request_ptr.u64_, request_size.u64_); if (!service || !service_name || !method_name || !request) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcCall(service_proto, service_name.value(), method_name.value(), request.value(), std::chrono::milliseconds(timeout_milliseconds.u64_)); } Word grpcStreamHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); if (!service || !service_name || !method_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcStream(service_proto, service_name.value(), method_name.value()); } Word grpcCancelHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcCancel(token.u64_)); } Word grpcCloseHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcClose(token.u64_)); } Word grpcSendHandler(void* raw_context, Word token, Word message_ptr, Word message_size, Word end_stream) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto message = context->wasmVm()->getMemory(message_ptr.u64_, message_size.u64_); if (!message) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(context->grpcSend(token.u64_, message.value(), end_stream.u64_));
}

// Implementation of writev-like() syscall that redirects stdout/stderr to Envoy logs.
// Returns a WASI errno (0 on success) and reports the byte count via *nwritten_ptr.
Word writevImpl(void* raw_context, Word fd, Word iovs, Word iovs_len, Word* nwritten_ptr) {
  auto context = WASM_CONTEXT(raw_context);

  // Read syscall args.
  spdlog::level::level_enum log_level;
  switch (fd.u64_) {
  case 1 /* stdout */:
    log_level = spdlog::level::info;
    break;
  case 2 /* stderr */:
    log_level = spdlog::level::err;
    break;
  default:
    return 8; // __WASI_EBADF
  }

  // Gather all iovec buffers from guest memory into a single string. Each
  // iovec is a (buf_ptr, buf_len) pair of uint32s.
  std::string s;
  for (size_t i = 0; i < iovs_len.u64_; i++) {
    auto memslice =
        context->wasmVm()->getMemory(iovs.u64_ + i * 2 * sizeof(uint32_t), 2 * sizeof(uint32_t));
    if (!memslice) {
      return 21; // __WASI_EFAULT
    }
    const uint32_t* iovec = reinterpret_cast<const uint32_t*>(memslice.value().data());
    if (iovec[1] /* buf_len */) {
      memslice = context->wasmVm()->getMemory(iovec[0] /* buf */, iovec[1] /* buf_len */);
      if (!memslice) {
        return 21; // __WASI_EFAULT
      }
      s.append(memslice.value().data(), memslice.value().size());
    }
  }

  size_t written = s.size();
  if (written) {
    // Remove trailing newline from the logs, if any.
    if (s[written - 1] == '\n') {
      s.erase(written - 1);
    }
    context->scriptLog(log_level, s);
  }
  *nwritten_ptr = Word(written);
  return 0; // __WASI_ESUCCESS
}

// __wasi_errno_t __wasi_fd_write(_wasi_fd_t fd, const _wasi_ciovec_t *iov, size_t iovs_len, size_t*
// nwritten);
Word wasi_unstable_fd_writeHandler(void* raw_context, Word fd, Word iovs, Word iovs_len,
                                   Word nwritten_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  Word nwritten(0);
  auto result = writevImpl(raw_context, fd, iovs, iovs_len, &nwritten);
  if (result.u64_ != 0) { // __WASI_ESUCCESS
    return result;
  }
  if (!context->wasmVm()->setWord(nwritten_ptr.u64_, Word(nwritten))) {
    return 21; // __WASI_EFAULT
  }
  return 0; // __WASI_ESUCCESS
}

// __wasi_errno_t __wasi_fd_seek(__wasi_fd_t fd, __wasi_filedelta_t offset, __wasi_whence_t
// whence,__wasi_filesize_t *newoffset);
// Unsupported: any guest call aborts the VM via WasmException.
Word wasi_unstable_fd_seekHandler(void*, Word, int64_t, Word, Word) {
  throw WasmException("wasi_unstable fd_seek");
}

// __wasi_errno_t __wasi_fd_close(__wasi_fd_t fd);
// Unsupported: any guest call aborts the VM via WasmException.
Word wasi_unstable_fd_closeHandler(void*, Word) { throw WasmException("wasi_unstable fd_close"); }

// __wasi_errno_t __wasi_environ_get(char **environ, char *environ_buf);
// No environment is exposed to the guest; succeeds without writing anything.
Word wasi_unstable_environ_getHandler(void*, Word, Word) {
  return 0; // __WASI_ESUCCESS
}

// __wasi_errno_t __wasi_environ_sizes_get(size_t *environ_count, size_t *environ_buf_size);
// Reports an empty environment (count 0, buffer size 0).
Word wasi_unstable_environ_sizes_getHandler(void* raw_context, Word count_ptr, Word buf_size_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  if (!context->wasmVm()->setWord(count_ptr.u64_, Word(0))) {
    return 21; // __WASI_EFAULT
  }
  if (!context->wasmVm()->setWord(buf_size_ptr.u64_, Word(0))) {
    return 21; // __WASI_EFAULT
  }
  return 0; // __WASI_ESUCCESS
}

// void __wasi_proc_exit(__wasi_exitcode_t rval);
// Unsupported: any guest call aborts the VM via WasmException.
void wasi_unstable_proc_exitHandler(void*, Word) { throw WasmException("wasi_unstable proc_exit"); }

// Minimal pthread_equal shim: thread ids are compared as raw Words.
Word pthread_equalHandler(void*, Word left, Word right) { return left.u64_ == right.u64_; }

// Sets the root context's timer tick period from guest milliseconds.
Word setTickPeriodMillisecondsHandler(void* raw_context, Word tick_period_milliseconds) {
  return wasmResultToWord(
      WASM_CONTEXT(raw_context)
          ->setTickPeriod(std::chrono::milliseconds(tick_period_milliseconds.u64_)));
}

// Writes the current system time in nanoseconds to guest memory.
Word getCurrentTimeNanosecondsHandler(void* raw_context, Word result_uint64_ptr) {
  auto context = WASM_CONTEXT(raw_context);
  uint64_t result = context->getCurrentTimeNanoseconds();
  if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, result)) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  return wasmResultToWord(WasmResult::Ok);
}

// Logs a guest-supplied message at the guest-supplied spdlog level.
Word logHandler(void* raw_context, Word level, Word address, Word size) {
  auto context = WASM_CONTEXT(raw_context);
  auto message = context->wasmVm()->getMemory(address.u64_, size.u64_);
  if (!message) {
    return wasmResultToWord(WasmResult::InvalidMemoryAccess);
  }
  context->scriptLog(static_cast<spdlog::level::level_enum>(level.u64_), message.value());
  return wasmResultToWord(WasmResult::Ok);
}

// Applies the tick period to this context's root (or itself if it is a root).
WasmResult Context::setTickPeriod(std::chrono::milliseconds tick_period) {
  wasm_->setTickPeriod(root_context_id_ ? root_context_id_ : id_, tick_period);
  return WasmResult::Ok;
}

// Current system time since epoch, in nanoseconds.
uint64_t Context::getCurrentTimeNanoseconds() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             wasm_->time_source_.systemTime().time_since_epoch())
      .count();
}

// TODO(https://github.com/google/cel-cpp/issues/38)
// Converts a CEL value into a google.protobuf.Value. Returns false for value
// kinds that have no Value representation (the "special" CEL types).
bool exportValue(const Filters::Common::Expr::CelValue& value, ProtobufWkt::Value* out) {
  using Filters::Common::Expr::CelValue;
  switch (value.type()) {
  case CelValue::Type::kBool:
    out->set_bool_value(value.BoolOrDie());
    return true;
  case CelValue::Type::kInt64:
    out->set_number_value(static_cast<double>(value.Int64OrDie()));
    return true;
  case CelValue::Type::kUint64:
    out->set_number_value(static_cast<double>(value.Uint64OrDie()));
    return true;
  case CelValue::Type::kDouble:
    out->set_number_value(value.DoubleOrDie());
    return true;
  case CelValue::Type::kString:
    *out->mutable_string_value() = std::string(value.StringOrDie().value());
    return true;
  case CelValue::Type::kBytes:
    *out->mutable_string_value() = std::string(value.BytesOrDie().value());
    return true;
  case CelValue::Type::kMessage: {
    if (value.IsNull()) {
      out->set_null_value(ProtobufWkt::NullValue::NULL_VALUE);
    } else {
      auto msg = value.MessageOrDie();
      out->mutable_struct_value()->MergeFrom(*msg);
    }
    return true;
  }
  case CelValue::Type::kDuration:
    // Durations and timestamps are exported as their string renderings.
    *out->mutable_string_value() = absl::FormatDuration(value.DurationOrDie());
    return true;
  case CelValue::Type::kTimestamp:
    *out->mutable_string_value() = absl::FormatTime(value.TimestampOrDie());
    return true;
  case CelValue::Type::kList: {
    auto list = value.ListOrDie();
    auto values = out->mutable_list_value();
    for (int i = 0; i < list->size(); i++) {
      if (!exportValue((*list)[i], values->add_values())) {
        return false;
      }
    }
    return true;
  }
  case CelValue::Type::kMap: {
    // Maps export as a Struct; keys are converted and used via string_value().
    auto map = value.MapOrDie();
    auto list = map->ListKeys();
    auto struct_obj = out->mutable_struct_value();
    for (int i = 0; i < list->size(); i++) {
      ProtobufWkt::Value field_key;
      if (!exportValue((*list)[i], &field_key)) {
        return false;
      }
      ProtobufWkt::Value field_value;
      if (!exportValue((*map)[(*list)[i]].value(), &field_value)) {
        return false;
      }
      (*struct_obj->mutable_fields())[field_key.string_value()] = field_value;
    }
    return true;
  }
  default:
    // do nothing for special values
    return false;
  }
  return false;
}

// Serializes a CEL value to the flat byte format handed back to the guest:
// scalars as raw little-endian bytes of their host representation, strings and
// bytes verbatim, messages/maps via proto serialization.
// NOTE(review): this definition is truncated at the end of the visible chunk;
// the remainder of the kMap case and the function continue past it.
WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result) {
  using Filters::Common::Expr::CelValue;
  switch (value.type()) {
  case CelValue::Type::kMessage:
    if (value.MessageOrDie() != nullptr && value.MessageOrDie()->SerializeToString(result)) {
      return WasmResult::Ok;
    }
    return WasmResult::SerializationFailure;
  case CelValue::Type::kString:
    result->assign(value.StringOrDie().value().data(), value.StringOrDie().value().size());
    return WasmResult::Ok;
  case CelValue::Type::kBytes:
    result->assign(value.BytesOrDie().value().data(), value.BytesOrDie().value().size());
    return WasmResult::Ok;
  case CelValue::Type::kInt64: {
    auto out = value.Int64OrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(int64_t));
    return WasmResult::Ok;
  }
  case CelValue::Type::kUint64: {
    auto out = value.Uint64OrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(uint64_t));
    return WasmResult::Ok;
  }
  case CelValue::Type::kDouble: {
    auto out = value.DoubleOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(double));
    return WasmResult::Ok;
  }
  case CelValue::Type::kBool: {
    auto out = value.BoolOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(bool));
    return WasmResult::Ok;
  }
  case CelValue::Type::kDuration: {
    auto out = value.DurationOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Duration));
    return WasmResult::Ok;
  }
  case CelValue::Type::kTimestamp: {
    auto out = value.TimestampOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Time));
    return WasmResult::Ok;
  }
  case CelValue::Type::kMap: {
    ProtobufWkt::Value out;
    if (!exportValue(value, &out)) {
      return WasmResult::SerializationFailure;
    }
    if
(!out.struct_value().SerializeToString(result)) { return WasmResult::SerializationFailure; } return WasmResult::Ok; } case CelValue::Type::kList: { ProtobufWkt::Value out; if (!exportValue(value, &out)) { return WasmResult::SerializationFailure; } if (!out.list_value().SerializeToString(result)) { return WasmResult::SerializationFailure; } return WasmResult::Ok; } default: return WasmResult::SerializationFailure; } return WasmResult::SerializationFailure; } // An expression wrapper for the WASM state class WasmStateWrapper : public google::api::expr::runtime::CelMap { public: WasmStateWrapper(const StreamInfo::FilterState& filter_state) : filter_state_(filter_state) {} absl::optional<google::api::expr::runtime::CelValue> operator[](google::api::expr::runtime::CelValue key) const override { if (!key.IsString()) { return {}; } auto value = key.StringOrDie().value(); try { const WasmState& result = filter_state_.getDataReadOnly<WasmState>(value); return google::api::expr::runtime::CelValue::CreateBytes(&result.value()); } catch (const EnvoyException& e) { return {}; } } int size() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } bool empty() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } const google::api::expr::runtime::CelList* ListKeys() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } private: const StreamInfo::FilterState& filter_state_; }; WasmResult Context::getProperty(absl::string_view path, std::string* result) { using google::api::expr::runtime::CelValue; using google::api::expr::runtime::FieldBackedListImpl; using google::api::expr::runtime::FieldBackedMapImpl; bool first = true; CelValue value; Protobuf::Arena arena; const StreamInfo::StreamInfo* info = getConstRequestStreamInfo(); const auto request_headers = request_headers_ ? request_headers_ : access_log_request_headers_; const auto response_headers = response_headers_ ? response_headers_ : access_log_response_headers_; const auto response_trailers = response_trailers_ ? 
response_trailers_ : access_log_response_trailers_; size_t start = 0; while (true) { if (start >= path.size()) { break; } size_t end = path.find('\0', start); if (end == absl::string_view::npos) { // this should not happen unless the input string is not null-terminated in the view return WasmResult::ParseFailure; } auto part = path.substr(start, end - start); start = end + 1; // top-level ident if (first) { first = false; if (part == "metadata") { value = CelValue::CreateMessage(&info->dynamicMetadata(), &arena); } else if (part == "filter_state") { value = CelValue::CreateMap( Protobuf::Arena::Create<WasmStateWrapper>(&arena, info->filterState())); } else if (part == "request") { value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::RequestWrapper>( &arena, request_headers, *info)); } else if (part == "response") { value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::ResponseWrapper>( &arena, response_headers, response_trailers, *info)); } else if (part == "connection") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::ConnectionWrapper>(&arena, *info)); } else if (part == "upstream") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::UpstreamWrapper>(&arena, *info)); } else if (part == "node") { value = CelValue::CreateMessage(&plugin_->local_info_.node(), &arena); } else if (part == "source") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, false)); } else if (part == "destination") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, true)); } else if (part == "request_protocol") { // TODO(kyessenov) move this upstream to CEL context if (info->protocol().has_value()) { value = CelValue::CreateString(&Http::Utility::getProtocolString(info->protocol().value())); } else { return WasmResult::NotFound; } // Reflective accessors } else if (part == 
"listener_direction") { value = CelValue::CreateInt64(plugin_->direction_); } else if (part == "listener_metadata") { value = CelValue::CreateMessage(plugin_->listener_metadata_, &arena); } else if (part == "cluster_name" && info->upstreamHost() != nullptr) { value = CelValue::CreateString(&info->upstreamHost()->cluster().name()); } else if (part == "cluster_metadata" && info->upstreamHost() != nullptr) { value = CelValue::CreateMessage(&info->upstreamHost()->cluster().metadata(), &arena); } else if (part == "route_name") { value = CelValue::CreateString(&info->getRouteName()); } else if (part == "route_metadata" && info->routeEntry() != nullptr) { value = CelValue::CreateMessage(&info->routeEntry()->metadata(), &arena); } else { return WasmResult::NotFound; } continue; } if (value.IsMap()) { auto& map = *value.MapOrDie(); auto field = map[CelValue::CreateString(part)]; if (field.has_value()) { value = field.value(); } else { return {}; } } else if (value.IsMessage()) { auto msg = value.MessageOrDie(); if (msg == nullptr) { return {}; } const Protobuf::Descriptor* desc = msg->GetDescriptor(); const Protobuf::FieldDescriptor* field_desc = desc->FindFieldByName(std::string(part)); if (field_desc == nullptr) { return {}; } else if (field_desc->is_map()) { value = CelValue::CreateMap( Protobuf::Arena::Create<FieldBackedMapImpl>(&arena, msg, field_desc, &arena)); } else if (field_desc->is_repeated()) { value = CelValue::CreateList( Protobuf::Arena::Create<FieldBackedListImpl>(&arena, msg, field_desc, &arena)); } else { auto status = google::api::expr::runtime::CreateValueFromSingleField(msg, field_desc, &arena, &value); if (!status.ok()) { return {}; } } } else { return {}; } } return serializeValue(value, result); } // Shared Data WasmResult Context::getSharedData(absl::string_view key, std::pair<std::string, uint32_t>* data) { return global_shared_data.get(wasm_->vm_id(), key, data); } WasmResult Context::setSharedData(absl::string_view key, absl::string_view value, 
uint32_t cas) { return global_shared_data.set(wasm_->vm_id(), key, value, cas); } // Shared Queue uint32_t Context::registerSharedQueue(absl::string_view queue_name) { // Get the id of the root context if this is a stream context because onQueueReady is on the root. return global_shared_data.registerQueue( wasm_->vm_id(), queue_name, isRootContext() ? id_ : root_context_id_, wasm_->dispatcher_); } WasmResult Context::resolveSharedQueue(absl::string_view vm_id, absl::string_view queue_name, uint32_t* token_ptr) { uint32_t token = global_shared_data.resolveQueue(vm_id, queue_name); if (!token) { return WasmResult::NotFound; } *token_ptr = token; return WasmResult::Ok; } WasmResult Context::dequeueSharedQueue(uint32_t token, std::string* data) { return global_shared_data.dequeue(token, data); } WasmResult Context::enqueueSharedQueue(uint32_t token, absl::string_view value) { return global_shared_data.enqueue(token, value); } // Network bytes. WasmResult Context::getDownstreamDataBufferBytes(uint32_t start, uint32_t length, absl::string_view* data) { if (!network_downstream_data_buffer_) return WasmResult::NotFound; if (network_downstream_data_buffer_->length() < static_cast<uint64_t>(start + length)) return WasmResult::InvalidMemoryAccess; *data = absl::string_view( static_cast<char*>(network_downstream_data_buffer_->linearize(start + length)) + start, length); return WasmResult::Ok; } WasmResult Context::getUpstreamDataBufferBytes(uint32_t start, uint32_t length, absl::string_view* data) { if (!network_upstream_data_buffer_) return WasmResult::NotFound; if (network_upstream_data_buffer_->length() < static_cast<uint64_t>(start + length)) return WasmResult::InvalidMemoryAccess; *data = absl::string_view( static_cast<char*>(network_upstream_data_buffer_->linearize(start + length)) + start, length); return WasmResult::Ok; } // Header/Trailer/Metadata Maps. 
Http::HeaderMap* Context::getMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: return request_headers_; case HeaderMapType::RequestTrailers: return request_trailers_; case HeaderMapType::ResponseHeaders: return response_headers_; case HeaderMapType::ResponseTrailers: return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; default: return nullptr; } } const Http::HeaderMap* Context::getConstMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: if (access_log_request_headers_) { return access_log_request_headers_; } return request_headers_; case HeaderMapType::RequestTrailers: if (access_log_request_trailers_) { return access_log_request_trailers_; } return request_trailers_; case HeaderMapType::ResponseHeaders: if (access_log_response_headers_) { return access_log_response_headers_; } return response_headers_; case HeaderMapType::ResponseTrailers: if (access_log_response_trailers_) { return access_log_response_trailers_; } return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; case HeaderMapType::GrpcReceiveInitialMetadata: return grpc_receive_initial_metadata_.get(); case HeaderMapType::GrpcReceiveTrailingMetadata: return grpc_receive_trailing_metadata_.get(); } return nullptr; } void Context::addHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->addCopy(lower_key, std::string(value)); } absl::string_view Context::getHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getConstMap(type); if (!map) { return ""; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (!entry) { return ""; } return entry->value().getStringView(); } Pairs headerMapToPairs(const Http::HeaderMap* map) { if 
(!map) { return {}; } Pairs pairs; pairs.reserve(map->size()); map->iterate( [](const Http::HeaderEntry& header, void* pairs) -> Http::HeaderMap::Iterate { (static_cast<Pairs*>(pairs)) ->push_back( std::make_pair(header.key().getStringView(), header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &pairs); return pairs; } Pairs Context::getHeaderMapPairs(HeaderMapType type) { return headerMapToPairs(getConstMap(type)); } void Context::setHeaderMapPairs(HeaderMapType type, const Pairs& pairs) { auto map = getMap(type); if (!map) { return; } std::vector<std::string> keys; map->iterate( [](const Http::HeaderEntry& header, void* keys) -> Http::HeaderMap::Iterate { (static_cast<std::vector<std::string>*>(keys)) ->push_back(std::string(header.key().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &keys); for (auto& k : keys) { const Http::LowerCaseString lower_key(std::move(k)); map->remove(lower_key); } for (auto& p : pairs) { const Http::LowerCaseString lower_key(std::move(std::string(p.first))); map->addCopy(lower_key, std::move(std::string(p.second))); } } void Context::removeHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->remove(lower_key); } void Context::replaceHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (entry != nullptr) { entry->value(value.data(), value.size()); } else { map->addCopy(lower_key, std::string(value)); } } uint32_t Context::getHeaderMapSize(HeaderMapType type) { auto map = getMap(type); if (!map) { return 0; } return map->refreshByteSize(); } // Body Buffer absl::string_view Context::getRequestBodyBufferBytes(uint32_t start, uint32_t length) { if (!requestBodyBuffer_) { return ""; } if 
(requestBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(requestBodyBuffer_->linearize(start + length)) + start, length); } absl::string_view Context::getResponseBodyBufferBytes(uint32_t start, uint32_t length) { if (!responseBodyBuffer_) { return ""; } if (responseBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(responseBodyBuffer_->linearize(start + length)) + start, length); } // Async call via HTTP uint32_t Context::httpCall(absl::string_view cluster, const Pairs& request_headers, absl::string_view request_body, const Pairs& request_trailers, int timeout_milliseconds) { if (timeout_milliseconds < 0) { return 0; } auto cluster_string = std::string(cluster); if (clusterManager().get(cluster_string) == nullptr) { return 0; } Http::MessagePtr message(new Http::RequestMessageImpl(buildHeaderMapFromPairs(request_headers))); // Check that we were provided certain headers. if (message->headers().Path() == nullptr || message->headers().Method() == nullptr || message->headers().Host() == nullptr) { return 0; } if (!request_body.empty()) { message->body().reset(new Buffer::OwnedImpl(request_body.data(), request_body.size())); message->headers().insertContentLength().value(request_body.size()); } if (request_trailers.size() > 0) { message->trailers(buildHeaderMapFromPairs(request_trailers)); } absl::optional<std::chrono::milliseconds> timeout; if (timeout_milliseconds > 0) { timeout = std::chrono::milliseconds(timeout_milliseconds); } auto token = next_http_call_token_++; // Handle rollover. 
for (;;) { if (token == 0) { token = next_http_call_token_++; } if (!http_request_.count(token)) { break; } token = next_http_call_token_++; } auto& handler = http_request_[token]; // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::RequestOptions options; options.setTimeout(timeout); Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); auto http_request = clusterManager() .httpAsyncClientForCluster(cluster_string) .send(std::move(message), handler, options); if (!http_request) { http_request_.erase(token); return 0; } handler.context = this; handler.token = token; handler.request = http_request; return token; } uint32_t Context::grpcCall(const envoy::api::v2::core::GrpcService& grpc_service, absl::string_view service_name, absl::string_view method_name, absl::string_view request, const absl::optional<std::chrono::milliseconds>& timeout) { auto token = next_grpc_token_++; if (IsGrpcStreamToken(token)) { token = next_grpc_token_++; } // Handle rollover. 
for (;;) { if (token == 0) { token = next_grpc_token_ += 2; } if (!grpc_call_request_.count(token)) { break; } token = next_grpc_token_ += 2; } auto& handler = grpc_call_request_[token]; handler.context = this; handler.token = token; auto grpc_client = clusterManager() .grpcAsyncClientManager() .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */) ->create(); // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::RequestOptions options; options.setTimeout(timeout); Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call // returns. Consequently the grpc_request is not available. Attempting to close or reset from that // callback will fail. auto grpc_request = grpc_client->sendRaw(service_name, method_name, std::make_unique<Buffer::OwnedImpl>(request), handler, Tracing::NullSpan::instance(), options); if (!grpc_request) { grpc_call_request_.erase(token); return 0; } handler.client = std::move(grpc_client); handler.request = grpc_request; return token; } uint32_t Context::grpcStream(const envoy::api::v2::core::GrpcService& grpc_service, absl::string_view service_name, absl::string_view method_name) { auto token = next_grpc_token_++; if (IsGrpcCallToken(token)) { token = next_grpc_token_++; } // Handle rollover. 
for (;;) { if (token == 0) { token = next_grpc_token_ += 2; } if (!grpc_stream_.count(token)) { break; } token = next_grpc_token_ += 2; } auto& handler = grpc_stream_[token]; handler.context = this; handler.token = token; auto grpc_client = clusterManager() .grpcAsyncClientManager() .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */) ->create(); // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::StreamOptions options; Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call // returns. Consequently the grpc_stream is not available. Attempting to close or reset from that // callback will fail. auto grpc_stream = grpc_client->startRaw(service_name, method_name, handler, options); if (!grpc_stream) { grpc_stream_.erase(token); return 0; } handler.client = std::move(grpc_client); handler.stream = grpc_stream; return token; } void Context::httpRespond(const Pairs& response_headers, absl::string_view body, const Pairs& response_trailers) { (void)response_headers; (void)body; (void)response_trailers; } // StreamInfo const StreamInfo::StreamInfo* Context::getConstRequestStreamInfo() const { if (encoder_callbacks_) { return &encoder_callbacks_->streamInfo(); } else if (decoder_callbacks_) { return &decoder_callbacks_->streamInfo(); } else if (access_log_stream_info_) { return access_log_stream_info_; } return nullptr; } StreamInfo::StreamInfo* Context::getRequestStreamInfo() const { if (encoder_callbacks_) { return &encoder_callbacks_->streamInfo(); } else if (decoder_callbacks_) { return &decoder_callbacks_->streamInfo(); } return nullptr; } WasmResult Context::setProperty(absl::string_view key, absl::string_view serialized_value) { auto* 
stream_info = getRequestStreamInfo(); if (!stream_info) { return WasmResult::NotFound; } stream_info->filterState().setData(key, std::make_unique<WasmState>(serialized_value), StreamInfo::FilterState::StateType::Mutable); return WasmResult::Ok; } void Context::scriptLog(spdlog::level::level_enum level, absl::string_view message) { switch (level) { case spdlog::level::trace: ENVOY_LOG(trace, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::debug: ENVOY_LOG(debug, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::info: ENVOY_LOG(info, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::warn: ENVOY_LOG(warn, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::err: ENVOY_LOG(error, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::critical: ENVOY_LOG(critical, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::off: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } } // Connection bool Context::isSsl() { return decoder_callbacks_->connection()->ssl() != nullptr; } // // Calls into the WASM code. 
// void Context::onStart(absl::string_view root_id, absl::string_view vm_configuration) { if (wasm_->onStart_) { auto root_id_addr = wasm_->copyString(root_id); auto config_addr = wasm_->copyString(vm_configuration); wasm_->onStart_(this, id_, root_id_addr, root_id.size(), config_addr, vm_configuration.size()); } in_vm_context_created_ = true; } bool Context::validateConfiguration(absl::string_view configuration) { if (!wasm_->validateConfiguration_) { return true; } auto address = wasm_->copyString(configuration); return wasm_->validateConfiguration_(this, id_, address, configuration.size()).u64_ != 0; } bool Context::onConfigure(absl::string_view configuration) { if (!wasm_->onConfigure_) { return true; } auto address = wasm_->copyString(configuration); return wasm_->onConfigure_(this, id_, address, configuration.size()).u64_ != 0; } void Context::onCreate(uint32_t root_context_id) { if (wasm_->onCreate_) { wasm_->onCreate_(this, id_, root_context_id); } } Network::FilterStatus Context::onNetworkNewConnection() { onCreate(root_context_id_); in_vm_context_created_ = true; if (!wasm_->onNewConnection_) { return Network::FilterStatus::Continue; } if (wasm_->onNewConnection_(this, id_).u64_ == 0) { return Network::FilterStatus::Continue; } return Network::FilterStatus::StopIteration; } Network::FilterStatus Context::onDownstreamData(int data_length, bool end_of_stream) { if (!in_vm_context_created_ || !wasm_->onDownstreamData_) { return Network::FilterStatus::Continue; } auto result = wasm_->onDownstreamData_(this, id_, static_cast<uint32_t>(data_length), static_cast<uint32_t>(end_of_stream)); // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values. return result.u64_ == 0 ? 
Network::FilterStatus::Continue : Network::FilterStatus::StopIteration; } Network::FilterStatus Context::onUpstreamData(int data_length, bool end_of_stream) { if (!in_vm_context_created_ || !wasm_->onUpstreamData_) { return Network::FilterStatus::Continue; } auto result = wasm_->onUpstreamData_(this, id_, static_cast<uint32_t>(data_length), static_cast<uint32_t>(end_of_stream)); // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values. return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration; } void Context::onDownstreamConnectionClose(PeerType peer_type) { if (in_vm_context_created_ && wasm_->onDownstreamConnectionClose_) { wasm_->onDownstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type)); } } void Context::onUpstreamConnectionClose(PeerType peer_type) { if (in_vm_context_created_ && wasm_->onUpstreamConnectionClose_) { wasm_->onUpstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type)); } } Http::FilterHeadersStatus Context::onRequestHeaders() { onCreate(root_context_id_); in_vm_context_created_ = true; // Store the stream id so that we can use it in log(). 
auto& stream_info = decoder_callbacks_->streamInfo(); auto& metadata = (*stream_info.dynamicMetadata() .mutable_filter_metadata())[HttpFilters::HttpFilterNames::get().Wasm]; (*metadata.mutable_fields())[std::string("_stream_id_" + std::string(root_id()))] .set_number_value(id_); if (!wasm_->onRequestHeaders_) { return Http::FilterHeadersStatus::Continue; } if (wasm_->onRequestHeaders_(this, id_).u64_ == 0) { return Http::FilterHeadersStatus::Continue; } return Http::FilterHeadersStatus::StopIteration; } Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) { if (!in_vm_context_created_ || !wasm_->onRequestBody_) { return Http::FilterDataStatus::Continue; } switch (wasm_ ->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length), static_cast<uint32_t>(end_of_stream)) .u64_) { case 0: return Http::FilterDataStatus::Continue; case 1: return Http::FilterDataStatus::StopIterationAndBuffer; case 2: return Http::FilterDataStatus::StopIterationAndWatermark; default: return Http::FilterDataStatus::StopIterationNoBuffer; } } Http::FilterTrailersStatus Context::onRequestTrailers() { if (!in_vm_context_created_ || !wasm_->onRequestTrailers_) { return Http::FilterTrailersStatus::Continue; } if (wasm_->onRequestTrailers_(this, id_).u64_ == 0) { return Http::FilterTrailersStatus::Continue; } return Http::FilterTrailersStatus::StopIteration; } Http::FilterMetadataStatus Context::onRequestMetadata() { if (!in_vm_context_created_ || !wasm_->onRequestMetadata_) { return Http::FilterMetadataStatus::Continue; } if (wasm_->onRequestMetadata_(this, id_).u64_ == 0) { return Http::FilterMetadataStatus::Continue; } return Http::FilterMetadataStatus::Continue; // This is currently the only return code. 
} Http::FilterHeadersStatus Context::onResponseHeaders() { if (!in_vm_context_created_) { // If the request is invalid then onRequestHeaders() will not be called and neither will // onCreate() then sendLocalReply be called which will call this function. In this case we // need to call onCreate() so that the Context inside the VM is created before the // onResponseHeaders() call. onCreate(root_context_id_); in_vm_context_created_ = true; } if (!wasm_->onResponseHeaders_) { return Http::FilterHeadersStatus::Continue; } if (wasm_->onResponseHeaders_(this, id_).u64_ == 0) { return Http::FilterHeadersStatus::Continue; } return Http::FilterHeadersStatus::StopIteration; } Http::FilterDataStatus Context::onResponseBody(int body_buffer_length, bool end_of_stream) { if (!in_vm_context_created_ || !wasm_->onResponseBody_) { return Http::FilterDataStatus::Continue; } switch (wasm_ ->onResponseBody_(this, id_, static_cast<uint32_t>(body_buffer_length), static_cast<uint32_t>(end_of_stream)) .u64_) { case 0: return Http::FilterDataStatus::Continue; case 1: return Http::FilterDataStatus::StopIterationAndBuffer; case 2: return Http::FilterDataStatus::StopIterationAndWatermark; default: return Http::FilterDataStatus::StopIterationNoBuffer; } } Http::FilterTrailersStatus Context::onResponseTrailers() { if (!in_vm_context_created_ || !wasm_->onResponseTrailers_) { return Http::FilterTrailersStatus::Continue; } if (wasm_->onResponseTrailers_(this, id_).u64_ == 0) { return Http::FilterTrailersStatus::Continue; } return Http::FilterTrailersStatus::StopIteration; } Http::FilterMetadataStatus Context::onResponseMetadata() { if (!in_vm_context_created_ || !wasm_->onResponseMetadata_) { return Http::FilterMetadataStatus::Continue; } if (wasm_->onResponseMetadata_(this, id_).u64_ == 0) { return Http::FilterMetadataStatus::Continue; } return Http::FilterMetadataStatus::Continue; // This is currently the only return code. 
} void Context::onHttpCallResponse(uint32_t token, const Pairs& response_headers, absl::string_view response_body, const Pairs& response_trailers) { if (!wasm_->onHttpCallResponse_) { return; } uint64_t headers_ptr, headers_size, trailers_ptr, trailers_size; exportPairs(this, response_headers, &headers_ptr, &headers_size); exportPairs(this, response_trailers, &trailers_ptr, &trailers_size); auto body_ptr = wasm_->copyString(response_body); auto body_size = response_body.size(); wasm_->onHttpCallResponse_(this, id_, token, headers_ptr, headers_size, body_ptr, body_size, trailers_ptr, trailers_size); } void Context::onQueueReady(uint32_t token) { if (wasm_->onQueueReady_) { wasm_->onQueueReady_(this, id_, token); } } void Context::onGrpcCreateInitialMetadata(uint32_t token, Http::HeaderMap& metadata) { if (!wasm_->onGrpcCreateInitialMetadata_) { return; } grpc_create_initial_metadata_ = &metadata; wasm_->onGrpcCreateInitialMetadata_(this, id_, token); grpc_create_initial_metadata_ = nullptr; } void Context::onGrpcReceiveInitialMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) { if (!wasm_->onGrpcReceiveInitialMetadata_) { return; } grpc_receive_initial_metadata_ = std::move(metadata); wasm_->onGrpcReceiveInitialMetadata_(this, id_, token); grpc_receive_initial_metadata_ = nullptr; } void Context::onGrpcReceiveTrailingMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) { if (!wasm_->onGrpcReceiveTrailingMetadata_) { return; } grpc_receive_trailing_metadata_ = std::move(metadata); wasm_->onGrpcReceiveTrailingMetadata_(this, id_, token); grpc_receive_trailing_metadata_ = nullptr; } WasmResult Context::defineMetric(MetricType type, absl::string_view name, uint32_t* metric_id_ptr) { auto stat_name = wasm_->stat_name_set_->getDynamic(name); if (type == MetricType::Counter) { auto id = wasm_->nextCounterMetricId(); auto c = &wasm_->scope_->counterFromStatName(stat_name); wasm_->counters_.emplace(id, c); *metric_id_ptr = id; return WasmResult::Ok; } else if (type 
== MetricType::Gauge) { auto id = wasm_->nextGaugeMetricId(); auto g = &wasm_->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate); wasm_->gauges_.emplace(id, g); *metric_id_ptr = id; return WasmResult::Ok; } else if (type == MetricType::Histogram) { auto id = wasm_->nextHistogramMetricId(); auto h = &wasm_->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified); wasm_->histograms_.emplace(id, h); *metric_id_ptr = id; return WasmResult::Ok; } return WasmResult::BadArgument; } WasmResult Context::incrementMetric(uint32_t metric_id, int64_t offset) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { if (offset > 0) { it->second->add(offset); return WasmResult::Ok; } else { return WasmResult::BadArgument; } return WasmResult::NotFound; } } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { if (offset > 0) { it->second->add(offset); return WasmResult::Ok; } else { it->second->sub(-offset); return WasmResult::Ok; } } return WasmResult::NotFound; } return WasmResult::BadArgument; } WasmResult Context::recordMetric(uint32_t metric_id, uint64_t value) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { it->second->add(value); return WasmResult::Ok; } } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { it->second->set(value); return WasmResult::Ok; } } else if (type == MetricType::Histogram) { auto it = wasm_->histograms_.find(metric_id); if (it != wasm_->histograms_.end()) { it->second->recordValue(value); return WasmResult::Ok; } } return WasmResult::NotFound; } WasmResult Context::getMetric(uint32_t metric_id, uint64_t* 
result_uint64_ptr) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { *result_uint64_ptr = it->second->value(); return WasmResult::Ok; } return WasmResult::NotFound; } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { *result_uint64_ptr = it->second->value(); return WasmResult::Ok; } return WasmResult::NotFound; } return WasmResult::BadArgument; } Wasm::Wasm(absl::string_view vm, absl::string_view vm_id, absl::string_view vm_configuration, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher) : vm_id_(std::string(vm_id)), wasm_vm_(Common::Wasm::createWasmVm(vm)), plugin_(plugin), scope_(scope), cluster_manager_(cluster_manager), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), vm_configuration_(vm_configuration), stat_name_set_(scope_->symbolTable().makeSet("Wasm").release()) {} std::string Plugin::makeLogPrefix() const { std::string prefix; if (!name_.empty()) { prefix = prefix + " " + name_; } if (!root_id_.empty()) { prefix = prefix + " " + std::string(root_id_); } if (vm_id_.empty()) { prefix = prefix + " " + std::string(vm_id_); } return prefix; } Context::~Context() { // Cancel any outstanding requests. for (auto& p : http_request_) { p.second.request->cancel(); } for (auto& p : grpc_call_request_) { p.second.request->cancel(); } for (auto& p : grpc_stream_) { p.second.stream->resetStream(); } // Do not remove vm or root contexts which have the same lifetime as wasm_. 
if (root_context_id_) { wasm_->contexts_.erase(id_); } } void Wasm::registerCallbacks() { #define _REGISTER(_fn) \ wasm_vm_->registerCallback( \ "env", #_fn, &_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(_fn##Handler), \ _fn##Handler>::convertFunctionWordToUint32) if (is_emscripten_) { _REGISTER(pthread_equal); } #undef _REGISTER #define _REGISTER_WASI(_fn) \ wasm_vm_->registerCallback( \ "wasi_unstable", #_fn, &wasi_unstable_##_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(wasi_unstable_##_fn##Handler), \ wasi_unstable_##_fn##Handler>::convertFunctionWordToUint32) if (is_emscripten_) { _REGISTER_WASI(fd_write); _REGISTER_WASI(fd_seek); _REGISTER_WASI(fd_close); _REGISTER_WASI(environ_get); _REGISTER_WASI(environ_sizes_get); _REGISTER_WASI(proc_exit); } #undef _REGISTER_WASI // Calls with the "proxy_" prefix. #define _REGISTER_PROXY(_fn) \ wasm_vm_->registerCallback( \ "env", "proxy_" #_fn, &_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(_fn##Handler), \ _fn##Handler>::convertFunctionWordToUint32); _REGISTER_PROXY(log); _REGISTER_PROXY(setProperty); _REGISTER_PROXY(getProperty); _REGISTER_PROXY(continueRequest); _REGISTER_PROXY(continueResponse); _REGISTER_PROXY(sendLocalResponse); _REGISTER_PROXY(clearRouteCache); _REGISTER_PROXY(getSharedData); _REGISTER_PROXY(setSharedData); _REGISTER_PROXY(registerSharedQueue); _REGISTER_PROXY(resolveSharedQueue); _REGISTER_PROXY(dequeueSharedQueue); _REGISTER_PROXY(enqueueSharedQueue); _REGISTER_PROXY(getDownstreamDataBufferBytes); _REGISTER_PROXY(getUpstreamDataBufferBytes); _REGISTER_PROXY(getHeaderMapValue); _REGISTER_PROXY(addHeaderMapValue); _REGISTER_PROXY(replaceHeaderMapValue); _REGISTER_PROXY(removeHeaderMapValue); _REGISTER_PROXY(getHeaderMapPairs); _REGISTER_PROXY(setHeaderMapPairs); _REGISTER_PROXY(getHeaderMapSize); _REGISTER_PROXY(getRequestBodyBufferBytes); _REGISTER_PROXY(getResponseBodyBufferBytes); _REGISTER_PROXY(httpCall); _REGISTER_PROXY(grpcCall); _REGISTER_PROXY(grpcStream); 
_REGISTER_PROXY(grpcClose); _REGISTER_PROXY(grpcCancel); _REGISTER_PROXY(grpcSend); _REGISTER_PROXY(setTickPeriodMilliseconds); _REGISTER_PROXY(getCurrentTimeNanoseconds); _REGISTER_PROXY(defineMetric); _REGISTER_PROXY(incrementMetric); _REGISTER_PROXY(recordMetric); _REGISTER_PROXY(getMetric); _REGISTER_PROXY(setEffectiveContext); #undef _REGISTER_PROXY } void Wasm::getFunctions() { #define _GET(_fn) wasm_vm_->getFunction(#_fn, &_fn##_); _GET(_start); _GET(__wasm_call_ctors); _GET(malloc); _GET(free); #undef _GET #define _GET_PROXY(_fn) wasm_vm_->getFunction("proxy_" #_fn, &_fn##_); _GET_PROXY(validateConfiguration); _GET_PROXY(onStart); _GET_PROXY(onConfigure); _GET_PROXY(onTick); _GET_PROXY(onCreate); _GET_PROXY(onNewConnection); _GET_PROXY(onDownstreamData); _GET_PROXY(onUpstreamData); _GET_PROXY(onDownstreamConnectionClose); _GET_PROXY(onUpstreamConnectionClose); _GET_PROXY(onRequestHeaders); _GET_PROXY(onRequestBody); _GET_PROXY(onRequestTrailers); _GET_PROXY(onRequestMetadata); _GET_PROXY(onResponseHeaders); _GET_PROXY(onResponseBody); _GET_PROXY(onResponseTrailers); _GET_PROXY(onResponseMetadata); _GET_PROXY(onHttpCallResponse); _GET_PROXY(onGrpcReceive); _GET_PROXY(onGrpcClose); _GET_PROXY(onGrpcCreateInitialMetadata); _GET_PROXY(onGrpcReceiveInitialMetadata); _GET_PROXY(onGrpcReceiveTrailingMetadata); _GET_PROXY(onQueueReady); _GET_PROXY(onDone); _GET_PROXY(onLog); _GET_PROXY(onDelete); #undef _GET_PROXY if (!malloc_ || !free_) { throw WasmException("WASM missing malloc/free"); } } Wasm::Wasm(const Wasm& wasm, Event::Dispatcher& dispatcher) : std::enable_shared_from_this<Wasm>(wasm), vm_id_(wasm.vm_id_), plugin_(wasm.plugin_), scope_(wasm.scope_), cluster_manager_(wasm.cluster_manager_), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), stat_name_set_(wasm.stat_name_set_) { if (wasm.wasmVm()->cloneable()) { wasm_vm_ = wasm.wasmVm()->clone(); vm_context_ = std::make_shared<Context>(this); getFunctions(); } else { wasm_vm_ = 
Common::Wasm::createWasmVm(wasm.wasmVm()->runtime()); if (!initialize(wasm.code(), wasm.allow_precompiled())) { throw WasmException("Failed to initialize WASM code"); } } } bool Wasm::initialize(const std::string& code, bool allow_precompiled) { if (!wasm_vm_) { return false; } // If the configured_vm_id is empty, then hash the code to create a unique vm_id. if (vm_id_.empty()) { vm_id_ = base64Sha256(code); } auto ok = wasm_vm_->load(code, allow_precompiled); if (!ok) { return false; } auto metadata = wasm_vm_->getCustomSection("emscripten_metadata"); if (!metadata.empty()) { // See https://github.com/emscripten-core/emscripten/blob/incoming/tools/shared.py#L3059 is_emscripten_ = true; auto start = reinterpret_cast<const uint8_t*>(metadata.data()); auto end = reinterpret_cast<const uint8_t*>(metadata.data() + metadata.size()); start = decodeVarint(start, end, &emscripten_metadata_major_version_); start = decodeVarint(start, end, &emscripten_metadata_minor_version_); start = decodeVarint(start, end, &emscripten_abi_major_version_); start = decodeVarint(start, end, &emscripten_abi_minor_version_); uint32_t temp; if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 1) { // metadata 0.2 - added: wasm_backend. start = decodeVarint(start, end, &temp); } start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 0) { // metadata 0.1 - added: global_base, dynamic_base, dynamictop_ptr and tempdouble_ptr. start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); decodeVarint(start, end, &temp); if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 2) { // metadata 0.3 - added: standalone_wasm. 
start = decodeVarint(start, end, &emscripten_standalone_wasm_); } } } registerCallbacks(); wasm_vm_->link(vm_id_); vm_context_ = std::make_shared<Context>(this); getFunctions(); startVm(vm_context_.get()); code_ = code; allow_precompiled_ = allow_precompiled; return true; } void Wasm::startVm(Context* root_context) { /* Call "_start" function, and fallback to "__wasm_call_ctors" if the former is not available. */ if (_start_) { _start_(root_context); } else if (__wasm_call_ctors_) { __wasm_call_ctors_(root_context); } } bool Wasm::configure(Context* root_context, absl::string_view configuration) { if (!onConfigure_) { return true; } auto address = copyString(configuration); return onConfigure_(root_context, root_context->id(), address, configuration.size()).u64_ != 0; } Context* Wasm::start() { auto root_id = plugin_->root_id_; auto it = root_contexts_.find(root_id); if (it != root_contexts_.end()) { it->second->onStart(root_id, vm_configuration()); return it->second.get(); } auto context = std::make_unique<Context>(this, root_id, plugin_); auto context_ptr = context.get(); root_contexts_[root_id] = std::move(context); context_ptr->onStart(root_id, vm_configuration()); return context_ptr; }; void Wasm::startForTesting(std::unique_ptr<Context> context) { auto context_ptr = context.get(); if (!context->wasm_) { // Initialization was delayed till the Wasm object was created. 
context->wasm_ = this; context->plugin_ = plugin_; context->id_ = allocContextId(); contexts_[context->id_] = context.get(); } root_contexts_[""] = std::move(context); context_ptr->onStart("", ""); } void Wasm::setTickPeriod(uint32_t context_id, std::chrono::milliseconds new_tick_period) { auto& tick_period = tick_period_[context_id]; auto& timer = timer_[context_id]; bool was_running = timer && tick_period.count() > 0; tick_period = new_tick_period; if (tick_period.count() > 0 && !was_running) { timer = dispatcher_.createTimer([weak = std::weak_ptr<Wasm>(shared_from_this()), context_id]() { auto shared = weak.lock(); if (shared) { shared->tickHandler(context_id); } }); timer->enableTimer(tick_period); } } void Wasm::tickHandler(uint32_t root_context_id) { auto& tick_period = tick_period_[root_context_id]; auto& timer = timer_[root_context_id]; if (onTick_) { onTick_(getContext(root_context_id), root_context_id); if (timer && tick_period.count() > 0) { timer->enableTimer(tick_period); } } } uint32_t Wasm::allocContextId() { while (true) { auto id = next_context_id_++; // Prevent reuse. 
if (contexts_.find(id) == contexts_.end()) { return id; } } } void Wasm::queueReady(uint32_t root_context_id, uint32_t token) { auto it = contexts_.find(root_context_id); if (it == contexts_.end() || !it->second->isRootContext()) { return; } it->second->onQueueReady(token); } Network::FilterStatus Context::onNewConnection() { return onNetworkNewConnection(); }; Network::FilterStatus Context::onData(Buffer::Instance& data, bool end_stream) { network_downstream_data_buffer_ = &data; auto result = onDownstreamData(data.length(), end_stream); network_downstream_data_buffer_ = nullptr; return result; } Network::FilterStatus Context::onWrite(Buffer::Instance& data, bool end_stream) { network_upstream_data_buffer_ = &data; auto result = onUpstreamData(data.length(), end_stream); network_upstream_data_buffer_ = nullptr; if (end_stream) { // This is called when seeing end_stream=true and not on an upstream connection event, // because registering for latter requires replicating the whole TCP proxy extension. onUpstreamConnectionClose(PeerType::Unknown); } return result; } void Context::onEvent(Network::ConnectionEvent event) { switch (event) { case Network::ConnectionEvent::LocalClose: onDownstreamConnectionClose(PeerType::Local); break; case Network::ConnectionEvent::RemoteClose: onDownstreamConnectionClose(PeerType::Remote); break; default: break; } } void Context::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { network_read_filter_callbacks_ = &callbacks; network_read_filter_callbacks_->connection().addConnectionCallbacks(*this); } void Context::initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) { network_write_filter_callbacks_ = &callbacks; } void Wasm::log(absl::string_view root_id, const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { // Check dynamic metadata for the id_ of the stream for this root_id. 
Context* context = nullptr; auto metadata_it = stream_info.dynamicMetadata().filter_metadata().find( HttpFilters::HttpFilterNames::get().Wasm); if (metadata_it != stream_info.dynamicMetadata().filter_metadata().end()) { auto find_id = metadata_it->second.fields().find(std::string("_stream_id_" + std::string(root_id))); if (find_id != metadata_it->second.fields().end()) { context = getContext(static_cast<uint32_t>(find_id->second.number_value())); } } if (!context) { context = getRootContext(root_id); } context->log(request_headers, response_headers, response_trailers, stream_info); } void Context::log(const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { access_log_request_headers_ = request_headers; // ? request_trailers ? access_log_response_headers_ = response_headers; access_log_response_trailers_ = response_trailers; access_log_stream_info_ = &stream_info; onLog(); access_log_request_headers_ = nullptr; // ? request_trailers ? 
access_log_response_headers_ = nullptr; access_log_response_trailers_ = nullptr; access_log_stream_info_ = nullptr; onDelete(); } void Context::onDestroy() { if (destroyed_) { return; } destroyed_ = true; onDone(); } void Context::onDone() { if (in_vm_context_created_ && wasm_->onDone_) { wasm_->onDone_(this, id_); } } void Context::onLog() { if (in_vm_context_created_ && wasm_->onLog_) { wasm_->onLog_(this, id_); } } void Context::onDelete() { if (in_vm_context_created_ && wasm_->onDelete_) { wasm_->onDelete_(this, id_); } } Http::FilterHeadersStatus Context::decodeHeaders(Http::HeaderMap& headers, bool end_stream) { request_headers_ = &headers; request_end_of_stream_ = end_stream; auto result = onRequestHeaders(); request_headers_ = nullptr; return result; } Http::FilterDataStatus Context::decodeData(Buffer::Instance& data, bool end_stream) { requestBodyBuffer_ = &data; auto result = onRequestBody(data.length(), end_stream); requestBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::decodeTrailers(Http::HeaderMap& trailers) { request_trailers_ = &trailers; auto result = onRequestTrailers(); request_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::decodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onRequestMetadata(); response_metadata_ = nullptr; return result; } void Context::setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) { decoder_callbacks_ = &callbacks; } Http::FilterHeadersStatus Context::encode100ContinueHeaders(Http::HeaderMap&) { return Http::FilterHeadersStatus::Continue; } Http::FilterHeadersStatus Context::encodeHeaders(Http::HeaderMap& headers, bool end_stream) { response_headers_ = &headers; response_end_of_stream_ = end_stream; auto result = onResponseHeaders(); response_headers_ = nullptr; return result; } Http::FilterDataStatus Context::encodeData(Buffer::Instance& data, bool end_stream) { responseBodyBuffer_ 
= &data; auto result = onResponseBody(data.length(), end_stream); responseBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::encodeTrailers(Http::HeaderMap& trailers) { response_trailers_ = &trailers; auto result = onResponseTrailers(); response_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::encodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onResponseMetadata(); response_metadata_ = nullptr; return result; } // Http::FilterMetadataStatus::Continue; void Context::setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) { encoder_callbacks_ = &callbacks; } void Context::onHttpCallSuccess(uint32_t token, Envoy::Http::MessagePtr& response) { auto body = absl::string_view(static_cast<char*>(response->body()->linearize(response->body()->length())), response->body()->length()); onHttpCallResponse(token, headerMapToPairs(&response->headers()), body, headerMapToPairs(response->trailers())); http_request_.erase(token); } void Context::onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason /* reason */) { onHttpCallResponse(token, {}, "", {}); http_request_.erase(token); } void AsyncClientHandler::onSuccess(Envoy::Http::MessagePtr&& response) { context->onHttpCallSuccess(token, response); } void AsyncClientHandler::onFailure(Http::AsyncClient::FailureReason reason) { context->onHttpCallFailure(token, reason); } void GrpcCallClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onReceiveInitialMetadata(Http::HeaderMapPtr&& metadata) { context->onGrpcReceiveInitialMetadata(token, std::move(metadata)); } void GrpcStreamClientHandler::onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) { 
context->onGrpcReceiveTrailingMetadata(token, std::move(metadata)); } void Context::onGrpcReceive(uint32_t token, Buffer::InstancePtr response) { if (wasm_->onGrpcReceive_) { auto response_size = response->length(); auto response_ptr = wasm_->copyBuffer(*response); wasm_->onGrpcReceive_(this, id_, token, response_ptr, response_size); } if (IsGrpcCallToken(token)) { grpc_call_request_.erase(token); } } void Context::onGrpcClose(uint32_t token, const Grpc::Status::GrpcStatus& status, const absl::string_view message) { if (wasm_->onGrpcClose_) { auto message_ptr = wasm_->copyString(message); wasm_->onGrpcClose_(this, id_, token, static_cast<uint64_t>(status), message_ptr, message.size()); } if (IsGrpcCallToken(token)) { grpc_call_request_.erase(token); } else { grpc_stream_.erase(token); } } WasmResult Context::grpcSend(uint32_t token, absl::string_view message, bool end_stream) { if (IsGrpcCallToken(token)) { return WasmResult::BadArgument; } auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->sendMessageRaw( Buffer::InstancePtr(new Buffer::OwnedImpl(message.data(), message.size())), end_stream); } return WasmResult::Ok; } WasmResult Context::grpcClose(uint32_t token) { if (IsGrpcCallToken(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return WasmResult::NotFound; } if (it != grpc_call_request_.end() && it->second.request) { it->second.request->cancel(); } grpc_call_request_.erase(token); } else { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->closeStream(); } grpc_stream_.erase(token); } return WasmResult::Ok; } WasmResult Context::grpcCancel(uint32_t token) { if (IsGrpcCallToken(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return 
WasmResult::NotFound; } if (it != grpc_call_request_.end() && it->second.request) { it->second.request->cancel(); } grpc_call_request_.erase(token); } else { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->resetStream(); } grpc_stream_.erase(token); } return WasmResult::Ok; } void GrpcCallClientHandler::onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span&) { context->onGrpcReceive(token, std::move(response)); } void GrpcCallClientHandler::onFailure(Grpc::Status::GrpcStatus status, const std::string& message, Tracing::Span&) { context->onGrpcClose(token, status, message); } bool GrpcStreamClientHandler::onReceiveMessageRaw(Buffer::InstancePtr&& response) { context->onGrpcReceive(token, std::move(response)); return true; } void GrpcStreamClientHandler::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { context->onGrpcClose(token, status, message); } static std::shared_ptr<Wasm> createWasmInternal(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { auto wasm = std::make_shared<Wasm>(vm_config.runtime(), vm_config.vm_id(), vm_config.configuration(), plugin, scope, cluster_manager, dispatcher); const auto& code = Config::DataSource::read(vm_config.code(), true, api); const auto& path = Config::DataSource::getPath(vm_config.code()) .value_or(code.empty() ? 
EMPTY_STRING : INLINE_STRING); if (code.empty()) { throw WasmException(fmt::format("Failed to load WASM code from {}", path)); } if (!wasm->initialize(code, vm_config.allow_precompiled())) { throw WasmException(fmt::format("Failed to initialize WASM code from {}", path)); } if (!root_context_for_testing) { wasm->start(); } else { wasm->startForTesting(std::move(root_context_for_testing)); } return wasm; } std::shared_ptr<Wasm> createWasm(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, nullptr /* root_context_for_testing */); } // namespace Wasm std::shared_ptr<Wasm> createWasmForTesting(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, std::move(root_context_for_testing)); } std::shared_ptr<Wasm> createThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto wasm = std::make_shared<Wasm>(base_wasm, dispatcher); Context* root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } if (!wasm->vm_id().empty()) { local_wasms[wasm->vm_id()] = wasm; } return wasm; } std::shared_ptr<Wasm> getThreadLocalWasmPtr(absl::string_view vm_id) { auto it = local_wasms.find(vm_id); if (it == local_wasms.end()) { return nullptr; } auto wasm = it->second.lock(); if (!wasm) { local_wasms.erase(vm_id); } return wasm; } std::shared_ptr<Wasm> getOrCreateThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto 
wasm = getThreadLocalWasmPtr(base_wasm.vm_id()); if (wasm) { auto root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } return wasm; } return createThreadLocalWasm(base_wasm, configuration, dispatcher); } } // namespace Wasm } // namespace Common } // namespace Extensions } // namespace Envoy
Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) { if (!wasm_->onRequestBody_) { return Http::FilterDataStatus::Continue; } switch (wasm_ ->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length), static_cast<uint32_t>(end_of_stream)) .u64_) { case 0: return Http::FilterDataStatus::Continue; case 1: return Http::FilterDataStatus::StopIterationAndBuffer; case 2: return Http::FilterDataStatus::StopIterationAndWatermark; default: return Http::FilterDataStatus::StopIterationNoBuffer; } }
Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) { if (!in_vm_context_created_ || !wasm_->onRequestBody_) { return Http::FilterDataStatus::Continue; } switch (wasm_ ->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length), static_cast<uint32_t>(end_of_stream)) .u64_) { case 0: return Http::FilterDataStatus::Continue; case 1: return Http::FilterDataStatus::StopIterationAndBuffer; case 2: return Http::FilterDataStatus::StopIterationAndWatermark; default: return Http::FilterDataStatus::StopIterationNoBuffer; } }
{'added': [(1702, ' in_vm_context_created_ = true;'), (1729, ' in_vm_context_created_ = true;'), (1740, ' if (!in_vm_context_created_ || !wasm_->onDownstreamData_) {'), (1750, ' if (!in_vm_context_created_ || !wasm_->onUpstreamData_) {'), (1760, ' if (in_vm_context_created_ && wasm_->onDownstreamConnectionClose_) {'), (1766, ' if (in_vm_context_created_ && wasm_->onUpstreamConnectionClose_) {'), (1790, ' if (!in_vm_context_created_ || !wasm_->onRequestBody_) {'), (1809, ' if (!in_vm_context_created_ || !wasm_->onRequestTrailers_) {'), (1819, ' if (!in_vm_context_created_ || !wasm_->onRequestMetadata_) {'), (1847, ' if (!in_vm_context_created_ || !wasm_->onResponseBody_) {'), (1866, ' if (!in_vm_context_created_ || !wasm_->onResponseTrailers_) {'), (1876, ' if (!in_vm_context_created_ || !wasm_->onResponseMetadata_) {'), (2450, ' if (in_vm_context_created_ && wasm_->onDone_) {'), (2456, ' if (in_vm_context_created_ && wasm_->onLog_) {'), (2462, ' if (in_vm_context_created_ && wasm_->onDelete_) {')], 'deleted': [(1738, ' if (!wasm_->onDownstreamData_) {'), (1748, ' if (!wasm_->onUpstreamData_) {'), (1758, ' if (wasm_->onDownstreamConnectionClose_) {'), (1764, ' if (wasm_->onUpstreamConnectionClose_) {'), (1788, ' if (!wasm_->onRequestBody_) {'), (1807, ' if (!wasm_->onRequestTrailers_) {'), (1817, ' if (!wasm_->onRequestMetadata_) {'), (1845, ' if (!wasm_->onResponseBody_) {'), (1864, ' if (!wasm_->onResponseTrailers_) {'), (1874, ' if (!wasm_->onResponseMetadata_) {'), (2448, ' if (wasm_->onDone_) {'), (2454, ' if (wasm_->onLog_) {'), (2460, ' if (wasm_->onDelete_) {')]}
15
13
2,395
18,810
18
98
5
https://github.com/istio/envoy
CVE-2020-10739
CWE-476
3,043
bus-polkit.c
C
async_polkit_query_free
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! 
*/ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_message_handler_t callback; void *userdata; sd_bus_slot *slot; Hashmap *registry; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); } static int 
async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); q->slot = sd_bus_slot_unref(q->slot); q->reply = sd_bus_message_ref(reply); r = sd_bus_message_rewind(q->request, true); if (r < 0) { r = sd_bus_reply_method_errno(q->request, r, NULL); goto finish; } r = q->callback(q->request, q->userdata, &error_buffer); r = bus_maybe_reply_error(q->request, r, &error_buffer); finish: async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; const char *sender; sd_bus_message_handler_t callback; void *userdata; int c; #endif int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. 
*/ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT if (sd_bus_get_current_message(call->bus) != call) return -EINVAL; callback = sd_bus_get_current_handler(call->bus); if (!callback) return -EINVAL; userdata = sd_bus_get_current_userdata(call->bus); sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = 
sd_bus_message_ref(call), .callback = callback, .userdata = userdata, }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! 
*/ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_slot *slot; Hashmap *registry; sd_event_source *defer_event_source; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); sd_event_source_disable_unref(q->defer_event_source); 
free(q); } static int async_polkit_defer(sd_event_source *s, void *userdata) { AsyncPolkitQuery *q = userdata; assert(s); /* This is called as idle event source after we processed the async polkit reply, hopefully after the * method call we re-enqueued has been properly processed. */ async_polkit_query_free(q); return 0; } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. 
*/ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; int c; #endif const char *sender; int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. 
*/ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; #if ENABLE_POLKIT c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); 
return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); }
static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); sd_event_source_disable_unref(q->defer_event_source); free(q); }
{'added': [(163, ''), (165, ' sd_event_source *defer_event_source;'), (183, ' sd_event_source_disable_unref(q->defer_event_source);'), (187, 'static int async_polkit_defer(sd_event_source *s, void *userdata) {'), (188, ' AsyncPolkitQuery *q = userdata;'), (189, ''), (190, ' assert(s);'), (191, ''), (192, ' /* This is called as idle event source after we processed the async polkit reply, hopefully after the'), (193, ' * method call we re-enqueued has been properly processed. */'), (194, ''), (195, ' async_polkit_query_free(q);'), (196, ' return 0;'), (197, '}'), (198, ''), (207, ' assert(q->slot);'), (209, ''), (210, ' assert(!q->reply);'), (213, " /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the"), (214, ' * whole message processing again, and thus re-validating and re-retrieving the "userdata" field'), (215, ' * again.'), (216, ' *'), (217, ' * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again,'), (218, ' * i.e. after the second time the message is processed is complete. 
*/'), (219, ''), (220, ' assert(!q->defer_event_source);'), (221, ' r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q);'), (222, ' if (r < 0)'), (223, ' goto fail;'), (224, ''), (225, ' r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE);'), (226, ' if (r < 0)'), (227, ' goto fail;'), (228, ''), (229, ' r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT);'), (230, ' if (r < 0)'), (231, ' goto fail;'), (232, ''), (234, ' if (r < 0)'), (235, ' goto fail;'), (236, ''), (237, ' r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request);'), (238, ' if (r < 0)'), (239, ' goto fail;'), (241, ' return 1;'), (243, 'fail:'), (244, ' log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m");'), (245, ' (void) sd_bus_reply_method_errno(q->request, r, NULL);'), (267, ' const char *sender;'), (335, '#if ENABLE_POLKIT')], 'deleted': [(162, ' sd_bus_message_handler_t callback;'), (163, ' void *userdata;'), (169, ''), (199, ' if (r < 0) {'), (200, ' r = sd_bus_reply_method_errno(q->request, r, NULL);'), (201, ' goto finish;'), (202, ' }'), (204, ' r = q->callback(q->request, q->userdata, &error_buffer);'), (205, ' r = bus_maybe_reply_error(q->request, r, &error_buffer);'), (207, 'finish:'), (209, ''), (228, ' const char *sender;'), (229, ' sd_bus_message_handler_t callback;'), (230, ' void *userdata;'), (296, '#if ENABLE_POLKIT'), (297, ' if (sd_bus_get_current_message(call->bus) != call)'), (298, ' return -EINVAL;'), (299, ''), (300, ' callback = sd_bus_get_current_handler(call->bus);'), (301, ' if (!callback)'), (302, ' return -EINVAL;'), (303, ''), (304, ' userdata = sd_bus_get_current_userdata(call->bus);'), (305, ''), (352, ' .callback = callback,'), (353, ' .userdata = userdata,')]}
50
26
290
1,631
12
76
4
https://github.com/systemd/systemd
CVE-2020-1712
CWE-416
183
tcd.c
C
opj_tcd_code_block_enc_allocate_data
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * Copyright (c) 2006-2007, Parvatha Elangovan * Copyright (c) 2008, 2011-2012, Centre National d'Etudes Spatiales (CNES), FR * Copyright (c) 2012, CS Systemes d'Information, France * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "opj_includes.h" #include "opj_common.h" /* ----------------------------------------------------------------------- */ /* TODO MSD: */ #ifdef TODO_MSD void tcd_dump(FILE *fd, opj_tcd_t *tcd, opj_tcd_image_t * img) { int tileno, compno, resno, bandno, precno;/*, cblkno;*/ fprintf(fd, "image {\n"); fprintf(fd, " tw=%d, th=%d x0=%d x1=%d y0=%d y1=%d\n", img->tw, img->th, tcd->image->x0, tcd->image->x1, tcd->image->y0, tcd->image->y1); for (tileno = 0; tileno < img->th * img->tw; tileno++) { opj_tcd_tile_t *tile = &tcd->tcd_image->tiles[tileno]; fprintf(fd, " tile {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numcomps=%d\n", tile->x0, tile->y0, tile->x1, tile->y1, tile->numcomps); for (compno = 0; compno < tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tile->comps[compno]; fprintf(fd, " tilec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numresolutions=%d\n", tilec->x0, tilec->y0, tilec->x1, tilec->y1, tilec->numresolutions); for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; fprintf(fd, "\n res {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, pw=%d, ph=%d, numbands=%d\n", res->x0, res->y0, res->x1, res->y1, res->pw, res->ph, res->numbands); for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; fprintf(fd, " band {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, stepsize=%f, numbps=%d\n", band->x0, band->y0, band->x1, 
band->y1, band->stepsize, band->numbps); for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prec = &band->precincts[precno]; fprintf(fd, " prec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, cw=%d, ch=%d\n", prec->x0, prec->y0, prec->x1, prec->y1, prec->cw, prec->ch); /* for (cblkno = 0; cblkno < prec->cw * prec->ch; cblkno++) { opj_tcd_cblk_t *cblk = &prec->cblks[cblkno]; fprintf(fd, " cblk {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d\n", cblk->x0, cblk->y0, cblk->x1, cblk->y1); fprintf(fd, " }\n"); } */ fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, "}\n"); } #endif /** * Initializes tile coding/decoding */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block, opj_event_mgr_t* manager); /** * Allocates memory for a decoding code block. */ static OPJ_BOOL opj_tcd_code_block_dec_allocate(opj_tcd_cblk_dec_t * p_code_block); /** * Deallocates the decoding data of the given precinct. */ static void opj_tcd_code_block_dec_deallocate(opj_tcd_precinct_t * p_precinct); /** * Allocates memory for an encoding code block (but not data). */ static OPJ_BOOL opj_tcd_code_block_enc_allocate(opj_tcd_cblk_enc_t * p_code_block); /** * Allocates data for an encoding code block */ static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t * p_code_block); /** * Deallocates the encoding data of the given precinct. 
*/ static void opj_tcd_code_block_enc_deallocate(opj_tcd_precinct_t * p_precinct); /** Free the memory allocated for encoding @param tcd TCD handle */ static void opj_tcd_free_tile(opj_tcd_t *tcd); static OPJ_BOOL opj_tcd_t2_decode(opj_tcd_t *p_tcd, OPJ_BYTE * p_src_data, OPJ_UINT32 * p_data_read, OPJ_UINT32 p_max_src_size, opj_codestream_index_t *p_cstr_index, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_t1_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_dwt_decode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_mct_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_dc_level_shift_decode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dc_level_shift_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_mct_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dwt_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_t1_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_t2_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_rate_allocate_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager); /* ----------------------------------------------------------------------- */ /** Create a new TCD handle */ opj_tcd_t* opj_tcd_create(OPJ_BOOL p_is_decoder) { opj_tcd_t *l_tcd = 00; /* create the tcd structure */ l_tcd = (opj_tcd_t*) opj_calloc(1, sizeof(opj_tcd_t)); if (!l_tcd) { return 00; } l_tcd->m_is_decoder = p_is_decoder ? 
1 : 0; l_tcd->tcd_image = (opj_tcd_image_t*)opj_calloc(1, sizeof(opj_tcd_image_t)); if (!l_tcd->tcd_image) { opj_free(l_tcd); return 00; } return l_tcd; } /* ----------------------------------------------------------------------- */ void opj_tcd_rateallocate_fixed(opj_tcd_t *tcd) { OPJ_UINT32 layno; for (layno = 0; layno < tcd->tcp->numlayers; layno++) { opj_tcd_makelayer_fixed(tcd, layno, 1); } } void opj_tcd_makelayer(opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_FLOAT64 thresh, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_UINT32 passno; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; tcd_tile->distolayer[layno] = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; /* Skip empty bands */ if (opj_tcd_is_band_empty(band)) { continue; } for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; for (passno = cblk->numpassesinlayers; passno < cblk->totalpasses; passno++) { OPJ_UINT32 dr; OPJ_FLOAT64 dd; opj_tcd_pass_t *pass = &cblk->passes[passno]; if (n == 0) { dr = pass->rate; dd = pass->distortiondec; } else { dr = pass->rate - cblk->passes[n - 1].rate; dd = pass->distortiondec - cblk->passes[n - 1].distortiondec; } if (!dr) { if (dd != 0) { n = passno + 1; } continue; } if (thresh - (dd / dr) < DBL_EPSILON) { /* do not rely on float equality, check with DBL_EPSILON margin */ n = passno + 1; } } layer->numpasses = n - cblk->numpassesinlayers; if 
(!layer->numpasses) { layer->disto = 0; continue; } if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; layer->disto = cblk->passes[n - 1].distortiondec; } else { layer->len = cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; layer->disto = cblk->passes[n - 1].distortiondec - cblk->passes[cblk->numpassesinlayers - 1].distortiondec; } tcd_tile->distolayer[layno] += layer->disto; /* fixed_quality */ if (final) { cblk->numpassesinlayers = n; } } } } } } } void opj_tcd_makelayer_fixed(opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_INT32 value; /*, matrice[tcd_tcp->numlayers][tcd_tile->comps[0].numresolutions][3]; */ OPJ_INT32 matrice[10][10][3]; OPJ_UINT32 i, j, k; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (i = 0; i < tcd_tcp->numlayers; i++) { for (j = 0; j < tilec->numresolutions; j++) { for (k = 0; k < 3; k++) { matrice[i][j][k] = (OPJ_INT32)((OPJ_FLOAT32)cp->m_specific_param.m_enc.m_matrice[i * tilec->numresolutions * 3 + j * 3 + k] * (OPJ_FLOAT32)(tcd->image->comps[compno].prec / 16.0)); } } } for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; /* Skip empty bands */ if (opj_tcd_is_band_empty(band)) { continue; } for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; OPJ_INT32 imsb = 
(OPJ_INT32)(tcd->image->comps[compno].prec - cblk->numbps); /* number of bit-plan equal to zero */ /* Correction of the matrix of coefficient to include the IMSB information */ if (layno == 0) { value = matrice[layno][resno][bandno]; if (imsb >= value) { value = 0; } else { value -= imsb; } } else { value = matrice[layno][resno][bandno] - matrice[layno - 1][resno][bandno]; if (imsb >= matrice[layno - 1][resno][bandno]) { value -= (imsb - matrice[layno - 1][resno][bandno]); if (value < 0) { value = 0; } } } if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; if (cblk->numpassesinlayers == 0) { if (value != 0) { n = 3 * (OPJ_UINT32)value - 2 + cblk->numpassesinlayers; } else { n = cblk->numpassesinlayers; } } else { n = 3 * (OPJ_UINT32)value + cblk->numpassesinlayers; } layer->numpasses = n - cblk->numpassesinlayers; if (!layer->numpasses) { continue; } if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; } else { layer->len = cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; } if (final) { cblk->numpassesinlayers = n; } } } } } } } OPJ_BOOL opj_tcd_rateallocate(opj_tcd_t *tcd, OPJ_BYTE *dest, OPJ_UINT32 * p_data_written, OPJ_UINT32 len, opj_codestream_info_t *cstr_info, opj_event_mgr_t *p_manager) { OPJ_UINT32 compno, resno, bandno, precno, cblkno, layno; OPJ_UINT32 passno; OPJ_FLOAT64 min, max; OPJ_FLOAT64 cumdisto[100]; /* fixed_quality */ const OPJ_FLOAT64 K = 1; /* 1.1; fixed_quality */ OPJ_FLOAT64 maxSE = 0; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; min = DBL_MAX; max = 0; tcd_tile->numpix = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; tilec->numpix = 0; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = 
&tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; /* Skip empty bands */ if (opj_tcd_is_band_empty(band)) { continue; } for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; for (passno = 0; passno < cblk->totalpasses; passno++) { opj_tcd_pass_t *pass = &cblk->passes[passno]; OPJ_INT32 dr; OPJ_FLOAT64 dd, rdslope; if (passno == 0) { dr = (OPJ_INT32)pass->rate; dd = pass->distortiondec; } else { dr = (OPJ_INT32)(pass->rate - cblk->passes[passno - 1].rate); dd = pass->distortiondec - cblk->passes[passno - 1].distortiondec; } if (dr == 0) { continue; } rdslope = dd / dr; if (rdslope < min) { min = rdslope; } if (rdslope > max) { max = rdslope; } } /* passno */ /* fixed_quality */ tcd_tile->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); tilec->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); } /* cbklno */ } /* precno */ } /* bandno */ } /* resno */ maxSE += (((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) - 1.0) * ((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) - 1.0)) * ((OPJ_FLOAT64)(tilec->numpix)); } /* compno */ /* index file */ if (cstr_info) { opj_tile_info_t *tile_info = &cstr_info->tile[tcd->tcd_tileno]; tile_info->numpix = tcd_tile->numpix; tile_info->distotile = tcd_tile->distotile; tile_info->thresh = (OPJ_FLOAT64 *) opj_malloc(tcd_tcp->numlayers * sizeof( OPJ_FLOAT64)); if (!tile_info->thresh) { /* FIXME event manager error callback */ return OPJ_FALSE; } } for (layno = 0; layno < tcd_tcp->numlayers; layno++) { OPJ_FLOAT64 lo = min; OPJ_FLOAT64 hi = max; OPJ_UINT32 maxlen = tcd_tcp->rates[layno] > 0.0f ? 
opj_uint_min((( OPJ_UINT32) ceil(tcd_tcp->rates[layno])), len) : len; OPJ_FLOAT64 goodthresh = 0; OPJ_FLOAT64 stable_thresh = 0; OPJ_UINT32 i; OPJ_FLOAT64 distotarget; /* fixed_quality */ /* fixed_quality */ distotarget = tcd_tile->distotile - ((K * maxSE) / pow((OPJ_FLOAT32)10, tcd_tcp->distoratio[layno] / 10)); /* Don't try to find an optimal threshold but rather take everything not included yet, if -r xx,yy,zz,0 (disto_alloc == 1 and rates == 0) -q xx,yy,zz,0 (fixed_quality == 1 and distoratio == 0) ==> possible to have some lossy layers and the last layer for sure lossless */ if (((cp->m_specific_param.m_enc.m_disto_alloc == 1) && (tcd_tcp->rates[layno] > 0.0f)) || ((cp->m_specific_param.m_enc.m_fixed_quality == 1) && (tcd_tcp->distoratio[layno] > 0.0))) { opj_t2_t*t2 = opj_t2_create(tcd->image, cp); OPJ_FLOAT64 thresh = 0; if (t2 == 00) { return OPJ_FALSE; } for (i = 0; i < 128; ++i) { OPJ_FLOAT64 distoachieved = 0; /* fixed_quality */ thresh = (lo + hi) / 2; opj_tcd_makelayer(tcd, layno, thresh, 0); if (cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ if (OPJ_IS_CINEMA(cp->rsiz)) { if (! opj_t2_encode_packets(t2, tcd->tcd_tileno, tcd_tile, layno + 1, dest, p_data_written, maxlen, cstr_info, tcd->cur_tp_num, tcd->tp_pos, tcd->cur_pino, THRESH_CALC, p_manager)) { lo = thresh; continue; } else { distoachieved = layno == 0 ? tcd_tile->distolayer[0] : cumdisto[layno - 1] + tcd_tile->distolayer[layno]; if (distoachieved < distotarget) { hi = thresh; stable_thresh = thresh; continue; } else { lo = thresh; } } } else { distoachieved = (layno == 0) ? tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); if (distoachieved < distotarget) { hi = thresh; stable_thresh = thresh; continue; } lo = thresh; } } else { if (! opj_t2_encode_packets(t2, tcd->tcd_tileno, tcd_tile, layno + 1, dest, p_data_written, maxlen, cstr_info, tcd->cur_tp_num, tcd->tp_pos, tcd->cur_pino, THRESH_CALC, p_manager)) { /* TODO: what to do with l ??? 
seek / tell ??? */ /* opj_event_msg(tcd->cinfo, EVT_INFO, "rate alloc: len=%d, max=%d\n", l, maxlen); */ lo = thresh; continue; } hi = thresh; stable_thresh = thresh; } } goodthresh = stable_thresh == 0 ? thresh : stable_thresh; opj_t2_destroy(t2); } else { goodthresh = min; } if (cstr_info) { /* Threshold for Marcela Index */ cstr_info->tile[tcd->tcd_tileno].thresh[layno] = goodthresh; } opj_tcd_makelayer(tcd, layno, goodthresh, 1); /* fixed_quality */ cumdisto[layno] = (layno == 0) ? tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_init(opj_tcd_t *p_tcd, opj_image_t * p_image, opj_cp_t * p_cp, opj_thread_pool_t* p_tp) { p_tcd->image = p_image; p_tcd->cp = p_cp; p_tcd->tcd_image->tiles = (opj_tcd_tile_t *) opj_calloc(1, sizeof(opj_tcd_tile_t)); if (! p_tcd->tcd_image->tiles) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->comps = (opj_tcd_tilecomp_t *) opj_calloc( p_image->numcomps, sizeof(opj_tcd_tilecomp_t)); if (! p_tcd->tcd_image->tiles->comps) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->numcomps = p_image->numcomps; p_tcd->tp_pos = p_cp->m_specific_param.m_enc.m_tp_pos; p_tcd->thread_pool = p_tp; return OPJ_TRUE; } /** Destroy a previously created TCD handle */ void opj_tcd_destroy(opj_tcd_t *tcd) { if (tcd) { opj_tcd_free_tile(tcd); if (tcd->tcd_image) { opj_free(tcd->tcd_image); tcd->tcd_image = 00; } opj_free(tcd); } } OPJ_BOOL opj_alloc_tile_component_data(opj_tcd_tilecomp_t *l_tilec) { if ((l_tilec->data == 00) || ((l_tilec->data_size_needed > l_tilec->data_size) && (l_tilec->ownsData == OPJ_FALSE))) { l_tilec->data = (OPJ_INT32 *) opj_image_data_alloc(l_tilec->data_size_needed); if (! 
l_tilec->data) { return OPJ_FALSE; } /*fprintf(stderr, "tAllocate data of tilec (int): %d x OPJ_UINT32n",l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } else if (l_tilec->data_size_needed > l_tilec->data_size) { /* We don't need to keep old data */ opj_image_data_free(l_tilec->data); l_tilec->data = (OPJ_INT32 *) opj_image_data_alloc(l_tilec->data_size_needed); if (! l_tilec->data) { l_tilec->data_size = 0; l_tilec->data_size_needed = 0; l_tilec->ownsData = OPJ_FALSE; return OPJ_FALSE; } /*fprintf(stderr, "tReallocate data of tilec (int): from %d to %d x OPJ_UINT32n", l_tilec->data_size, l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } return OPJ_TRUE; } /* ----------------------------------------------------------------------- */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block, opj_event_mgr_t* manager) { OPJ_UINT32(*l_gain_ptr)(OPJ_UINT32) = 00; OPJ_UINT32 compno, resno, bandno, precno, cblkno; opj_tcp_t * l_tcp = 00; opj_cp_t * l_cp = 00; opj_tcd_tile_t * l_tile = 00; opj_tccp_t *l_tccp = 00; opj_tcd_tilecomp_t *l_tilec = 00; opj_image_comp_t * l_image_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_stepsize_t * l_step_size = 00; opj_tcd_precinct_t *l_current_precinct = 00; opj_image_t *l_image = 00; OPJ_UINT32 p, q; OPJ_UINT32 l_level_no; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_gain; OPJ_INT32 l_x0b, l_y0b; OPJ_UINT32 l_tx0, l_ty0; /* extent of precincts , top left, bottom right**/ OPJ_INT32 l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end, l_br_prc_y_end; /* number of precinct for a resolution */ OPJ_UINT32 l_nb_precincts; /* room needed to store l_nb_precinct precinct for a resolution */ OPJ_UINT32 l_nb_precinct_size; /* number of code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks; /* room needed to store l_nb_code_blocks code blocks 
for a precinct*/
    OPJ_UINT32 l_nb_code_blocks_size;
    /* size of data for a tile */
    OPJ_UINT32 l_data_size;

    l_cp = p_tcd->cp;
    l_tcp = &(l_cp->tcps[p_tile_no]);
    l_tile = p_tcd->tcd_image->tiles;
    l_tccp = l_tcp->tccps;
    l_tilec = l_tile->comps;
    l_image = p_tcd->image;
    l_image_comp = p_tcd->image->comps;

    /* position of the tile inside the tile grid */
    p = p_tile_no % l_cp->tw;       /* tile coordinates */
    q = p_tile_no / l_cp->tw;
    /*fprintf(stderr, "Tile coordinate = %d,%d\n", p, q);*/

    /* 4 borders of the tile rescale on the image if necessary */
    l_tx0 = l_cp->tx0 + p * l_cp->tdx; /* can't be greater than l_image->x1 so won't overflow */
    l_tile->x0 = (OPJ_INT32)opj_uint_max(l_tx0, l_image->x0);
    l_tile->x1 = (OPJ_INT32)opj_uint_min(opj_uint_adds(l_tx0, l_cp->tdx),
                                         l_image->x1);
    /* all those OPJ_UINT32 are casted to OPJ_INT32, let's do some sanity check */
    if ((l_tile->x0 < 0) || (l_tile->x1 <= l_tile->x0)) {
        opj_event_msg(manager, EVT_ERROR, "Tile X coordinates are not supported\n");
        return OPJ_FALSE;
    }
    l_ty0 = l_cp->ty0 + q * l_cp->tdy; /* can't be greater than l_image->y1 so won't overflow */
    l_tile->y0 = (OPJ_INT32)opj_uint_max(l_ty0, l_image->y0);
    l_tile->y1 = (OPJ_INT32)opj_uint_min(opj_uint_adds(l_ty0, l_cp->tdy),
                                         l_image->y1);
    /* all those OPJ_UINT32 are casted to OPJ_INT32, let's do some sanity check */
    if ((l_tile->y0 < 0) || (l_tile->y1 <= l_tile->y0)) {
        opj_event_msg(manager, EVT_ERROR, "Tile Y coordinates are not supported\n");
        return OPJ_FALSE;
    }

    /* testcase 1888.pdf.asan.35.988 */
    if (l_tccp->numresolutions == 0) {
        opj_event_msg(manager, EVT_ERROR, "tiles require at least one resolution\n");
        return OPJ_FALSE;
    }
    /*fprintf(stderr, "Tile border = %d,%d,%d,%d\n", l_tile->x0, l_tile->y0,l_tile->x1,l_tile->y1);*/

    /*tile->numcomps = image->numcomps; */
    for (compno = 0; compno < l_tile->numcomps; ++compno) {
        /*fprintf(stderr, "compno = %d/%d\n", compno, l_tile->numcomps);*/
        l_image_comp->resno_decoded = 0;
        /* border of each l_tile component (global) */
        l_tilec->x0 = opj_int_ceildiv(l_tile->x0, (OPJ_INT32)l_image_comp->dx);
        l_tilec->y0 = opj_int_ceildiv(l_tile->y0, (OPJ_INT32)l_image_comp->dy);
        l_tilec->x1 = opj_int_ceildiv(l_tile->x1, (OPJ_INT32)l_image_comp->dx);
        l_tilec->y1 = opj_int_ceildiv(l_tile->y1, (OPJ_INT32)l_image_comp->dy);
        /*fprintf(stderr, "\tTile compo border = %d,%d,%d,%d\n", l_tilec->x0, l_tilec->y0,l_tilec->x1,l_tilec->y1);*/

        /* compute l_data_size with overflow check */
        l_data_size = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0);
        /* issue 733, l_data_size == 0U, probably something wrong should be checked before getting here */
        if ((l_data_size > 0U) &&
                ((((OPJ_UINT32) - 1) / l_data_size) < (OPJ_UINT32)(l_tilec->y1 -
                        l_tilec->y0))) {
            opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n");
            return OPJ_FALSE;
        }
        l_data_size = l_data_size * (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0);

        if ((((OPJ_UINT32) - 1) / (OPJ_UINT32)sizeof(OPJ_UINT32)) < l_data_size) {
            opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n");
            return OPJ_FALSE;
        }
        l_data_size = l_data_size * (OPJ_UINT32)sizeof(OPJ_UINT32);
        l_tilec->numresolutions = l_tccp->numresolutions;

        /* clamp so that at least one resolution survives the reduce factor */
        if (l_tccp->numresolutions < l_cp->m_specific_param.m_dec.m_reduce) {
            l_tilec->minimum_num_resolutions = 1;
        } else {
            l_tilec->minimum_num_resolutions = l_tccp->numresolutions -
                                               l_cp->m_specific_param.m_dec.m_reduce;
        }

        l_tilec->data_size_needed = l_data_size;
        /* sample buffer is only allocated up front on the decoder path */
        if (p_tcd->m_is_decoder && !opj_alloc_tile_component_data(l_tilec)) {
            opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n");
            return OPJ_FALSE;
        }

        l_data_size = l_tilec->numresolutions * (OPJ_UINT32)sizeof(
                          opj_tcd_resolution_t);

        if (l_tilec->resolutions == 00) {
            l_tilec->resolutions = (opj_tcd_resolution_t *) opj_malloc(l_data_size);
            if (! l_tilec->resolutions) {
                return OPJ_FALSE;
            }
            /*fprintf(stderr, "\tAllocate resolutions of tilec (opj_tcd_resolution_t): %d\n",l_data_size);*/
            l_tilec->resolutions_size = l_data_size;
            memset(l_tilec->resolutions, 0, l_data_size);
        } else if (l_data_size > l_tilec->resolutions_size) {
            opj_tcd_resolution_t* new_resolutions = (opj_tcd_resolution_t *) opj_realloc(
                    l_tilec->resolutions, l_data_size);
            if (! new_resolutions) {
                opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile resolutions\n");
                opj_free(l_tilec->resolutions);
                l_tilec->resolutions = NULL;
                l_tilec->resolutions_size = 0;
                return OPJ_FALSE;
            }
            l_tilec->resolutions = new_resolutions;
            /*fprintf(stderr, "\tReallocate data of tilec (int): from %d to %d x OPJ_UINT32\n", l_tilec->resolutions_size, l_data_size);*/
            /* zero only the newly grown region */
            memset(((OPJ_BYTE*) l_tilec->resolutions) + l_tilec->resolutions_size, 0,
                   l_data_size - l_tilec->resolutions_size);
            l_tilec->resolutions_size = l_data_size;
        }

        l_level_no = l_tilec->numresolutions;
        l_res = l_tilec->resolutions;
        l_step_size = l_tccp->stepsizes;
        if (l_tccp->qmfbid == 0) {
            l_gain_ptr = &opj_dwt_getgain_real;
        } else {
            l_gain_ptr = &opj_dwt_getgain;
        }
        /*fprintf(stderr, "\tlevel_no=%d\n",l_level_no);*/

        for (resno = 0; resno < l_tilec->numresolutions; ++resno) {
            /*fprintf(stderr, "\t\tresno = %d/%d\n", resno, l_tilec->numresolutions);*/
            OPJ_INT32 tlcbgxstart, tlcbgystart /*, brcbgxend, brcbgyend*/;
            OPJ_UINT32 cbgwidthexpn, cbgheightexpn;
            OPJ_UINT32 cblkwidthexpn, cblkheightexpn;

            --l_level_no;

            /* border for each resolution level (global) */
            l_res->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no);
            l_res->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no);
            l_res->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no);
            l_res->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no);
            /*fprintf(stderr, "\t\t\tres_x0= %d, res_y0 =%d, res_x1=%d, res_y1=%d\n", l_res->x0, l_res->y0, l_res->x1, l_res->y1);*/
            /* p. 35, table A-23, ISO/IEC FDIS154444-1 : 2000 (18 august 2000) */
            l_pdx = l_tccp->prcw[resno];
            l_pdy = l_tccp->prch[resno];
            /*fprintf(stderr, "\t\t\tpdx=%d, pdy=%d\n", l_pdx, l_pdy);*/
            /* p. 64, B.6, ISO/IEC FDIS15444-1 : 2000 (18 august 2000) */
            l_tl_prc_x_start = opj_int_floordivpow2(l_res->x0, (OPJ_INT32)l_pdx) << l_pdx;
            l_tl_prc_y_start = opj_int_floordivpow2(l_res->y0, (OPJ_INT32)l_pdy) << l_pdy;
            l_br_prc_x_end = opj_int_ceildivpow2(l_res->x1, (OPJ_INT32)l_pdx) << l_pdx;
            l_br_prc_y_end = opj_int_ceildivpow2(l_res->y1, (OPJ_INT32)l_pdy) << l_pdy;
            /*fprintf(stderr, "\t\t\tprc_x_start=%d, prc_y_start=%d, br_prc_x_end=%d, br_prc_y_end=%d \n", l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end ,l_br_prc_y_end );*/

            l_res->pw = (l_res->x0 == l_res->x1) ? 0U : (OPJ_UINT32)((
                            l_br_prc_x_end - l_tl_prc_x_start) >> l_pdx);
            l_res->ph = (l_res->y0 == l_res->y1) ? 0U : (OPJ_UINT32)((
                            l_br_prc_y_end - l_tl_prc_y_start) >> l_pdy);
            /*fprintf(stderr, "\t\t\tres_pw=%d, res_ph=%d\n", l_res->pw, l_res->ph );*/

            /* overflow check on pw * ph */
            if ((l_res->pw != 0U) && ((((OPJ_UINT32) - 1) / l_res->pw) < l_res->ph)) {
                opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n");
                return OPJ_FALSE;
            }
            l_nb_precincts = l_res->pw * l_res->ph;

            if ((((OPJ_UINT32) - 1) / (OPJ_UINT32)sizeof(opj_tcd_precinct_t)) <
                    l_nb_precincts) {
                opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n");
                return OPJ_FALSE;
            }
            l_nb_precinct_size = l_nb_precincts * (OPJ_UINT32)sizeof(opj_tcd_precinct_t);

            if (resno == 0) {
                tlcbgxstart = l_tl_prc_x_start;
                tlcbgystart = l_tl_prc_y_start;
                /*brcbgxend = l_br_prc_x_end;*/
                /* brcbgyend = l_br_prc_y_end;*/
                cbgwidthexpn = l_pdx;
                cbgheightexpn = l_pdy;
                l_res->numbands = 1;
            } else {
                tlcbgxstart = opj_int_ceildivpow2(l_tl_prc_x_start, 1);
                tlcbgystart = opj_int_ceildivpow2(l_tl_prc_y_start, 1);
                /*brcbgxend = opj_int_ceildivpow2(l_br_prc_x_end, 1);*/
                /*brcbgyend = opj_int_ceildivpow2(l_br_prc_y_end, 1);*/
                cbgwidthexpn = l_pdx - 1;
                cbgheightexpn = l_pdy - 1;
                l_res->numbands = 3;
            }

            cblkwidthexpn = opj_uint_min(l_tccp->cblkw, cbgwidthexpn);
            cblkheightexpn = opj_uint_min(l_tccp->cblkh, cbgheightexpn);
            l_band = l_res->bands;

            for (bandno = 0; bandno < l_res->numbands; ++bandno, ++l_band, ++l_step_size) {
                OPJ_INT32 numbps;
                /*fprintf(stderr, "\t\t\tband_no=%d/%d\n", bandno, l_res->numbands );*/

                if (resno == 0) {
                    l_band->bandno = 0 ;
                    l_band->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no);
                    l_band->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no);
                    l_band->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no);
                    l_band->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no);
                } else {
                    l_band->bandno = bandno + 1;
                    /* x0b = 1 if bandno = 1 or 3 */
                    l_x0b = l_band->bandno & 1;
                    /* y0b = 1 if bandno = 2 or 3 */
                    l_y0b = (OPJ_INT32)((l_band->bandno) >> 1);
                    /* l_band border (global) */
                    l_band->x0 = opj_int64_ceildivpow2(l_tilec->x0 - ((OPJ_INT64)l_x0b <<
                                                       l_level_no), (OPJ_INT32)(l_level_no + 1));
                    l_band->y0 = opj_int64_ceildivpow2(l_tilec->y0 - ((OPJ_INT64)l_y0b <<
                                                       l_level_no), (OPJ_INT32)(l_level_no + 1));
                    l_band->x1 = opj_int64_ceildivpow2(l_tilec->x1 - ((OPJ_INT64)l_x0b <<
                                                       l_level_no), (OPJ_INT32)(l_level_no + 1));
                    l_band->y1 = opj_int64_ceildivpow2(l_tilec->y1 - ((OPJ_INT64)l_y0b <<
                                                       l_level_no), (OPJ_INT32)(l_level_no + 1));
                }

                if (isEncoder) {
                    /* Skip empty bands */
                    if (opj_tcd_is_band_empty(l_band)) {
                        /* Do not zero l_band->precints to avoid leaks */
                        /* but make sure we don't use it later, since */
                        /* it will point to precincts of previous bands... */
                        continue;
                    }
                }

                /** avoid an if with storing function pointer */
                l_gain = (*l_gain_ptr)(l_band->bandno);
                numbps = (OPJ_INT32)(l_image_comp->prec + l_gain);
                l_band->stepsize = (OPJ_FLOAT32)(((1.0 + l_step_size->mant / 2048.0) * pow(2.0,
                                                  (OPJ_INT32)(numbps - l_step_size->expn)))) * fraction;

                /* Mb value of Equation E-2 in "E.1 Inverse quantization
                 * procedure" of the standard */
                l_band->numbps = l_step_size->expn + (OPJ_INT32)l_tccp->numgbits - 1;

                if (!l_band->precincts && (l_nb_precincts > 0U)) {
                    l_band->precincts = (opj_tcd_precinct_t *) opj_malloc(/*3 * */
                                        l_nb_precinct_size);
                    if (! l_band->precincts) {
                        opj_event_msg(manager, EVT_ERROR,
                                      "Not enough memory to handle band precints\n");
                        return OPJ_FALSE;
                    }
                    /*fprintf(stderr, "\t\t\t\tAllocate precincts of a band (opj_tcd_precinct_t): %d\n",l_nb_precinct_size); */
                    memset(l_band->precincts, 0, l_nb_precinct_size);
                    l_band->precincts_data_size = l_nb_precinct_size;
                } else if (l_band->precincts_data_size < l_nb_precinct_size) {
                    opj_tcd_precinct_t * new_precincts = (opj_tcd_precinct_t *) opj_realloc(
                            l_band->precincts,/*3 * */ l_nb_precinct_size);
                    if (! new_precincts) {
                        opj_event_msg(manager, EVT_ERROR,
                                      "Not enough memory to handle band precints\n");
                        opj_free(l_band->precincts);
                        l_band->precincts = NULL;
                        l_band->precincts_data_size = 0;
                        return OPJ_FALSE;
                    }
                    l_band->precincts = new_precincts;
                    /*fprintf(stderr, "\t\t\t\tReallocate precincts of a band (opj_tcd_precinct_t): from %d to %d\n",l_band->precincts_data_size, l_nb_precinct_size);*/
                    /* zero only the newly grown region */
                    memset(((OPJ_BYTE *) l_band->precincts) + l_band->precincts_data_size, 0,
                           l_nb_precinct_size - l_band->precincts_data_size);
                    l_band->precincts_data_size = l_nb_precinct_size;
                }

                l_current_precinct = l_band->precincts;
                for (precno = 0; precno < l_nb_precincts; ++precno) {
                    OPJ_INT32 tlcblkxstart, tlcblkystart, brcblkxend, brcblkyend;
                    OPJ_INT32 cbgxstart = tlcbgxstart + (OPJ_INT32)(precno % l_res->pw) *
                                          (1 << cbgwidthexpn);
                    OPJ_INT32 cbgystart = tlcbgystart + (OPJ_INT32)(precno / l_res->pw) *
                                          (1 << cbgheightexpn);
                    OPJ_INT32 cbgxend = cbgxstart + (1 << cbgwidthexpn);
                    OPJ_INT32 cbgyend = cbgystart + (1 << cbgheightexpn);
                    /*fprintf(stderr, "\t precno=%d; bandno=%d, resno=%d; compno=%d\n", precno, bandno , resno, compno);*/
                    /*fprintf(stderr, "\t tlcbgxstart(=%d) + (precno(=%d) percent res->pw(=%d)) * (1 << cbgwidthexpn(=%d)) \n",tlcbgxstart,precno,l_res->pw,cbgwidthexpn);*/

                    /* precinct size (global) */
                    /*fprintf(stderr, "\t cbgxstart=%d, l_band->x0 = %d \n",cbgxstart, l_band->x0);*/

                    l_current_precinct->x0 = opj_int_max(cbgxstart, l_band->x0);
                    l_current_precinct->y0 = opj_int_max(cbgystart, l_band->y0);
                    l_current_precinct->x1 = opj_int_min(cbgxend, l_band->x1);
                    l_current_precinct->y1 = opj_int_min(cbgyend, l_band->y1);
                    /*fprintf(stderr, "\t prc_x0=%d; prc_y0=%d, prc_x1=%d; prc_y1=%d\n",l_current_precinct->x0, l_current_precinct->y0 ,l_current_precinct->x1, l_current_precinct->y1);*/

                    tlcblkxstart = opj_int_floordivpow2(l_current_precinct->x0,
                                                        (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn;
                    /*fprintf(stderr, "\t tlcblkxstart =%d\n",tlcblkxstart );*/
                    tlcblkystart = opj_int_floordivpow2(l_current_precinct->y0,
                                                        (OPJ_INT32)cblkheightexpn) << cblkheightexpn;
                    /*fprintf(stderr, "\t tlcblkystart =%d\n",tlcblkystart );*/
                    brcblkxend = opj_int_ceildivpow2(l_current_precinct->x1,
                                                     (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn;
                    /*fprintf(stderr, "\t brcblkxend =%d\n",brcblkxend );*/
                    brcblkyend = opj_int_ceildivpow2(l_current_precinct->y1,
                                                     (OPJ_INT32)cblkheightexpn) << cblkheightexpn;
                    /*fprintf(stderr, "\t brcblkyend =%d\n",brcblkyend );*/
                    l_current_precinct->cw = (OPJ_UINT32)((brcblkxend - tlcblkxstart) >>
                                                          cblkwidthexpn);
                    l_current_precinct->ch = (OPJ_UINT32)((brcblkyend - tlcblkystart) >>
                                                          cblkheightexpn);

                    l_nb_code_blocks = l_current_precinct->cw * l_current_precinct->ch;
                    /*fprintf(stderr, "\t\t\t\t precinct_cw = %d x recinct_ch = %d\n",l_current_precinct->cw, l_current_precinct->ch); */
                    l_nb_code_blocks_size = l_nb_code_blocks * (OPJ_UINT32)sizeof_block;

                    if (!l_current_precinct->cblks.blocks && (l_nb_code_blocks > 0U)) {
                        l_current_precinct->cblks.blocks = opj_malloc(l_nb_code_blocks_size);
                        if (! l_current_precinct->cblks.blocks) {
                            return OPJ_FALSE;
                        }
                        /*fprintf(stderr, "\t\t\t\tAllocate cblks of a precinct (opj_tcd_cblk_dec_t): %d\n",l_nb_code_blocks_size);*/

                        memset(l_current_precinct->cblks.blocks, 0, l_nb_code_blocks_size);

                        l_current_precinct->block_size = l_nb_code_blocks_size;
                    } else if (l_nb_code_blocks_size > l_current_precinct->block_size) {
                        void *new_blocks = opj_realloc(l_current_precinct->cblks.blocks,
                                                       l_nb_code_blocks_size);
                        if (! new_blocks) {
                            opj_free(l_current_precinct->cblks.blocks);
                            l_current_precinct->cblks.blocks = NULL;
                            l_current_precinct->block_size = 0;
                            opj_event_msg(manager, EVT_ERROR,
                                          "Not enough memory for current precinct codeblock element\n");
                            return OPJ_FALSE;
                        }
                        l_current_precinct->cblks.blocks = new_blocks;
                        /*fprintf(stderr, "\t\t\t\tReallocate cblks of a precinct (opj_tcd_cblk_dec_t): from %d to %d\n",l_current_precinct->block_size, l_nb_code_blocks_size); */

                        /* zero only the newly grown region */
                        memset(((OPJ_BYTE *) l_current_precinct->cblks.blocks) +
                               l_current_precinct->block_size
                               , 0
                               , l_nb_code_blocks_size - l_current_precinct->block_size);

                        l_current_precinct->block_size = l_nb_code_blocks_size;
                    }

                    /* inclusion / insignificant-msb tag trees, created once then reused */
                    if (! l_current_precinct->incltree) {
                        l_current_precinct->incltree = opj_tgt_create(l_current_precinct->cw,
                                                       l_current_precinct->ch, manager);
                    } else {
                        l_current_precinct->incltree = opj_tgt_init(l_current_precinct->incltree,
                                                       l_current_precinct->cw, l_current_precinct->ch, manager);
                    }

                    if (! l_current_precinct->imsbtree) {
                        l_current_precinct->imsbtree = opj_tgt_create(l_current_precinct->cw,
                                                       l_current_precinct->ch, manager);
                    } else {
                        l_current_precinct->imsbtree = opj_tgt_init(l_current_precinct->imsbtree,
                                                       l_current_precinct->cw, l_current_precinct->ch, manager);
                    }

                    for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) {
                        OPJ_INT32 cblkxstart = tlcblkxstart + (OPJ_INT32)(cblkno %
                                               l_current_precinct->cw) * (1 << cblkwidthexpn);
                        OPJ_INT32 cblkystart = tlcblkystart + (OPJ_INT32)(cblkno /
                                               l_current_precinct->cw) * (1 << cblkheightexpn);
                        OPJ_INT32 cblkxend = cblkxstart + (1 << cblkwidthexpn);
                        OPJ_INT32 cblkyend = cblkystart + (1 << cblkheightexpn);

                        if (isEncoder) {
                            opj_tcd_cblk_enc_t* l_code_block = l_current_precinct->cblks.enc + cblkno;

                            if (! opj_tcd_code_block_enc_allocate(l_code_block)) {
                                return OPJ_FALSE;
                            }
                            /* code-block size (global) */
                            l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0);
                            l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0);
                            l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1);
                            l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1);

                            if (! opj_tcd_code_block_enc_allocate_data(l_code_block)) {
                                return OPJ_FALSE;
                            }
                        } else {
                            opj_tcd_cblk_dec_t* l_code_block = l_current_precinct->cblks.dec + cblkno;

                            if (! opj_tcd_code_block_dec_allocate(l_code_block)) {
                                return OPJ_FALSE;
                            }
                            /* code-block size (global) */
                            l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0);
                            l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0);
                            l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1);
                            l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1);
                        }
                    }
                    ++l_current_precinct;
                } /* precno */
            } /* bandno */
            ++l_res;
        } /* resno */
        ++l_tccp;
        ++l_tilec;
        ++l_image_comp;
    } /* compno */
    return OPJ_TRUE;
}

/* Encoder-side wrapper: full step size, encoder code-block elements. */
OPJ_BOOL opj_tcd_init_encode_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no,
                                  opj_event_mgr_t* p_manager)
{
    return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_TRUE, 1.0F,
                             sizeof(opj_tcd_cblk_enc_t), p_manager);
}

/* Decoder-side wrapper: halved step size, decoder code-block elements. */
OPJ_BOOL opj_tcd_init_decode_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no,
                                  opj_event_mgr_t* p_manager)
{
    return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_FALSE, 0.5F,
                             sizeof(opj_tcd_cblk_dec_t), p_manager);
}

/**
 * Allocates memory for an encoding code block (but not data memory).
 */
static OPJ_BOOL opj_tcd_code_block_enc_allocate(opj_tcd_cblk_enc_t *
        p_code_block)
{
    if (! p_code_block->layers) {
        /* no memset since data */
        p_code_block->layers = (opj_tcd_layer_t*) opj_calloc(100,
                               sizeof(opj_tcd_layer_t));
        if (! p_code_block->layers) {
            return OPJ_FALSE;
        }
    }
    if (! p_code_block->passes) {
        p_code_block->passes = (opj_tcd_pass_t*) opj_calloc(100,
                               sizeof(opj_tcd_pass_t));
        if (!
p_code_block->passes) {
            return OPJ_FALSE;
        }
    }
    return OPJ_TRUE;
}

/**
 * Allocates data memory for an encoding code block.
 */
static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t *
        p_code_block)
{
    OPJ_UINT32 l_data_size;

    /* The +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */
    l_data_size = 1 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *
                                   (p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32));

    if (l_data_size > p_code_block->data_size) {

        if (p_code_block->data) {
            /* We refer to data - 1 since below we incremented it */
            opj_free(p_code_block->data - 1);
        }
        p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1);
        if (! p_code_block->data) {
            p_code_block->data_size = 0U;
            return OPJ_FALSE;
        }
        p_code_block->data_size = l_data_size;

        /* We reserve the initial byte as a fake byte to a non-FF value */
        /* and increment the data pointer, so that opj_mqc_init_enc() */
        /* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */
        /* it. */
        p_code_block->data[0] = 0;
        p_code_block->data += 1; /*why +1 ?*/
    }
    return OPJ_TRUE;
}

/* Zeroes a segment so it can be reused for a new decode. */
void opj_tcd_reinit_segment(opj_tcd_seg_t* seg)
{
    memset(seg, 0, sizeof(opj_tcd_seg_t));
}

/**
 * Allocates memory for a decoding code block.
 */
static OPJ_BOOL opj_tcd_code_block_dec_allocate(opj_tcd_cblk_dec_t *
        p_code_block)
{
    if (! p_code_block->segs) {

        p_code_block->segs = (opj_tcd_seg_t *) opj_calloc(OPJ_J2K_DEFAULT_NB_SEGS,
                             sizeof(opj_tcd_seg_t));
        if (!
p_code_block->segs) {
            return OPJ_FALSE;
        }
        /*fprintf(stderr, "Allocate %d elements of code_block->data\n", OPJ_J2K_DEFAULT_NB_SEGS * sizeof(opj_tcd_seg_t));*/

        p_code_block->m_current_max_segs = OPJ_J2K_DEFAULT_NB_SEGS;
        /*fprintf(stderr, "m_current_max_segs of code_block->data = %d\n", p_code_block->m_current_max_segs);*/
    } else {
        /* sanitize */
        opj_tcd_seg_t * l_segs = p_code_block->segs;
        OPJ_UINT32 l_current_max_segs = p_code_block->m_current_max_segs;
        opj_tcd_seg_data_chunk_t* l_chunks = p_code_block->chunks;
        OPJ_UINT32 l_numchunksalloc = p_code_block->numchunksalloc;
        OPJ_UINT32 i;

        /* Reset the code block to a pristine state while keeping the */
        /* already-allocated segs and chunks arrays. */
        memset(p_code_block, 0, sizeof(opj_tcd_cblk_dec_t));
        p_code_block->segs = l_segs;
        p_code_block->m_current_max_segs = l_current_max_segs;
        for (i = 0; i < l_current_max_segs; ++i) {
            opj_tcd_reinit_segment(&l_segs[i]);
        }
        p_code_block->chunks = l_chunks;
        p_code_block->numchunksalloc = l_numchunksalloc;
    }

    return OPJ_TRUE;
}

/*
 * Returns the number of bytes needed to store the fully decoded tile,
 * summing each component's decoded resolution extent times its byte depth
 * (1, 2 or 4 bytes per sample depending on precision).
 * Returns UINT_MAX on arithmetic overflow.
 */
OPJ_UINT32 opj_tcd_get_decoded_tile_size(opj_tcd_t *p_tcd)
{
    OPJ_UINT32 i;
    OPJ_UINT32 l_data_size = 0;
    opj_image_comp_t * l_img_comp = 00;
    opj_tcd_tilecomp_t * l_tile_comp = 00;
    opj_tcd_resolution_t * l_res = 00;
    OPJ_UINT32 l_size_comp, l_remaining;
    OPJ_UINT32 l_temp;

    l_tile_comp = p_tcd->tcd_image->tiles->comps;
    l_img_comp = p_tcd->image->comps;

    for (i = 0; i < p_tcd->image->numcomps; ++i) {
        l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/
        l_remaining = l_img_comp->prec & 7;  /* (%8) */

        if (l_remaining) {
            ++l_size_comp;
        }

        /* 3-byte samples are stored on 4 bytes */
        if (l_size_comp == 3) {
            l_size_comp = 4;
        }

        l_res = l_tile_comp->resolutions + l_tile_comp->minimum_num_resolutions - 1;
        l_temp = (OPJ_UINT32)((l_res->x1 - l_res->x0) * (l_res->y1 -
                              l_res->y0)); /* x1*y1 can't overflow */

        if (l_size_comp && UINT_MAX / l_size_comp < l_temp) {
            return UINT_MAX;
        }
        l_temp *= l_size_comp;

        if (l_temp > UINT_MAX - l_data_size) {
            return UINT_MAX;
        }
        l_data_size += l_temp;

        ++l_img_comp;
        ++l_tile_comp;
    }

    return l_data_size;
}

/*
 * Main tile encoding entry point: on the first tile part, fills the optional
 * codestream-info index, then runs DC shift, MCT, DWT, T1 and rate
 * allocation; finally T2-encodes packets into p_dest.
 */
OPJ_BOOL opj_tcd_encode_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no,
                             OPJ_BYTE *p_dest, OPJ_UINT32 *
p_data_written, OPJ_UINT32 p_max_length,
                             opj_codestream_info_t *p_cstr_info,
                             opj_event_mgr_t *p_manager)
{

    if (p_tcd->cur_tp_num == 0) {

        p_tcd->tcd_tileno = p_tile_no;
        p_tcd->tcp = &p_tcd->cp->tcps[p_tile_no];

        /* INDEX >> "Precinct_nb_X et Precinct_nb_Y" */
        if (p_cstr_info) {
            OPJ_UINT32 l_num_packs = 0;
            OPJ_UINT32 i;
            opj_tcd_tilecomp_t *l_tilec_idx =
                &p_tcd->tcd_image->tiles->comps[0]; /* based on component 0 */
            opj_tccp_t *l_tccp = p_tcd->tcp->tccps; /* based on component 0 */

            for (i = 0; i < l_tilec_idx->numresolutions; i++) {
                opj_tcd_resolution_t *l_res_idx = &l_tilec_idx->resolutions[i];

                p_cstr_info->tile[p_tile_no].pw[i] = (int)l_res_idx->pw;
                p_cstr_info->tile[p_tile_no].ph[i] = (int)l_res_idx->ph;

                l_num_packs += l_res_idx->pw * l_res_idx->ph;
                p_cstr_info->tile[p_tile_no].pdx[i] = (int)l_tccp->prcw[i];
                p_cstr_info->tile[p_tile_no].pdy[i] = (int)l_tccp->prch[i];
            }
            p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t*) opj_calloc((
                    size_t)p_cstr_info->numcomps * (size_t)p_cstr_info->numlayers * l_num_packs,
                                                  sizeof(opj_packet_info_t));
            if (!p_cstr_info->tile[p_tile_no].packet) {
                /* FIXME event manager error callback */
                return OPJ_FALSE;
            }
        }
        /* << INDEX */

        /* FIXME _ProfStart(PGROUP_DC_SHIFT); */
        /*---------------TILE-------------------*/
        if (! opj_tcd_dc_level_shift_encode(p_tcd)) {
            return OPJ_FALSE;
        }
        /* FIXME _ProfStop(PGROUP_DC_SHIFT); */

        /* FIXME _ProfStart(PGROUP_MCT); */
        if (! opj_tcd_mct_encode(p_tcd)) {
            return OPJ_FALSE;
        }
        /* FIXME _ProfStop(PGROUP_MCT); */

        /* FIXME _ProfStart(PGROUP_DWT); */
        if (! opj_tcd_dwt_encode(p_tcd)) {
            return OPJ_FALSE;
        }
        /* FIXME _ProfStop(PGROUP_DWT); */

        /* FIXME _ProfStart(PGROUP_T1); */
        if (! opj_tcd_t1_encode(p_tcd)) {
            return OPJ_FALSE;
        }
        /* FIXME _ProfStop(PGROUP_T1); */

        /* FIXME _ProfStart(PGROUP_RATE); */
        if (! opj_tcd_rate_allocate_encode(p_tcd, p_dest, p_max_length, p_cstr_info,
                                           p_manager)) {
            return OPJ_FALSE;
        }
        /* FIXME _ProfStop(PGROUP_RATE); */
    }
    /*--------------TIER2------------------*/

    /* INDEX */
    if (p_cstr_info) {
        p_cstr_info->index_write = 1;
    }
    /* FIXME _ProfStart(PGROUP_T2); */

    if (! opj_tcd_t2_encode(p_tcd, p_dest, p_data_written, p_max_length,
                            p_cstr_info, p_manager)) {
        return OPJ_FALSE;
    }
    /* FIXME _ProfStop(PGROUP_T2); */

    /*---------------CLEAN-------------------*/

    return OPJ_TRUE;
}

/*
 * Main tile decoding entry point: T2 packet decode, then T1, inverse DWT,
 * inverse MCT and DC level shift, all into the tile component buffers.
 */
OPJ_BOOL opj_tcd_decode_tile(opj_tcd_t *p_tcd,
                             OPJ_BYTE *p_src,
                             OPJ_UINT32 p_max_length,
                             OPJ_UINT32 p_tile_no,
                             opj_codestream_index_t *p_cstr_index,
                             opj_event_mgr_t *p_manager
                            )
{
    OPJ_UINT32 l_data_read;
    p_tcd->tcd_tileno = p_tile_no;
    p_tcd->tcp = &(p_tcd->cp->tcps[p_tile_no]);

#ifdef TODO_MSD /* FIXME */
    /* INDEX >>  */
    if (p_cstr_info) {
        OPJ_UINT32 resno, compno, numprec = 0;
        for (compno = 0; compno < (OPJ_UINT32) p_cstr_info->numcomps; compno++) {
            opj_tcp_t *tcp = &p_tcd->cp->tcps[0];
            opj_tccp_t *tccp = &tcp->tccps[compno];
            opj_tcd_tilecomp_t *tilec_idx = &p_tcd->tcd_image->tiles->comps[compno];
            for (resno = 0; resno < tilec_idx->numresolutions; resno++) {
                opj_tcd_resolution_t *res_idx = &tilec_idx->resolutions[resno];
                p_cstr_info->tile[p_tile_no].pw[resno] = res_idx->pw;
                p_cstr_info->tile[p_tile_no].ph[resno] = res_idx->ph;
                numprec += res_idx->pw * res_idx->ph;
                p_cstr_info->tile[p_tile_no].pdx[resno] = tccp->prcw[resno];
                p_cstr_info->tile[p_tile_no].pdy[resno] = tccp->prch[resno];
            }
        }

        p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t *) opj_malloc(
                p_cstr_info->numlayers * numprec * sizeof(opj_packet_info_t));
        p_cstr_info->packno = 0;
    }
    /* << INDEX */
#endif

    /*--------------TIER2------------------*/
    /* FIXME _ProfStart(PGROUP_T2); */
    l_data_read = 0;
    if (!
opj_tcd_t2_decode(p_tcd, p_src, &l_data_read, p_max_length, p_cstr_index,
                               p_manager)) {
        return OPJ_FALSE;
    }
    /* FIXME _ProfStop(PGROUP_T2); */

    /*------------------TIER1-----------------*/

    /* FIXME _ProfStart(PGROUP_T1); */
    if (! opj_tcd_t1_decode(p_tcd, p_manager)) {
        return OPJ_FALSE;
    }
    /* FIXME _ProfStop(PGROUP_T1); */

    /*----------------DWT---------------------*/

    /* FIXME _ProfStart(PGROUP_DWT); */
    if (! opj_tcd_dwt_decode(p_tcd)) {
        return OPJ_FALSE;
    }
    /* FIXME _ProfStop(PGROUP_DWT); */

    /*----------------MCT-------------------*/
    /* FIXME _ProfStart(PGROUP_MCT); */
    if (! opj_tcd_mct_decode(p_tcd, p_manager)) {
        return OPJ_FALSE;
    }
    /* FIXME _ProfStop(PGROUP_MCT); */

    /* FIXME _ProfStart(PGROUP_DC_SHIFT); */
    if (! opj_tcd_dc_level_shift_decode(p_tcd)) {
        return OPJ_FALSE;
    }
    /* FIXME _ProfStop(PGROUP_DC_SHIFT); */

    /*---------------TILE-------------------*/
    return OPJ_TRUE;
}

/*
 * Copies the decoded tile samples into the caller's buffer p_dest, packing
 * each component to 1, 2 or 4 bytes per sample according to its precision.
 * Fails if p_dest_length is smaller than the required size.
 */
OPJ_BOOL opj_tcd_update_tile_data(opj_tcd_t *p_tcd,
                                  OPJ_BYTE * p_dest,
                                  OPJ_UINT32 p_dest_length
                                 )
{
    OPJ_UINT32 i, j, k, l_data_size = 0;
    opj_image_comp_t * l_img_comp = 00;
    opj_tcd_tilecomp_t * l_tilec = 00;
    opj_tcd_resolution_t * l_res;
    OPJ_UINT32 l_size_comp, l_remaining;
    OPJ_UINT32 l_stride, l_width, l_height;

    l_data_size = opj_tcd_get_decoded_tile_size(p_tcd);
    if (l_data_size == UINT_MAX || l_data_size > p_dest_length) {
        return OPJ_FALSE;
    }

    l_tilec = p_tcd->tcd_image->tiles->comps;
    l_img_comp = p_tcd->image->comps;

    for (i = 0; i < p_tcd->image->numcomps; ++i) {
        l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/
        l_remaining = l_img_comp->prec & 7;  /* (%8) */
        l_res = l_tilec->resolutions + l_img_comp->resno_decoded;
        l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0);
        l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0);
        /* skip this many samples per row in the (wider) tile buffer */
        l_stride = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0) - l_width;

        if (l_remaining) {
            ++l_size_comp;
        }

        if (l_size_comp == 3) {
            l_size_comp = 4;
        }

        switch (l_size_comp) {
        case 1: {
            OPJ_CHAR * l_dest_ptr = (OPJ_CHAR *) p_dest;
            const OPJ_INT32 * l_src_ptr = l_tilec->data;

            if (l_img_comp->sgnd) {
                for (j = 0; j < l_height; ++j) {
                    for (k = 0; k < l_width; ++k) {
                        *(l_dest_ptr++) = (OPJ_CHAR)(*(l_src_ptr++));
                    }
                    l_src_ptr += l_stride;
                }
            } else {
                for (j = 0; j < l_height; ++j) {
                    for (k = 0; k < l_width; ++k) {
                        *(l_dest_ptr++) = (OPJ_CHAR)((*(l_src_ptr++)) & 0xff);
                    }
                    l_src_ptr += l_stride;
                }
            }

            p_dest = (OPJ_BYTE *)l_dest_ptr;
        }
        break;
        case 2: {
            const OPJ_INT32 * l_src_ptr = l_tilec->data;
            OPJ_INT16 * l_dest_ptr = (OPJ_INT16 *) p_dest;

            if (l_img_comp->sgnd) {
                for (j = 0; j < l_height; ++j) {
                    for (k = 0; k < l_width; ++k) {
                        /* memcpy avoids a potentially misaligned 16-bit store */
                        OPJ_INT16 val = (OPJ_INT16)(*(l_src_ptr++));
                        memcpy(l_dest_ptr, &val, sizeof(val));
                        l_dest_ptr ++;
                    }
                    l_src_ptr += l_stride;
                }
            } else {
                for (j = 0; j < l_height; ++j) {
                    for (k = 0; k < l_width; ++k) {
                        OPJ_INT16 val = (OPJ_INT16)((*(l_src_ptr++)) & 0xffff);
                        memcpy(l_dest_ptr, &val, sizeof(val));
                        l_dest_ptr ++;
                    }
                    l_src_ptr += l_stride;
                }
            }

            p_dest = (OPJ_BYTE*) l_dest_ptr;
        }
        break;
        case 4: {
            OPJ_INT32 * l_dest_ptr = (OPJ_INT32 *) p_dest;
            OPJ_INT32 * l_src_ptr = l_tilec->data;

            for (j = 0; j < l_height; ++j) {
                memcpy(l_dest_ptr, l_src_ptr, l_width * sizeof(OPJ_INT32));
                l_dest_ptr += l_width;
                l_src_ptr += l_width + l_stride;
            }

            p_dest = (OPJ_BYTE*) l_dest_ptr;
        }
        break;
        }

        ++l_img_comp;
        ++l_tilec;
    }

    return OPJ_TRUE;
}

/*
 * Frees every resolution/band/precinct/code-block structure of the current
 * tile, plus the component sample buffers this tile owns.
 */
static void opj_tcd_free_tile(opj_tcd_t *p_tcd)
{
    OPJ_UINT32 compno, resno, bandno, precno;
    opj_tcd_tile_t *l_tile = 00;
    opj_tcd_tilecomp_t *l_tile_comp = 00;
    opj_tcd_resolution_t *l_res = 00;
    opj_tcd_band_t *l_band = 00;
    opj_tcd_precinct_t *l_precinct = 00;
    OPJ_UINT32 l_nb_resolutions, l_nb_precincts;
    void (* l_tcd_code_block_deallocate)(opj_tcd_precinct_t *) = 00;

    if (! p_tcd) {
        return;
    }

    if (! p_tcd->tcd_image) {
        return;
    }

    /* pick the enc/dec code-block destructor matching how blocks were built */
    if (p_tcd->m_is_decoder) {
        l_tcd_code_block_deallocate = opj_tcd_code_block_dec_deallocate;
    } else {
        l_tcd_code_block_deallocate = opj_tcd_code_block_enc_deallocate;
    }

    l_tile = p_tcd->tcd_image->tiles;
    if (!
l_tile) {
        return;
    }

    l_tile_comp = l_tile->comps;
    for (compno = 0; compno < l_tile->numcomps; ++compno) {
        l_res = l_tile_comp->resolutions;
        if (l_res) {

            /* counts are derived from the recorded buffer sizes */
            l_nb_resolutions = l_tile_comp->resolutions_size / sizeof(opj_tcd_resolution_t);
            for (resno = 0; resno < l_nb_resolutions; ++resno) {
                l_band = l_res->bands;
                for (bandno = 0; bandno < 3; ++bandno) {
                    l_precinct = l_band->precincts;
                    if (l_precinct) {

                        l_nb_precincts = l_band->precincts_data_size / sizeof(opj_tcd_precinct_t);
                        for (precno = 0; precno < l_nb_precincts; ++precno) {
                            opj_tgt_destroy(l_precinct->incltree);
                            l_precinct->incltree = 00;
                            opj_tgt_destroy(l_precinct->imsbtree);
                            l_precinct->imsbtree = 00;
                            (*l_tcd_code_block_deallocate)(l_precinct);
                            ++l_precinct;
                        }

                        opj_free(l_band->precincts);
                        l_band->precincts = 00;
                    }
                    ++l_band;
                } /* for (resno */
                ++l_res;
            }

            opj_free(l_tile_comp->resolutions);
            l_tile_comp->resolutions = 00;
        }

        /* only free sample data this tile actually owns */
        if (l_tile_comp->ownsData && l_tile_comp->data) {
            opj_image_data_free(l_tile_comp->data);
            l_tile_comp->data = 00;
            l_tile_comp->ownsData = 0;
            l_tile_comp->data_size = 0;
            l_tile_comp->data_size_needed = 0;
        }
        ++l_tile_comp;
    }

    opj_free(l_tile->comps);
    l_tile->comps = 00;
    opj_free(p_tcd->tcd_image->tiles);
    p_tcd->tcd_image->tiles = 00;
}

/* Tier-2 decode: creates a T2 handle, decodes all packets, destroys it. */
static OPJ_BOOL opj_tcd_t2_decode(opj_tcd_t *p_tcd,
                                  OPJ_BYTE * p_src_data,
                                  OPJ_UINT32 * p_data_read,
                                  OPJ_UINT32 p_max_src_size,
                                  opj_codestream_index_t *p_cstr_index,
                                  opj_event_mgr_t *p_manager
                                 )
{
    opj_t2_t * l_t2;

    l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp);
    if (l_t2 == 00) {
        return OPJ_FALSE;
    }

    if (! opj_t2_decode_packets(
                l_t2,
                p_tcd->tcd_tileno,
                p_tcd->tcd_image->tiles,
                p_src_data,
                p_data_read,
                p_max_src_size,
                p_cstr_index,
                p_manager)) {
        opj_t2_destroy(l_t2);
        return OPJ_FALSE;
    }

    opj_t2_destroy(l_t2);

    /*---------------CLEAN-------------------*/
    return OPJ_TRUE;
}

/* Tier-1 decode: decodes every component's code blocks on the thread pool. */
static OPJ_BOOL opj_tcd_t1_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager)
{
    OPJ_UINT32 compno;
    opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles;
    opj_tcd_tilecomp_t* l_tile_comp = l_tile->comps;
    opj_tccp_t * l_tccp = p_tcd->tcp->tccps;
    volatile OPJ_BOOL ret = OPJ_TRUE;
    OPJ_BOOL check_pterm = OPJ_FALSE;
    opj_mutex_t* p_manager_mutex = NULL;

    p_manager_mutex = opj_mutex_create();

    /* Only enable PTERM check if we decode all layers */
    if (p_tcd->tcp->num_layers_to_decode == p_tcd->tcp->numlayers &&
            (l_tccp->cblksty & J2K_CCP_CBLKSTY_PTERM) != 0) {
        check_pterm = OPJ_TRUE;
    }

    for (compno = 0; compno < l_tile->numcomps; ++compno) {
        opj_t1_decode_cblks(p_tcd->thread_pool, &ret, l_tile_comp, l_tccp,
                            p_manager, p_manager_mutex, check_pterm);
        if (!ret) {
            break;
        }
        ++l_tile_comp;
        ++l_tccp;
    }

    opj_thread_pool_wait_completion(p_tcd->thread_pool, 0);
    if (p_manager_mutex) {
        opj_mutex_destroy(p_manager_mutex);
    }

    return ret;
}

/* Inverse DWT of each component, 5/3 (qmfbid==1) or 9/7 path. */
static OPJ_BOOL opj_tcd_dwt_decode(opj_tcd_t *p_tcd)
{
    OPJ_UINT32 compno;
    opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles;
    opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps;
    opj_tccp_t * l_tccp = p_tcd->tcp->tccps;
    opj_image_comp_t * l_img_comp = p_tcd->image->comps;

    for (compno = 0; compno < l_tile->numcomps; compno++) {
        /*
        if (tcd->cp->reduce != 0) {
            tcd->image->comps[compno].resno_decoded =
                tile->comps[compno].numresolutions - tcd->cp->reduce - 1;
            if (tcd->image->comps[compno].resno_decoded < 0) {
                return false;
            }
        }
        numres2decode = tcd->image->comps[compno].resno_decoded + 1;
        if(numres2decode > 0){
        */

        if (l_tccp->qmfbid == 1) {
            if (! opj_dwt_decode(p_tcd->thread_pool, l_tile_comp,
                                 l_img_comp->resno_decoded + 1)) {
                return OPJ_FALSE;
            }
        } else {
            if (!
opj_dwt_decode_real(l_tile_comp, l_img_comp->resno_decoded + 1)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_img_comp; ++l_tccp; } return OPJ_TRUE; } static OPJ_BOOL opj_tcd_mct_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcp_t * l_tcp = p_tcd->tcp; opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps; OPJ_UINT32 l_samples, i; if (! l_tcp->mct) { return OPJ_TRUE; } l_samples = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); if (l_tile->numcomps >= 3) { /* testcase 1336.pdf.asan.47.376 */ if ((l_tile->comps[0].x1 - l_tile->comps[0].x0) * (l_tile->comps[0].y1 - l_tile->comps[0].y0) < (OPJ_INT32)l_samples || (l_tile->comps[1].x1 - l_tile->comps[1].x0) * (l_tile->comps[1].y1 - l_tile->comps[1].y0) < (OPJ_INT32)l_samples || (l_tile->comps[2].x1 - l_tile->comps[2].x0) * (l_tile->comps[2].y1 - l_tile->comps[2].y0) < (OPJ_INT32)l_samples) { opj_event_msg(p_manager, EVT_ERROR, "Tiles don't all have the same dimension. Skip the MCT step.\n"); return OPJ_FALSE; } else if (l_tcp->mct == 2) { OPJ_BYTE ** l_data; if (! l_tcp->m_mct_decoding_matrix) { return OPJ_TRUE; } l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps * sizeof(OPJ_BYTE*)); if (! l_data) { return OPJ_FALSE; } for (i = 0; i < l_tile->numcomps; ++i) { l_data[i] = (OPJ_BYTE*) l_tile_comp->data; ++l_tile_comp; } if (! opj_mct_decode_custom(/* MCT data */ (OPJ_BYTE*) l_tcp->m_mct_decoding_matrix, /* size of components */ l_samples, /* components */ l_data, /* nb of components (i.e. 
size of pData) */ l_tile->numcomps, /* tells if the data is signed */ p_tcd->image->comps->sgnd)) { opj_free(l_data); return OPJ_FALSE; } opj_free(l_data); } else { if (l_tcp->tccps->qmfbid == 1) { opj_mct_decode(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, l_samples); } else { opj_mct_decode_real((OPJ_FLOAT32*)l_tile->comps[0].data, (OPJ_FLOAT32*)l_tile->comps[1].data, (OPJ_FLOAT32*)l_tile->comps[2].data, l_samples); } } } else { opj_event_msg(p_manager, EVT_ERROR, "Number of components (%d) is inconsistent with a MCT. Skip the MCT step.\n", l_tile->numcomps); } return OPJ_TRUE; }
/* Re-applies the DC level shift to decoded samples and clamps them to
 * the [l_min, l_max] range derived from each component's precision and
 * signedness. The qmfbid == 1 (reversible) path works on integers;
 * the irreversible path reinterprets the buffer as OPJ_FLOAT32, rounds
 * with opj_lrintf() and guards against overflow of
 * l_value_int + m_dc_level_shift before clamping. Only the
 * resno_decoded resolution window is touched; `l_stride` skips the
 * remainder of each allocated row. */
static OPJ_BOOL opj_tcd_dc_level_shift_decode(opj_tcd_t *p_tcd) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_resolution_t* l_res = 00; opj_tcd_tile_t * l_tile; OPJ_UINT32 l_width, l_height, i, j; OPJ_INT32 * l_current_ptr; OPJ_INT32 l_min, l_max; OPJ_UINT32 l_stride; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_res = l_tile_comp->resolutions + l_img_comp->resno_decoded; l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0); l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0); l_stride = (OPJ_UINT32)(l_tile_comp->x1 - l_tile_comp->x0) - l_width; assert(l_height == 0 || l_width + l_stride <= l_tile_comp->data_size / l_height); /*MUPDF*/ if (l_img_comp->sgnd) { l_min = -(1 << (l_img_comp->prec - 1)); l_max = (1 << (l_img_comp->prec - 1)) - 1; } else { l_min = 0; l_max = (OPJ_INT32)((1U << l_img_comp->prec) - 1); } l_current_ptr = l_tile_comp->data; if (l_tccp->qmfbid == 1) { for (j = 0; j < l_height; ++j) { for (i = 0; i < l_width; ++i) { *l_current_ptr = opj_int_clamp(*l_current_ptr + l_tccp->m_dc_level_shift, l_min, l_max); ++l_current_ptr; } l_current_ptr += l_stride; } } else { for (j = 0; j < l_height; ++j) { for (i = 0; i < l_width; ++i) { OPJ_FLOAT32 l_value = *((OPJ_FLOAT32 *) l_current_ptr); OPJ_INT32 l_value_int = (OPJ_INT32)opj_lrintf(l_value); if (l_value > INT_MAX || (l_value_int > 0 && l_tccp->m_dc_level_shift > 0 && l_value_int > INT_MAX - l_tccp->m_dc_level_shift)) { *l_current_ptr = l_max; } else { *l_current_ptr = opj_int_clamp( l_value_int + l_tccp->m_dc_level_shift, l_min, l_max); } ++l_current_ptr; } l_current_ptr += l_stride; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; }
/**
 * Deallocates the decoding data of the given precinct.
 * Frees the segs and chunks arrays of every decoder code-block, then
 * the code-block array itself; all freed pointers are reset to 00.
 */
static void opj_tcd_code_block_dec_deallocate(opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno, l_nb_code_blocks; opj_tcd_cblk_dec_t * l_code_block = p_precinct->cblks.dec; if (l_code_block) { /*fprintf(stderr,"deallocate codeblock:{\n");*/ /*fprintf(stderr,"\t x0=%d, y0=%d, x1=%d, y1=%d\n",l_code_block->x0, l_code_block->y0, l_code_block->x1, l_code_block->y1);*/ /*fprintf(stderr,"\t numbps=%d, numlenbits=%d, len=%d, numnewpasses=%d, real_num_segs=%d, m_current_max_segs=%d\n ", l_code_block->numbps, l_code_block->numlenbits, l_code_block->len, l_code_block->numnewpasses, l_code_block->real_num_segs, l_code_block->m_current_max_segs );*/ l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_dec_t); /*fprintf(stderr,"nb_code_blocks =%d\t}\n", l_nb_code_blocks);*/ for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->segs) { opj_free(l_code_block->segs); l_code_block->segs = 00; } if (l_code_block->chunks) { opj_free(l_code_block->chunks); l_code_block->chunks = 00; } ++l_code_block; } opj_free(p_precinct->cblks.dec); p_precinct->cblks.dec = 00; } } /** * Deallocates the encoding data of the given precinct.
*/ static void opj_tcd_code_block_enc_deallocate(opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno, l_nb_code_blocks; opj_tcd_cblk_enc_t * l_code_block = p_precinct->cblks.enc; if (l_code_block) { l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_enc_t); for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->data) { /* We refer to data - 1 since below we incremented it */ /* in opj_tcd_code_block_enc_allocate_data() */ opj_free(l_code_block->data - 1); l_code_block->data = 00; } if (l_code_block->layers) { opj_free(l_code_block->layers); l_code_block->layers = 00; } if (l_code_block->passes) { opj_free(l_code_block->passes); l_code_block->passes = 00; } ++l_code_block; } opj_free(p_precinct->cblks.enc); p_precinct->cblks.enc = 00; } }
/* Returns the number of bytes needed to hold the raw tile samples:
 * for each image component, bytes-per-sample (precision rounded up to
 * 1, 2 or 4 bytes) times the component's tile area. Mirrors the
 * unpacking layout consumed by opj_tcd_copy_tile_data(). */
OPJ_UINT32 opj_tcd_get_encoded_tile_size(opj_tcd_t *p_tcd) { OPJ_UINT32 i, l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i = 0; i < p_tcd->image->numcomps; ++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } l_data_size += l_size_comp * (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); ++l_img_comp; ++l_tilec; } return l_data_size; }
/* Encoder-side DC level shift: subtracts m_dc_level_shift from every
 * sample of every component; the irreversible (qmfbid != 1) path also
 * scales the shifted value by 2^11 to the fixed-point representation
 * used by the forward 9/7 transform. */
static OPJ_BOOL opj_tcd_dc_level_shift_encode(opj_tcd_t *p_tcd) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_tile_t * l_tile; OPJ_UINT32 l_nb_elem, i; OPJ_INT32 * l_current_ptr; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_current_ptr = l_tile_comp->data; l_nb_elem = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); if (l_tccp->qmfbid == 1) { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr -= l_tccp->m_dc_level_shift ; ++l_current_ptr; } } else { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr = (*l_current_ptr - l_tccp->m_dc_level_shift) * (1 << 11); ++l_current_ptr; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; }
/* Forward multi-component transform. No-op when MCT is disabled.
 * mct == 2 applies the user-supplied coding matrix via
 * opj_mct_encode_custom(); otherwise the standard forward MCT is used,
 * real-valued when qmfbid == 0, reversible integer otherwise. The
 * sample count is taken from the first component's tile area. */
static OPJ_BOOL opj_tcd_mct_encode(opj_tcd_t *p_tcd) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; OPJ_UINT32 samples = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); OPJ_UINT32 i; OPJ_BYTE ** l_data = 00; opj_tcp_t * l_tcp = p_tcd->tcp; if (!p_tcd->tcp->mct) { return OPJ_TRUE; } if (p_tcd->tcp->mct == 2) { if (! p_tcd->tcp->m_mct_coding_matrix) { return OPJ_TRUE; } l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps * sizeof(OPJ_BYTE*)); if (! l_data) { return OPJ_FALSE; } for (i = 0; i < l_tile->numcomps; ++i) { l_data[i] = (OPJ_BYTE*) l_tile_comp->data; ++l_tile_comp; } if (! opj_mct_encode_custom(/* MCT data */ (OPJ_BYTE*) p_tcd->tcp->m_mct_coding_matrix, /* size of components */ samples, /* components */ l_data, /* nb of components (i.e. size of pData) */ l_tile->numcomps, /* tells if the data is signed */ p_tcd->image->comps->sgnd)) { opj_free(l_data); return OPJ_FALSE; } opj_free(l_data); } else if (l_tcp->tccps->qmfbid == 0) { opj_mct_encode_real(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } else { opj_mct_encode(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } return OPJ_TRUE; }
/* Forward DWT for each tile component: reversible 5/3 when
 * qmfbid == 1, irreversible 9/7 when qmfbid == 0. */
static OPJ_BOOL opj_tcd_dwt_encode(opj_tcd_t *p_tcd) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; OPJ_UINT32 compno; for (compno = 0; compno < l_tile->numcomps; ++compno) { if (l_tccp->qmfbid == 1) { if (! opj_dwt_encode(l_tile_comp)) { return OPJ_FALSE; } } else if (l_tccp->qmfbid == 0) { if (! opj_dwt_encode_real(l_tile_comp)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_tccp; } return OPJ_TRUE; }
/* Tier-1 (entropy) encoding of the tile's code-blocks. Selects the
 * MCT normalization table: the fixed 3-component tables when the
 * standard MCT is enabled (real variant for qmfbid == 0), otherwise
 * the per-codestream mct_norms for all components. The opj_t1_t
 * context is created and destroyed locally on every path. */
static OPJ_BOOL opj_tcd_t1_encode(opj_tcd_t *p_tcd) { opj_t1_t * l_t1; const OPJ_FLOAT64 * l_mct_norms; OPJ_UINT32 l_mct_numcomps = 0U; opj_tcp_t * l_tcp = p_tcd->tcp; l_t1 = opj_t1_create(OPJ_TRUE); if (l_t1 == 00) { return OPJ_FALSE; } if (l_tcp->mct == 1) { l_mct_numcomps = 3U; /* irreversible encoding */ if (l_tcp->tccps->qmfbid == 0) { l_mct_norms = opj_mct_get_mct_norms_real(); } else { l_mct_norms = opj_mct_get_mct_norms(); } } else { l_mct_numcomps = p_tcd->image->numcomps; l_mct_norms = (const OPJ_FLOAT64 *)(l_tcp->mct_norms); } if (! opj_t1_encode_cblks(l_t1, p_tcd->tcd_image->tiles, l_tcp, l_mct_norms, l_mct_numcomps)) { opj_t1_destroy(l_t1); return OPJ_FALSE; } opj_t1_destroy(l_t1); return OPJ_TRUE; }
/* Tier-2 encoding: packetizes all layers of the current tile into
 * p_dest_data (at most p_max_dest_size bytes) through
 * opj_t2_encode_packets() in FINAL_PASS mode; the number of bytes
 * written is returned via p_data_written. */
static OPJ_BOOL opj_tcd_t2_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager) { opj_t2_t * l_t2; l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp); if (l_t2 == 00) { return OPJ_FALSE; } if (!
opj_t2_encode_packets( l_t2, p_tcd->tcd_tileno, p_tcd->tcd_image->tiles, p_tcd->tcp->numlayers, p_dest_data, p_data_written, p_max_dest_size, p_cstr_info, p_tcd->tp_num, p_tcd->tp_pos, p_tcd->cur_pino, FINAL_PASS, p_manager)) { opj_t2_destroy(l_t2); return OPJ_FALSE; } opj_t2_destroy(l_t2); /*---------------CLEAN-------------------*/ return OPJ_TRUE; }
/* Chooses the rate-allocation strategy for the tile: full
 * rate/distortion optimization (opj_tcd_rateallocate) when disto_alloc
 * or fixed_quality is requested, otherwise the fixed-layer allocation
 * based on the user matrix. */
static OPJ_BOOL opj_tcd_rate_allocate_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager) { opj_cp_t * l_cp = p_tcd->cp; OPJ_UINT32 l_nb_written = 0; if (p_cstr_info) { p_cstr_info->index_write = 0; } if (l_cp->m_specific_param.m_enc.m_disto_alloc || l_cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ /* Normal Rate/distortion allocation */ if (! opj_tcd_rateallocate(p_tcd, p_dest_data, &l_nb_written, p_max_dest_size, p_cstr_info, p_manager)) { return OPJ_FALSE; } } else { /* Fixed layer allocation */ opj_tcd_rateallocate_fixed(p_tcd); } return OPJ_TRUE; }
/* Unpacks a raw source buffer into the tile's per-component OPJ_INT32
 * sample buffers. p_src_length must equal
 * opj_tcd_get_encoded_tile_size(); each component is read as 1-, 2- or
 * 4-byte samples according to its precision, sign-extended when sgnd,
 * masked to unsigned otherwise. Returns OPJ_FALSE on a size mismatch. */
OPJ_BOOL opj_tcd_copy_tile_data(opj_tcd_t *p_tcd, OPJ_BYTE * p_src, OPJ_UINT32 p_src_length) { OPJ_UINT32 i, j, l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; OPJ_UINT32 l_nb_elem; l_data_size = opj_tcd_get_encoded_tile_size(p_tcd); if (l_data_size != p_src_length) { return OPJ_FALSE; } l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i = 0; i < p_tcd->image->numcomps; ++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ l_nb_elem = (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } switch (l_size_comp) { case 1: { OPJ_CHAR * l_src_ptr = (OPJ_CHAR *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; if (l_img_comp->sgnd) { for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (OPJ_INT32)(*(l_src_ptr++)); } } else { for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (*(l_src_ptr++)) & 0xff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 2: { OPJ_INT32 * l_dest_ptr = l_tilec->data; OPJ_INT16 * l_src_ptr = (OPJ_INT16 *) p_src; if (l_img_comp->sgnd) { for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (OPJ_INT32)(*(l_src_ptr++)); } } else { for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (*(l_src_ptr++)) & 0xffff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 4: { OPJ_INT32 * l_src_ptr = (OPJ_INT32 *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (OPJ_INT32)(*(l_src_ptr++)); } p_src = (OPJ_BYTE*) l_src_ptr; } break; } ++l_img_comp; ++l_tilec; } return OPJ_TRUE; }
/* A band is empty when it has zero width or zero height. */
OPJ_BOOL opj_tcd_is_band_empty(opj_tcd_band_t* band) { return (band->x1 - band->x0 == 0) || (band->y1 - band->y0 == 0); }
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * Copyright (c) 2006-2007, Parvatha Elangovan * Copyright (c) 2008, 2011-2012, Centre National d'Etudes Spatiales (CNES), FR * Copyright (c) 2012, CS Systemes d'Information, France * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "opj_includes.h" #include "opj_common.h" /* ----------------------------------------------------------------------- */ /* TODO MSD: */ #ifdef TODO_MSD void tcd_dump(FILE *fd, opj_tcd_t *tcd, opj_tcd_image_t * img) { int tileno, compno, resno, bandno, precno;/*, cblkno;*/ fprintf(fd, "image {\n"); fprintf(fd, " tw=%d, th=%d x0=%d x1=%d y0=%d y1=%d\n", img->tw, img->th, tcd->image->x0, tcd->image->x1, tcd->image->y0, tcd->image->y1); for (tileno = 0; tileno < img->th * img->tw; tileno++) { opj_tcd_tile_t *tile = &tcd->tcd_image->tiles[tileno]; fprintf(fd, " tile {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numcomps=%d\n", tile->x0, tile->y0, tile->x1, tile->y1, tile->numcomps); for (compno = 0; compno < tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tile->comps[compno]; fprintf(fd, " tilec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, numresolutions=%d\n", tilec->x0, tilec->y0, tilec->x1, tilec->y1, tilec->numresolutions); for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; fprintf(fd, "\n res {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, pw=%d, ph=%d, numbands=%d\n", res->x0, res->y0, res->x1, res->y1, res->pw, res->ph, res->numbands); for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; fprintf(fd, " band {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, stepsize=%f, numbps=%d\n", band->x0, band->y0, band->x1, 
band->y1, band->stepsize, band->numbps); for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prec = &band->precincts[precno]; fprintf(fd, " prec {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d, cw=%d, ch=%d\n", prec->x0, prec->y0, prec->x1, prec->y1, prec->cw, prec->ch); /* for (cblkno = 0; cblkno < prec->cw * prec->ch; cblkno++) { opj_tcd_cblk_t *cblk = &prec->cblks[cblkno]; fprintf(fd, " cblk {\n"); fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d\n", cblk->x0, cblk->y0, cblk->x1, cblk->y1); fprintf(fd, " }\n"); } */ fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, " }\n"); } fprintf(fd, "}\n"); } #endif /** * Initializes tile coding/decoding */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block, opj_event_mgr_t* manager); /** * Allocates memory for a decoding code block. */ static OPJ_BOOL opj_tcd_code_block_dec_allocate(opj_tcd_cblk_dec_t * p_code_block); /** * Deallocates the decoding data of the given precinct. */ static void opj_tcd_code_block_dec_deallocate(opj_tcd_precinct_t * p_precinct); /** * Allocates memory for an encoding code block (but not data). */ static OPJ_BOOL opj_tcd_code_block_enc_allocate(opj_tcd_cblk_enc_t * p_code_block); /** * Allocates data for an encoding code block */ static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t * p_code_block); /** * Deallocates the encoding data of the given precinct. 
*/ static void opj_tcd_code_block_enc_deallocate(opj_tcd_precinct_t * p_precinct); /** Free the memory allocated for encoding @param tcd TCD handle */ static void opj_tcd_free_tile(opj_tcd_t *tcd); static OPJ_BOOL opj_tcd_t2_decode(opj_tcd_t *p_tcd, OPJ_BYTE * p_src_data, OPJ_UINT32 * p_data_read, OPJ_UINT32 p_max_src_size, opj_codestream_index_t *p_cstr_index, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_t1_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_dwt_decode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_mct_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_dc_level_shift_decode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dc_level_shift_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_mct_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_dwt_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_t1_encode(opj_tcd_t *p_tcd); static OPJ_BOOL opj_tcd_t2_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager); static OPJ_BOOL opj_tcd_rate_allocate_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager); /* ----------------------------------------------------------------------- */ /** Create a new TCD handle */ opj_tcd_t* opj_tcd_create(OPJ_BOOL p_is_decoder) { opj_tcd_t *l_tcd = 00; /* create the tcd structure */ l_tcd = (opj_tcd_t*) opj_calloc(1, sizeof(opj_tcd_t)); if (!l_tcd) { return 00; } l_tcd->m_is_decoder = p_is_decoder ? 
1 : 0; l_tcd->tcd_image = (opj_tcd_image_t*)opj_calloc(1, sizeof(opj_tcd_image_t)); if (!l_tcd->tcd_image) { opj_free(l_tcd); return 00; } return l_tcd; } /* ----------------------------------------------------------------------- */ void opj_tcd_rateallocate_fixed(opj_tcd_t *tcd) { OPJ_UINT32 layno; for (layno = 0; layno < tcd->tcp->numlayers; layno++) { opj_tcd_makelayer_fixed(tcd, layno, 1); } } void opj_tcd_makelayer(opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_FLOAT64 thresh, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_UINT32 passno; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; tcd_tile->distolayer[layno] = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; /* Skip empty bands */ if (opj_tcd_is_band_empty(band)) { continue; } for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; for (passno = cblk->numpassesinlayers; passno < cblk->totalpasses; passno++) { OPJ_UINT32 dr; OPJ_FLOAT64 dd; opj_tcd_pass_t *pass = &cblk->passes[passno]; if (n == 0) { dr = pass->rate; dd = pass->distortiondec; } else { dr = pass->rate - cblk->passes[n - 1].rate; dd = pass->distortiondec - cblk->passes[n - 1].distortiondec; } if (!dr) { if (dd != 0) { n = passno + 1; } continue; } if (thresh - (dd / dr) < DBL_EPSILON) { /* do not rely on float equality, check with DBL_EPSILON margin */ n = passno + 1; } } layer->numpasses = n - cblk->numpassesinlayers; if 
(!layer->numpasses) { layer->disto = 0; continue; } if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; layer->disto = cblk->passes[n - 1].distortiondec; } else { layer->len = cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; layer->disto = cblk->passes[n - 1].distortiondec - cblk->passes[cblk->numpassesinlayers - 1].distortiondec; } tcd_tile->distolayer[layno] += layer->disto; /* fixed_quality */ if (final) { cblk->numpassesinlayers = n; } } } } } } } void opj_tcd_makelayer_fixed(opj_tcd_t *tcd, OPJ_UINT32 layno, OPJ_UINT32 final) { OPJ_UINT32 compno, resno, bandno, precno, cblkno; OPJ_INT32 value; /*, matrice[tcd_tcp->numlayers][tcd_tile->comps[0].numresolutions][3]; */ OPJ_INT32 matrice[10][10][3]; OPJ_UINT32 i, j, k; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; for (i = 0; i < tcd_tcp->numlayers; i++) { for (j = 0; j < tilec->numresolutions; j++) { for (k = 0; k < 3; k++) { matrice[i][j][k] = (OPJ_INT32)((OPJ_FLOAT32)cp->m_specific_param.m_enc.m_matrice[i * tilec->numresolutions * 3 + j * 3 + k] * (OPJ_FLOAT32)(tcd->image->comps[compno].prec / 16.0)); } } } for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = &tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; /* Skip empty bands */ if (opj_tcd_is_band_empty(band)) { continue; } for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; opj_tcd_layer_t *layer = &cblk->layers[layno]; OPJ_UINT32 n; OPJ_INT32 imsb = 
(OPJ_INT32)(tcd->image->comps[compno].prec - cblk->numbps); /* number of bit-plan equal to zero */ /* Correction of the matrix of coefficient to include the IMSB information */ if (layno == 0) { value = matrice[layno][resno][bandno]; if (imsb >= value) { value = 0; } else { value -= imsb; } } else { value = matrice[layno][resno][bandno] - matrice[layno - 1][resno][bandno]; if (imsb >= matrice[layno - 1][resno][bandno]) { value -= (imsb - matrice[layno - 1][resno][bandno]); if (value < 0) { value = 0; } } } if (layno == 0) { cblk->numpassesinlayers = 0; } n = cblk->numpassesinlayers; if (cblk->numpassesinlayers == 0) { if (value != 0) { n = 3 * (OPJ_UINT32)value - 2 + cblk->numpassesinlayers; } else { n = cblk->numpassesinlayers; } } else { n = 3 * (OPJ_UINT32)value + cblk->numpassesinlayers; } layer->numpasses = n - cblk->numpassesinlayers; if (!layer->numpasses) { continue; } if (cblk->numpassesinlayers == 0) { layer->len = cblk->passes[n - 1].rate; layer->data = cblk->data; } else { layer->len = cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers - 1].rate; layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate; } if (final) { cblk->numpassesinlayers = n; } } } } } } } OPJ_BOOL opj_tcd_rateallocate(opj_tcd_t *tcd, OPJ_BYTE *dest, OPJ_UINT32 * p_data_written, OPJ_UINT32 len, opj_codestream_info_t *cstr_info, opj_event_mgr_t *p_manager) { OPJ_UINT32 compno, resno, bandno, precno, cblkno, layno; OPJ_UINT32 passno; OPJ_FLOAT64 min, max; OPJ_FLOAT64 cumdisto[100]; /* fixed_quality */ const OPJ_FLOAT64 K = 1; /* 1.1; fixed_quality */ OPJ_FLOAT64 maxSE = 0; opj_cp_t *cp = tcd->cp; opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles; opj_tcp_t *tcd_tcp = tcd->tcp; min = DBL_MAX; max = 0; tcd_tile->numpix = 0; /* fixed_quality */ for (compno = 0; compno < tcd_tile->numcomps; compno++) { opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno]; tilec->numpix = 0; for (resno = 0; resno < tilec->numresolutions; resno++) { opj_tcd_resolution_t *res = 
&tilec->resolutions[resno]; for (bandno = 0; bandno < res->numbands; bandno++) { opj_tcd_band_t *band = &res->bands[bandno]; /* Skip empty bands */ if (opj_tcd_is_band_empty(band)) { continue; } for (precno = 0; precno < res->pw * res->ph; precno++) { opj_tcd_precinct_t *prc = &band->precincts[precno]; for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) { opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno]; for (passno = 0; passno < cblk->totalpasses; passno++) { opj_tcd_pass_t *pass = &cblk->passes[passno]; OPJ_INT32 dr; OPJ_FLOAT64 dd, rdslope; if (passno == 0) { dr = (OPJ_INT32)pass->rate; dd = pass->distortiondec; } else { dr = (OPJ_INT32)(pass->rate - cblk->passes[passno - 1].rate); dd = pass->distortiondec - cblk->passes[passno - 1].distortiondec; } if (dr == 0) { continue; } rdslope = dd / dr; if (rdslope < min) { min = rdslope; } if (rdslope > max) { max = rdslope; } } /* passno */ /* fixed_quality */ tcd_tile->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); tilec->numpix += ((cblk->x1 - cblk->x0) * (cblk->y1 - cblk->y0)); } /* cbklno */ } /* precno */ } /* bandno */ } /* resno */ maxSE += (((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) - 1.0) * ((OPJ_FLOAT64)(1 << tcd->image->comps[compno].prec) - 1.0)) * ((OPJ_FLOAT64)(tilec->numpix)); } /* compno */ /* index file */ if (cstr_info) { opj_tile_info_t *tile_info = &cstr_info->tile[tcd->tcd_tileno]; tile_info->numpix = tcd_tile->numpix; tile_info->distotile = tcd_tile->distotile; tile_info->thresh = (OPJ_FLOAT64 *) opj_malloc(tcd_tcp->numlayers * sizeof( OPJ_FLOAT64)); if (!tile_info->thresh) { /* FIXME event manager error callback */ return OPJ_FALSE; } } for (layno = 0; layno < tcd_tcp->numlayers; layno++) { OPJ_FLOAT64 lo = min; OPJ_FLOAT64 hi = max; OPJ_UINT32 maxlen = tcd_tcp->rates[layno] > 0.0f ? 
opj_uint_min((( OPJ_UINT32) ceil(tcd_tcp->rates[layno])), len) : len; OPJ_FLOAT64 goodthresh = 0; OPJ_FLOAT64 stable_thresh = 0; OPJ_UINT32 i; OPJ_FLOAT64 distotarget; /* fixed_quality */ /* fixed_quality */ distotarget = tcd_tile->distotile - ((K * maxSE) / pow((OPJ_FLOAT32)10, tcd_tcp->distoratio[layno] / 10)); /* Don't try to find an optimal threshold but rather take everything not included yet, if -r xx,yy,zz,0 (disto_alloc == 1 and rates == 0) -q xx,yy,zz,0 (fixed_quality == 1 and distoratio == 0) ==> possible to have some lossy layers and the last layer for sure lossless */ if (((cp->m_specific_param.m_enc.m_disto_alloc == 1) && (tcd_tcp->rates[layno] > 0.0f)) || ((cp->m_specific_param.m_enc.m_fixed_quality == 1) && (tcd_tcp->distoratio[layno] > 0.0))) { opj_t2_t*t2 = opj_t2_create(tcd->image, cp); OPJ_FLOAT64 thresh = 0; if (t2 == 00) { return OPJ_FALSE; } for (i = 0; i < 128; ++i) { OPJ_FLOAT64 distoachieved = 0; /* fixed_quality */ thresh = (lo + hi) / 2; opj_tcd_makelayer(tcd, layno, thresh, 0); if (cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ if (OPJ_IS_CINEMA(cp->rsiz)) { if (! opj_t2_encode_packets(t2, tcd->tcd_tileno, tcd_tile, layno + 1, dest, p_data_written, maxlen, cstr_info, tcd->cur_tp_num, tcd->tp_pos, tcd->cur_pino, THRESH_CALC, p_manager)) { lo = thresh; continue; } else { distoachieved = layno == 0 ? tcd_tile->distolayer[0] : cumdisto[layno - 1] + tcd_tile->distolayer[layno]; if (distoachieved < distotarget) { hi = thresh; stable_thresh = thresh; continue; } else { lo = thresh; } } } else { distoachieved = (layno == 0) ? tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); if (distoachieved < distotarget) { hi = thresh; stable_thresh = thresh; continue; } lo = thresh; } } else { if (! opj_t2_encode_packets(t2, tcd->tcd_tileno, tcd_tile, layno + 1, dest, p_data_written, maxlen, cstr_info, tcd->cur_tp_num, tcd->tp_pos, tcd->cur_pino, THRESH_CALC, p_manager)) { /* TODO: what to do with l ??? 
seek / tell ??? */ /* opj_event_msg(tcd->cinfo, EVT_INFO, "rate alloc: len=%d, max=%d\n", l, maxlen); */ lo = thresh; continue; } hi = thresh; stable_thresh = thresh; } } goodthresh = stable_thresh == 0 ? thresh : stable_thresh; opj_t2_destroy(t2); } else { goodthresh = min; } if (cstr_info) { /* Threshold for Marcela Index */ cstr_info->tile[tcd->tcd_tileno].thresh[layno] = goodthresh; } opj_tcd_makelayer(tcd, layno, goodthresh, 1); /* fixed_quality */ cumdisto[layno] = (layno == 0) ? tcd_tile->distolayer[0] : (cumdisto[layno - 1] + tcd_tile->distolayer[layno]); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_init(opj_tcd_t *p_tcd, opj_image_t * p_image, opj_cp_t * p_cp, opj_thread_pool_t* p_tp) { p_tcd->image = p_image; p_tcd->cp = p_cp; p_tcd->tcd_image->tiles = (opj_tcd_tile_t *) opj_calloc(1, sizeof(opj_tcd_tile_t)); if (! p_tcd->tcd_image->tiles) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->comps = (opj_tcd_tilecomp_t *) opj_calloc( p_image->numcomps, sizeof(opj_tcd_tilecomp_t)); if (! p_tcd->tcd_image->tiles->comps) { return OPJ_FALSE; } p_tcd->tcd_image->tiles->numcomps = p_image->numcomps; p_tcd->tp_pos = p_cp->m_specific_param.m_enc.m_tp_pos; p_tcd->thread_pool = p_tp; return OPJ_TRUE; } /** Destroy a previously created TCD handle */ void opj_tcd_destroy(opj_tcd_t *tcd) { if (tcd) { opj_tcd_free_tile(tcd); if (tcd->tcd_image) { opj_free(tcd->tcd_image); tcd->tcd_image = 00; } opj_free(tcd); } } OPJ_BOOL opj_alloc_tile_component_data(opj_tcd_tilecomp_t *l_tilec) { if ((l_tilec->data == 00) || ((l_tilec->data_size_needed > l_tilec->data_size) && (l_tilec->ownsData == OPJ_FALSE))) { l_tilec->data = (OPJ_INT32 *) opj_image_data_alloc(l_tilec->data_size_needed); if (! 
l_tilec->data) { return OPJ_FALSE; } /*fprintf(stderr, "tAllocate data of tilec (int): %d x OPJ_UINT32n",l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } else if (l_tilec->data_size_needed > l_tilec->data_size) { /* We don't need to keep old data */ opj_image_data_free(l_tilec->data); l_tilec->data = (OPJ_INT32 *) opj_image_data_alloc(l_tilec->data_size_needed); if (! l_tilec->data) { l_tilec->data_size = 0; l_tilec->data_size_needed = 0; l_tilec->ownsData = OPJ_FALSE; return OPJ_FALSE; } /*fprintf(stderr, "tReallocate data of tilec (int): from %d to %d x OPJ_UINT32n", l_tilec->data_size, l_data_size);*/ l_tilec->data_size = l_tilec->data_size_needed; l_tilec->ownsData = OPJ_TRUE; } return OPJ_TRUE; } /* ----------------------------------------------------------------------- */ static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block, opj_event_mgr_t* manager) { OPJ_UINT32(*l_gain_ptr)(OPJ_UINT32) = 00; OPJ_UINT32 compno, resno, bandno, precno, cblkno; opj_tcp_t * l_tcp = 00; opj_cp_t * l_cp = 00; opj_tcd_tile_t * l_tile = 00; opj_tccp_t *l_tccp = 00; opj_tcd_tilecomp_t *l_tilec = 00; opj_image_comp_t * l_image_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_stepsize_t * l_step_size = 00; opj_tcd_precinct_t *l_current_precinct = 00; opj_image_t *l_image = 00; OPJ_UINT32 p, q; OPJ_UINT32 l_level_no; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_gain; OPJ_INT32 l_x0b, l_y0b; OPJ_UINT32 l_tx0, l_ty0; /* extent of precincts , top left, bottom right**/ OPJ_INT32 l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end, l_br_prc_y_end; /* number of precinct for a resolution */ OPJ_UINT32 l_nb_precincts; /* room needed to store l_nb_precinct precinct for a resolution */ OPJ_UINT32 l_nb_precinct_size; /* number of code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks; /* room needed to store l_nb_code_blocks code blocks 
for a precinct*/ OPJ_UINT32 l_nb_code_blocks_size; /* size of data for a tile */ OPJ_UINT32 l_data_size; l_cp = p_tcd->cp; l_tcp = &(l_cp->tcps[p_tile_no]); l_tile = p_tcd->tcd_image->tiles; l_tccp = l_tcp->tccps; l_tilec = l_tile->comps; l_image = p_tcd->image; l_image_comp = p_tcd->image->comps; p = p_tile_no % l_cp->tw; /* tile coordinates */ q = p_tile_no / l_cp->tw; /*fprintf(stderr, "Tile coordinate = %d,%d\n", p, q);*/ /* 4 borders of the tile rescale on the image if necessary */ l_tx0 = l_cp->tx0 + p * l_cp->tdx; /* can't be greater than l_image->x1 so won't overflow */ l_tile->x0 = (OPJ_INT32)opj_uint_max(l_tx0, l_image->x0); l_tile->x1 = (OPJ_INT32)opj_uint_min(opj_uint_adds(l_tx0, l_cp->tdx), l_image->x1); /* all those OPJ_UINT32 are casted to OPJ_INT32, let's do some sanity check */ if ((l_tile->x0 < 0) || (l_tile->x1 <= l_tile->x0)) { opj_event_msg(manager, EVT_ERROR, "Tile X coordinates are not supported\n"); return OPJ_FALSE; } l_ty0 = l_cp->ty0 + q * l_cp->tdy; /* can't be greater than l_image->y1 so won't overflow */ l_tile->y0 = (OPJ_INT32)opj_uint_max(l_ty0, l_image->y0); l_tile->y1 = (OPJ_INT32)opj_uint_min(opj_uint_adds(l_ty0, l_cp->tdy), l_image->y1); /* all those OPJ_UINT32 are casted to OPJ_INT32, let's do some sanity check */ if ((l_tile->y0 < 0) || (l_tile->y1 <= l_tile->y0)) { opj_event_msg(manager, EVT_ERROR, "Tile Y coordinates are not supported\n"); return OPJ_FALSE; } /* testcase 1888.pdf.asan.35.988 */ if (l_tccp->numresolutions == 0) { opj_event_msg(manager, EVT_ERROR, "tiles require at least one resolution\n"); return OPJ_FALSE; } /*fprintf(stderr, "Tile border = %d,%d,%d,%d\n", l_tile->x0, l_tile->y0,l_tile->x1,l_tile->y1);*/ /*tile->numcomps = image->numcomps; */ for (compno = 0; compno < l_tile->numcomps; ++compno) { /*fprintf(stderr, "compno = %d/%d\n", compno, l_tile->numcomps);*/ l_image_comp->resno_decoded = 0; /* border of each l_tile component (global) */ l_tilec->x0 = opj_int_ceildiv(l_tile->x0, 
(OPJ_INT32)l_image_comp->dx); l_tilec->y0 = opj_int_ceildiv(l_tile->y0, (OPJ_INT32)l_image_comp->dy); l_tilec->x1 = opj_int_ceildiv(l_tile->x1, (OPJ_INT32)l_image_comp->dx); l_tilec->y1 = opj_int_ceildiv(l_tile->y1, (OPJ_INT32)l_image_comp->dy); /*fprintf(stderr, "\tTile compo border = %d,%d,%d,%d\n", l_tilec->x0, l_tilec->y0,l_tilec->x1,l_tilec->y1);*/ /* compute l_data_size with overflow check */ l_data_size = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0); /* issue 733, l_data_size == 0U, probably something wrong should be checked before getting here */ if ((l_data_size > 0U) && ((((OPJ_UINT32) - 1) / l_data_size) < (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0))) { opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n"); return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0); if ((((OPJ_UINT32) - 1) / (OPJ_UINT32)sizeof(OPJ_UINT32)) < l_data_size) { opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n"); return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)sizeof(OPJ_UINT32); l_tilec->numresolutions = l_tccp->numresolutions; if (l_tccp->numresolutions < l_cp->m_specific_param.m_dec.m_reduce) { l_tilec->minimum_num_resolutions = 1; } else { l_tilec->minimum_num_resolutions = l_tccp->numresolutions - l_cp->m_specific_param.m_dec.m_reduce; } l_tilec->data_size_needed = l_data_size; if (p_tcd->m_is_decoder && !opj_alloc_tile_component_data(l_tilec)) { opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n"); return OPJ_FALSE; } l_data_size = l_tilec->numresolutions * (OPJ_UINT32)sizeof( opj_tcd_resolution_t); if (l_tilec->resolutions == 00) { l_tilec->resolutions = (opj_tcd_resolution_t *) opj_malloc(l_data_size); if (! 
l_tilec->resolutions) { return OPJ_FALSE; } /*fprintf(stderr, "\tAllocate resolutions of tilec (opj_tcd_resolution_t): %d\n",l_data_size);*/ l_tilec->resolutions_size = l_data_size; memset(l_tilec->resolutions, 0, l_data_size); } else if (l_data_size > l_tilec->resolutions_size) { opj_tcd_resolution_t* new_resolutions = (opj_tcd_resolution_t *) opj_realloc( l_tilec->resolutions, l_data_size); if (! new_resolutions) { opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile resolutions\n"); opj_free(l_tilec->resolutions); l_tilec->resolutions = NULL; l_tilec->resolutions_size = 0; return OPJ_FALSE; } l_tilec->resolutions = new_resolutions; /*fprintf(stderr, "\tReallocate data of tilec (int): from %d to %d x OPJ_UINT32\n", l_tilec->resolutions_size, l_data_size);*/ memset(((OPJ_BYTE*) l_tilec->resolutions) + l_tilec->resolutions_size, 0, l_data_size - l_tilec->resolutions_size); l_tilec->resolutions_size = l_data_size; } l_level_no = l_tilec->numresolutions; l_res = l_tilec->resolutions; l_step_size = l_tccp->stepsizes; if (l_tccp->qmfbid == 0) { l_gain_ptr = &opj_dwt_getgain_real; } else { l_gain_ptr = &opj_dwt_getgain; } /*fprintf(stderr, "\tlevel_no=%d\n",l_level_no);*/ for (resno = 0; resno < l_tilec->numresolutions; ++resno) { /*fprintf(stderr, "\t\tresno = %d/%d\n", resno, l_tilec->numresolutions);*/ OPJ_INT32 tlcbgxstart, tlcbgystart /*, brcbgxend, brcbgyend*/; OPJ_UINT32 cbgwidthexpn, cbgheightexpn; OPJ_UINT32 cblkwidthexpn, cblkheightexpn; --l_level_no; /* border for each resolution level (global) */ l_res->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_res->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_res->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_res->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); /*fprintf(stderr, "\t\t\tres_x0= %d, res_y0 =%d, res_x1=%d, res_y1=%d\n", l_res->x0, l_res->y0, l_res->x1, l_res->y1);*/ /* p. 
35, table A-23, ISO/IEC FDIS154444-1 : 2000 (18 august 2000) */ l_pdx = l_tccp->prcw[resno]; l_pdy = l_tccp->prch[resno]; /*fprintf(stderr, "\t\t\tpdx=%d, pdy=%d\n", l_pdx, l_pdy);*/ /* p. 64, B.6, ISO/IEC FDIS15444-1 : 2000 (18 august 2000) */ l_tl_prc_x_start = opj_int_floordivpow2(l_res->x0, (OPJ_INT32)l_pdx) << l_pdx; l_tl_prc_y_start = opj_int_floordivpow2(l_res->y0, (OPJ_INT32)l_pdy) << l_pdy; l_br_prc_x_end = opj_int_ceildivpow2(l_res->x1, (OPJ_INT32)l_pdx) << l_pdx; l_br_prc_y_end = opj_int_ceildivpow2(l_res->y1, (OPJ_INT32)l_pdy) << l_pdy; /*fprintf(stderr, "\t\t\tprc_x_start=%d, prc_y_start=%d, br_prc_x_end=%d, br_prc_y_end=%d \n", l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end ,l_br_prc_y_end );*/ l_res->pw = (l_res->x0 == l_res->x1) ? 0U : (OPJ_UINT32)(( l_br_prc_x_end - l_tl_prc_x_start) >> l_pdx); l_res->ph = (l_res->y0 == l_res->y1) ? 0U : (OPJ_UINT32)(( l_br_prc_y_end - l_tl_prc_y_start) >> l_pdy); /*fprintf(stderr, "\t\t\tres_pw=%d, res_ph=%d\n", l_res->pw, l_res->ph );*/ if ((l_res->pw != 0U) && ((((OPJ_UINT32) - 1) / l_res->pw) < l_res->ph)) { opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n"); return OPJ_FALSE; } l_nb_precincts = l_res->pw * l_res->ph; if ((((OPJ_UINT32) - 1) / (OPJ_UINT32)sizeof(opj_tcd_precinct_t)) < l_nb_precincts) { opj_event_msg(manager, EVT_ERROR, "Not enough memory for tile data\n"); return OPJ_FALSE; } l_nb_precinct_size = l_nb_precincts * (OPJ_UINT32)sizeof(opj_tcd_precinct_t); if (resno == 0) { tlcbgxstart = l_tl_prc_x_start; tlcbgystart = l_tl_prc_y_start; /*brcbgxend = l_br_prc_x_end;*/ /* brcbgyend = l_br_prc_y_end;*/ cbgwidthexpn = l_pdx; cbgheightexpn = l_pdy; l_res->numbands = 1; } else { tlcbgxstart = opj_int_ceildivpow2(l_tl_prc_x_start, 1); tlcbgystart = opj_int_ceildivpow2(l_tl_prc_y_start, 1); /*brcbgxend = opj_int_ceildivpow2(l_br_prc_x_end, 1);*/ /*brcbgyend = opj_int_ceildivpow2(l_br_prc_y_end, 1);*/ cbgwidthexpn = l_pdx - 1; cbgheightexpn = l_pdy - 1; l_res->numbands = 3; } 
cblkwidthexpn = opj_uint_min(l_tccp->cblkw, cbgwidthexpn); cblkheightexpn = opj_uint_min(l_tccp->cblkh, cbgheightexpn); l_band = l_res->bands; for (bandno = 0; bandno < l_res->numbands; ++bandno, ++l_band, ++l_step_size) { OPJ_INT32 numbps; /*fprintf(stderr, "\t\t\tband_no=%d/%d\n", bandno, l_res->numbands );*/ if (resno == 0) { l_band->bandno = 0 ; l_band->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); } else { l_band->bandno = bandno + 1; /* x0b = 1 if bandno = 1 or 3 */ l_x0b = l_band->bandno & 1; /* y0b = 1 if bandno = 2 or 3 */ l_y0b = (OPJ_INT32)((l_band->bandno) >> 1); /* l_band border (global) */ l_band->x0 = opj_int64_ceildivpow2(l_tilec->x0 - ((OPJ_INT64)l_x0b << l_level_no), (OPJ_INT32)(l_level_no + 1)); l_band->y0 = opj_int64_ceildivpow2(l_tilec->y0 - ((OPJ_INT64)l_y0b << l_level_no), (OPJ_INT32)(l_level_no + 1)); l_band->x1 = opj_int64_ceildivpow2(l_tilec->x1 - ((OPJ_INT64)l_x0b << l_level_no), (OPJ_INT32)(l_level_no + 1)); l_band->y1 = opj_int64_ceildivpow2(l_tilec->y1 - ((OPJ_INT64)l_y0b << l_level_no), (OPJ_INT32)(l_level_no + 1)); } if (isEncoder) { /* Skip empty bands */ if (opj_tcd_is_band_empty(l_band)) { /* Do not zero l_band->precints to avoid leaks */ /* but make sure we don't use it later, since */ /* it will point to precincts of previous bands... 
*/ continue; } } /** avoid an if with storing function pointer */ l_gain = (*l_gain_ptr)(l_band->bandno); numbps = (OPJ_INT32)(l_image_comp->prec + l_gain); l_band->stepsize = (OPJ_FLOAT32)(((1.0 + l_step_size->mant / 2048.0) * pow(2.0, (OPJ_INT32)(numbps - l_step_size->expn)))) * fraction; /* Mb value of Equation E-2 in "E.1 Inverse quantization * procedure" of the standard */ l_band->numbps = l_step_size->expn + (OPJ_INT32)l_tccp->numgbits - 1; if (!l_band->precincts && (l_nb_precincts > 0U)) { l_band->precincts = (opj_tcd_precinct_t *) opj_malloc(/*3 * */ l_nb_precinct_size); if (! l_band->precincts) { opj_event_msg(manager, EVT_ERROR, "Not enough memory to handle band precints\n"); return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate precincts of a band (opj_tcd_precinct_t): %d\n",l_nb_precinct_size); */ memset(l_band->precincts, 0, l_nb_precinct_size); l_band->precincts_data_size = l_nb_precinct_size; } else if (l_band->precincts_data_size < l_nb_precinct_size) { opj_tcd_precinct_t * new_precincts = (opj_tcd_precinct_t *) opj_realloc( l_band->precincts,/*3 * */ l_nb_precinct_size); if (! 
new_precincts) { opj_event_msg(manager, EVT_ERROR, "Not enough memory to handle band precints\n"); opj_free(l_band->precincts); l_band->precincts = NULL; l_band->precincts_data_size = 0; return OPJ_FALSE; } l_band->precincts = new_precincts; /*fprintf(stderr, "\t\t\t\tReallocate precincts of a band (opj_tcd_precinct_t): from %d to %d\n",l_band->precincts_data_size, l_nb_precinct_size);*/ memset(((OPJ_BYTE *) l_band->precincts) + l_band->precincts_data_size, 0, l_nb_precinct_size - l_band->precincts_data_size); l_band->precincts_data_size = l_nb_precinct_size; } l_current_precinct = l_band->precincts; for (precno = 0; precno < l_nb_precincts; ++precno) { OPJ_INT32 tlcblkxstart, tlcblkystart, brcblkxend, brcblkyend; OPJ_INT32 cbgxstart = tlcbgxstart + (OPJ_INT32)(precno % l_res->pw) * (1 << cbgwidthexpn); OPJ_INT32 cbgystart = tlcbgystart + (OPJ_INT32)(precno / l_res->pw) * (1 << cbgheightexpn); OPJ_INT32 cbgxend = cbgxstart + (1 << cbgwidthexpn); OPJ_INT32 cbgyend = cbgystart + (1 << cbgheightexpn); /*fprintf(stderr, "\t precno=%d; bandno=%d, resno=%d; compno=%d\n", precno, bandno , resno, compno);*/ /*fprintf(stderr, "\t tlcbgxstart(=%d) + (precno(=%d) percent res->pw(=%d)) * (1 << cbgwidthexpn(=%d)) \n",tlcbgxstart,precno,l_res->pw,cbgwidthexpn);*/ /* precinct size (global) */ /*fprintf(stderr, "\t cbgxstart=%d, l_band->x0 = %d \n",cbgxstart, l_band->x0);*/ l_current_precinct->x0 = opj_int_max(cbgxstart, l_band->x0); l_current_precinct->y0 = opj_int_max(cbgystart, l_band->y0); l_current_precinct->x1 = opj_int_min(cbgxend, l_band->x1); l_current_precinct->y1 = opj_int_min(cbgyend, l_band->y1); /*fprintf(stderr, "\t prc_x0=%d; prc_y0=%d, prc_x1=%d; prc_y1=%d\n",l_current_precinct->x0, l_current_precinct->y0 ,l_current_precinct->x1, l_current_precinct->y1);*/ tlcblkxstart = opj_int_floordivpow2(l_current_precinct->x0, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t tlcblkxstart =%d\n",tlcblkxstart );*/ tlcblkystart = 
opj_int_floordivpow2(l_current_precinct->y0, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t tlcblkystart =%d\n",tlcblkystart );*/ brcblkxend = opj_int_ceildivpow2(l_current_precinct->x1, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t brcblkxend =%d\n",brcblkxend );*/ brcblkyend = opj_int_ceildivpow2(l_current_precinct->y1, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t brcblkyend =%d\n",brcblkyend );*/ l_current_precinct->cw = (OPJ_UINT32)((brcblkxend - tlcblkxstart) >> cblkwidthexpn); l_current_precinct->ch = (OPJ_UINT32)((brcblkyend - tlcblkystart) >> cblkheightexpn); l_nb_code_blocks = l_current_precinct->cw * l_current_precinct->ch; /*fprintf(stderr, "\t\t\t\t precinct_cw = %d x recinct_ch = %d\n",l_current_precinct->cw, l_current_precinct->ch); */ l_nb_code_blocks_size = l_nb_code_blocks * (OPJ_UINT32)sizeof_block; if (!l_current_precinct->cblks.blocks && (l_nb_code_blocks > 0U)) { l_current_precinct->cblks.blocks = opj_malloc(l_nb_code_blocks_size); if (! l_current_precinct->cblks.blocks) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate cblks of a precinct (opj_tcd_cblk_dec_t): %d\n",l_nb_code_blocks_size);*/ memset(l_current_precinct->cblks.blocks, 0, l_nb_code_blocks_size); l_current_precinct->block_size = l_nb_code_blocks_size; } else if (l_nb_code_blocks_size > l_current_precinct->block_size) { void *new_blocks = opj_realloc(l_current_precinct->cblks.blocks, l_nb_code_blocks_size); if (! 
new_blocks) { opj_free(l_current_precinct->cblks.blocks); l_current_precinct->cblks.blocks = NULL; l_current_precinct->block_size = 0; opj_event_msg(manager, EVT_ERROR, "Not enough memory for current precinct codeblock element\n"); return OPJ_FALSE; } l_current_precinct->cblks.blocks = new_blocks; /*fprintf(stderr, "\t\t\t\tReallocate cblks of a precinct (opj_tcd_cblk_dec_t): from %d to %d\n",l_current_precinct->block_size, l_nb_code_blocks_size); */ memset(((OPJ_BYTE *) l_current_precinct->cblks.blocks) + l_current_precinct->block_size , 0 , l_nb_code_blocks_size - l_current_precinct->block_size); l_current_precinct->block_size = l_nb_code_blocks_size; } if (! l_current_precinct->incltree) { l_current_precinct->incltree = opj_tgt_create(l_current_precinct->cw, l_current_precinct->ch, manager); } else { l_current_precinct->incltree = opj_tgt_init(l_current_precinct->incltree, l_current_precinct->cw, l_current_precinct->ch, manager); } if (! l_current_precinct->imsbtree) { l_current_precinct->imsbtree = opj_tgt_create(l_current_precinct->cw, l_current_precinct->ch, manager); } else { l_current_precinct->imsbtree = opj_tgt_init(l_current_precinct->imsbtree, l_current_precinct->cw, l_current_precinct->ch, manager); } for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { OPJ_INT32 cblkxstart = tlcblkxstart + (OPJ_INT32)(cblkno % l_current_precinct->cw) * (1 << cblkwidthexpn); OPJ_INT32 cblkystart = tlcblkystart + (OPJ_INT32)(cblkno / l_current_precinct->cw) * (1 << cblkheightexpn); OPJ_INT32 cblkxend = cblkxstart + (1 << cblkwidthexpn); OPJ_INT32 cblkyend = cblkystart + (1 << cblkheightexpn); if (isEncoder) { opj_tcd_cblk_enc_t* l_code_block = l_current_precinct->cblks.enc + cblkno; if (! 
opj_tcd_code_block_enc_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); if (! opj_tcd_code_block_enc_allocate_data(l_code_block)) { return OPJ_FALSE; } } else { opj_tcd_cblk_dec_t* l_code_block = l_current_precinct->cblks.dec + cblkno; if (! opj_tcd_code_block_dec_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); } } ++l_current_precinct; } /* precno */ } /* bandno */ ++l_res; } /* resno */ ++l_tccp; ++l_tilec; ++l_image_comp; } /* compno */ return OPJ_TRUE; } OPJ_BOOL opj_tcd_init_encode_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, opj_event_mgr_t* p_manager) { return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_TRUE, 1.0F, sizeof(opj_tcd_cblk_enc_t), p_manager); } OPJ_BOOL opj_tcd_init_decode_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, opj_event_mgr_t* p_manager) { return opj_tcd_init_tile(p_tcd, p_tile_no, OPJ_FALSE, 0.5F, sizeof(opj_tcd_cblk_dec_t), p_manager); } /** * Allocates memory for an encoding code block (but not data memory). */ static OPJ_BOOL opj_tcd_code_block_enc_allocate(opj_tcd_cblk_enc_t * p_code_block) { if (! p_code_block->layers) { /* no memset since data */ p_code_block->layers = (opj_tcd_layer_t*) opj_calloc(100, sizeof(opj_tcd_layer_t)); if (! p_code_block->layers) { return OPJ_FALSE; } } if (! p_code_block->passes) { p_code_block->passes = (opj_tcd_pass_t*) opj_calloc(100, sizeof(opj_tcd_pass_t)); if (! 
p_code_block->passes) {
            return OPJ_FALSE;
        }
    }
    return OPJ_TRUE;
}

/**
 * Allocates data memory for an encoding code block.
 *
 * Grows p_code_block->data to hold the raw samples of the code block
 * (width * height * sizeof(OPJ_UINT32)) plus 2 slack bytes, and reserves
 * one extra leading byte for the MQ coder (see below).
 * Returns OPJ_TRUE on success, OPJ_FALSE on allocation failure.
 */
static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t *
        p_code_block)
{
    OPJ_UINT32 l_data_size;

    /* +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */
    /* and actually +2 required for https://github.com/uclouvain/openjpeg/issues/982 */
    /* TODO: is there a theoretical upper-bound for the compressed code */
    /* block size ? */
    l_data_size = 2 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *
                                   (p_code_block->y1 - p_code_block->y0) *
                                   (OPJ_INT32)sizeof(OPJ_UINT32));

    if (l_data_size > p_code_block->data_size) {
        if (p_code_block->data) {
            /* We refer to data - 1 since below we incremented it */
            opj_free(p_code_block->data - 1);
        }
        p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1);
        if (! p_code_block->data) {
            p_code_block->data_size = 0U;
            return OPJ_FALSE;
        }
        p_code_block->data_size = l_data_size;

        /* We reserve the initial byte as a fake byte to a non-FF value */
        /* and increment the data pointer, so that opj_mqc_init_enc() */
        /* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */
        /* it. */
        p_code_block->data[0] = 0;
        p_code_block->data += 1; /*why +1 ?*/
    }
    return OPJ_TRUE;
}

/* Resets a segment descriptor to its all-zero initial state. */
void opj_tcd_reinit_segment(opj_tcd_seg_t* seg)
{
    memset(seg, 0, sizeof(opj_tcd_seg_t));
}

/**
 * Allocates memory for a decoding code block.
 */
static OPJ_BOOL opj_tcd_code_block_dec_allocate(opj_tcd_cblk_dec_t *
        p_code_block)
{
    if (! p_code_block->segs) {
        /* first use: allocate the default number of segment descriptors */
        p_code_block->segs = (opj_tcd_seg_t *) opj_calloc(OPJ_J2K_DEFAULT_NB_SEGS,
                             sizeof(opj_tcd_seg_t));
        if (!
p_code_block->segs) {
            return OPJ_FALSE;
        }
        /*fprintf(stderr, "Allocate %d elements of code_block->data\n", OPJ_J2K_DEFAULT_NB_SEGS * sizeof(opj_tcd_seg_t));*/

        p_code_block->m_current_max_segs = OPJ_J2K_DEFAULT_NB_SEGS;
        /*fprintf(stderr, "m_current_max_segs of code_block->data = %d\n", p_code_block->m_current_max_segs);*/
    } else {
        /* sanitize */
        /* Keep the existing segs/chunks allocations, wipe every other */
        /* field, then reset each retained segment descriptor. */
        opj_tcd_seg_t * l_segs = p_code_block->segs;
        OPJ_UINT32 l_current_max_segs = p_code_block->m_current_max_segs;
        opj_tcd_seg_data_chunk_t* l_chunks = p_code_block->chunks;
        OPJ_UINT32 l_numchunksalloc = p_code_block->numchunksalloc;
        OPJ_UINT32 i;

        memset(p_code_block, 0, sizeof(opj_tcd_cblk_dec_t));
        p_code_block->segs = l_segs;
        p_code_block->m_current_max_segs = l_current_max_segs;
        for (i = 0; i < l_current_max_segs; ++i) {
            opj_tcd_reinit_segment(&l_segs[i]);
        }
        p_code_block->chunks = l_chunks;
        p_code_block->numchunksalloc = l_numchunksalloc;
    }

    return OPJ_TRUE;
}

/**
 * Computes the number of bytes needed to store the fully decoded tile:
 * width * height * bytes-per-sample of the decoded resolution, summed over
 * all components. Returns UINT_MAX if the computation would overflow, so
 * callers can reject the tile instead of allocating a wrapped-around size.
 */
OPJ_UINT32 opj_tcd_get_decoded_tile_size(opj_tcd_t *p_tcd)
{
    OPJ_UINT32 i;
    OPJ_UINT32 l_data_size = 0;
    opj_image_comp_t * l_img_comp = 00;
    opj_tcd_tilecomp_t * l_tile_comp = 00;
    opj_tcd_resolution_t * l_res = 00;
    OPJ_UINT32 l_size_comp, l_remaining;
    OPJ_UINT32 l_temp;

    l_tile_comp = p_tcd->tcd_image->tiles->comps;
    l_img_comp = p_tcd->image->comps;

    for (i = 0; i < p_tcd->image->numcomps; ++i) {
        /* bytes per sample, rounded up from the precision in bits */
        l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/
        l_remaining = l_img_comp->prec & 7;  /* (%8) */

        if (l_remaining) {
            ++l_size_comp;
        }

        if (l_size_comp == 3) {
            /* 3-byte samples are stored on 4 bytes */
            l_size_comp = 4;
        }

        l_res = l_tile_comp->resolutions + l_tile_comp->minimum_num_resolutions - 1;
        l_temp = (OPJ_UINT32)((l_res->x1 - l_res->x0) * (l_res->y1 -
                              l_res->y0)); /* x1*y1 can't overflow */

        if (l_size_comp && UINT_MAX / l_size_comp < l_temp) {
            return UINT_MAX;
        }
        l_temp *= l_size_comp;

        if (l_temp > UINT_MAX - l_data_size) {
            return UINT_MAX;
        }
        l_data_size += l_temp;

        ++l_img_comp;
        ++l_tile_comp;
    }

    return l_data_size;
}

OPJ_BOOL opj_tcd_encode_tile(opj_tcd_t *p_tcd,
                             OPJ_UINT32 p_tile_no,
                             OPJ_BYTE *p_dest,
                             OPJ_UINT32 *
p_data_written, OPJ_UINT32 p_max_length, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager) { if (p_tcd->cur_tp_num == 0) { p_tcd->tcd_tileno = p_tile_no; p_tcd->tcp = &p_tcd->cp->tcps[p_tile_no]; /* INDEX >> "Precinct_nb_X et Precinct_nb_Y" */ if (p_cstr_info) { OPJ_UINT32 l_num_packs = 0; OPJ_UINT32 i; opj_tcd_tilecomp_t *l_tilec_idx = &p_tcd->tcd_image->tiles->comps[0]; /* based on component 0 */ opj_tccp_t *l_tccp = p_tcd->tcp->tccps; /* based on component 0 */ for (i = 0; i < l_tilec_idx->numresolutions; i++) { opj_tcd_resolution_t *l_res_idx = &l_tilec_idx->resolutions[i]; p_cstr_info->tile[p_tile_no].pw[i] = (int)l_res_idx->pw; p_cstr_info->tile[p_tile_no].ph[i] = (int)l_res_idx->ph; l_num_packs += l_res_idx->pw * l_res_idx->ph; p_cstr_info->tile[p_tile_no].pdx[i] = (int)l_tccp->prcw[i]; p_cstr_info->tile[p_tile_no].pdy[i] = (int)l_tccp->prch[i]; } p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t*) opj_calloc(( size_t)p_cstr_info->numcomps * (size_t)p_cstr_info->numlayers * l_num_packs, sizeof(opj_packet_info_t)); if (!p_cstr_info->tile[p_tile_no].packet) { /* FIXME event manager error callback */ return OPJ_FALSE; } } /* << INDEX */ /* FIXME _ProfStart(PGROUP_DC_SHIFT); */ /*---------------TILE-------------------*/ if (! opj_tcd_dc_level_shift_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DC_SHIFT); */ /* FIXME _ProfStart(PGROUP_MCT); */ if (! opj_tcd_mct_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_MCT); */ /* FIXME _ProfStart(PGROUP_DWT); */ if (! opj_tcd_dwt_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DWT); */ /* FIXME _ProfStart(PGROUP_T1); */ if (! opj_tcd_t1_encode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T1); */ /* FIXME _ProfStart(PGROUP_RATE); */ if (! 
opj_tcd_rate_allocate_encode(p_tcd, p_dest, p_max_length, p_cstr_info, p_manager)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_RATE); */ } /*--------------TIER2------------------*/ /* INDEX */ if (p_cstr_info) { p_cstr_info->index_write = 1; } /* FIXME _ProfStart(PGROUP_T2); */ if (! opj_tcd_t2_encode(p_tcd, p_dest, p_data_written, p_max_length, p_cstr_info, p_manager)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T2); */ /*---------------CLEAN-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_decode_tile(opj_tcd_t *p_tcd, OPJ_BYTE *p_src, OPJ_UINT32 p_max_length, OPJ_UINT32 p_tile_no, opj_codestream_index_t *p_cstr_index, opj_event_mgr_t *p_manager ) { OPJ_UINT32 l_data_read; p_tcd->tcd_tileno = p_tile_no; p_tcd->tcp = &(p_tcd->cp->tcps[p_tile_no]); #ifdef TODO_MSD /* FIXME */ /* INDEX >> */ if (p_cstr_info) { OPJ_UINT32 resno, compno, numprec = 0; for (compno = 0; compno < (OPJ_UINT32) p_cstr_info->numcomps; compno++) { opj_tcp_t *tcp = &p_tcd->cp->tcps[0]; opj_tccp_t *tccp = &tcp->tccps[compno]; opj_tcd_tilecomp_t *tilec_idx = &p_tcd->tcd_image->tiles->comps[compno]; for (resno = 0; resno < tilec_idx->numresolutions; resno++) { opj_tcd_resolution_t *res_idx = &tilec_idx->resolutions[resno]; p_cstr_info->tile[p_tile_no].pw[resno] = res_idx->pw; p_cstr_info->tile[p_tile_no].ph[resno] = res_idx->ph; numprec += res_idx->pw * res_idx->ph; p_cstr_info->tile[p_tile_no].pdx[resno] = tccp->prcw[resno]; p_cstr_info->tile[p_tile_no].pdy[resno] = tccp->prch[resno]; } } p_cstr_info->tile[p_tile_no].packet = (opj_packet_info_t *) opj_malloc( p_cstr_info->numlayers * numprec * sizeof(opj_packet_info_t)); p_cstr_info->packno = 0; } /* << INDEX */ #endif /*--------------TIER2------------------*/ /* FIXME _ProfStart(PGROUP_T2); */ l_data_read = 0; if (! 
opj_tcd_t2_decode(p_tcd, p_src, &l_data_read, p_max_length, p_cstr_index, p_manager)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T2); */ /*------------------TIER1-----------------*/ /* FIXME _ProfStart(PGROUP_T1); */ if (! opj_tcd_t1_decode(p_tcd, p_manager)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_T1); */ /*----------------DWT---------------------*/ /* FIXME _ProfStart(PGROUP_DWT); */ if (! opj_tcd_dwt_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DWT); */ /*----------------MCT-------------------*/ /* FIXME _ProfStart(PGROUP_MCT); */ if (! opj_tcd_mct_decode(p_tcd, p_manager)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_MCT); */ /* FIXME _ProfStart(PGROUP_DC_SHIFT); */ if (! opj_tcd_dc_level_shift_decode(p_tcd)) { return OPJ_FALSE; } /* FIXME _ProfStop(PGROUP_DC_SHIFT); */ /*---------------TILE-------------------*/ return OPJ_TRUE; } OPJ_BOOL opj_tcd_update_tile_data(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest, OPJ_UINT32 p_dest_length ) { OPJ_UINT32 i, j, k, l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; opj_tcd_resolution_t * l_res; OPJ_UINT32 l_size_comp, l_remaining; OPJ_UINT32 l_stride, l_width, l_height; l_data_size = opj_tcd_get_decoded_tile_size(p_tcd); if (l_data_size == UINT_MAX || l_data_size > p_dest_length) { return OPJ_FALSE; } l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i = 0; i < p_tcd->image->numcomps; ++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ l_res = l_tilec->resolutions + l_img_comp->resno_decoded; l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0); l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0); l_stride = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0) - l_width; if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } switch (l_size_comp) { case 1: { OPJ_CHAR * l_dest_ptr = (OPJ_CHAR *) p_dest; const OPJ_INT32 * l_src_ptr = l_tilec->data; if (l_img_comp->sgnd) { for (j = 0; 
j < l_height; ++j) { for (k = 0; k < l_width; ++k) { *(l_dest_ptr++) = (OPJ_CHAR)(*(l_src_ptr++)); } l_src_ptr += l_stride; } } else { for (j = 0; j < l_height; ++j) { for (k = 0; k < l_width; ++k) { *(l_dest_ptr++) = (OPJ_CHAR)((*(l_src_ptr++)) & 0xff); } l_src_ptr += l_stride; } } p_dest = (OPJ_BYTE *)l_dest_ptr; } break; case 2: { const OPJ_INT32 * l_src_ptr = l_tilec->data; OPJ_INT16 * l_dest_ptr = (OPJ_INT16 *) p_dest; if (l_img_comp->sgnd) { for (j = 0; j < l_height; ++j) { for (k = 0; k < l_width; ++k) { OPJ_INT16 val = (OPJ_INT16)(*(l_src_ptr++)); memcpy(l_dest_ptr, &val, sizeof(val)); l_dest_ptr ++; } l_src_ptr += l_stride; } } else { for (j = 0; j < l_height; ++j) { for (k = 0; k < l_width; ++k) { OPJ_INT16 val = (OPJ_INT16)((*(l_src_ptr++)) & 0xffff); memcpy(l_dest_ptr, &val, sizeof(val)); l_dest_ptr ++; } l_src_ptr += l_stride; } } p_dest = (OPJ_BYTE*) l_dest_ptr; } break; case 4: { OPJ_INT32 * l_dest_ptr = (OPJ_INT32 *) p_dest; OPJ_INT32 * l_src_ptr = l_tilec->data; for (j = 0; j < l_height; ++j) { memcpy(l_dest_ptr, l_src_ptr, l_width * sizeof(OPJ_INT32)); l_dest_ptr += l_width; l_src_ptr += l_width + l_stride; } p_dest = (OPJ_BYTE*) l_dest_ptr; } break; } ++l_img_comp; ++l_tilec; } return OPJ_TRUE; } static void opj_tcd_free_tile(opj_tcd_t *p_tcd) { OPJ_UINT32 compno, resno, bandno, precno; opj_tcd_tile_t *l_tile = 00; opj_tcd_tilecomp_t *l_tile_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_tcd_precinct_t *l_precinct = 00; OPJ_UINT32 l_nb_resolutions, l_nb_precincts; void (* l_tcd_code_block_deallocate)(opj_tcd_precinct_t *) = 00; if (! p_tcd) { return; } if (! p_tcd->tcd_image) { return; } if (p_tcd->m_is_decoder) { l_tcd_code_block_deallocate = opj_tcd_code_block_dec_deallocate; } else { l_tcd_code_block_deallocate = opj_tcd_code_block_enc_deallocate; } l_tile = p_tcd->tcd_image->tiles; if (! 
l_tile) { return; } l_tile_comp = l_tile->comps; for (compno = 0; compno < l_tile->numcomps; ++compno) { l_res = l_tile_comp->resolutions; if (l_res) { l_nb_resolutions = l_tile_comp->resolutions_size / sizeof(opj_tcd_resolution_t); for (resno = 0; resno < l_nb_resolutions; ++resno) { l_band = l_res->bands; for (bandno = 0; bandno < 3; ++bandno) { l_precinct = l_band->precincts; if (l_precinct) { l_nb_precincts = l_band->precincts_data_size / sizeof(opj_tcd_precinct_t); for (precno = 0; precno < l_nb_precincts; ++precno) { opj_tgt_destroy(l_precinct->incltree); l_precinct->incltree = 00; opj_tgt_destroy(l_precinct->imsbtree); l_precinct->imsbtree = 00; (*l_tcd_code_block_deallocate)(l_precinct); ++l_precinct; } opj_free(l_band->precincts); l_band->precincts = 00; } ++l_band; } /* for (resno */ ++l_res; } opj_free(l_tile_comp->resolutions); l_tile_comp->resolutions = 00; } if (l_tile_comp->ownsData && l_tile_comp->data) { opj_image_data_free(l_tile_comp->data); l_tile_comp->data = 00; l_tile_comp->ownsData = 0; l_tile_comp->data_size = 0; l_tile_comp->data_size_needed = 0; } ++l_tile_comp; } opj_free(l_tile->comps); l_tile->comps = 00; opj_free(p_tcd->tcd_image->tiles); p_tcd->tcd_image->tiles = 00; } static OPJ_BOOL opj_tcd_t2_decode(opj_tcd_t *p_tcd, OPJ_BYTE * p_src_data, OPJ_UINT32 * p_data_read, OPJ_UINT32 p_max_src_size, opj_codestream_index_t *p_cstr_index, opj_event_mgr_t *p_manager ) { opj_t2_t * l_t2; l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp); if (l_t2 == 00) { return OPJ_FALSE; } if (! 
opj_t2_decode_packets(
            l_t2,
            p_tcd->tcd_tileno,
            p_tcd->tcd_image->tiles,
            p_src_data,
            p_data_read,
            p_max_src_size,
            p_cstr_index,
            p_manager)) {
        opj_t2_destroy(l_t2);
        return OPJ_FALSE;
    }

    opj_t2_destroy(l_t2);

    /*---------------CLEAN-------------------*/
    return OPJ_TRUE;
}

/*
 * Tier-1 (code-block) decoding of every component of the current tile.
 * Decoding jobs are dispatched on the TCD thread pool; a mutex is passed
 * down so workers can serialize access to the event manager.
 */
static OPJ_BOOL opj_tcd_t1_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager)
{
    OPJ_UINT32 compno;
    opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles;
    opj_tcd_tilecomp_t* l_tile_comp = l_tile->comps;
    opj_tccp_t * l_tccp = p_tcd->tcp->tccps;
    /* volatile: 'ret' is written by worker threads via opj_t1_decode_cblks */
    volatile OPJ_BOOL ret = OPJ_TRUE;
    OPJ_BOOL check_pterm = OPJ_FALSE;
    opj_mutex_t* p_manager_mutex = NULL;

    p_manager_mutex = opj_mutex_create();

    /* Only enable PTERM check if we decode all layers */
    if (p_tcd->tcp->num_layers_to_decode == p_tcd->tcp->numlayers &&
            (l_tccp->cblksty & J2K_CCP_CBLKSTY_PTERM) != 0) {
        check_pterm = OPJ_TRUE;
    }

    for (compno = 0; compno < l_tile->numcomps; ++compno) {
        opj_t1_decode_cblks(p_tcd->thread_pool, &ret, l_tile_comp, l_tccp,
                            p_manager, p_manager_mutex, check_pterm);
        if (!ret) {
            break;
        }
        ++l_tile_comp;
        ++l_tccp;
    }
    /* wait for all scheduled code-block jobs before reading 'ret' */
    opj_thread_pool_wait_completion(p_tcd->thread_pool, 0);
    if (p_manager_mutex) {
        opj_mutex_destroy(p_manager_mutex);
    }
    return ret;
}

/* Inverse DWT of every component, up to the resolution actually decoded. */
static OPJ_BOOL opj_tcd_dwt_decode(opj_tcd_t *p_tcd)
{
    OPJ_UINT32 compno;
    opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles;
    opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps;
    opj_tccp_t * l_tccp = p_tcd->tcp->tccps;
    opj_image_comp_t * l_img_comp = p_tcd->image->comps;

    for (compno = 0; compno < l_tile->numcomps; compno++) {
        /*
        if (tcd->cp->reduce != 0) {
        tcd->image->comps[compno].resno_decoded =
        tile->comps[compno].numresolutions - tcd->cp->reduce - 1;
        if (tcd->image->comps[compno].resno_decoded < 0) {
        return false;
        }
        }
        numres2decode = tcd->image->comps[compno].resno_decoded + 1;
        if(numres2decode > 0){
        */

        if (l_tccp->qmfbid == 1) {
            /* reversible (integer) wavelet */
            if (! opj_dwt_decode(p_tcd->thread_pool, l_tile_comp,
                                 l_img_comp->resno_decoded + 1)) {
                return OPJ_FALSE;
            }
        } else {
            /* irreversible (real) wavelet */
            if (!
opj_dwt_decode_real(l_tile_comp,
                                     l_img_comp->resno_decoded + 1)) {
                return OPJ_FALSE;
            }
        }

        ++l_tile_comp;
        ++l_img_comp;
        ++l_tccp;
    }

    return OPJ_TRUE;
}

/*
 * Inverse multi-component transform of the first three tile components,
 * when the tile-coding parameters enable MCT: either a custom decoding
 * matrix (mct == 2) or the standard reversible/irreversible transform.
 */
static OPJ_BOOL opj_tcd_mct_decode(opj_tcd_t *p_tcd, opj_event_mgr_t *p_manager)
{
    opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles;
    opj_tcp_t * l_tcp = p_tcd->tcp;
    opj_tcd_tilecomp_t * l_tile_comp = l_tile->comps;
    OPJ_UINT32 l_samples, i;

    if (! l_tcp->mct) {
        return OPJ_TRUE;
    }

    l_samples = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) *
                             (l_tile_comp->y1 - l_tile_comp->y0));

    if (l_tile->numcomps >= 3) {
        /* testcase 1336.pdf.asan.47.376 */
        /* all three components must cover at least l_samples samples, */
        /* otherwise the in-place transform would run out of bounds */
        if ((l_tile->comps[0].x1 - l_tile->comps[0].x0) *
                (l_tile->comps[0].y1 - l_tile->comps[0].y0) < (OPJ_INT32)l_samples ||
                (l_tile->comps[1].x1 - l_tile->comps[1].x0) *
                (l_tile->comps[1].y1 - l_tile->comps[1].y0) < (OPJ_INT32)l_samples ||
                (l_tile->comps[2].x1 - l_tile->comps[2].x0) *
                (l_tile->comps[2].y1 - l_tile->comps[2].y0) < (OPJ_INT32)l_samples) {
            opj_event_msg(p_manager, EVT_ERROR,
                          "Tiles don't all have the same dimension. Skip the MCT step.\n");
            return OPJ_FALSE;
        } else if (l_tcp->mct == 2) {
            /* custom decoding matrix */
            OPJ_BYTE ** l_data;

            if (! l_tcp->m_mct_decoding_matrix) {
                return OPJ_TRUE;
            }

            l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps * sizeof(OPJ_BYTE*));
            if (! l_data) {
                return OPJ_FALSE;
            }

            for (i = 0; i < l_tile->numcomps; ++i) {
                l_data[i] = (OPJ_BYTE*) l_tile_comp->data;
                ++l_tile_comp;
            }

            if (! opj_mct_decode_custom(/* MCT data */
                        (OPJ_BYTE*) l_tcp->m_mct_decoding_matrix,
                        /* size of components */
                        l_samples,
                        /* components */
                        l_data,
                        /* nb of components (i.e. size of pData) */
                        l_tile->numcomps,
                        /* tells if the data is signed */
                        p_tcd->image->comps->sgnd)) {
                opj_free(l_data);
                return OPJ_FALSE;
            }

            opj_free(l_data);
        } else {
            if (l_tcp->tccps->qmfbid == 1) {
                /* reversible component transform (integer) */
                opj_mct_decode(l_tile->comps[0].data,
                               l_tile->comps[1].data,
                               l_tile->comps[2].data,
                               l_samples);
            } else {
                /* irreversible component transform (float) */
                opj_mct_decode_real((OPJ_FLOAT32*)l_tile->comps[0].data,
                                    (OPJ_FLOAT32*)l_tile->comps[1].data,
                                    (OPJ_FLOAT32*)l_tile->comps[2].data,
                                    l_samples);
            }
        }
    } else {
        opj_event_msg(p_manager, EVT_ERROR,
                      "Number of components (%d) is inconsistent with a MCT. Skip the MCT step.\n",
                      l_tile->numcomps);
    }

    return OPJ_TRUE;
}

/* Undoes the DC level shift on every decoded component, clamping each */
/* sample to the component's valid [l_min, l_max] range. */
static OPJ_BOOL opj_tcd_dc_level_shift_decode(opj_tcd_t *p_tcd)
{
    OPJ_UINT32 compno;
    opj_tcd_tilecomp_t * l_tile_comp = 00;
    opj_tccp_t * l_tccp = 00;
    opj_image_comp_t * l_img_comp = 00;
    opj_tcd_resolution_t* l_res = 00;
    opj_tcd_tile_t * l_tile;
    OPJ_UINT32 l_width, l_height, i, j;
    OPJ_INT32 * l_current_ptr;
    OPJ_INT32 l_min, l_max;
    OPJ_UINT32 l_stride;

    l_tile = p_tcd->tcd_image->tiles;
    l_tile_comp = l_tile->comps;
    l_tccp = p_tcd->tcp->tccps;
    l_img_comp = p_tcd->image->comps;

    for (compno = 0; compno < l_tile->numcomps; compno++) {
        l_res = l_tile_comp->resolutions + l_img_comp->resno_decoded;
        l_width = (OPJ_UINT32)(l_res->x1 - l_res->x0);
        l_height = (OPJ_UINT32)(l_res->y1 - l_res->y0);
        /* samples of one row beyond l_width up to the tile-component pitch */
        l_stride = (OPJ_UINT32)(l_tile_comp->x1 - l_tile_comp->x0) - l_width;

        assert(l_height == 0 ||
               l_width + l_stride <= l_tile_comp->data_size / l_height); /*MUPDF*/

        if (l_img_comp->sgnd) {
            l_min = -(1 << (l_img_comp->prec - 1));
            l_max = (1 << (l_img_comp->prec - 1)) - 1;
        } else {
            l_min = 0;
            l_max = (OPJ_INT32)((1U << l_img_comp->prec) - 1);
        }

        l_current_ptr = l_tile_comp->data;

        if (l_tccp->qmfbid == 1) {
            /* integer path (reversible) */
            for (j = 0; j < l_height; ++j) {
                for (i = 0; i < l_width; ++i) {
                    *l_current_ptr = opj_int_clamp(*l_current_ptr + l_tccp->m_dc_level_shift,
                                                   l_min, l_max);
                    ++l_current_ptr;
                }
                l_current_ptr += l_stride;
            }
        } else {
            /* float path: samples hold OPJ_FLOAT32 bit patterns */
            for (j = 0; j < l_height; ++j) {
                for (i = 0; i < l_width; ++i) {
                    OPJ_FLOAT32
l_value = *((OPJ_FLOAT32 *) l_current_ptr); OPJ_INT32 l_value_int = (OPJ_INT32)opj_lrintf(l_value); if (l_value > INT_MAX || (l_value_int > 0 && l_tccp->m_dc_level_shift > 0 && l_value_int > INT_MAX - l_tccp->m_dc_level_shift)) { *l_current_ptr = l_max; } else { *l_current_ptr = opj_int_clamp( l_value_int + l_tccp->m_dc_level_shift, l_min, l_max); } ++l_current_ptr; } l_current_ptr += l_stride; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; } /** * Deallocates the encoding data of the given precinct. */ static void opj_tcd_code_block_dec_deallocate(opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno, l_nb_code_blocks; opj_tcd_cblk_dec_t * l_code_block = p_precinct->cblks.dec; if (l_code_block) { /*fprintf(stderr,"deallocate codeblock:{\n");*/ /*fprintf(stderr,"\t x0=%d, y0=%d, x1=%d, y1=%d\n",l_code_block->x0, l_code_block->y0, l_code_block->x1, l_code_block->y1);*/ /*fprintf(stderr,"\t numbps=%d, numlenbits=%d, len=%d, numnewpasses=%d, real_num_segs=%d, m_current_max_segs=%d\n ", l_code_block->numbps, l_code_block->numlenbits, l_code_block->len, l_code_block->numnewpasses, l_code_block->real_num_segs, l_code_block->m_current_max_segs );*/ l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_dec_t); /*fprintf(stderr,"nb_code_blocks =%d\t}\n", l_nb_code_blocks);*/ for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->segs) { opj_free(l_code_block->segs); l_code_block->segs = 00; } if (l_code_block->chunks) { opj_free(l_code_block->chunks); l_code_block->chunks = 00; } ++l_code_block; } opj_free(p_precinct->cblks.dec); p_precinct->cblks.dec = 00; } } /** * Deallocates the encoding data of the given precinct. 
*/ static void opj_tcd_code_block_enc_deallocate(opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno, l_nb_code_blocks; opj_tcd_cblk_enc_t * l_code_block = p_precinct->cblks.enc; if (l_code_block) { l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_enc_t); for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->data) { /* We refer to data - 1 since below we incremented it */ /* in opj_tcd_code_block_enc_allocate_data() */ opj_free(l_code_block->data - 1); l_code_block->data = 00; } if (l_code_block->layers) { opj_free(l_code_block->layers); l_code_block->layers = 00; } if (l_code_block->passes) { opj_free(l_code_block->passes); l_code_block->passes = 00; } ++l_code_block; } opj_free(p_precinct->cblks.enc); p_precinct->cblks.enc = 00; } } OPJ_UINT32 opj_tcd_get_encoded_tile_size(opj_tcd_t *p_tcd) { OPJ_UINT32 i, l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i = 0; i < p_tcd->image->numcomps; ++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } l_data_size += l_size_comp * (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); ++l_img_comp; ++l_tilec; } return l_data_size; } static OPJ_BOOL opj_tcd_dc_level_shift_encode(opj_tcd_t *p_tcd) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_tile_t * l_tile; OPJ_UINT32 l_nb_elem, i; OPJ_INT32 * l_current_ptr; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_current_ptr = l_tile_comp->data; l_nb_elem = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - 
l_tile_comp->y0)); if (l_tccp->qmfbid == 1) { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr -= l_tccp->m_dc_level_shift ; ++l_current_ptr; } } else { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr = (*l_current_ptr - l_tccp->m_dc_level_shift) * (1 << 11); ++l_current_ptr; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; } static OPJ_BOOL opj_tcd_mct_encode(opj_tcd_t *p_tcd) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; OPJ_UINT32 samples = (OPJ_UINT32)((l_tile_comp->x1 - l_tile_comp->x0) * (l_tile_comp->y1 - l_tile_comp->y0)); OPJ_UINT32 i; OPJ_BYTE ** l_data = 00; opj_tcp_t * l_tcp = p_tcd->tcp; if (!p_tcd->tcp->mct) { return OPJ_TRUE; } if (p_tcd->tcp->mct == 2) { if (! p_tcd->tcp->m_mct_coding_matrix) { return OPJ_TRUE; } l_data = (OPJ_BYTE **) opj_malloc(l_tile->numcomps * sizeof(OPJ_BYTE*)); if (! l_data) { return OPJ_FALSE; } for (i = 0; i < l_tile->numcomps; ++i) { l_data[i] = (OPJ_BYTE*) l_tile_comp->data; ++l_tile_comp; } if (! opj_mct_encode_custom(/* MCT data */ (OPJ_BYTE*) p_tcd->tcp->m_mct_coding_matrix, /* size of components */ samples, /* components */ l_data, /* nb of components (i.e. size of pData) */ l_tile->numcomps, /* tells if the data is signed */ p_tcd->image->comps->sgnd)) { opj_free(l_data); return OPJ_FALSE; } opj_free(l_data); } else if (l_tcp->tccps->qmfbid == 0) { opj_mct_encode_real(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } else { opj_mct_encode(l_tile->comps[0].data, l_tile->comps[1].data, l_tile->comps[2].data, samples); } return OPJ_TRUE; } static OPJ_BOOL opj_tcd_dwt_encode(opj_tcd_t *p_tcd) { opj_tcd_tile_t * l_tile = p_tcd->tcd_image->tiles; opj_tcd_tilecomp_t * l_tile_comp = p_tcd->tcd_image->tiles->comps; opj_tccp_t * l_tccp = p_tcd->tcp->tccps; OPJ_UINT32 compno; for (compno = 0; compno < l_tile->numcomps; ++compno) { if (l_tccp->qmfbid == 1) { if (! 
opj_dwt_encode(l_tile_comp)) { return OPJ_FALSE; } } else if (l_tccp->qmfbid == 0) { if (! opj_dwt_encode_real(l_tile_comp)) { return OPJ_FALSE; } } ++l_tile_comp; ++l_tccp; } return OPJ_TRUE; } static OPJ_BOOL opj_tcd_t1_encode(opj_tcd_t *p_tcd) { opj_t1_t * l_t1; const OPJ_FLOAT64 * l_mct_norms; OPJ_UINT32 l_mct_numcomps = 0U; opj_tcp_t * l_tcp = p_tcd->tcp; l_t1 = opj_t1_create(OPJ_TRUE); if (l_t1 == 00) { return OPJ_FALSE; } if (l_tcp->mct == 1) { l_mct_numcomps = 3U; /* irreversible encoding */ if (l_tcp->tccps->qmfbid == 0) { l_mct_norms = opj_mct_get_mct_norms_real(); } else { l_mct_norms = opj_mct_get_mct_norms(); } } else { l_mct_numcomps = p_tcd->image->numcomps; l_mct_norms = (const OPJ_FLOAT64 *)(l_tcp->mct_norms); } if (! opj_t1_encode_cblks(l_t1, p_tcd->tcd_image->tiles, l_tcp, l_mct_norms, l_mct_numcomps)) { opj_t1_destroy(l_t1); return OPJ_FALSE; } opj_t1_destroy(l_t1); return OPJ_TRUE; } static OPJ_BOOL opj_tcd_t2_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 * p_data_written, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager) { opj_t2_t * l_t2; l_t2 = opj_t2_create(p_tcd->image, p_tcd->cp); if (l_t2 == 00) { return OPJ_FALSE; } if (! 
opj_t2_encode_packets( l_t2, p_tcd->tcd_tileno, p_tcd->tcd_image->tiles, p_tcd->tcp->numlayers, p_dest_data, p_data_written, p_max_dest_size, p_cstr_info, p_tcd->tp_num, p_tcd->tp_pos, p_tcd->cur_pino, FINAL_PASS, p_manager)) { opj_t2_destroy(l_t2); return OPJ_FALSE; } opj_t2_destroy(l_t2); /*---------------CLEAN-------------------*/ return OPJ_TRUE; } static OPJ_BOOL opj_tcd_rate_allocate_encode(opj_tcd_t *p_tcd, OPJ_BYTE * p_dest_data, OPJ_UINT32 p_max_dest_size, opj_codestream_info_t *p_cstr_info, opj_event_mgr_t *p_manager) { opj_cp_t * l_cp = p_tcd->cp; OPJ_UINT32 l_nb_written = 0; if (p_cstr_info) { p_cstr_info->index_write = 0; } if (l_cp->m_specific_param.m_enc.m_disto_alloc || l_cp->m_specific_param.m_enc.m_fixed_quality) { /* fixed_quality */ /* Normal Rate/distortion allocation */ if (! opj_tcd_rateallocate(p_tcd, p_dest_data, &l_nb_written, p_max_dest_size, p_cstr_info, p_manager)) { return OPJ_FALSE; } } else { /* Fixed layer allocation */ opj_tcd_rateallocate_fixed(p_tcd); } return OPJ_TRUE; } OPJ_BOOL opj_tcd_copy_tile_data(opj_tcd_t *p_tcd, OPJ_BYTE * p_src, OPJ_UINT32 p_src_length) { OPJ_UINT32 i, j, l_data_size = 0; opj_image_comp_t * l_img_comp = 00; opj_tcd_tilecomp_t * l_tilec = 00; OPJ_UINT32 l_size_comp, l_remaining; OPJ_UINT32 l_nb_elem; l_data_size = opj_tcd_get_encoded_tile_size(p_tcd); if (l_data_size != p_src_length) { return OPJ_FALSE; } l_tilec = p_tcd->tcd_image->tiles->comps; l_img_comp = p_tcd->image->comps; for (i = 0; i < p_tcd->image->numcomps; ++i) { l_size_comp = l_img_comp->prec >> 3; /*(/ 8)*/ l_remaining = l_img_comp->prec & 7; /* (%8) */ l_nb_elem = (OPJ_UINT32)((l_tilec->x1 - l_tilec->x0) * (l_tilec->y1 - l_tilec->y0)); if (l_remaining) { ++l_size_comp; } if (l_size_comp == 3) { l_size_comp = 4; } switch (l_size_comp) { case 1: { OPJ_CHAR * l_src_ptr = (OPJ_CHAR *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; if (l_img_comp->sgnd) { for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (OPJ_INT32)(*(l_src_ptr++)); } } else 
{ for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (*(l_src_ptr++)) & 0xff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 2: { OPJ_INT32 * l_dest_ptr = l_tilec->data; OPJ_INT16 * l_src_ptr = (OPJ_INT16 *) p_src; if (l_img_comp->sgnd) { for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (OPJ_INT32)(*(l_src_ptr++)); } } else { for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (*(l_src_ptr++)) & 0xffff; } } p_src = (OPJ_BYTE*) l_src_ptr; } break; case 4: { OPJ_INT32 * l_src_ptr = (OPJ_INT32 *) p_src; OPJ_INT32 * l_dest_ptr = l_tilec->data; for (j = 0; j < l_nb_elem; ++j) { *(l_dest_ptr++) = (OPJ_INT32)(*(l_src_ptr++)); } p_src = (OPJ_BYTE*) l_src_ptr; } break; } ++l_img_comp; ++l_tilec; } return OPJ_TRUE; } OPJ_BOOL opj_tcd_is_band_empty(opj_tcd_band_t* band) { return (band->x1 - band->x0 == 0) || (band->y1 - band->y0 == 0); }
static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t * p_code_block) { OPJ_UINT32 l_data_size; /* The +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */ l_data_size = 1 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) * (p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32)); if (l_data_size > p_code_block->data_size) { if (p_code_block->data) { /* We refer to data - 1 since below we incremented it */ opj_free(p_code_block->data - 1); } p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1); if (! p_code_block->data) { p_code_block->data_size = 0U; return OPJ_FALSE; } p_code_block->data_size = l_data_size; /* We reserve the initial byte as a fake byte to a non-FF value */ /* and increment the data pointer, so that opj_mqc_init_enc() */ /* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */ /* it. */ p_code_block->data[0] = 0; p_code_block->data += 1; /*why +1 ?*/ } return OPJ_TRUE; }
static OPJ_BOOL opj_tcd_code_block_enc_allocate_data(opj_tcd_cblk_enc_t * p_code_block) { OPJ_UINT32 l_data_size; /* +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */ /* and actually +2 required for https://github.com/uclouvain/openjpeg/issues/982 */ /* TODO: is there a theoretical upper-bound for the compressed code */ /* block size ? */ l_data_size = 2 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) * (p_code_block->y1 - p_code_block->y0) * (OPJ_INT32)sizeof(OPJ_UINT32)); if (l_data_size > p_code_block->data_size) { if (p_code_block->data) { /* We refer to data - 1 since below we incremented it */ opj_free(p_code_block->data - 1); } p_code_block->data = (OPJ_BYTE*) opj_malloc(l_data_size + 1); if (! p_code_block->data) { p_code_block->data_size = 0U; return OPJ_FALSE; } p_code_block->data_size = l_data_size; /* We reserve the initial byte as a fake byte to a non-FF value */ /* and increment the data pointer, so that opj_mqc_init_enc() */ /* can do bp = data - 1, and opj_mqc_byteout() can safely dereference */ /* it. */ p_code_block->data[0] = 0; p_code_block->data += 1; /*why +1 ?*/ } return OPJ_TRUE; }
{'added': [(1190, ' /* +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */'), (1191, ' /* and actually +2 required for https://github.com/uclouvain/openjpeg/issues/982 */'), (1192, ' /* TODO: is there a theoretical upper-bound for the compressed code */'), (1193, ' /* block size ? */'), (1194, ' l_data_size = 2 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *')], 'deleted': [(1190, ' /* The +1 is needed for https://github.com/uclouvain/openjpeg/issues/835 */'), (1191, ' l_data_size = 1 + (OPJ_UINT32)((p_code_block->x1 - p_code_block->x0) *')]}
5
2
1,745
12,072
21
132
4
https://github.com/uclouvain/openjpeg
CVE-2017-14151
CWE-119
3,073
keyinfo.c
C
fscrypt_get_encryption_info
/* * key management facility for FS encryption support. * * Copyright (C) 2015, Google, Inc. * * This contains encryption key functions. * * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015. */ #include <keys/user-type.h> #include <linux/scatterlist.h> #include "fscrypt_private.h" static void derive_crypt_complete(struct crypto_async_request *req, int rc) { struct fscrypt_completion_result *ecr = req->data; if (rc == -EINPROGRESS) return; ecr->res = rc; complete(&ecr->completion); } /** * derive_key_aes() - Derive a key using AES-128-ECB * @deriving_key: Encryption key used for derivation. * @source_key: Source key to which to apply derivation. * @derived_key: Derived key. * * Return: Zero on success; non-zero otherwise. */ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], u8 source_key[FS_AES_256_XTS_KEY_SIZE], u8 derived_key[FS_AES_256_XTS_KEY_SIZE]) { int res = 0; struct skcipher_request *req = NULL; DECLARE_FS_COMPLETION_RESULT(ecr); struct scatterlist src_sg, dst_sg; struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); if (IS_ERR(tfm)) { res = PTR_ERR(tfm); tfm = NULL; goto out; } crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); req = skcipher_request_alloc(tfm, GFP_NOFS); if (!req) { res = -ENOMEM; goto out; } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, derive_crypt_complete, &ecr); res = crypto_skcipher_setkey(tfm, deriving_key, FS_AES_128_ECB_KEY_SIZE); if (res < 0) goto out; sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE); sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE); skcipher_request_set_crypt(req, &src_sg, &dst_sg, FS_AES_256_XTS_KEY_SIZE, NULL); res = crypto_skcipher_encrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { wait_for_completion(&ecr.completion); res = ecr.res; } out: skcipher_request_free(req); crypto_free_skcipher(tfm); return res; } static int validate_user_key(struct fscrypt_info *crypt_info, struct 
fscrypt_context *ctx, u8 *raw_key, const char *prefix) { char *description; struct key *keyring_key; struct fscrypt_key *master_key; const struct user_key_payload *ukp; int res; description = kasprintf(GFP_NOFS, "%s%*phN", prefix, FS_KEY_DESCRIPTOR_SIZE, ctx->master_key_descriptor); if (!description) return -ENOMEM; keyring_key = request_key(&key_type_logon, description, NULL); kfree(description); if (IS_ERR(keyring_key)) return PTR_ERR(keyring_key); if (keyring_key->type != &key_type_logon) { printk_once(KERN_WARNING "%s: key type must be logon\n", __func__); res = -ENOKEY; goto out; } down_read(&keyring_key->sem); ukp = user_key_payload(keyring_key); if (ukp->datalen != sizeof(struct fscrypt_key)) { res = -EINVAL; up_read(&keyring_key->sem); goto out; } master_key = (struct fscrypt_key *)ukp->data; BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE); if (master_key->size != FS_AES_256_XTS_KEY_SIZE) { printk_once(KERN_WARNING "%s: key size incorrect: %d\n", __func__, master_key->size); res = -ENOKEY; up_read(&keyring_key->sem); goto out; } res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); up_read(&keyring_key->sem); if (res) goto out; crypt_info->ci_keyring_key = keyring_key; return 0; out: key_put(keyring_key); return res; } static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode, const char **cipher_str_ret, int *keysize_ret) { if (S_ISREG(inode->i_mode)) { if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) { *cipher_str_ret = "xts(aes)"; *keysize_ret = FS_AES_256_XTS_KEY_SIZE; return 0; } pr_warn_once("fscrypto: unsupported contents encryption mode " "%d for inode %lu\n", ci->ci_data_mode, inode->i_ino); return -ENOKEY; } if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) { if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) { *cipher_str_ret = "cts(cbc(aes))"; *keysize_ret = FS_AES_256_CTS_KEY_SIZE; return 0; } pr_warn_once("fscrypto: unsupported filenames encryption mode " "%d for inode %lu\n", 
ci->ci_filename_mode, inode->i_ino); return -ENOKEY; } pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n", (inode->i_mode & S_IFMT), inode->i_ino); return -ENOKEY; } static void put_crypt_info(struct fscrypt_info *ci) { if (!ci) return; key_put(ci->ci_keyring_key); crypto_free_skcipher(ci->ci_ctfm); kmem_cache_free(fscrypt_info_cachep, ci); } int fscrypt_get_crypt_info(struct inode *inode) { struct fscrypt_info *crypt_info; struct fscrypt_context ctx; struct crypto_skcipher *ctfm; const char *cipher_str; int keysize; u8 *raw_key = NULL; int res; res = fscrypt_initialize(inode->i_sb->s_cop->flags); if (res) return res; if (!inode->i_sb->s_cop->get_context) return -EOPNOTSUPP; retry: crypt_info = ACCESS_ONCE(inode->i_crypt_info); if (crypt_info) { if (!crypt_info->ci_keyring_key || key_validate(crypt_info->ci_keyring_key) == 0) return 0; fscrypt_put_encryption_info(inode, crypt_info); goto retry; } res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res < 0) { if (!fscrypt_dummy_context_enabled(inode) || inode->i_sb->s_cop->is_encrypted(inode)) return res; /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); } else if (res != sizeof(ctx)) { return -EINVAL; } if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) return -EINVAL; if (ctx.flags & ~FS_POLICY_FLAGS_VALID) return -EINVAL; crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS); if (!crypt_info) return -ENOMEM; crypt_info->ci_flags = ctx.flags; crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; crypt_info->ci_ctfm = NULL; crypt_info->ci_keyring_key = NULL; memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, 
sizeof(crypt_info->ci_master_key)); res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize); if (res) goto out; /* * This cannot be a stack buffer because it is passed to the scatterlist * crypto API as part of key derivation. */ res = -ENOMEM; raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); if (!raw_key) goto out; res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX); if (res && inode->i_sb->s_cop->key_prefix) { int res2 = validate_user_key(crypt_info, &ctx, raw_key, inode->i_sb->s_cop->key_prefix); if (res2) { if (res2 == -ENOKEY) res = -ENOKEY; goto out; } } else if (res) { goto out; } ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); if (!ctfm || IS_ERR(ctfm)) { res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; printk(KERN_DEBUG "%s: error %d (inode %u) allocating crypto tfm\n", __func__, res, (unsigned) inode->i_ino); goto out; } crypt_info->ci_ctfm = ctfm; crypto_skcipher_clear_flags(ctfm, ~0); crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY); res = crypto_skcipher_setkey(ctfm, raw_key, keysize); if (res) goto out; kzfree(raw_key); raw_key = NULL; if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { put_crypt_info(crypt_info); goto retry; } return 0; out: if (res == -ENOKEY) res = 0; put_crypt_info(crypt_info); kzfree(raw_key); return res; } void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) { struct fscrypt_info *prev; if (ci == NULL) ci = ACCESS_ONCE(inode->i_crypt_info); if (ci == NULL) return; prev = cmpxchg(&inode->i_crypt_info, ci, NULL); if (prev != ci) return; put_crypt_info(ci); } EXPORT_SYMBOL(fscrypt_put_encryption_info); int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *ci = inode->i_crypt_info; if (!ci || (ci->ci_keyring_key && (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_DEAD))))) return fscrypt_get_crypt_info(inode); return 0; } EXPORT_SYMBOL(fscrypt_get_encryption_info);
/* * key management facility for FS encryption support. * * Copyright (C) 2015, Google, Inc. * * This contains encryption key functions. * * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015. */ #include <keys/user-type.h> #include <linux/scatterlist.h> #include "fscrypt_private.h" static void derive_crypt_complete(struct crypto_async_request *req, int rc) { struct fscrypt_completion_result *ecr = req->data; if (rc == -EINPROGRESS) return; ecr->res = rc; complete(&ecr->completion); } /** * derive_key_aes() - Derive a key using AES-128-ECB * @deriving_key: Encryption key used for derivation. * @source_key: Source key to which to apply derivation. * @derived_key: Derived key. * * Return: Zero on success; non-zero otherwise. */ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], u8 source_key[FS_AES_256_XTS_KEY_SIZE], u8 derived_key[FS_AES_256_XTS_KEY_SIZE]) { int res = 0; struct skcipher_request *req = NULL; DECLARE_FS_COMPLETION_RESULT(ecr); struct scatterlist src_sg, dst_sg; struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); if (IS_ERR(tfm)) { res = PTR_ERR(tfm); tfm = NULL; goto out; } crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); req = skcipher_request_alloc(tfm, GFP_NOFS); if (!req) { res = -ENOMEM; goto out; } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, derive_crypt_complete, &ecr); res = crypto_skcipher_setkey(tfm, deriving_key, FS_AES_128_ECB_KEY_SIZE); if (res < 0) goto out; sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE); sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE); skcipher_request_set_crypt(req, &src_sg, &dst_sg, FS_AES_256_XTS_KEY_SIZE, NULL); res = crypto_skcipher_encrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { wait_for_completion(&ecr.completion); res = ecr.res; } out: skcipher_request_free(req); crypto_free_skcipher(tfm); return res; } static int validate_user_key(struct fscrypt_info *crypt_info, struct 
fscrypt_context *ctx, u8 *raw_key, const char *prefix) { char *description; struct key *keyring_key; struct fscrypt_key *master_key; const struct user_key_payload *ukp; int res; description = kasprintf(GFP_NOFS, "%s%*phN", prefix, FS_KEY_DESCRIPTOR_SIZE, ctx->master_key_descriptor); if (!description) return -ENOMEM; keyring_key = request_key(&key_type_logon, description, NULL); kfree(description); if (IS_ERR(keyring_key)) return PTR_ERR(keyring_key); down_read(&keyring_key->sem); if (keyring_key->type != &key_type_logon) { printk_once(KERN_WARNING "%s: key type must be logon\n", __func__); res = -ENOKEY; goto out; } ukp = user_key_payload(keyring_key); if (ukp->datalen != sizeof(struct fscrypt_key)) { res = -EINVAL; goto out; } master_key = (struct fscrypt_key *)ukp->data; BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE); if (master_key->size != FS_AES_256_XTS_KEY_SIZE) { printk_once(KERN_WARNING "%s: key size incorrect: %d\n", __func__, master_key->size); res = -ENOKEY; goto out; } res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); out: up_read(&keyring_key->sem); key_put(keyring_key); return res; } static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode, const char **cipher_str_ret, int *keysize_ret) { if (S_ISREG(inode->i_mode)) { if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) { *cipher_str_ret = "xts(aes)"; *keysize_ret = FS_AES_256_XTS_KEY_SIZE; return 0; } pr_warn_once("fscrypto: unsupported contents encryption mode " "%d for inode %lu\n", ci->ci_data_mode, inode->i_ino); return -ENOKEY; } if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) { if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) { *cipher_str_ret = "cts(cbc(aes))"; *keysize_ret = FS_AES_256_CTS_KEY_SIZE; return 0; } pr_warn_once("fscrypto: unsupported filenames encryption mode " "%d for inode %lu\n", ci->ci_filename_mode, inode->i_ino); return -ENOKEY; } pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n", 
(inode->i_mode & S_IFMT), inode->i_ino); return -ENOKEY; } static void put_crypt_info(struct fscrypt_info *ci) { if (!ci) return; crypto_free_skcipher(ci->ci_ctfm); kmem_cache_free(fscrypt_info_cachep, ci); } int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *crypt_info; struct fscrypt_context ctx; struct crypto_skcipher *ctfm; const char *cipher_str; int keysize; u8 *raw_key = NULL; int res; if (inode->i_crypt_info) return 0; res = fscrypt_initialize(inode->i_sb->s_cop->flags); if (res) return res; if (!inode->i_sb->s_cop->get_context) return -EOPNOTSUPP; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res < 0) { if (!fscrypt_dummy_context_enabled(inode) || inode->i_sb->s_cop->is_encrypted(inode)) return res; /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); } else if (res != sizeof(ctx)) { return -EINVAL; } if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) return -EINVAL; if (ctx.flags & ~FS_POLICY_FLAGS_VALID) return -EINVAL; crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS); if (!crypt_info) return -ENOMEM; crypt_info->ci_flags = ctx.flags; crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; crypt_info->ci_ctfm = NULL; memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, sizeof(crypt_info->ci_master_key)); res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize); if (res) goto out; /* * This cannot be a stack buffer because it is passed to the scatterlist * crypto API as part of key derivation. 
*/ res = -ENOMEM; raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); if (!raw_key) goto out; res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX); if (res && inode->i_sb->s_cop->key_prefix) { int res2 = validate_user_key(crypt_info, &ctx, raw_key, inode->i_sb->s_cop->key_prefix); if (res2) { if (res2 == -ENOKEY) res = -ENOKEY; goto out; } } else if (res) { goto out; } ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); if (!ctfm || IS_ERR(ctfm)) { res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; printk(KERN_DEBUG "%s: error %d (inode %u) allocating crypto tfm\n", __func__, res, (unsigned) inode->i_ino); goto out; } crypt_info->ci_ctfm = ctfm; crypto_skcipher_clear_flags(ctfm, ~0); crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY); res = crypto_skcipher_setkey(ctfm, raw_key, keysize); if (res) goto out; if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL) crypt_info = NULL; out: if (res == -ENOKEY) res = 0; put_crypt_info(crypt_info); kzfree(raw_key); return res; } EXPORT_SYMBOL(fscrypt_get_encryption_info); void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) { struct fscrypt_info *prev; if (ci == NULL) ci = ACCESS_ONCE(inode->i_crypt_info); if (ci == NULL) return; prev = cmpxchg(&inode->i_crypt_info, ci, NULL); if (prev != ci) return; put_crypt_info(ci); } EXPORT_SYMBOL(fscrypt_put_encryption_info);
int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *ci = inode->i_crypt_info; if (!ci || (ci->ci_keyring_key && (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_DEAD))))) return fscrypt_get_crypt_info(inode); return 0; }
int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *crypt_info; struct fscrypt_context ctx; struct crypto_skcipher *ctfm; const char *cipher_str; int keysize; u8 *raw_key = NULL; int res; if (inode->i_crypt_info) return 0; res = fscrypt_initialize(inode->i_sb->s_cop->flags); if (res) return res; if (!inode->i_sb->s_cop->get_context) return -EOPNOTSUPP; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res < 0) { if (!fscrypt_dummy_context_enabled(inode) || inode->i_sb->s_cop->is_encrypted(inode)) return res; /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); } else if (res != sizeof(ctx)) { return -EINVAL; } if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) return -EINVAL; if (ctx.flags & ~FS_POLICY_FLAGS_VALID) return -EINVAL; crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS); if (!crypt_info) return -ENOMEM; crypt_info->ci_flags = ctx.flags; crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; crypt_info->ci_ctfm = NULL; memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, sizeof(crypt_info->ci_master_key)); res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize); if (res) goto out; /* * This cannot be a stack buffer because it is passed to the scatterlist * crypto API as part of key derivation. 
*/ res = -ENOMEM; raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); if (!raw_key) goto out; res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX); if (res && inode->i_sb->s_cop->key_prefix) { int res2 = validate_user_key(crypt_info, &ctx, raw_key, inode->i_sb->s_cop->key_prefix); if (res2) { if (res2 == -ENOKEY) res = -ENOKEY; goto out; } } else if (res) { goto out; } ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); if (!ctfm || IS_ERR(ctfm)) { res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; printk(KERN_DEBUG "%s: error %d (inode %u) allocating crypto tfm\n", __func__, res, (unsigned) inode->i_ino); goto out; } crypt_info->ci_ctfm = ctfm; crypto_skcipher_clear_flags(ctfm, ~0); crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY); res = crypto_skcipher_setkey(ctfm, raw_key, keysize); if (res) goto out; if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL) crypt_info = NULL; out: if (res == -ENOKEY) res = 0; put_crypt_info(crypt_info); kzfree(raw_key); return res; }
{'added': [(98, '\tdown_read(&keyring_key->sem);'), (123, '\tup_read(&keyring_key->sem);'), (169, 'int fscrypt_get_encryption_info(struct inode *inode)'), (179, '\tif (inode->i_crypt_info)'), (180, '\t\treturn 0;'), (181, ''), (261, '\tif (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)'), (262, '\t\tcrypt_info = NULL;'), (270, 'EXPORT_SYMBOL(fscrypt_get_encryption_info);')], 'deleted': [(105, '\tdown_read(&keyring_key->sem);'), (109, '\t\tup_read(&keyring_key->sem);'), (120, '\t\tup_read(&keyring_key->sem);'), (124, '\tup_read(&keyring_key->sem);'), (125, '\tif (res)'), (126, '\t\tgoto out;'), (127, ''), (128, '\tcrypt_info->ci_keyring_key = keyring_key;'), (129, '\treturn 0;'), (172, '\tkey_put(ci->ci_keyring_key);'), (177, 'int fscrypt_get_crypt_info(struct inode *inode)'), (193, 'retry:'), (194, '\tcrypt_info = ACCESS_ONCE(inode->i_crypt_info);'), (195, '\tif (crypt_info) {'), (196, '\t\tif (!crypt_info->ci_keyring_key ||'), (197, '\t\t\t\tkey_validate(crypt_info->ci_keyring_key) == 0)'), (198, '\t\t\treturn 0;'), (199, '\t\tfscrypt_put_encryption_info(inode, crypt_info);'), (200, '\t\tgoto retry;'), (201, '\t}'), (232, '\tcrypt_info->ci_keyring_key = NULL;'), (276, '\tkzfree(raw_key);'), (277, '\traw_key = NULL;'), (278, '\tif (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {'), (279, '\t\tput_crypt_info(crypt_info);'), (280, '\t\tgoto retry;'), (281, '\t}'), (282, '\treturn 0;'), (283, ''), (308, ''), (309, 'int fscrypt_get_encryption_info(struct inode *inode)'), (310, '{'), (311, '\tstruct fscrypt_info *ci = inode->i_crypt_info;'), (312, ''), (313, '\tif (!ci ||'), (314, '\t\t(ci->ci_keyring_key &&'), (315, '\t\t (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |'), (316, '\t\t\t\t\t (1 << KEY_FLAG_REVOKED) |'), (317, '\t\t\t\t\t (1 << KEY_FLAG_DEAD)))))'), (318, '\t\treturn fscrypt_get_crypt_info(inode);'), (319, '\treturn 0;'), (320, '}'), (321, 'EXPORT_SYMBOL(fscrypt_get_encryption_info);')]}
9
43
231
1,351
11
69
4
https://github.com/torvalds/linux
CVE-2017-7374
CWE-476
2,819
ims-pcu.c
C
ims_pcu_get_cdc_union_desc
/* * Driver for IMS Passenger Control Unit Devices * * Copyright (C) 2013 The IMS Company * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. */ #include <linux/completion.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/usb/input.h> #include <linux/usb/cdc.h> #include <asm/unaligned.h> #define IMS_PCU_KEYMAP_LEN 32 struct ims_pcu_buttons { struct input_dev *input; char name[32]; char phys[32]; unsigned short keymap[IMS_PCU_KEYMAP_LEN]; }; struct ims_pcu_gamepad { struct input_dev *input; char name[32]; char phys[32]; }; struct ims_pcu_backlight { struct led_classdev cdev; struct work_struct work; enum led_brightness desired_brightness; char name[32]; }; #define IMS_PCU_PART_NUMBER_LEN 15 #define IMS_PCU_SERIAL_NUMBER_LEN 8 #define IMS_PCU_DOM_LEN 8 #define IMS_PCU_FW_VERSION_LEN (9 + 1) #define IMS_PCU_BL_VERSION_LEN (9 + 1) #define IMS_PCU_BL_RESET_REASON_LEN (2 + 1) #define IMS_PCU_PCU_B_DEVICE_ID 5 #define IMS_PCU_BUF_SIZE 128 struct ims_pcu { struct usb_device *udev; struct device *dev; /* control interface's device, used for logging */ unsigned int device_no; bool bootloader_mode; char part_number[IMS_PCU_PART_NUMBER_LEN]; char serial_number[IMS_PCU_SERIAL_NUMBER_LEN]; char date_of_manufacturing[IMS_PCU_DOM_LEN]; char fw_version[IMS_PCU_FW_VERSION_LEN]; char bl_version[IMS_PCU_BL_VERSION_LEN]; char reset_reason[IMS_PCU_BL_RESET_REASON_LEN]; int update_firmware_status; u8 device_id; u8 ofn_reg_addr; struct usb_interface *ctrl_intf; struct usb_endpoint_descriptor *ep_ctrl; struct urb *urb_ctrl; u8 *urb_ctrl_buf; dma_addr_t ctrl_dma; size_t max_ctrl_size; struct usb_interface *data_intf; struct usb_endpoint_descriptor *ep_in; struct urb 
*urb_in; u8 *urb_in_buf; dma_addr_t read_dma; size_t max_in_size; struct usb_endpoint_descriptor *ep_out; u8 *urb_out_buf; size_t max_out_size; u8 read_buf[IMS_PCU_BUF_SIZE]; u8 read_pos; u8 check_sum; bool have_stx; bool have_dle; u8 cmd_buf[IMS_PCU_BUF_SIZE]; u8 ack_id; u8 expected_response; u8 cmd_buf_len; struct completion cmd_done; struct mutex cmd_mutex; u32 fw_start_addr; u32 fw_end_addr; struct completion async_firmware_done; struct ims_pcu_buttons buttons; struct ims_pcu_gamepad *gamepad; struct ims_pcu_backlight backlight; bool setup_complete; /* Input and LED devices have been created */ }; /********************************************************************* * Buttons Input device support * *********************************************************************/ static const unsigned short ims_pcu_keymap_1[] = { [1] = KEY_ATTENDANT_OFF, [2] = KEY_ATTENDANT_ON, [3] = KEY_LIGHTS_TOGGLE, [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_INFO, }; static const unsigned short ims_pcu_keymap_2[] = { [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_INFO, }; static const unsigned short ims_pcu_keymap_3[] = { [1] = KEY_HOMEPAGE, [2] = KEY_ATTENDANT_TOGGLE, [3] = KEY_LIGHTS_TOGGLE, [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_DISPLAYTOGGLE, [18] = KEY_PLAYPAUSE, }; static const unsigned short ims_pcu_keymap_4[] = { [1] = KEY_ATTENDANT_OFF, [2] = KEY_ATTENDANT_ON, [3] = KEY_LIGHTS_TOGGLE, [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_INFO, [18] = KEY_PLAYPAUSE, }; static const unsigned short ims_pcu_keymap_5[] = { [1] = KEY_ATTENDANT_OFF, [2] = KEY_ATTENDANT_ON, [3] = KEY_LIGHTS_TOGGLE, }; struct ims_pcu_device_info { const unsigned short *keymap; size_t keymap_len; bool has_gamepad; }; #define IMS_PCU_DEVINFO(_n, _gamepad) \ [_n] = { \ .keymap = ims_pcu_keymap_##_n, \ .keymap_len = ARRAY_SIZE(ims_pcu_keymap_##_n), \ .has_gamepad = _gamepad, \ } static const struct ims_pcu_device_info ims_pcu_device_info[] = { IMS_PCU_DEVINFO(1, true), 
IMS_PCU_DEVINFO(2, true), IMS_PCU_DEVINFO(3, true), IMS_PCU_DEVINFO(4, true), IMS_PCU_DEVINFO(5, false), }; static void ims_pcu_buttons_report(struct ims_pcu *pcu, u32 data) { struct ims_pcu_buttons *buttons = &pcu->buttons; struct input_dev *input = buttons->input; int i; for (i = 0; i < 32; i++) { unsigned short keycode = buttons->keymap[i]; if (keycode != KEY_RESERVED) input_report_key(input, keycode, data & (1UL << i)); } input_sync(input); } static int ims_pcu_setup_buttons(struct ims_pcu *pcu, const unsigned short *keymap, size_t keymap_len) { struct ims_pcu_buttons *buttons = &pcu->buttons; struct input_dev *input; int i; int error; input = input_allocate_device(); if (!input) { dev_err(pcu->dev, "Not enough memory for input input device\n"); return -ENOMEM; } snprintf(buttons->name, sizeof(buttons->name), "IMS PCU#%d Button Interface", pcu->device_no); usb_make_path(pcu->udev, buttons->phys, sizeof(buttons->phys)); strlcat(buttons->phys, "/input0", sizeof(buttons->phys)); memcpy(buttons->keymap, keymap, sizeof(*keymap) * keymap_len); input->name = buttons->name; input->phys = buttons->phys; usb_to_input_id(pcu->udev, &input->id); input->dev.parent = &pcu->ctrl_intf->dev; input->keycode = buttons->keymap; input->keycodemax = ARRAY_SIZE(buttons->keymap); input->keycodesize = sizeof(buttons->keymap[0]); __set_bit(EV_KEY, input->evbit); for (i = 0; i < IMS_PCU_KEYMAP_LEN; i++) __set_bit(buttons->keymap[i], input->keybit); __clear_bit(KEY_RESERVED, input->keybit); error = input_register_device(input); if (error) { dev_err(pcu->dev, "Failed to register buttons input device: %d\n", error); input_free_device(input); return error; } buttons->input = input; return 0; } static void ims_pcu_destroy_buttons(struct ims_pcu *pcu) { struct ims_pcu_buttons *buttons = &pcu->buttons; input_unregister_device(buttons->input); } /********************************************************************* * Gamepad Input device support * 
*********************************************************************/ static void ims_pcu_gamepad_report(struct ims_pcu *pcu, u32 data) { struct ims_pcu_gamepad *gamepad = pcu->gamepad; struct input_dev *input = gamepad->input; int x, y; x = !!(data & (1 << 14)) - !!(data & (1 << 13)); y = !!(data & (1 << 12)) - !!(data & (1 << 11)); input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_key(input, BTN_A, data & (1 << 7)); input_report_key(input, BTN_B, data & (1 << 8)); input_report_key(input, BTN_X, data & (1 << 9)); input_report_key(input, BTN_Y, data & (1 << 10)); input_report_key(input, BTN_START, data & (1 << 15)); input_report_key(input, BTN_SELECT, data & (1 << 16)); input_sync(input); } static int ims_pcu_setup_gamepad(struct ims_pcu *pcu) { struct ims_pcu_gamepad *gamepad; struct input_dev *input; int error; gamepad = kzalloc(sizeof(struct ims_pcu_gamepad), GFP_KERNEL); input = input_allocate_device(); if (!gamepad || !input) { dev_err(pcu->dev, "Not enough memory for gamepad device\n"); error = -ENOMEM; goto err_free_mem; } gamepad->input = input; snprintf(gamepad->name, sizeof(gamepad->name), "IMS PCU#%d Gamepad Interface", pcu->device_no); usb_make_path(pcu->udev, gamepad->phys, sizeof(gamepad->phys)); strlcat(gamepad->phys, "/input1", sizeof(gamepad->phys)); input->name = gamepad->name; input->phys = gamepad->phys; usb_to_input_id(pcu->udev, &input->id); input->dev.parent = &pcu->ctrl_intf->dev; __set_bit(EV_KEY, input->evbit); __set_bit(BTN_A, input->keybit); __set_bit(BTN_B, input->keybit); __set_bit(BTN_X, input->keybit); __set_bit(BTN_Y, input->keybit); __set_bit(BTN_START, input->keybit); __set_bit(BTN_SELECT, input->keybit); __set_bit(EV_ABS, input->evbit); input_set_abs_params(input, ABS_X, -1, 1, 0, 0); input_set_abs_params(input, ABS_Y, -1, 1, 0, 0); error = input_register_device(input); if (error) { dev_err(pcu->dev, "Failed to register gamepad input device: %d\n", error); goto err_free_mem; } pcu->gamepad = 
gamepad; return 0; err_free_mem: input_free_device(input); kfree(gamepad); return -ENOMEM; } static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu) { struct ims_pcu_gamepad *gamepad = pcu->gamepad; input_unregister_device(gamepad->input); kfree(gamepad); } /********************************************************************* * PCU Communication protocol handling * *********************************************************************/ #define IMS_PCU_PROTOCOL_STX 0x02 #define IMS_PCU_PROTOCOL_ETX 0x03 #define IMS_PCU_PROTOCOL_DLE 0x10 /* PCU commands */ #define IMS_PCU_CMD_STATUS 0xa0 #define IMS_PCU_CMD_PCU_RESET 0xa1 #define IMS_PCU_CMD_RESET_REASON 0xa2 #define IMS_PCU_CMD_SEND_BUTTONS 0xa3 #define IMS_PCU_CMD_JUMP_TO_BTLDR 0xa4 #define IMS_PCU_CMD_GET_INFO 0xa5 #define IMS_PCU_CMD_SET_BRIGHTNESS 0xa6 #define IMS_PCU_CMD_EEPROM 0xa7 #define IMS_PCU_CMD_GET_FW_VERSION 0xa8 #define IMS_PCU_CMD_GET_BL_VERSION 0xa9 #define IMS_PCU_CMD_SET_INFO 0xab #define IMS_PCU_CMD_GET_BRIGHTNESS 0xac #define IMS_PCU_CMD_GET_DEVICE_ID 0xae #define IMS_PCU_CMD_SPECIAL_INFO 0xb0 #define IMS_PCU_CMD_BOOTLOADER 0xb1 /* Pass data to bootloader */ #define IMS_PCU_CMD_OFN_SET_CONFIG 0xb3 #define IMS_PCU_CMD_OFN_GET_CONFIG 0xb4 /* PCU responses */ #define IMS_PCU_RSP_STATUS 0xc0 #define IMS_PCU_RSP_PCU_RESET 0 /* Originally 0xc1 */ #define IMS_PCU_RSP_RESET_REASON 0xc2 #define IMS_PCU_RSP_SEND_BUTTONS 0xc3 #define IMS_PCU_RSP_JUMP_TO_BTLDR 0 /* Originally 0xc4 */ #define IMS_PCU_RSP_GET_INFO 0xc5 #define IMS_PCU_RSP_SET_BRIGHTNESS 0xc6 #define IMS_PCU_RSP_EEPROM 0xc7 #define IMS_PCU_RSP_GET_FW_VERSION 0xc8 #define IMS_PCU_RSP_GET_BL_VERSION 0xc9 #define IMS_PCU_RSP_SET_INFO 0xcb #define IMS_PCU_RSP_GET_BRIGHTNESS 0xcc #define IMS_PCU_RSP_CMD_INVALID 0xcd #define IMS_PCU_RSP_GET_DEVICE_ID 0xce #define IMS_PCU_RSP_SPECIAL_INFO 0xd0 #define IMS_PCU_RSP_BOOTLOADER 0xd1 /* Bootloader response */ #define IMS_PCU_RSP_OFN_SET_CONFIG 0xd2 #define IMS_PCU_RSP_OFN_GET_CONFIG 0xd3 #define 
IMS_PCU_RSP_EVNT_BUTTONS 0xe0 /* Unsolicited, button state */ #define IMS_PCU_GAMEPAD_MASK 0x0001ff80UL /* Bits 7 through 16 */ #define IMS_PCU_MIN_PACKET_LEN 3 #define IMS_PCU_DATA_OFFSET 2 #define IMS_PCU_CMD_WRITE_TIMEOUT 100 /* msec */ #define IMS_PCU_CMD_RESPONSE_TIMEOUT 500 /* msec */ static void ims_pcu_report_events(struct ims_pcu *pcu) { u32 data = get_unaligned_be32(&pcu->read_buf[3]); ims_pcu_buttons_report(pcu, data & ~IMS_PCU_GAMEPAD_MASK); if (pcu->gamepad) ims_pcu_gamepad_report(pcu, data); } static void ims_pcu_handle_response(struct ims_pcu *pcu) { switch (pcu->read_buf[0]) { case IMS_PCU_RSP_EVNT_BUTTONS: if (likely(pcu->setup_complete)) ims_pcu_report_events(pcu); break; default: /* * See if we got command completion. * If both the sequence and response code match save * the data and signal completion. */ if (pcu->read_buf[0] == pcu->expected_response && pcu->read_buf[1] == pcu->ack_id - 1) { memcpy(pcu->cmd_buf, pcu->read_buf, pcu->read_pos); pcu->cmd_buf_len = pcu->read_pos; complete(&pcu->cmd_done); } break; } } static void ims_pcu_process_data(struct ims_pcu *pcu, struct urb *urb) { int i; for (i = 0; i < urb->actual_length; i++) { u8 data = pcu->urb_in_buf[i]; /* Skip everything until we get Start Xmit */ if (!pcu->have_stx && data != IMS_PCU_PROTOCOL_STX) continue; if (pcu->have_dle) { pcu->have_dle = false; pcu->read_buf[pcu->read_pos++] = data; pcu->check_sum += data; continue; } switch (data) { case IMS_PCU_PROTOCOL_STX: if (pcu->have_stx) dev_warn(pcu->dev, "Unexpected STX at byte %d, discarding old data\n", pcu->read_pos); pcu->have_stx = true; pcu->have_dle = false; pcu->read_pos = 0; pcu->check_sum = 0; break; case IMS_PCU_PROTOCOL_DLE: pcu->have_dle = true; break; case IMS_PCU_PROTOCOL_ETX: if (pcu->read_pos < IMS_PCU_MIN_PACKET_LEN) { dev_warn(pcu->dev, "Short packet received (%d bytes), ignoring\n", pcu->read_pos); } else if (pcu->check_sum != 0) { dev_warn(pcu->dev, "Invalid checksum in packet (%d bytes), ignoring\n", 
pcu->read_pos); } else { ims_pcu_handle_response(pcu); } pcu->have_stx = false; pcu->have_dle = false; pcu->read_pos = 0; break; default: pcu->read_buf[pcu->read_pos++] = data; pcu->check_sum += data; break; } } } static bool ims_pcu_byte_needs_escape(u8 byte) { return byte == IMS_PCU_PROTOCOL_STX || byte == IMS_PCU_PROTOCOL_ETX || byte == IMS_PCU_PROTOCOL_DLE; } static int ims_pcu_send_cmd_chunk(struct ims_pcu *pcu, u8 command, int chunk, int len) { int error; error = usb_bulk_msg(pcu->udev, usb_sndbulkpipe(pcu->udev, pcu->ep_out->bEndpointAddress), pcu->urb_out_buf, len, NULL, IMS_PCU_CMD_WRITE_TIMEOUT); if (error < 0) { dev_dbg(pcu->dev, "Sending 0x%02x command failed at chunk %d: %d\n", command, chunk, error); return error; } return 0; } static int ims_pcu_send_command(struct ims_pcu *pcu, u8 command, const u8 *data, int len) { int count = 0; int chunk = 0; int delta; int i; int error; u8 csum = 0; u8 ack_id; pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_STX; /* We know the command need not be escaped */ pcu->urb_out_buf[count++] = command; csum += command; ack_id = pcu->ack_id++; if (ack_id == 0xff) ack_id = pcu->ack_id++; if (ims_pcu_byte_needs_escape(ack_id)) pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE; pcu->urb_out_buf[count++] = ack_id; csum += ack_id; for (i = 0; i < len; i++) { delta = ims_pcu_byte_needs_escape(data[i]) ? 2 : 1; if (count + delta >= pcu->max_out_size) { error = ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count); if (error) return error; count = 0; } if (delta == 2) pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE; pcu->urb_out_buf[count++] = data[i]; csum += data[i]; } csum = 1 + ~csum; delta = ims_pcu_byte_needs_escape(csum) ? 
3 : 2; if (count + delta >= pcu->max_out_size) { error = ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count); if (error) return error; count = 0; } if (delta == 3) pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE; pcu->urb_out_buf[count++] = csum; pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_ETX; return ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count); } static int __ims_pcu_execute_command(struct ims_pcu *pcu, u8 command, const void *data, size_t len, u8 expected_response, int response_time) { int error; pcu->expected_response = expected_response; init_completion(&pcu->cmd_done); error = ims_pcu_send_command(pcu, command, data, len); if (error) return error; if (expected_response && !wait_for_completion_timeout(&pcu->cmd_done, msecs_to_jiffies(response_time))) { dev_dbg(pcu->dev, "Command 0x%02x timed out\n", command); return -ETIMEDOUT; } return 0; } #define ims_pcu_execute_command(pcu, code, data, len) \ __ims_pcu_execute_command(pcu, \ IMS_PCU_CMD_##code, data, len, \ IMS_PCU_RSP_##code, \ IMS_PCU_CMD_RESPONSE_TIMEOUT) #define ims_pcu_execute_query(pcu, code) \ ims_pcu_execute_command(pcu, code, NULL, 0) /* Bootloader commands */ #define IMS_PCU_BL_CMD_QUERY_DEVICE 0xa1 #define IMS_PCU_BL_CMD_UNLOCK_CONFIG 0xa2 #define IMS_PCU_BL_CMD_ERASE_APP 0xa3 #define IMS_PCU_BL_CMD_PROGRAM_DEVICE 0xa4 #define IMS_PCU_BL_CMD_PROGRAM_COMPLETE 0xa5 #define IMS_PCU_BL_CMD_READ_APP 0xa6 #define IMS_PCU_BL_CMD_RESET_DEVICE 0xa7 #define IMS_PCU_BL_CMD_LAUNCH_APP 0xa8 /* Bootloader commands */ #define IMS_PCU_BL_RSP_QUERY_DEVICE 0xc1 #define IMS_PCU_BL_RSP_UNLOCK_CONFIG 0xc2 #define IMS_PCU_BL_RSP_ERASE_APP 0xc3 #define IMS_PCU_BL_RSP_PROGRAM_DEVICE 0xc4 #define IMS_PCU_BL_RSP_PROGRAM_COMPLETE 0xc5 #define IMS_PCU_BL_RSP_READ_APP 0xc6 #define IMS_PCU_BL_RSP_RESET_DEVICE 0 /* originally 0xa7 */ #define IMS_PCU_BL_RSP_LAUNCH_APP 0 /* originally 0xa8 */ #define IMS_PCU_BL_DATA_OFFSET 3 static int __ims_pcu_execute_bl_command(struct ims_pcu *pcu, u8 command, const void *data, 
size_t len, u8 expected_response, int response_time) { int error; pcu->cmd_buf[0] = command; if (data) memcpy(&pcu->cmd_buf[1], data, len); error = __ims_pcu_execute_command(pcu, IMS_PCU_CMD_BOOTLOADER, pcu->cmd_buf, len + 1, expected_response ? IMS_PCU_RSP_BOOTLOADER : 0, response_time); if (error) { dev_err(pcu->dev, "Failure when sending 0x%02x command to bootloader, error: %d\n", pcu->cmd_buf[0], error); return error; } if (expected_response && pcu->cmd_buf[2] != expected_response) { dev_err(pcu->dev, "Unexpected response from bootloader: 0x%02x, wanted 0x%02x\n", pcu->cmd_buf[2], expected_response); return -EINVAL; } return 0; } #define ims_pcu_execute_bl_command(pcu, code, data, len, timeout) \ __ims_pcu_execute_bl_command(pcu, \ IMS_PCU_BL_CMD_##code, data, len, \ IMS_PCU_BL_RSP_##code, timeout) \ #define IMS_PCU_INFO_PART_OFFSET 2 #define IMS_PCU_INFO_DOM_OFFSET 17 #define IMS_PCU_INFO_SERIAL_OFFSET 25 #define IMS_PCU_SET_INFO_SIZE 31 static int ims_pcu_get_info(struct ims_pcu *pcu) { int error; error = ims_pcu_execute_query(pcu, GET_INFO); if (error) { dev_err(pcu->dev, "GET_INFO command failed, error: %d\n", error); return error; } memcpy(pcu->part_number, &pcu->cmd_buf[IMS_PCU_INFO_PART_OFFSET], sizeof(pcu->part_number)); memcpy(pcu->date_of_manufacturing, &pcu->cmd_buf[IMS_PCU_INFO_DOM_OFFSET], sizeof(pcu->date_of_manufacturing)); memcpy(pcu->serial_number, &pcu->cmd_buf[IMS_PCU_INFO_SERIAL_OFFSET], sizeof(pcu->serial_number)); return 0; } static int ims_pcu_set_info(struct ims_pcu *pcu) { int error; memcpy(&pcu->cmd_buf[IMS_PCU_INFO_PART_OFFSET], pcu->part_number, sizeof(pcu->part_number)); memcpy(&pcu->cmd_buf[IMS_PCU_INFO_DOM_OFFSET], pcu->date_of_manufacturing, sizeof(pcu->date_of_manufacturing)); memcpy(&pcu->cmd_buf[IMS_PCU_INFO_SERIAL_OFFSET], pcu->serial_number, sizeof(pcu->serial_number)); error = ims_pcu_execute_command(pcu, SET_INFO, &pcu->cmd_buf[IMS_PCU_DATA_OFFSET], IMS_PCU_SET_INFO_SIZE); if (error) { dev_err(pcu->dev, "Failed to update 
device information, error: %d\n", error); return error; } return 0; } static int ims_pcu_switch_to_bootloader(struct ims_pcu *pcu) { int error; /* Execute jump to the bootoloader */ error = ims_pcu_execute_command(pcu, JUMP_TO_BTLDR, NULL, 0); if (error) { dev_err(pcu->dev, "Failure when sending JUMP TO BOOLTLOADER command, error: %d\n", error); return error; } return 0; } /********************************************************************* * Firmware Update handling * *********************************************************************/ #define IMS_PCU_FIRMWARE_NAME "imspcu.fw" struct ims_pcu_flash_fmt { __le32 addr; u8 len; u8 data[]; }; static unsigned int ims_pcu_count_fw_records(const struct firmware *fw) { const struct ihex_binrec *rec = (const struct ihex_binrec *)fw->data; unsigned int count = 0; while (rec) { count++; rec = ihex_next_binrec(rec); } return count; } static int ims_pcu_verify_block(struct ims_pcu *pcu, u32 addr, u8 len, const u8 *data) { struct ims_pcu_flash_fmt *fragment; int error; fragment = (void *)&pcu->cmd_buf[1]; put_unaligned_le32(addr, &fragment->addr); fragment->len = len; error = ims_pcu_execute_bl_command(pcu, READ_APP, NULL, 5, IMS_PCU_CMD_RESPONSE_TIMEOUT); if (error) { dev_err(pcu->dev, "Failed to retrieve block at 0x%08x, len %d, error: %d\n", addr, len, error); return error; } fragment = (void *)&pcu->cmd_buf[IMS_PCU_BL_DATA_OFFSET]; if (get_unaligned_le32(&fragment->addr) != addr || fragment->len != len) { dev_err(pcu->dev, "Wrong block when retrieving 0x%08x (0x%08x), len %d (%d)\n", addr, get_unaligned_le32(&fragment->addr), len, fragment->len); return -EINVAL; } if (memcmp(fragment->data, data, len)) { dev_err(pcu->dev, "Mismatch in block at 0x%08x, len %d\n", addr, len); return -EINVAL; } return 0; } static int ims_pcu_flash_firmware(struct ims_pcu *pcu, const struct firmware *fw, unsigned int n_fw_records) { const struct ihex_binrec *rec = (const struct ihex_binrec *)fw->data; struct ims_pcu_flash_fmt *fragment; 
unsigned int count = 0; u32 addr; u8 len; int error; error = ims_pcu_execute_bl_command(pcu, ERASE_APP, NULL, 0, 2000); if (error) { dev_err(pcu->dev, "Failed to erase application image, error: %d\n", error); return error; } while (rec) { /* * The firmware format is messed up for some reason. * The address twice that of what is needed for some * reason and we end up overwriting half of the data * with the next record. */ addr = be32_to_cpu(rec->addr) / 2; len = be16_to_cpu(rec->len); fragment = (void *)&pcu->cmd_buf[1]; put_unaligned_le32(addr, &fragment->addr); fragment->len = len; memcpy(fragment->data, rec->data, len); error = ims_pcu_execute_bl_command(pcu, PROGRAM_DEVICE, NULL, len + 5, IMS_PCU_CMD_RESPONSE_TIMEOUT); if (error) { dev_err(pcu->dev, "Failed to write block at 0x%08x, len %d, error: %d\n", addr, len, error); return error; } if (addr >= pcu->fw_start_addr && addr < pcu->fw_end_addr) { error = ims_pcu_verify_block(pcu, addr, len, rec->data); if (error) return error; } count++; pcu->update_firmware_status = (count * 100) / n_fw_records; rec = ihex_next_binrec(rec); } error = ims_pcu_execute_bl_command(pcu, PROGRAM_COMPLETE, NULL, 0, 2000); if (error) dev_err(pcu->dev, "Failed to send PROGRAM_COMPLETE, error: %d\n", error); return 0; } static int ims_pcu_handle_firmware_update(struct ims_pcu *pcu, const struct firmware *fw) { unsigned int n_fw_records; int retval; dev_info(pcu->dev, "Updating firmware %s, size: %zu\n", IMS_PCU_FIRMWARE_NAME, fw->size); n_fw_records = ims_pcu_count_fw_records(fw); retval = ims_pcu_flash_firmware(pcu, fw, n_fw_records); if (retval) goto out; retval = ims_pcu_execute_bl_command(pcu, LAUNCH_APP, NULL, 0, 0); if (retval) dev_err(pcu->dev, "Failed to start application image, error: %d\n", retval); out: pcu->update_firmware_status = retval; sysfs_notify(&pcu->dev->kobj, NULL, "update_firmware_status"); return retval; } static void ims_pcu_process_async_firmware(const struct firmware *fw, void *context) { struct ims_pcu *pcu 
= context; int error; if (!fw) { dev_err(pcu->dev, "Failed to get firmware %s\n", IMS_PCU_FIRMWARE_NAME); goto out; } error = ihex_validate_fw(fw); if (error) { dev_err(pcu->dev, "Firmware %s is invalid\n", IMS_PCU_FIRMWARE_NAME); goto out; } mutex_lock(&pcu->cmd_mutex); ims_pcu_handle_firmware_update(pcu, fw); mutex_unlock(&pcu->cmd_mutex); release_firmware(fw); out: complete(&pcu->async_firmware_done); } /********************************************************************* * Backlight LED device support * *********************************************************************/ #define IMS_PCU_MAX_BRIGHTNESS 31998 static void ims_pcu_backlight_work(struct work_struct *work) { struct ims_pcu_backlight *backlight = container_of(work, struct ims_pcu_backlight, work); struct ims_pcu *pcu = container_of(backlight, struct ims_pcu, backlight); int desired_brightness = backlight->desired_brightness; __le16 br_val = cpu_to_le16(desired_brightness); int error; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_execute_command(pcu, SET_BRIGHTNESS, &br_val, sizeof(br_val)); if (error && error != -ENODEV) dev_warn(pcu->dev, "Failed to set desired brightness %u, error: %d\n", desired_brightness, error); mutex_unlock(&pcu->cmd_mutex); } static void ims_pcu_backlight_set_brightness(struct led_classdev *cdev, enum led_brightness value) { struct ims_pcu_backlight *backlight = container_of(cdev, struct ims_pcu_backlight, cdev); backlight->desired_brightness = value; schedule_work(&backlight->work); } static enum led_brightness ims_pcu_backlight_get_brightness(struct led_classdev *cdev) { struct ims_pcu_backlight *backlight = container_of(cdev, struct ims_pcu_backlight, cdev); struct ims_pcu *pcu = container_of(backlight, struct ims_pcu, backlight); int brightness; int error; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_execute_query(pcu, GET_BRIGHTNESS); if (error) { dev_warn(pcu->dev, "Failed to get current brightness, error: %d\n", error); /* Assume the LED is OFF */ brightness = 
LED_OFF; } else { brightness = get_unaligned_le16(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET]); } mutex_unlock(&pcu->cmd_mutex); return brightness; } static int ims_pcu_setup_backlight(struct ims_pcu *pcu) { struct ims_pcu_backlight *backlight = &pcu->backlight; int error; INIT_WORK(&backlight->work, ims_pcu_backlight_work); snprintf(backlight->name, sizeof(backlight->name), "pcu%d::kbd_backlight", pcu->device_no); backlight->cdev.name = backlight->name; backlight->cdev.max_brightness = IMS_PCU_MAX_BRIGHTNESS; backlight->cdev.brightness_get = ims_pcu_backlight_get_brightness; backlight->cdev.brightness_set = ims_pcu_backlight_set_brightness; error = led_classdev_register(pcu->dev, &backlight->cdev); if (error) { dev_err(pcu->dev, "Failed to register backlight LED device, error: %d\n", error); return error; } return 0; } static void ims_pcu_destroy_backlight(struct ims_pcu *pcu) { struct ims_pcu_backlight *backlight = &pcu->backlight; led_classdev_unregister(&backlight->cdev); cancel_work_sync(&backlight->work); } /********************************************************************* * Sysfs attributes handling * *********************************************************************/ struct ims_pcu_attribute { struct device_attribute dattr; size_t field_offset; int field_length; }; static ssize_t ims_pcu_attribute_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_attribute *attr = container_of(dattr, struct ims_pcu_attribute, dattr); char *field = (char *)pcu + attr->field_offset; return scnprintf(buf, PAGE_SIZE, "%.*s\n", attr->field_length, field); } static ssize_t ims_pcu_attribute_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_attribute *attr = container_of(dattr, struct 
ims_pcu_attribute, dattr); char *field = (char *)pcu + attr->field_offset; size_t data_len; int error; if (count > attr->field_length) return -EINVAL; data_len = strnlen(buf, attr->field_length); if (data_len > attr->field_length) return -EINVAL; error = mutex_lock_interruptible(&pcu->cmd_mutex); if (error) return error; memset(field, 0, attr->field_length); memcpy(field, buf, data_len); error = ims_pcu_set_info(pcu); /* * Even if update failed, let's fetch the info again as we just * clobbered one of the fields. */ ims_pcu_get_info(pcu); mutex_unlock(&pcu->cmd_mutex); return error < 0 ? error : count; } #define IMS_PCU_ATTR(_field, _mode) \ struct ims_pcu_attribute ims_pcu_attr_##_field = { \ .dattr = __ATTR(_field, _mode, \ ims_pcu_attribute_show, \ ims_pcu_attribute_store), \ .field_offset = offsetof(struct ims_pcu, _field), \ .field_length = sizeof(((struct ims_pcu *)NULL)->_field), \ } #define IMS_PCU_RO_ATTR(_field) \ IMS_PCU_ATTR(_field, S_IRUGO) #define IMS_PCU_RW_ATTR(_field) \ IMS_PCU_ATTR(_field, S_IRUGO | S_IWUSR) static IMS_PCU_RW_ATTR(part_number); static IMS_PCU_RW_ATTR(serial_number); static IMS_PCU_RW_ATTR(date_of_manufacturing); static IMS_PCU_RO_ATTR(fw_version); static IMS_PCU_RO_ATTR(bl_version); static IMS_PCU_RO_ATTR(reset_reason); static ssize_t ims_pcu_reset_device(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { static const u8 reset_byte = 1; struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int value; int error; error = kstrtoint(buf, 0, &value); if (error) return error; if (value != 1) return -EINVAL; dev_info(pcu->dev, "Attempting to reset device\n"); error = ims_pcu_execute_command(pcu, PCU_RESET, &reset_byte, 1); if (error) { dev_info(pcu->dev, "Failed to reset device, error: %d\n", error); return error; } return count; } static DEVICE_ATTR(reset_device, S_IWUSR, NULL, ims_pcu_reset_device); static ssize_t ims_pcu_update_firmware_store(struct device 
*dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); const struct firmware *fw = NULL; int value; int error; error = kstrtoint(buf, 0, &value); if (error) return error; if (value != 1) return -EINVAL; error = mutex_lock_interruptible(&pcu->cmd_mutex); if (error) return error; error = request_ihex_firmware(&fw, IMS_PCU_FIRMWARE_NAME, pcu->dev); if (error) { dev_err(pcu->dev, "Failed to request firmware %s, error: %d\n", IMS_PCU_FIRMWARE_NAME, error); goto out; } /* * If we are already in bootloader mode we can proceed with * flashing the firmware. * * If we are in application mode, then we need to switch into * bootloader mode, which will cause the device to disconnect * and reconnect as different device. */ if (pcu->bootloader_mode) error = ims_pcu_handle_firmware_update(pcu, fw); else error = ims_pcu_switch_to_bootloader(pcu); release_firmware(fw); out: mutex_unlock(&pcu->cmd_mutex); return error ?: count; } static DEVICE_ATTR(update_firmware, S_IWUSR, NULL, ims_pcu_update_firmware_store); static ssize_t ims_pcu_update_firmware_status_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); return scnprintf(buf, PAGE_SIZE, "%d\n", pcu->update_firmware_status); } static DEVICE_ATTR(update_firmware_status, S_IRUGO, ims_pcu_update_firmware_status_show, NULL); static struct attribute *ims_pcu_attrs[] = { &ims_pcu_attr_part_number.dattr.attr, &ims_pcu_attr_serial_number.dattr.attr, &ims_pcu_attr_date_of_manufacturing.dattr.attr, &ims_pcu_attr_fw_version.dattr.attr, &ims_pcu_attr_bl_version.dattr.attr, &ims_pcu_attr_reset_reason.dattr.attr, &dev_attr_reset_device.attr, &dev_attr_update_firmware.attr, &dev_attr_update_firmware_status.attr, NULL }; static umode_t ims_pcu_is_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { struct 
device *dev = container_of(kobj, struct device, kobj); struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); umode_t mode = attr->mode; if (pcu->bootloader_mode) { if (attr != &dev_attr_update_firmware_status.attr && attr != &dev_attr_update_firmware.attr && attr != &dev_attr_reset_device.attr) { mode = 0; } } else { if (attr == &dev_attr_update_firmware_status.attr) mode = 0; } return mode; } static const struct attribute_group ims_pcu_attr_group = { .is_visible = ims_pcu_is_attr_visible, .attrs = ims_pcu_attrs, }; /* Support for a separate OFN attribute group */ #define OFN_REG_RESULT_OFFSET 2 static int ims_pcu_read_ofn_config(struct ims_pcu *pcu, u8 addr, u8 *data) { int error; s16 result; error = ims_pcu_execute_command(pcu, OFN_GET_CONFIG, &addr, sizeof(addr)); if (error) return error; result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET); if (result < 0) return -EIO; /* We only need LSB */ *data = pcu->cmd_buf[OFN_REG_RESULT_OFFSET]; return 0; } static int ims_pcu_write_ofn_config(struct ims_pcu *pcu, u8 addr, u8 data) { u8 buffer[] = { addr, data }; int error; s16 result; error = ims_pcu_execute_command(pcu, OFN_SET_CONFIG, &buffer, sizeof(buffer)); if (error) return error; result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET); if (result < 0) return -EIO; return 0; } static ssize_t ims_pcu_ofn_reg_data_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int error; u8 data; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, pcu->ofn_reg_addr, &data); mutex_unlock(&pcu->cmd_mutex); if (error) return error; return scnprintf(buf, PAGE_SIZE, "%x\n", data); } static ssize_t ims_pcu_ofn_reg_data_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = 
usb_get_intfdata(intf); int error; u8 value; error = kstrtou8(buf, 0, &value); if (error) return error; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_write_ofn_config(pcu, pcu->ofn_reg_addr, value); mutex_unlock(&pcu->cmd_mutex); return error ?: count; } static DEVICE_ATTR(reg_data, S_IRUGO | S_IWUSR, ims_pcu_ofn_reg_data_show, ims_pcu_ofn_reg_data_store); static ssize_t ims_pcu_ofn_reg_addr_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int error; mutex_lock(&pcu->cmd_mutex); error = scnprintf(buf, PAGE_SIZE, "%x\n", pcu->ofn_reg_addr); mutex_unlock(&pcu->cmd_mutex); return error; } static ssize_t ims_pcu_ofn_reg_addr_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int error; u8 value; error = kstrtou8(buf, 0, &value); if (error) return error; mutex_lock(&pcu->cmd_mutex); pcu->ofn_reg_addr = value; mutex_unlock(&pcu->cmd_mutex); return count; } static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR, ims_pcu_ofn_reg_addr_show, ims_pcu_ofn_reg_addr_store); struct ims_pcu_ofn_bit_attribute { struct device_attribute dattr; u8 addr; u8 nr; }; static ssize_t ims_pcu_ofn_bit_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_ofn_bit_attribute *attr = container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr); int error; u8 data; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, attr->addr, &data); mutex_unlock(&pcu->cmd_mutex); if (error) return error; return scnprintf(buf, PAGE_SIZE, "%d\n", !!(data & (1 << attr->nr))); } static ssize_t ims_pcu_ofn_bit_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = 
to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_ofn_bit_attribute *attr = container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr); int error; int value; u8 data; error = kstrtoint(buf, 0, &value); if (error) return error; if (value > 1) return -EINVAL; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, attr->addr, &data); if (!error) { if (value) data |= 1U << attr->nr; else data &= ~(1U << attr->nr); error = ims_pcu_write_ofn_config(pcu, attr->addr, data); } mutex_unlock(&pcu->cmd_mutex); return error ?: count; } #define IMS_PCU_OFN_BIT_ATTR(_field, _addr, _nr) \ struct ims_pcu_ofn_bit_attribute ims_pcu_ofn_attr_##_field = { \ .dattr = __ATTR(_field, S_IWUSR | S_IRUGO, \ ims_pcu_ofn_bit_show, ims_pcu_ofn_bit_store), \ .addr = _addr, \ .nr = _nr, \ } static IMS_PCU_OFN_BIT_ATTR(engine_enable, 0x60, 7); static IMS_PCU_OFN_BIT_ATTR(speed_enable, 0x60, 6); static IMS_PCU_OFN_BIT_ATTR(assert_enable, 0x60, 5); static IMS_PCU_OFN_BIT_ATTR(xyquant_enable, 0x60, 4); static IMS_PCU_OFN_BIT_ATTR(xyscale_enable, 0x60, 1); static IMS_PCU_OFN_BIT_ATTR(scale_x2, 0x63, 6); static IMS_PCU_OFN_BIT_ATTR(scale_y2, 0x63, 7); static struct attribute *ims_pcu_ofn_attrs[] = { &dev_attr_reg_data.attr, &dev_attr_reg_addr.attr, &ims_pcu_ofn_attr_engine_enable.dattr.attr, &ims_pcu_ofn_attr_speed_enable.dattr.attr, &ims_pcu_ofn_attr_assert_enable.dattr.attr, &ims_pcu_ofn_attr_xyquant_enable.dattr.attr, &ims_pcu_ofn_attr_xyscale_enable.dattr.attr, &ims_pcu_ofn_attr_scale_x2.dattr.attr, &ims_pcu_ofn_attr_scale_y2.dattr.attr, NULL }; static const struct attribute_group ims_pcu_ofn_attr_group = { .name = "ofn", .attrs = ims_pcu_ofn_attrs, }; static void ims_pcu_irq(struct urb *urb) { struct ims_pcu *pcu = urb->context; int retval, status; status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(pcu->dev, "%s - urb shutting down with 
status: %d\n", __func__, status); return; default: dev_dbg(pcu->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } dev_dbg(pcu->dev, "%s: received %d: %*ph\n", __func__, urb->actual_length, urb->actual_length, pcu->urb_in_buf); if (urb == pcu->urb_in) ims_pcu_process_data(pcu, urb); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval && retval != -ENODEV) dev_err(pcu->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static int ims_pcu_buffers_alloc(struct ims_pcu *pcu) { int error; pcu->urb_in_buf = usb_alloc_coherent(pcu->udev, pcu->max_in_size, GFP_KERNEL, &pcu->read_dma); if (!pcu->urb_in_buf) { dev_err(pcu->dev, "Failed to allocate memory for read buffer\n"); return -ENOMEM; } pcu->urb_in = usb_alloc_urb(0, GFP_KERNEL); if (!pcu->urb_in) { dev_err(pcu->dev, "Failed to allocate input URB\n"); error = -ENOMEM; goto err_free_urb_in_buf; } pcu->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; pcu->urb_in->transfer_dma = pcu->read_dma; usb_fill_bulk_urb(pcu->urb_in, pcu->udev, usb_rcvbulkpipe(pcu->udev, pcu->ep_in->bEndpointAddress), pcu->urb_in_buf, pcu->max_in_size, ims_pcu_irq, pcu); /* * We are using usb_bulk_msg() for sending so there is no point * in allocating memory with usb_alloc_coherent(). 
*/ pcu->urb_out_buf = kmalloc(pcu->max_out_size, GFP_KERNEL); if (!pcu->urb_out_buf) { dev_err(pcu->dev, "Failed to allocate memory for write buffer\n"); error = -ENOMEM; goto err_free_in_urb; } pcu->urb_ctrl_buf = usb_alloc_coherent(pcu->udev, pcu->max_ctrl_size, GFP_KERNEL, &pcu->ctrl_dma); if (!pcu->urb_ctrl_buf) { dev_err(pcu->dev, "Failed to allocate memory for read buffer\n"); error = -ENOMEM; goto err_free_urb_out_buf; } pcu->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL); if (!pcu->urb_ctrl) { dev_err(pcu->dev, "Failed to allocate input URB\n"); error = -ENOMEM; goto err_free_urb_ctrl_buf; } pcu->urb_ctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; pcu->urb_ctrl->transfer_dma = pcu->ctrl_dma; usb_fill_int_urb(pcu->urb_ctrl, pcu->udev, usb_rcvintpipe(pcu->udev, pcu->ep_ctrl->bEndpointAddress), pcu->urb_ctrl_buf, pcu->max_ctrl_size, ims_pcu_irq, pcu, pcu->ep_ctrl->bInterval); return 0; err_free_urb_ctrl_buf: usb_free_coherent(pcu->udev, pcu->max_ctrl_size, pcu->urb_ctrl_buf, pcu->ctrl_dma); err_free_urb_out_buf: kfree(pcu->urb_out_buf); err_free_in_urb: usb_free_urb(pcu->urb_in); err_free_urb_in_buf: usb_free_coherent(pcu->udev, pcu->max_in_size, pcu->urb_in_buf, pcu->read_dma); return error; } static void ims_pcu_buffers_free(struct ims_pcu *pcu) { usb_kill_urb(pcu->urb_in); usb_free_urb(pcu->urb_in); usb_free_coherent(pcu->udev, pcu->max_out_size, pcu->urb_in_buf, pcu->read_dma); kfree(pcu->urb_out_buf); usb_kill_urb(pcu->urb_ctrl); usb_free_urb(pcu->urb_ctrl); usb_free_coherent(pcu->udev, pcu->max_ctrl_size, pcu->urb_ctrl_buf, pcu->ctrl_dma); } static const struct usb_cdc_union_desc * ims_pcu_get_cdc_union_desc(struct usb_interface *intf) { const void *buf = intf->altsetting->extra; size_t buflen = intf->altsetting->extralen; struct usb_cdc_union_desc *union_desc; if (!buf) { dev_err(&intf->dev, "Missing descriptor data\n"); return NULL; } if (!buflen) { dev_err(&intf->dev, "Zero length descriptor\n"); return NULL; } while (buflen > 0) { union_desc = (struct 
usb_cdc_union_desc *)buf; if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { dev_dbg(&intf->dev, "Found union header\n"); return union_desc; } buflen -= union_desc->bLength; buf += union_desc->bLength; } dev_err(&intf->dev, "Missing CDC union descriptor\n"); return NULL; } static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pcu) { const struct usb_cdc_union_desc *union_desc; struct usb_host_interface *alt; union_desc = ims_pcu_get_cdc_union_desc(intf); if (!union_desc) return -EINVAL; pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev, union_desc->bMasterInterface0); if (!pcu->ctrl_intf) return -EINVAL; alt = pcu->ctrl_intf->cur_altsetting; if (alt->desc.bNumEndpoints < 1) return -ENODEV; pcu->ep_ctrl = &alt->endpoint[0].desc; pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); pcu->data_intf = usb_ifnum_to_if(pcu->udev, union_desc->bSlaveInterface0); if (!pcu->data_intf) return -EINVAL; alt = pcu->data_intf->cur_altsetting; if (alt->desc.bNumEndpoints != 2) { dev_err(pcu->dev, "Incorrect number of endpoints on data interface (%d)\n", alt->desc.bNumEndpoints); return -EINVAL; } pcu->ep_out = &alt->endpoint[0].desc; if (!usb_endpoint_is_bulk_out(pcu->ep_out)) { dev_err(pcu->dev, "First endpoint on data interface is not BULK OUT\n"); return -EINVAL; } pcu->max_out_size = usb_endpoint_maxp(pcu->ep_out); if (pcu->max_out_size < 8) { dev_err(pcu->dev, "Max OUT packet size is too small (%zd)\n", pcu->max_out_size); return -EINVAL; } pcu->ep_in = &alt->endpoint[1].desc; if (!usb_endpoint_is_bulk_in(pcu->ep_in)) { dev_err(pcu->dev, "Second endpoint on data interface is not BULK IN\n"); return -EINVAL; } pcu->max_in_size = usb_endpoint_maxp(pcu->ep_in); if (pcu->max_in_size < 8) { dev_err(pcu->dev, "Max IN packet size is too small (%zd)\n", pcu->max_in_size); return -EINVAL; } return 0; } static int ims_pcu_start_io(struct ims_pcu *pcu) { int error; error = usb_submit_urb(pcu->urb_ctrl, 
GFP_KERNEL); if (error) { dev_err(pcu->dev, "Failed to start control IO - usb_submit_urb failed with result: %d\n", error); return -EIO; } error = usb_submit_urb(pcu->urb_in, GFP_KERNEL); if (error) { dev_err(pcu->dev, "Failed to start IO - usb_submit_urb failed with result: %d\n", error); usb_kill_urb(pcu->urb_ctrl); return -EIO; } return 0; } static void ims_pcu_stop_io(struct ims_pcu *pcu) { usb_kill_urb(pcu->urb_in); usb_kill_urb(pcu->urb_ctrl); } static int ims_pcu_line_setup(struct ims_pcu *pcu) { struct usb_host_interface *interface = pcu->ctrl_intf->cur_altsetting; struct usb_cdc_line_coding *line = (void *)pcu->cmd_buf; int error; memset(line, 0, sizeof(*line)); line->dwDTERate = cpu_to_le32(57600); line->bDataBits = 8; error = usb_control_msg(pcu->udev, usb_sndctrlpipe(pcu->udev, 0), USB_CDC_REQ_SET_LINE_CODING, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, interface->desc.bInterfaceNumber, line, sizeof(struct usb_cdc_line_coding), 5000); if (error < 0) { dev_err(pcu->dev, "Failed to set line coding, error: %d\n", error); return error; } error = usb_control_msg(pcu->udev, usb_sndctrlpipe(pcu->udev, 0), USB_CDC_REQ_SET_CONTROL_LINE_STATE, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0x03, interface->desc.bInterfaceNumber, NULL, 0, 5000); if (error < 0) { dev_err(pcu->dev, "Failed to set line state, error: %d\n", error); return error; } return 0; } static int ims_pcu_get_device_info(struct ims_pcu *pcu) { int error; error = ims_pcu_get_info(pcu); if (error) return error; error = ims_pcu_execute_query(pcu, GET_FW_VERSION); if (error) { dev_err(pcu->dev, "GET_FW_VERSION command failed, error: %d\n", error); return error; } snprintf(pcu->fw_version, sizeof(pcu->fw_version), "%02d%02d%02d%02d.%c%c", pcu->cmd_buf[2], pcu->cmd_buf[3], pcu->cmd_buf[4], pcu->cmd_buf[5], pcu->cmd_buf[6], pcu->cmd_buf[7]); error = ims_pcu_execute_query(pcu, GET_BL_VERSION); if (error) { dev_err(pcu->dev, "GET_BL_VERSION command failed, error: %d\n", error); return error; } 
snprintf(pcu->bl_version, sizeof(pcu->bl_version), "%02d%02d%02d%02d.%c%c", pcu->cmd_buf[2], pcu->cmd_buf[3], pcu->cmd_buf[4], pcu->cmd_buf[5], pcu->cmd_buf[6], pcu->cmd_buf[7]); error = ims_pcu_execute_query(pcu, RESET_REASON); if (error) { dev_err(pcu->dev, "RESET_REASON command failed, error: %d\n", error); return error; } snprintf(pcu->reset_reason, sizeof(pcu->reset_reason), "%02x", pcu->cmd_buf[IMS_PCU_DATA_OFFSET]); dev_dbg(pcu->dev, "P/N: %s, MD: %s, S/N: %s, FW: %s, BL: %s, RR: %s\n", pcu->part_number, pcu->date_of_manufacturing, pcu->serial_number, pcu->fw_version, pcu->bl_version, pcu->reset_reason); return 0; } static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id) { int error; error = ims_pcu_execute_query(pcu, GET_DEVICE_ID); if (error) { dev_err(pcu->dev, "GET_DEVICE_ID command failed, error: %d\n", error); return error; } *device_id = pcu->cmd_buf[IMS_PCU_DATA_OFFSET]; dev_dbg(pcu->dev, "Detected device ID: %d\n", *device_id); return 0; } static int ims_pcu_init_application_mode(struct ims_pcu *pcu) { static atomic_t device_no = ATOMIC_INIT(-1); const struct ims_pcu_device_info *info; int error; error = ims_pcu_get_device_info(pcu); if (error) { /* Device does not respond to basic queries, hopeless */ return error; } error = ims_pcu_identify_type(pcu, &pcu->device_id); if (error) { dev_err(pcu->dev, "Failed to identify device, error: %d\n", error); /* * Do not signal error, but do not create input nor * backlight devices either, let userspace figure this * out (flash a new firmware?). 
*/ return 0; } if (pcu->device_id >= ARRAY_SIZE(ims_pcu_device_info) || !ims_pcu_device_info[pcu->device_id].keymap) { dev_err(pcu->dev, "Device ID %d is not valid\n", pcu->device_id); /* Same as above, punt to userspace */ return 0; } /* Device appears to be operable, complete initialization */ pcu->device_no = atomic_inc_return(&device_no); /* * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor */ if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID) { error = sysfs_create_group(&pcu->dev->kobj, &ims_pcu_ofn_attr_group); if (error) return error; } error = ims_pcu_setup_backlight(pcu); if (error) return error; info = &ims_pcu_device_info[pcu->device_id]; error = ims_pcu_setup_buttons(pcu, info->keymap, info->keymap_len); if (error) goto err_destroy_backlight; if (info->has_gamepad) { error = ims_pcu_setup_gamepad(pcu); if (error) goto err_destroy_buttons; } pcu->setup_complete = true; return 0; err_destroy_buttons: ims_pcu_destroy_buttons(pcu); err_destroy_backlight: ims_pcu_destroy_backlight(pcu); return error; } static void ims_pcu_destroy_application_mode(struct ims_pcu *pcu) { if (pcu->setup_complete) { pcu->setup_complete = false; mb(); /* make sure flag setting is not reordered */ if (pcu->gamepad) ims_pcu_destroy_gamepad(pcu); ims_pcu_destroy_buttons(pcu); ims_pcu_destroy_backlight(pcu); if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID) sysfs_remove_group(&pcu->dev->kobj, &ims_pcu_ofn_attr_group); } } static int ims_pcu_init_bootloader_mode(struct ims_pcu *pcu) { int error; error = ims_pcu_execute_bl_command(pcu, QUERY_DEVICE, NULL, 0, IMS_PCU_CMD_RESPONSE_TIMEOUT); if (error) { dev_err(pcu->dev, "Bootloader does not respond, aborting\n"); return error; } pcu->fw_start_addr = get_unaligned_le32(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET + 11]); pcu->fw_end_addr = get_unaligned_le32(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET + 15]); dev_info(pcu->dev, "Device is in bootloader mode (addr 0x%08x-0x%08x), requesting firmware\n", pcu->fw_start_addr, pcu->fw_end_addr); error = 
request_firmware_nowait(THIS_MODULE, true, IMS_PCU_FIRMWARE_NAME, pcu->dev, GFP_KERNEL, pcu, ims_pcu_process_async_firmware); if (error) { /* This error is not fatal, let userspace have another chance */ complete(&pcu->async_firmware_done); } return 0; } static void ims_pcu_destroy_bootloader_mode(struct ims_pcu *pcu) { /* Make sure our initial firmware request has completed */ wait_for_completion(&pcu->async_firmware_done); } #define IMS_PCU_APPLICATION_MODE 0 #define IMS_PCU_BOOTLOADER_MODE 1 static struct usb_driver ims_pcu_driver; static int ims_pcu_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct ims_pcu *pcu; int error; pcu = kzalloc(sizeof(struct ims_pcu), GFP_KERNEL); if (!pcu) return -ENOMEM; pcu->dev = &intf->dev; pcu->udev = udev; pcu->bootloader_mode = id->driver_info == IMS_PCU_BOOTLOADER_MODE; mutex_init(&pcu->cmd_mutex); init_completion(&pcu->cmd_done); init_completion(&pcu->async_firmware_done); error = ims_pcu_parse_cdc_data(intf, pcu); if (error) goto err_free_mem; error = usb_driver_claim_interface(&ims_pcu_driver, pcu->data_intf, pcu); if (error) { dev_err(&intf->dev, "Unable to claim corresponding data interface: %d\n", error); goto err_free_mem; } usb_set_intfdata(pcu->ctrl_intf, pcu); usb_set_intfdata(pcu->data_intf, pcu); error = ims_pcu_buffers_alloc(pcu); if (error) goto err_unclaim_intf; error = ims_pcu_start_io(pcu); if (error) goto err_free_buffers; error = ims_pcu_line_setup(pcu); if (error) goto err_stop_io; error = sysfs_create_group(&intf->dev.kobj, &ims_pcu_attr_group); if (error) goto err_stop_io; error = pcu->bootloader_mode ? 
ims_pcu_init_bootloader_mode(pcu) : ims_pcu_init_application_mode(pcu); if (error) goto err_remove_sysfs; return 0; err_remove_sysfs: sysfs_remove_group(&intf->dev.kobj, &ims_pcu_attr_group); err_stop_io: ims_pcu_stop_io(pcu); err_free_buffers: ims_pcu_buffers_free(pcu); err_unclaim_intf: usb_driver_release_interface(&ims_pcu_driver, pcu->data_intf); err_free_mem: kfree(pcu); return error; } static void ims_pcu_disconnect(struct usb_interface *intf) { struct ims_pcu *pcu = usb_get_intfdata(intf); struct usb_host_interface *alt = intf->cur_altsetting; usb_set_intfdata(intf, NULL); /* * See if we are dealing with control or data interface. The cleanup * happens when we unbind primary (control) interface. */ if (alt->desc.bInterfaceClass != USB_CLASS_COMM) return; sysfs_remove_group(&intf->dev.kobj, &ims_pcu_attr_group); ims_pcu_stop_io(pcu); if (pcu->bootloader_mode) ims_pcu_destroy_bootloader_mode(pcu); else ims_pcu_destroy_application_mode(pcu); ims_pcu_buffers_free(pcu); kfree(pcu); } #ifdef CONFIG_PM static int ims_pcu_suspend(struct usb_interface *intf, pm_message_t message) { struct ims_pcu *pcu = usb_get_intfdata(intf); struct usb_host_interface *alt = intf->cur_altsetting; if (alt->desc.bInterfaceClass == USB_CLASS_COMM) ims_pcu_stop_io(pcu); return 0; } static int ims_pcu_resume(struct usb_interface *intf) { struct ims_pcu *pcu = usb_get_intfdata(intf); struct usb_host_interface *alt = intf->cur_altsetting; int retval = 0; if (alt->desc.bInterfaceClass == USB_CLASS_COMM) { retval = ims_pcu_start_io(pcu); if (retval == 0) retval = ims_pcu_line_setup(pcu); } return retval; } #endif static const struct usb_device_id ims_pcu_id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x04d8, 0x0082, USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER), .driver_info = IMS_PCU_APPLICATION_MODE, }, { USB_DEVICE_AND_INTERFACE_INFO(0x04d8, 0x0083, USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER), .driver_info = IMS_PCU_BOOTLOADER_MODE, }, { } }; 
/*
 * USB driver glue.  A single driver instance binds both device
 * personalities listed in ims_pcu_id_table: application mode
 * (PID 0x0082) and bootloader mode (PID 0x0083).
 */
static struct usb_driver ims_pcu_driver = {
	.name		= "ims_pcu",
	.id_table	= ims_pcu_id_table,
	.probe		= ims_pcu_probe,
	.disconnect	= ims_pcu_disconnect,
#ifdef CONFIG_PM
	.suspend	= ims_pcu_suspend,
	.resume		= ims_pcu_resume,
	/*
	 * A device coming back from a USB reset is handled the same way
	 * as an ordinary resume: restart I/O and redo line setup.
	 */
	.reset_resume	= ims_pcu_resume,
#endif
};

/* Registers the driver and generates module init/exit boilerplate. */
module_usb_driver(ims_pcu_driver);

MODULE_DESCRIPTION("IMS Passenger Control Unit driver");
MODULE_AUTHOR("Dmitry Torokhov <dmitry.torokhov@gmail.com>");
MODULE_LICENSE("GPL");
/* * Driver for IMS Passenger Control Unit Devices * * Copyright (C) 2013 The IMS Company * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. */ #include <linux/completion.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/usb/input.h> #include <linux/usb/cdc.h> #include <asm/unaligned.h> #define IMS_PCU_KEYMAP_LEN 32 struct ims_pcu_buttons { struct input_dev *input; char name[32]; char phys[32]; unsigned short keymap[IMS_PCU_KEYMAP_LEN]; }; struct ims_pcu_gamepad { struct input_dev *input; char name[32]; char phys[32]; }; struct ims_pcu_backlight { struct led_classdev cdev; struct work_struct work; enum led_brightness desired_brightness; char name[32]; }; #define IMS_PCU_PART_NUMBER_LEN 15 #define IMS_PCU_SERIAL_NUMBER_LEN 8 #define IMS_PCU_DOM_LEN 8 #define IMS_PCU_FW_VERSION_LEN (9 + 1) #define IMS_PCU_BL_VERSION_LEN (9 + 1) #define IMS_PCU_BL_RESET_REASON_LEN (2 + 1) #define IMS_PCU_PCU_B_DEVICE_ID 5 #define IMS_PCU_BUF_SIZE 128 struct ims_pcu { struct usb_device *udev; struct device *dev; /* control interface's device, used for logging */ unsigned int device_no; bool bootloader_mode; char part_number[IMS_PCU_PART_NUMBER_LEN]; char serial_number[IMS_PCU_SERIAL_NUMBER_LEN]; char date_of_manufacturing[IMS_PCU_DOM_LEN]; char fw_version[IMS_PCU_FW_VERSION_LEN]; char bl_version[IMS_PCU_BL_VERSION_LEN]; char reset_reason[IMS_PCU_BL_RESET_REASON_LEN]; int update_firmware_status; u8 device_id; u8 ofn_reg_addr; struct usb_interface *ctrl_intf; struct usb_endpoint_descriptor *ep_ctrl; struct urb *urb_ctrl; u8 *urb_ctrl_buf; dma_addr_t ctrl_dma; size_t max_ctrl_size; struct usb_interface *data_intf; struct usb_endpoint_descriptor *ep_in; struct urb 
*urb_in; u8 *urb_in_buf; dma_addr_t read_dma; size_t max_in_size; struct usb_endpoint_descriptor *ep_out; u8 *urb_out_buf; size_t max_out_size; u8 read_buf[IMS_PCU_BUF_SIZE]; u8 read_pos; u8 check_sum; bool have_stx; bool have_dle; u8 cmd_buf[IMS_PCU_BUF_SIZE]; u8 ack_id; u8 expected_response; u8 cmd_buf_len; struct completion cmd_done; struct mutex cmd_mutex; u32 fw_start_addr; u32 fw_end_addr; struct completion async_firmware_done; struct ims_pcu_buttons buttons; struct ims_pcu_gamepad *gamepad; struct ims_pcu_backlight backlight; bool setup_complete; /* Input and LED devices have been created */ }; /********************************************************************* * Buttons Input device support * *********************************************************************/ static const unsigned short ims_pcu_keymap_1[] = { [1] = KEY_ATTENDANT_OFF, [2] = KEY_ATTENDANT_ON, [3] = KEY_LIGHTS_TOGGLE, [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_INFO, }; static const unsigned short ims_pcu_keymap_2[] = { [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_INFO, }; static const unsigned short ims_pcu_keymap_3[] = { [1] = KEY_HOMEPAGE, [2] = KEY_ATTENDANT_TOGGLE, [3] = KEY_LIGHTS_TOGGLE, [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_DISPLAYTOGGLE, [18] = KEY_PLAYPAUSE, }; static const unsigned short ims_pcu_keymap_4[] = { [1] = KEY_ATTENDANT_OFF, [2] = KEY_ATTENDANT_ON, [3] = KEY_LIGHTS_TOGGLE, [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN, [6] = KEY_INFO, [18] = KEY_PLAYPAUSE, }; static const unsigned short ims_pcu_keymap_5[] = { [1] = KEY_ATTENDANT_OFF, [2] = KEY_ATTENDANT_ON, [3] = KEY_LIGHTS_TOGGLE, }; struct ims_pcu_device_info { const unsigned short *keymap; size_t keymap_len; bool has_gamepad; }; #define IMS_PCU_DEVINFO(_n, _gamepad) \ [_n] = { \ .keymap = ims_pcu_keymap_##_n, \ .keymap_len = ARRAY_SIZE(ims_pcu_keymap_##_n), \ .has_gamepad = _gamepad, \ } static const struct ims_pcu_device_info ims_pcu_device_info[] = { IMS_PCU_DEVINFO(1, true), 
IMS_PCU_DEVINFO(2, true), IMS_PCU_DEVINFO(3, true), IMS_PCU_DEVINFO(4, true), IMS_PCU_DEVINFO(5, false), }; static void ims_pcu_buttons_report(struct ims_pcu *pcu, u32 data) { struct ims_pcu_buttons *buttons = &pcu->buttons; struct input_dev *input = buttons->input; int i; for (i = 0; i < 32; i++) { unsigned short keycode = buttons->keymap[i]; if (keycode != KEY_RESERVED) input_report_key(input, keycode, data & (1UL << i)); } input_sync(input); } static int ims_pcu_setup_buttons(struct ims_pcu *pcu, const unsigned short *keymap, size_t keymap_len) { struct ims_pcu_buttons *buttons = &pcu->buttons; struct input_dev *input; int i; int error; input = input_allocate_device(); if (!input) { dev_err(pcu->dev, "Not enough memory for input input device\n"); return -ENOMEM; } snprintf(buttons->name, sizeof(buttons->name), "IMS PCU#%d Button Interface", pcu->device_no); usb_make_path(pcu->udev, buttons->phys, sizeof(buttons->phys)); strlcat(buttons->phys, "/input0", sizeof(buttons->phys)); memcpy(buttons->keymap, keymap, sizeof(*keymap) * keymap_len); input->name = buttons->name; input->phys = buttons->phys; usb_to_input_id(pcu->udev, &input->id); input->dev.parent = &pcu->ctrl_intf->dev; input->keycode = buttons->keymap; input->keycodemax = ARRAY_SIZE(buttons->keymap); input->keycodesize = sizeof(buttons->keymap[0]); __set_bit(EV_KEY, input->evbit); for (i = 0; i < IMS_PCU_KEYMAP_LEN; i++) __set_bit(buttons->keymap[i], input->keybit); __clear_bit(KEY_RESERVED, input->keybit); error = input_register_device(input); if (error) { dev_err(pcu->dev, "Failed to register buttons input device: %d\n", error); input_free_device(input); return error; } buttons->input = input; return 0; } static void ims_pcu_destroy_buttons(struct ims_pcu *pcu) { struct ims_pcu_buttons *buttons = &pcu->buttons; input_unregister_device(buttons->input); } /********************************************************************* * Gamepad Input device support * 
*********************************************************************/ static void ims_pcu_gamepad_report(struct ims_pcu *pcu, u32 data) { struct ims_pcu_gamepad *gamepad = pcu->gamepad; struct input_dev *input = gamepad->input; int x, y; x = !!(data & (1 << 14)) - !!(data & (1 << 13)); y = !!(data & (1 << 12)) - !!(data & (1 << 11)); input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_key(input, BTN_A, data & (1 << 7)); input_report_key(input, BTN_B, data & (1 << 8)); input_report_key(input, BTN_X, data & (1 << 9)); input_report_key(input, BTN_Y, data & (1 << 10)); input_report_key(input, BTN_START, data & (1 << 15)); input_report_key(input, BTN_SELECT, data & (1 << 16)); input_sync(input); } static int ims_pcu_setup_gamepad(struct ims_pcu *pcu) { struct ims_pcu_gamepad *gamepad; struct input_dev *input; int error; gamepad = kzalloc(sizeof(struct ims_pcu_gamepad), GFP_KERNEL); input = input_allocate_device(); if (!gamepad || !input) { dev_err(pcu->dev, "Not enough memory for gamepad device\n"); error = -ENOMEM; goto err_free_mem; } gamepad->input = input; snprintf(gamepad->name, sizeof(gamepad->name), "IMS PCU#%d Gamepad Interface", pcu->device_no); usb_make_path(pcu->udev, gamepad->phys, sizeof(gamepad->phys)); strlcat(gamepad->phys, "/input1", sizeof(gamepad->phys)); input->name = gamepad->name; input->phys = gamepad->phys; usb_to_input_id(pcu->udev, &input->id); input->dev.parent = &pcu->ctrl_intf->dev; __set_bit(EV_KEY, input->evbit); __set_bit(BTN_A, input->keybit); __set_bit(BTN_B, input->keybit); __set_bit(BTN_X, input->keybit); __set_bit(BTN_Y, input->keybit); __set_bit(BTN_START, input->keybit); __set_bit(BTN_SELECT, input->keybit); __set_bit(EV_ABS, input->evbit); input_set_abs_params(input, ABS_X, -1, 1, 0, 0); input_set_abs_params(input, ABS_Y, -1, 1, 0, 0); error = input_register_device(input); if (error) { dev_err(pcu->dev, "Failed to register gamepad input device: %d\n", error); goto err_free_mem; } pcu->gamepad = 
gamepad; return 0; err_free_mem: input_free_device(input); kfree(gamepad); return -ENOMEM; } static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu) { struct ims_pcu_gamepad *gamepad = pcu->gamepad; input_unregister_device(gamepad->input); kfree(gamepad); } /********************************************************************* * PCU Communication protocol handling * *********************************************************************/ #define IMS_PCU_PROTOCOL_STX 0x02 #define IMS_PCU_PROTOCOL_ETX 0x03 #define IMS_PCU_PROTOCOL_DLE 0x10 /* PCU commands */ #define IMS_PCU_CMD_STATUS 0xa0 #define IMS_PCU_CMD_PCU_RESET 0xa1 #define IMS_PCU_CMD_RESET_REASON 0xa2 #define IMS_PCU_CMD_SEND_BUTTONS 0xa3 #define IMS_PCU_CMD_JUMP_TO_BTLDR 0xa4 #define IMS_PCU_CMD_GET_INFO 0xa5 #define IMS_PCU_CMD_SET_BRIGHTNESS 0xa6 #define IMS_PCU_CMD_EEPROM 0xa7 #define IMS_PCU_CMD_GET_FW_VERSION 0xa8 #define IMS_PCU_CMD_GET_BL_VERSION 0xa9 #define IMS_PCU_CMD_SET_INFO 0xab #define IMS_PCU_CMD_GET_BRIGHTNESS 0xac #define IMS_PCU_CMD_GET_DEVICE_ID 0xae #define IMS_PCU_CMD_SPECIAL_INFO 0xb0 #define IMS_PCU_CMD_BOOTLOADER 0xb1 /* Pass data to bootloader */ #define IMS_PCU_CMD_OFN_SET_CONFIG 0xb3 #define IMS_PCU_CMD_OFN_GET_CONFIG 0xb4 /* PCU responses */ #define IMS_PCU_RSP_STATUS 0xc0 #define IMS_PCU_RSP_PCU_RESET 0 /* Originally 0xc1 */ #define IMS_PCU_RSP_RESET_REASON 0xc2 #define IMS_PCU_RSP_SEND_BUTTONS 0xc3 #define IMS_PCU_RSP_JUMP_TO_BTLDR 0 /* Originally 0xc4 */ #define IMS_PCU_RSP_GET_INFO 0xc5 #define IMS_PCU_RSP_SET_BRIGHTNESS 0xc6 #define IMS_PCU_RSP_EEPROM 0xc7 #define IMS_PCU_RSP_GET_FW_VERSION 0xc8 #define IMS_PCU_RSP_GET_BL_VERSION 0xc9 #define IMS_PCU_RSP_SET_INFO 0xcb #define IMS_PCU_RSP_GET_BRIGHTNESS 0xcc #define IMS_PCU_RSP_CMD_INVALID 0xcd #define IMS_PCU_RSP_GET_DEVICE_ID 0xce #define IMS_PCU_RSP_SPECIAL_INFO 0xd0 #define IMS_PCU_RSP_BOOTLOADER 0xd1 /* Bootloader response */ #define IMS_PCU_RSP_OFN_SET_CONFIG 0xd2 #define IMS_PCU_RSP_OFN_GET_CONFIG 0xd3 #define 
IMS_PCU_RSP_EVNT_BUTTONS 0xe0 /* Unsolicited, button state */ #define IMS_PCU_GAMEPAD_MASK 0x0001ff80UL /* Bits 7 through 16 */ #define IMS_PCU_MIN_PACKET_LEN 3 #define IMS_PCU_DATA_OFFSET 2 #define IMS_PCU_CMD_WRITE_TIMEOUT 100 /* msec */ #define IMS_PCU_CMD_RESPONSE_TIMEOUT 500 /* msec */ static void ims_pcu_report_events(struct ims_pcu *pcu) { u32 data = get_unaligned_be32(&pcu->read_buf[3]); ims_pcu_buttons_report(pcu, data & ~IMS_PCU_GAMEPAD_MASK); if (pcu->gamepad) ims_pcu_gamepad_report(pcu, data); } static void ims_pcu_handle_response(struct ims_pcu *pcu) { switch (pcu->read_buf[0]) { case IMS_PCU_RSP_EVNT_BUTTONS: if (likely(pcu->setup_complete)) ims_pcu_report_events(pcu); break; default: /* * See if we got command completion. * If both the sequence and response code match save * the data and signal completion. */ if (pcu->read_buf[0] == pcu->expected_response && pcu->read_buf[1] == pcu->ack_id - 1) { memcpy(pcu->cmd_buf, pcu->read_buf, pcu->read_pos); pcu->cmd_buf_len = pcu->read_pos; complete(&pcu->cmd_done); } break; } } static void ims_pcu_process_data(struct ims_pcu *pcu, struct urb *urb) { int i; for (i = 0; i < urb->actual_length; i++) { u8 data = pcu->urb_in_buf[i]; /* Skip everything until we get Start Xmit */ if (!pcu->have_stx && data != IMS_PCU_PROTOCOL_STX) continue; if (pcu->have_dle) { pcu->have_dle = false; pcu->read_buf[pcu->read_pos++] = data; pcu->check_sum += data; continue; } switch (data) { case IMS_PCU_PROTOCOL_STX: if (pcu->have_stx) dev_warn(pcu->dev, "Unexpected STX at byte %d, discarding old data\n", pcu->read_pos); pcu->have_stx = true; pcu->have_dle = false; pcu->read_pos = 0; pcu->check_sum = 0; break; case IMS_PCU_PROTOCOL_DLE: pcu->have_dle = true; break; case IMS_PCU_PROTOCOL_ETX: if (pcu->read_pos < IMS_PCU_MIN_PACKET_LEN) { dev_warn(pcu->dev, "Short packet received (%d bytes), ignoring\n", pcu->read_pos); } else if (pcu->check_sum != 0) { dev_warn(pcu->dev, "Invalid checksum in packet (%d bytes), ignoring\n", 
pcu->read_pos); } else { ims_pcu_handle_response(pcu); } pcu->have_stx = false; pcu->have_dle = false; pcu->read_pos = 0; break; default: pcu->read_buf[pcu->read_pos++] = data; pcu->check_sum += data; break; } } } static bool ims_pcu_byte_needs_escape(u8 byte) { return byte == IMS_PCU_PROTOCOL_STX || byte == IMS_PCU_PROTOCOL_ETX || byte == IMS_PCU_PROTOCOL_DLE; } static int ims_pcu_send_cmd_chunk(struct ims_pcu *pcu, u8 command, int chunk, int len) { int error; error = usb_bulk_msg(pcu->udev, usb_sndbulkpipe(pcu->udev, pcu->ep_out->bEndpointAddress), pcu->urb_out_buf, len, NULL, IMS_PCU_CMD_WRITE_TIMEOUT); if (error < 0) { dev_dbg(pcu->dev, "Sending 0x%02x command failed at chunk %d: %d\n", command, chunk, error); return error; } return 0; } static int ims_pcu_send_command(struct ims_pcu *pcu, u8 command, const u8 *data, int len) { int count = 0; int chunk = 0; int delta; int i; int error; u8 csum = 0; u8 ack_id; pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_STX; /* We know the command need not be escaped */ pcu->urb_out_buf[count++] = command; csum += command; ack_id = pcu->ack_id++; if (ack_id == 0xff) ack_id = pcu->ack_id++; if (ims_pcu_byte_needs_escape(ack_id)) pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE; pcu->urb_out_buf[count++] = ack_id; csum += ack_id; for (i = 0; i < len; i++) { delta = ims_pcu_byte_needs_escape(data[i]) ? 2 : 1; if (count + delta >= pcu->max_out_size) { error = ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count); if (error) return error; count = 0; } if (delta == 2) pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE; pcu->urb_out_buf[count++] = data[i]; csum += data[i]; } csum = 1 + ~csum; delta = ims_pcu_byte_needs_escape(csum) ? 
3 : 2; if (count + delta >= pcu->max_out_size) { error = ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count); if (error) return error; count = 0; } if (delta == 3) pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_DLE; pcu->urb_out_buf[count++] = csum; pcu->urb_out_buf[count++] = IMS_PCU_PROTOCOL_ETX; return ims_pcu_send_cmd_chunk(pcu, command, ++chunk, count); } static int __ims_pcu_execute_command(struct ims_pcu *pcu, u8 command, const void *data, size_t len, u8 expected_response, int response_time) { int error; pcu->expected_response = expected_response; init_completion(&pcu->cmd_done); error = ims_pcu_send_command(pcu, command, data, len); if (error) return error; if (expected_response && !wait_for_completion_timeout(&pcu->cmd_done, msecs_to_jiffies(response_time))) { dev_dbg(pcu->dev, "Command 0x%02x timed out\n", command); return -ETIMEDOUT; } return 0; } #define ims_pcu_execute_command(pcu, code, data, len) \ __ims_pcu_execute_command(pcu, \ IMS_PCU_CMD_##code, data, len, \ IMS_PCU_RSP_##code, \ IMS_PCU_CMD_RESPONSE_TIMEOUT) #define ims_pcu_execute_query(pcu, code) \ ims_pcu_execute_command(pcu, code, NULL, 0) /* Bootloader commands */ #define IMS_PCU_BL_CMD_QUERY_DEVICE 0xa1 #define IMS_PCU_BL_CMD_UNLOCK_CONFIG 0xa2 #define IMS_PCU_BL_CMD_ERASE_APP 0xa3 #define IMS_PCU_BL_CMD_PROGRAM_DEVICE 0xa4 #define IMS_PCU_BL_CMD_PROGRAM_COMPLETE 0xa5 #define IMS_PCU_BL_CMD_READ_APP 0xa6 #define IMS_PCU_BL_CMD_RESET_DEVICE 0xa7 #define IMS_PCU_BL_CMD_LAUNCH_APP 0xa8 /* Bootloader commands */ #define IMS_PCU_BL_RSP_QUERY_DEVICE 0xc1 #define IMS_PCU_BL_RSP_UNLOCK_CONFIG 0xc2 #define IMS_PCU_BL_RSP_ERASE_APP 0xc3 #define IMS_PCU_BL_RSP_PROGRAM_DEVICE 0xc4 #define IMS_PCU_BL_RSP_PROGRAM_COMPLETE 0xc5 #define IMS_PCU_BL_RSP_READ_APP 0xc6 #define IMS_PCU_BL_RSP_RESET_DEVICE 0 /* originally 0xa7 */ #define IMS_PCU_BL_RSP_LAUNCH_APP 0 /* originally 0xa8 */ #define IMS_PCU_BL_DATA_OFFSET 3 static int __ims_pcu_execute_bl_command(struct ims_pcu *pcu, u8 command, const void *data, 
size_t len, u8 expected_response, int response_time) { int error; pcu->cmd_buf[0] = command; if (data) memcpy(&pcu->cmd_buf[1], data, len); error = __ims_pcu_execute_command(pcu, IMS_PCU_CMD_BOOTLOADER, pcu->cmd_buf, len + 1, expected_response ? IMS_PCU_RSP_BOOTLOADER : 0, response_time); if (error) { dev_err(pcu->dev, "Failure when sending 0x%02x command to bootloader, error: %d\n", pcu->cmd_buf[0], error); return error; } if (expected_response && pcu->cmd_buf[2] != expected_response) { dev_err(pcu->dev, "Unexpected response from bootloader: 0x%02x, wanted 0x%02x\n", pcu->cmd_buf[2], expected_response); return -EINVAL; } return 0; } #define ims_pcu_execute_bl_command(pcu, code, data, len, timeout) \ __ims_pcu_execute_bl_command(pcu, \ IMS_PCU_BL_CMD_##code, data, len, \ IMS_PCU_BL_RSP_##code, timeout) \ #define IMS_PCU_INFO_PART_OFFSET 2 #define IMS_PCU_INFO_DOM_OFFSET 17 #define IMS_PCU_INFO_SERIAL_OFFSET 25 #define IMS_PCU_SET_INFO_SIZE 31 static int ims_pcu_get_info(struct ims_pcu *pcu) { int error; error = ims_pcu_execute_query(pcu, GET_INFO); if (error) { dev_err(pcu->dev, "GET_INFO command failed, error: %d\n", error); return error; } memcpy(pcu->part_number, &pcu->cmd_buf[IMS_PCU_INFO_PART_OFFSET], sizeof(pcu->part_number)); memcpy(pcu->date_of_manufacturing, &pcu->cmd_buf[IMS_PCU_INFO_DOM_OFFSET], sizeof(pcu->date_of_manufacturing)); memcpy(pcu->serial_number, &pcu->cmd_buf[IMS_PCU_INFO_SERIAL_OFFSET], sizeof(pcu->serial_number)); return 0; } static int ims_pcu_set_info(struct ims_pcu *pcu) { int error; memcpy(&pcu->cmd_buf[IMS_PCU_INFO_PART_OFFSET], pcu->part_number, sizeof(pcu->part_number)); memcpy(&pcu->cmd_buf[IMS_PCU_INFO_DOM_OFFSET], pcu->date_of_manufacturing, sizeof(pcu->date_of_manufacturing)); memcpy(&pcu->cmd_buf[IMS_PCU_INFO_SERIAL_OFFSET], pcu->serial_number, sizeof(pcu->serial_number)); error = ims_pcu_execute_command(pcu, SET_INFO, &pcu->cmd_buf[IMS_PCU_DATA_OFFSET], IMS_PCU_SET_INFO_SIZE); if (error) { dev_err(pcu->dev, "Failed to update 
device information, error: %d\n", error); return error; } return 0; } static int ims_pcu_switch_to_bootloader(struct ims_pcu *pcu) { int error; /* Execute jump to the bootoloader */ error = ims_pcu_execute_command(pcu, JUMP_TO_BTLDR, NULL, 0); if (error) { dev_err(pcu->dev, "Failure when sending JUMP TO BOOLTLOADER command, error: %d\n", error); return error; } return 0; } /********************************************************************* * Firmware Update handling * *********************************************************************/ #define IMS_PCU_FIRMWARE_NAME "imspcu.fw" struct ims_pcu_flash_fmt { __le32 addr; u8 len; u8 data[]; }; static unsigned int ims_pcu_count_fw_records(const struct firmware *fw) { const struct ihex_binrec *rec = (const struct ihex_binrec *)fw->data; unsigned int count = 0; while (rec) { count++; rec = ihex_next_binrec(rec); } return count; } static int ims_pcu_verify_block(struct ims_pcu *pcu, u32 addr, u8 len, const u8 *data) { struct ims_pcu_flash_fmt *fragment; int error; fragment = (void *)&pcu->cmd_buf[1]; put_unaligned_le32(addr, &fragment->addr); fragment->len = len; error = ims_pcu_execute_bl_command(pcu, READ_APP, NULL, 5, IMS_PCU_CMD_RESPONSE_TIMEOUT); if (error) { dev_err(pcu->dev, "Failed to retrieve block at 0x%08x, len %d, error: %d\n", addr, len, error); return error; } fragment = (void *)&pcu->cmd_buf[IMS_PCU_BL_DATA_OFFSET]; if (get_unaligned_le32(&fragment->addr) != addr || fragment->len != len) { dev_err(pcu->dev, "Wrong block when retrieving 0x%08x (0x%08x), len %d (%d)\n", addr, get_unaligned_le32(&fragment->addr), len, fragment->len); return -EINVAL; } if (memcmp(fragment->data, data, len)) { dev_err(pcu->dev, "Mismatch in block at 0x%08x, len %d\n", addr, len); return -EINVAL; } return 0; } static int ims_pcu_flash_firmware(struct ims_pcu *pcu, const struct firmware *fw, unsigned int n_fw_records) { const struct ihex_binrec *rec = (const struct ihex_binrec *)fw->data; struct ims_pcu_flash_fmt *fragment; 
unsigned int count = 0; u32 addr; u8 len; int error; error = ims_pcu_execute_bl_command(pcu, ERASE_APP, NULL, 0, 2000); if (error) { dev_err(pcu->dev, "Failed to erase application image, error: %d\n", error); return error; } while (rec) { /* * The firmware format is messed up for some reason. * The address twice that of what is needed for some * reason and we end up overwriting half of the data * with the next record. */ addr = be32_to_cpu(rec->addr) / 2; len = be16_to_cpu(rec->len); fragment = (void *)&pcu->cmd_buf[1]; put_unaligned_le32(addr, &fragment->addr); fragment->len = len; memcpy(fragment->data, rec->data, len); error = ims_pcu_execute_bl_command(pcu, PROGRAM_DEVICE, NULL, len + 5, IMS_PCU_CMD_RESPONSE_TIMEOUT); if (error) { dev_err(pcu->dev, "Failed to write block at 0x%08x, len %d, error: %d\n", addr, len, error); return error; } if (addr >= pcu->fw_start_addr && addr < pcu->fw_end_addr) { error = ims_pcu_verify_block(pcu, addr, len, rec->data); if (error) return error; } count++; pcu->update_firmware_status = (count * 100) / n_fw_records; rec = ihex_next_binrec(rec); } error = ims_pcu_execute_bl_command(pcu, PROGRAM_COMPLETE, NULL, 0, 2000); if (error) dev_err(pcu->dev, "Failed to send PROGRAM_COMPLETE, error: %d\n", error); return 0; } static int ims_pcu_handle_firmware_update(struct ims_pcu *pcu, const struct firmware *fw) { unsigned int n_fw_records; int retval; dev_info(pcu->dev, "Updating firmware %s, size: %zu\n", IMS_PCU_FIRMWARE_NAME, fw->size); n_fw_records = ims_pcu_count_fw_records(fw); retval = ims_pcu_flash_firmware(pcu, fw, n_fw_records); if (retval) goto out; retval = ims_pcu_execute_bl_command(pcu, LAUNCH_APP, NULL, 0, 0); if (retval) dev_err(pcu->dev, "Failed to start application image, error: %d\n", retval); out: pcu->update_firmware_status = retval; sysfs_notify(&pcu->dev->kobj, NULL, "update_firmware_status"); return retval; } static void ims_pcu_process_async_firmware(const struct firmware *fw, void *context) { struct ims_pcu *pcu 
= context; int error; if (!fw) { dev_err(pcu->dev, "Failed to get firmware %s\n", IMS_PCU_FIRMWARE_NAME); goto out; } error = ihex_validate_fw(fw); if (error) { dev_err(pcu->dev, "Firmware %s is invalid\n", IMS_PCU_FIRMWARE_NAME); goto out; } mutex_lock(&pcu->cmd_mutex); ims_pcu_handle_firmware_update(pcu, fw); mutex_unlock(&pcu->cmd_mutex); release_firmware(fw); out: complete(&pcu->async_firmware_done); } /********************************************************************* * Backlight LED device support * *********************************************************************/ #define IMS_PCU_MAX_BRIGHTNESS 31998 static void ims_pcu_backlight_work(struct work_struct *work) { struct ims_pcu_backlight *backlight = container_of(work, struct ims_pcu_backlight, work); struct ims_pcu *pcu = container_of(backlight, struct ims_pcu, backlight); int desired_brightness = backlight->desired_brightness; __le16 br_val = cpu_to_le16(desired_brightness); int error; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_execute_command(pcu, SET_BRIGHTNESS, &br_val, sizeof(br_val)); if (error && error != -ENODEV) dev_warn(pcu->dev, "Failed to set desired brightness %u, error: %d\n", desired_brightness, error); mutex_unlock(&pcu->cmd_mutex); } static void ims_pcu_backlight_set_brightness(struct led_classdev *cdev, enum led_brightness value) { struct ims_pcu_backlight *backlight = container_of(cdev, struct ims_pcu_backlight, cdev); backlight->desired_brightness = value; schedule_work(&backlight->work); } static enum led_brightness ims_pcu_backlight_get_brightness(struct led_classdev *cdev) { struct ims_pcu_backlight *backlight = container_of(cdev, struct ims_pcu_backlight, cdev); struct ims_pcu *pcu = container_of(backlight, struct ims_pcu, backlight); int brightness; int error; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_execute_query(pcu, GET_BRIGHTNESS); if (error) { dev_warn(pcu->dev, "Failed to get current brightness, error: %d\n", error); /* Assume the LED is OFF */ brightness = 
LED_OFF; } else { brightness = get_unaligned_le16(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET]); } mutex_unlock(&pcu->cmd_mutex); return brightness; } static int ims_pcu_setup_backlight(struct ims_pcu *pcu) { struct ims_pcu_backlight *backlight = &pcu->backlight; int error; INIT_WORK(&backlight->work, ims_pcu_backlight_work); snprintf(backlight->name, sizeof(backlight->name), "pcu%d::kbd_backlight", pcu->device_no); backlight->cdev.name = backlight->name; backlight->cdev.max_brightness = IMS_PCU_MAX_BRIGHTNESS; backlight->cdev.brightness_get = ims_pcu_backlight_get_brightness; backlight->cdev.brightness_set = ims_pcu_backlight_set_brightness; error = led_classdev_register(pcu->dev, &backlight->cdev); if (error) { dev_err(pcu->dev, "Failed to register backlight LED device, error: %d\n", error); return error; } return 0; } static void ims_pcu_destroy_backlight(struct ims_pcu *pcu) { struct ims_pcu_backlight *backlight = &pcu->backlight; led_classdev_unregister(&backlight->cdev); cancel_work_sync(&backlight->work); } /********************************************************************* * Sysfs attributes handling * *********************************************************************/ struct ims_pcu_attribute { struct device_attribute dattr; size_t field_offset; int field_length; }; static ssize_t ims_pcu_attribute_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_attribute *attr = container_of(dattr, struct ims_pcu_attribute, dattr); char *field = (char *)pcu + attr->field_offset; return scnprintf(buf, PAGE_SIZE, "%.*s\n", attr->field_length, field); } static ssize_t ims_pcu_attribute_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_attribute *attr = container_of(dattr, struct 
ims_pcu_attribute, dattr); char *field = (char *)pcu + attr->field_offset; size_t data_len; int error; if (count > attr->field_length) return -EINVAL; data_len = strnlen(buf, attr->field_length); if (data_len > attr->field_length) return -EINVAL; error = mutex_lock_interruptible(&pcu->cmd_mutex); if (error) return error; memset(field, 0, attr->field_length); memcpy(field, buf, data_len); error = ims_pcu_set_info(pcu); /* * Even if update failed, let's fetch the info again as we just * clobbered one of the fields. */ ims_pcu_get_info(pcu); mutex_unlock(&pcu->cmd_mutex); return error < 0 ? error : count; } #define IMS_PCU_ATTR(_field, _mode) \ struct ims_pcu_attribute ims_pcu_attr_##_field = { \ .dattr = __ATTR(_field, _mode, \ ims_pcu_attribute_show, \ ims_pcu_attribute_store), \ .field_offset = offsetof(struct ims_pcu, _field), \ .field_length = sizeof(((struct ims_pcu *)NULL)->_field), \ } #define IMS_PCU_RO_ATTR(_field) \ IMS_PCU_ATTR(_field, S_IRUGO) #define IMS_PCU_RW_ATTR(_field) \ IMS_PCU_ATTR(_field, S_IRUGO | S_IWUSR) static IMS_PCU_RW_ATTR(part_number); static IMS_PCU_RW_ATTR(serial_number); static IMS_PCU_RW_ATTR(date_of_manufacturing); static IMS_PCU_RO_ATTR(fw_version); static IMS_PCU_RO_ATTR(bl_version); static IMS_PCU_RO_ATTR(reset_reason); static ssize_t ims_pcu_reset_device(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { static const u8 reset_byte = 1; struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int value; int error; error = kstrtoint(buf, 0, &value); if (error) return error; if (value != 1) return -EINVAL; dev_info(pcu->dev, "Attempting to reset device\n"); error = ims_pcu_execute_command(pcu, PCU_RESET, &reset_byte, 1); if (error) { dev_info(pcu->dev, "Failed to reset device, error: %d\n", error); return error; } return count; } static DEVICE_ATTR(reset_device, S_IWUSR, NULL, ims_pcu_reset_device); static ssize_t ims_pcu_update_firmware_store(struct device 
*dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); const struct firmware *fw = NULL; int value; int error; error = kstrtoint(buf, 0, &value); if (error) return error; if (value != 1) return -EINVAL; error = mutex_lock_interruptible(&pcu->cmd_mutex); if (error) return error; error = request_ihex_firmware(&fw, IMS_PCU_FIRMWARE_NAME, pcu->dev); if (error) { dev_err(pcu->dev, "Failed to request firmware %s, error: %d\n", IMS_PCU_FIRMWARE_NAME, error); goto out; } /* * If we are already in bootloader mode we can proceed with * flashing the firmware. * * If we are in application mode, then we need to switch into * bootloader mode, which will cause the device to disconnect * and reconnect as different device. */ if (pcu->bootloader_mode) error = ims_pcu_handle_firmware_update(pcu, fw); else error = ims_pcu_switch_to_bootloader(pcu); release_firmware(fw); out: mutex_unlock(&pcu->cmd_mutex); return error ?: count; } static DEVICE_ATTR(update_firmware, S_IWUSR, NULL, ims_pcu_update_firmware_store); static ssize_t ims_pcu_update_firmware_status_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); return scnprintf(buf, PAGE_SIZE, "%d\n", pcu->update_firmware_status); } static DEVICE_ATTR(update_firmware_status, S_IRUGO, ims_pcu_update_firmware_status_show, NULL); static struct attribute *ims_pcu_attrs[] = { &ims_pcu_attr_part_number.dattr.attr, &ims_pcu_attr_serial_number.dattr.attr, &ims_pcu_attr_date_of_manufacturing.dattr.attr, &ims_pcu_attr_fw_version.dattr.attr, &ims_pcu_attr_bl_version.dattr.attr, &ims_pcu_attr_reset_reason.dattr.attr, &dev_attr_reset_device.attr, &dev_attr_update_firmware.attr, &dev_attr_update_firmware_status.attr, NULL }; static umode_t ims_pcu_is_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { struct 
device *dev = container_of(kobj, struct device, kobj); struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); umode_t mode = attr->mode; if (pcu->bootloader_mode) { if (attr != &dev_attr_update_firmware_status.attr && attr != &dev_attr_update_firmware.attr && attr != &dev_attr_reset_device.attr) { mode = 0; } } else { if (attr == &dev_attr_update_firmware_status.attr) mode = 0; } return mode; } static const struct attribute_group ims_pcu_attr_group = { .is_visible = ims_pcu_is_attr_visible, .attrs = ims_pcu_attrs, }; /* Support for a separate OFN attribute group */ #define OFN_REG_RESULT_OFFSET 2 static int ims_pcu_read_ofn_config(struct ims_pcu *pcu, u8 addr, u8 *data) { int error; s16 result; error = ims_pcu_execute_command(pcu, OFN_GET_CONFIG, &addr, sizeof(addr)); if (error) return error; result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET); if (result < 0) return -EIO; /* We only need LSB */ *data = pcu->cmd_buf[OFN_REG_RESULT_OFFSET]; return 0; } static int ims_pcu_write_ofn_config(struct ims_pcu *pcu, u8 addr, u8 data) { u8 buffer[] = { addr, data }; int error; s16 result; error = ims_pcu_execute_command(pcu, OFN_SET_CONFIG, &buffer, sizeof(buffer)); if (error) return error; result = (s16)get_unaligned_le16(pcu->cmd_buf + OFN_REG_RESULT_OFFSET); if (result < 0) return -EIO; return 0; } static ssize_t ims_pcu_ofn_reg_data_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int error; u8 data; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, pcu->ofn_reg_addr, &data); mutex_unlock(&pcu->cmd_mutex); if (error) return error; return scnprintf(buf, PAGE_SIZE, "%x\n", data); } static ssize_t ims_pcu_ofn_reg_data_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = 
usb_get_intfdata(intf); int error; u8 value; error = kstrtou8(buf, 0, &value); if (error) return error; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_write_ofn_config(pcu, pcu->ofn_reg_addr, value); mutex_unlock(&pcu->cmd_mutex); return error ?: count; } static DEVICE_ATTR(reg_data, S_IRUGO | S_IWUSR, ims_pcu_ofn_reg_data_show, ims_pcu_ofn_reg_data_store); static ssize_t ims_pcu_ofn_reg_addr_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int error; mutex_lock(&pcu->cmd_mutex); error = scnprintf(buf, PAGE_SIZE, "%x\n", pcu->ofn_reg_addr); mutex_unlock(&pcu->cmd_mutex); return error; } static ssize_t ims_pcu_ofn_reg_addr_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); int error; u8 value; error = kstrtou8(buf, 0, &value); if (error) return error; mutex_lock(&pcu->cmd_mutex); pcu->ofn_reg_addr = value; mutex_unlock(&pcu->cmd_mutex); return count; } static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR, ims_pcu_ofn_reg_addr_show, ims_pcu_ofn_reg_addr_store); struct ims_pcu_ofn_bit_attribute { struct device_attribute dattr; u8 addr; u8 nr; }; static ssize_t ims_pcu_ofn_bit_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_ofn_bit_attribute *attr = container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr); int error; u8 data; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, attr->addr, &data); mutex_unlock(&pcu->cmd_mutex); if (error) return error; return scnprintf(buf, PAGE_SIZE, "%d\n", !!(data & (1 << attr->nr))); } static ssize_t ims_pcu_ofn_bit_store(struct device *dev, struct device_attribute *dattr, const char *buf, size_t count) { struct usb_interface *intf = 
to_usb_interface(dev); struct ims_pcu *pcu = usb_get_intfdata(intf); struct ims_pcu_ofn_bit_attribute *attr = container_of(dattr, struct ims_pcu_ofn_bit_attribute, dattr); int error; int value; u8 data; error = kstrtoint(buf, 0, &value); if (error) return error; if (value > 1) return -EINVAL; mutex_lock(&pcu->cmd_mutex); error = ims_pcu_read_ofn_config(pcu, attr->addr, &data); if (!error) { if (value) data |= 1U << attr->nr; else data &= ~(1U << attr->nr); error = ims_pcu_write_ofn_config(pcu, attr->addr, data); } mutex_unlock(&pcu->cmd_mutex); return error ?: count; } #define IMS_PCU_OFN_BIT_ATTR(_field, _addr, _nr) \ struct ims_pcu_ofn_bit_attribute ims_pcu_ofn_attr_##_field = { \ .dattr = __ATTR(_field, S_IWUSR | S_IRUGO, \ ims_pcu_ofn_bit_show, ims_pcu_ofn_bit_store), \ .addr = _addr, \ .nr = _nr, \ } static IMS_PCU_OFN_BIT_ATTR(engine_enable, 0x60, 7); static IMS_PCU_OFN_BIT_ATTR(speed_enable, 0x60, 6); static IMS_PCU_OFN_BIT_ATTR(assert_enable, 0x60, 5); static IMS_PCU_OFN_BIT_ATTR(xyquant_enable, 0x60, 4); static IMS_PCU_OFN_BIT_ATTR(xyscale_enable, 0x60, 1); static IMS_PCU_OFN_BIT_ATTR(scale_x2, 0x63, 6); static IMS_PCU_OFN_BIT_ATTR(scale_y2, 0x63, 7); static struct attribute *ims_pcu_ofn_attrs[] = { &dev_attr_reg_data.attr, &dev_attr_reg_addr.attr, &ims_pcu_ofn_attr_engine_enable.dattr.attr, &ims_pcu_ofn_attr_speed_enable.dattr.attr, &ims_pcu_ofn_attr_assert_enable.dattr.attr, &ims_pcu_ofn_attr_xyquant_enable.dattr.attr, &ims_pcu_ofn_attr_xyscale_enable.dattr.attr, &ims_pcu_ofn_attr_scale_x2.dattr.attr, &ims_pcu_ofn_attr_scale_y2.dattr.attr, NULL }; static const struct attribute_group ims_pcu_ofn_attr_group = { .name = "ofn", .attrs = ims_pcu_ofn_attrs, }; static void ims_pcu_irq(struct urb *urb) { struct ims_pcu *pcu = urb->context; int retval, status; status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(pcu->dev, "%s - urb shutting down with 
status: %d\n", __func__, status); return; default: dev_dbg(pcu->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } dev_dbg(pcu->dev, "%s: received %d: %*ph\n", __func__, urb->actual_length, urb->actual_length, pcu->urb_in_buf); if (urb == pcu->urb_in) ims_pcu_process_data(pcu, urb); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval && retval != -ENODEV) dev_err(pcu->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static int ims_pcu_buffers_alloc(struct ims_pcu *pcu) { int error; pcu->urb_in_buf = usb_alloc_coherent(pcu->udev, pcu->max_in_size, GFP_KERNEL, &pcu->read_dma); if (!pcu->urb_in_buf) { dev_err(pcu->dev, "Failed to allocate memory for read buffer\n"); return -ENOMEM; } pcu->urb_in = usb_alloc_urb(0, GFP_KERNEL); if (!pcu->urb_in) { dev_err(pcu->dev, "Failed to allocate input URB\n"); error = -ENOMEM; goto err_free_urb_in_buf; } pcu->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; pcu->urb_in->transfer_dma = pcu->read_dma; usb_fill_bulk_urb(pcu->urb_in, pcu->udev, usb_rcvbulkpipe(pcu->udev, pcu->ep_in->bEndpointAddress), pcu->urb_in_buf, pcu->max_in_size, ims_pcu_irq, pcu); /* * We are using usb_bulk_msg() for sending so there is no point * in allocating memory with usb_alloc_coherent(). 
*/ pcu->urb_out_buf = kmalloc(pcu->max_out_size, GFP_KERNEL); if (!pcu->urb_out_buf) { dev_err(pcu->dev, "Failed to allocate memory for write buffer\n"); error = -ENOMEM; goto err_free_in_urb; } pcu->urb_ctrl_buf = usb_alloc_coherent(pcu->udev, pcu->max_ctrl_size, GFP_KERNEL, &pcu->ctrl_dma); if (!pcu->urb_ctrl_buf) { dev_err(pcu->dev, "Failed to allocate memory for read buffer\n"); error = -ENOMEM; goto err_free_urb_out_buf; } pcu->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL); if (!pcu->urb_ctrl) { dev_err(pcu->dev, "Failed to allocate input URB\n"); error = -ENOMEM; goto err_free_urb_ctrl_buf; } pcu->urb_ctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; pcu->urb_ctrl->transfer_dma = pcu->ctrl_dma; usb_fill_int_urb(pcu->urb_ctrl, pcu->udev, usb_rcvintpipe(pcu->udev, pcu->ep_ctrl->bEndpointAddress), pcu->urb_ctrl_buf, pcu->max_ctrl_size, ims_pcu_irq, pcu, pcu->ep_ctrl->bInterval); return 0; err_free_urb_ctrl_buf: usb_free_coherent(pcu->udev, pcu->max_ctrl_size, pcu->urb_ctrl_buf, pcu->ctrl_dma); err_free_urb_out_buf: kfree(pcu->urb_out_buf); err_free_in_urb: usb_free_urb(pcu->urb_in); err_free_urb_in_buf: usb_free_coherent(pcu->udev, pcu->max_in_size, pcu->urb_in_buf, pcu->read_dma); return error; } static void ims_pcu_buffers_free(struct ims_pcu *pcu) { usb_kill_urb(pcu->urb_in); usb_free_urb(pcu->urb_in); usb_free_coherent(pcu->udev, pcu->max_out_size, pcu->urb_in_buf, pcu->read_dma); kfree(pcu->urb_out_buf); usb_kill_urb(pcu->urb_ctrl); usb_free_urb(pcu->urb_ctrl); usb_free_coherent(pcu->udev, pcu->max_ctrl_size, pcu->urb_ctrl_buf, pcu->ctrl_dma); } static const struct usb_cdc_union_desc * ims_pcu_get_cdc_union_desc(struct usb_interface *intf) { const void *buf = intf->altsetting->extra; size_t buflen = intf->altsetting->extralen; struct usb_cdc_union_desc *union_desc; if (!buf) { dev_err(&intf->dev, "Missing descriptor data\n"); return NULL; } if (!buflen) { dev_err(&intf->dev, "Zero length descriptor\n"); return NULL; } while (buflen >= sizeof(*union_desc)) { 
union_desc = (struct usb_cdc_union_desc *)buf; if (union_desc->bLength > buflen) { dev_err(&intf->dev, "Too large descriptor\n"); return NULL; } if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { dev_dbg(&intf->dev, "Found union header\n"); if (union_desc->bLength >= sizeof(*union_desc)) return union_desc; dev_err(&intf->dev, "Union descriptor to short (%d vs %zd\n)", union_desc->bLength, sizeof(*union_desc)); return NULL; } buflen -= union_desc->bLength; buf += union_desc->bLength; } dev_err(&intf->dev, "Missing CDC union descriptor\n"); return NULL; } static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pcu) { const struct usb_cdc_union_desc *union_desc; struct usb_host_interface *alt; union_desc = ims_pcu_get_cdc_union_desc(intf); if (!union_desc) return -EINVAL; pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev, union_desc->bMasterInterface0); if (!pcu->ctrl_intf) return -EINVAL; alt = pcu->ctrl_intf->cur_altsetting; if (alt->desc.bNumEndpoints < 1) return -ENODEV; pcu->ep_ctrl = &alt->endpoint[0].desc; pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); pcu->data_intf = usb_ifnum_to_if(pcu->udev, union_desc->bSlaveInterface0); if (!pcu->data_intf) return -EINVAL; alt = pcu->data_intf->cur_altsetting; if (alt->desc.bNumEndpoints != 2) { dev_err(pcu->dev, "Incorrect number of endpoints on data interface (%d)\n", alt->desc.bNumEndpoints); return -EINVAL; } pcu->ep_out = &alt->endpoint[0].desc; if (!usb_endpoint_is_bulk_out(pcu->ep_out)) { dev_err(pcu->dev, "First endpoint on data interface is not BULK OUT\n"); return -EINVAL; } pcu->max_out_size = usb_endpoint_maxp(pcu->ep_out); if (pcu->max_out_size < 8) { dev_err(pcu->dev, "Max OUT packet size is too small (%zd)\n", pcu->max_out_size); return -EINVAL; } pcu->ep_in = &alt->endpoint[1].desc; if (!usb_endpoint_is_bulk_in(pcu->ep_in)) { dev_err(pcu->dev, "Second endpoint on data interface is not BULK IN\n"); return -EINVAL; } 
pcu->max_in_size = usb_endpoint_maxp(pcu->ep_in); if (pcu->max_in_size < 8) { dev_err(pcu->dev, "Max IN packet size is too small (%zd)\n", pcu->max_in_size); return -EINVAL; } return 0; } static int ims_pcu_start_io(struct ims_pcu *pcu) { int error; error = usb_submit_urb(pcu->urb_ctrl, GFP_KERNEL); if (error) { dev_err(pcu->dev, "Failed to start control IO - usb_submit_urb failed with result: %d\n", error); return -EIO; } error = usb_submit_urb(pcu->urb_in, GFP_KERNEL); if (error) { dev_err(pcu->dev, "Failed to start IO - usb_submit_urb failed with result: %d\n", error); usb_kill_urb(pcu->urb_ctrl); return -EIO; } return 0; } static void ims_pcu_stop_io(struct ims_pcu *pcu) { usb_kill_urb(pcu->urb_in); usb_kill_urb(pcu->urb_ctrl); } static int ims_pcu_line_setup(struct ims_pcu *pcu) { struct usb_host_interface *interface = pcu->ctrl_intf->cur_altsetting; struct usb_cdc_line_coding *line = (void *)pcu->cmd_buf; int error; memset(line, 0, sizeof(*line)); line->dwDTERate = cpu_to_le32(57600); line->bDataBits = 8; error = usb_control_msg(pcu->udev, usb_sndctrlpipe(pcu->udev, 0), USB_CDC_REQ_SET_LINE_CODING, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, interface->desc.bInterfaceNumber, line, sizeof(struct usb_cdc_line_coding), 5000); if (error < 0) { dev_err(pcu->dev, "Failed to set line coding, error: %d\n", error); return error; } error = usb_control_msg(pcu->udev, usb_sndctrlpipe(pcu->udev, 0), USB_CDC_REQ_SET_CONTROL_LINE_STATE, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0x03, interface->desc.bInterfaceNumber, NULL, 0, 5000); if (error < 0) { dev_err(pcu->dev, "Failed to set line state, error: %d\n", error); return error; } return 0; } static int ims_pcu_get_device_info(struct ims_pcu *pcu) { int error; error = ims_pcu_get_info(pcu); if (error) return error; error = ims_pcu_execute_query(pcu, GET_FW_VERSION); if (error) { dev_err(pcu->dev, "GET_FW_VERSION command failed, error: %d\n", error); return error; } snprintf(pcu->fw_version, sizeof(pcu->fw_version), 
"%02d%02d%02d%02d.%c%c", pcu->cmd_buf[2], pcu->cmd_buf[3], pcu->cmd_buf[4], pcu->cmd_buf[5], pcu->cmd_buf[6], pcu->cmd_buf[7]); error = ims_pcu_execute_query(pcu, GET_BL_VERSION); if (error) { dev_err(pcu->dev, "GET_BL_VERSION command failed, error: %d\n", error); return error; } snprintf(pcu->bl_version, sizeof(pcu->bl_version), "%02d%02d%02d%02d.%c%c", pcu->cmd_buf[2], pcu->cmd_buf[3], pcu->cmd_buf[4], pcu->cmd_buf[5], pcu->cmd_buf[6], pcu->cmd_buf[7]); error = ims_pcu_execute_query(pcu, RESET_REASON); if (error) { dev_err(pcu->dev, "RESET_REASON command failed, error: %d\n", error); return error; } snprintf(pcu->reset_reason, sizeof(pcu->reset_reason), "%02x", pcu->cmd_buf[IMS_PCU_DATA_OFFSET]); dev_dbg(pcu->dev, "P/N: %s, MD: %s, S/N: %s, FW: %s, BL: %s, RR: %s\n", pcu->part_number, pcu->date_of_manufacturing, pcu->serial_number, pcu->fw_version, pcu->bl_version, pcu->reset_reason); return 0; } static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id) { int error; error = ims_pcu_execute_query(pcu, GET_DEVICE_ID); if (error) { dev_err(pcu->dev, "GET_DEVICE_ID command failed, error: %d\n", error); return error; } *device_id = pcu->cmd_buf[IMS_PCU_DATA_OFFSET]; dev_dbg(pcu->dev, "Detected device ID: %d\n", *device_id); return 0; } static int ims_pcu_init_application_mode(struct ims_pcu *pcu) { static atomic_t device_no = ATOMIC_INIT(-1); const struct ims_pcu_device_info *info; int error; error = ims_pcu_get_device_info(pcu); if (error) { /* Device does not respond to basic queries, hopeless */ return error; } error = ims_pcu_identify_type(pcu, &pcu->device_id); if (error) { dev_err(pcu->dev, "Failed to identify device, error: %d\n", error); /* * Do not signal error, but do not create input nor * backlight devices either, let userspace figure this * out (flash a new firmware?). 
*/ return 0; } if (pcu->device_id >= ARRAY_SIZE(ims_pcu_device_info) || !ims_pcu_device_info[pcu->device_id].keymap) { dev_err(pcu->dev, "Device ID %d is not valid\n", pcu->device_id); /* Same as above, punt to userspace */ return 0; } /* Device appears to be operable, complete initialization */ pcu->device_no = atomic_inc_return(&device_no); /* * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor */ if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID) { error = sysfs_create_group(&pcu->dev->kobj, &ims_pcu_ofn_attr_group); if (error) return error; } error = ims_pcu_setup_backlight(pcu); if (error) return error; info = &ims_pcu_device_info[pcu->device_id]; error = ims_pcu_setup_buttons(pcu, info->keymap, info->keymap_len); if (error) goto err_destroy_backlight; if (info->has_gamepad) { error = ims_pcu_setup_gamepad(pcu); if (error) goto err_destroy_buttons; } pcu->setup_complete = true; return 0; err_destroy_buttons: ims_pcu_destroy_buttons(pcu); err_destroy_backlight: ims_pcu_destroy_backlight(pcu); return error; } static void ims_pcu_destroy_application_mode(struct ims_pcu *pcu) { if (pcu->setup_complete) { pcu->setup_complete = false; mb(); /* make sure flag setting is not reordered */ if (pcu->gamepad) ims_pcu_destroy_gamepad(pcu); ims_pcu_destroy_buttons(pcu); ims_pcu_destroy_backlight(pcu); if (pcu->device_id != IMS_PCU_PCU_B_DEVICE_ID) sysfs_remove_group(&pcu->dev->kobj, &ims_pcu_ofn_attr_group); } } static int ims_pcu_init_bootloader_mode(struct ims_pcu *pcu) { int error; error = ims_pcu_execute_bl_command(pcu, QUERY_DEVICE, NULL, 0, IMS_PCU_CMD_RESPONSE_TIMEOUT); if (error) { dev_err(pcu->dev, "Bootloader does not respond, aborting\n"); return error; } pcu->fw_start_addr = get_unaligned_le32(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET + 11]); pcu->fw_end_addr = get_unaligned_le32(&pcu->cmd_buf[IMS_PCU_DATA_OFFSET + 15]); dev_info(pcu->dev, "Device is in bootloader mode (addr 0x%08x-0x%08x), requesting firmware\n", pcu->fw_start_addr, pcu->fw_end_addr); error = 
request_firmware_nowait(THIS_MODULE, true, IMS_PCU_FIRMWARE_NAME, pcu->dev, GFP_KERNEL, pcu, ims_pcu_process_async_firmware); if (error) { /* This error is not fatal, let userspace have another chance */ complete(&pcu->async_firmware_done); } return 0; } static void ims_pcu_destroy_bootloader_mode(struct ims_pcu *pcu) { /* Make sure our initial firmware request has completed */ wait_for_completion(&pcu->async_firmware_done); } #define IMS_PCU_APPLICATION_MODE 0 #define IMS_PCU_BOOTLOADER_MODE 1 static struct usb_driver ims_pcu_driver; static int ims_pcu_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct ims_pcu *pcu; int error; pcu = kzalloc(sizeof(struct ims_pcu), GFP_KERNEL); if (!pcu) return -ENOMEM; pcu->dev = &intf->dev; pcu->udev = udev; pcu->bootloader_mode = id->driver_info == IMS_PCU_BOOTLOADER_MODE; mutex_init(&pcu->cmd_mutex); init_completion(&pcu->cmd_done); init_completion(&pcu->async_firmware_done); error = ims_pcu_parse_cdc_data(intf, pcu); if (error) goto err_free_mem; error = usb_driver_claim_interface(&ims_pcu_driver, pcu->data_intf, pcu); if (error) { dev_err(&intf->dev, "Unable to claim corresponding data interface: %d\n", error); goto err_free_mem; } usb_set_intfdata(pcu->ctrl_intf, pcu); usb_set_intfdata(pcu->data_intf, pcu); error = ims_pcu_buffers_alloc(pcu); if (error) goto err_unclaim_intf; error = ims_pcu_start_io(pcu); if (error) goto err_free_buffers; error = ims_pcu_line_setup(pcu); if (error) goto err_stop_io; error = sysfs_create_group(&intf->dev.kobj, &ims_pcu_attr_group); if (error) goto err_stop_io; error = pcu->bootloader_mode ? 
ims_pcu_init_bootloader_mode(pcu) : ims_pcu_init_application_mode(pcu); if (error) goto err_remove_sysfs; return 0; err_remove_sysfs: sysfs_remove_group(&intf->dev.kobj, &ims_pcu_attr_group); err_stop_io: ims_pcu_stop_io(pcu); err_free_buffers: ims_pcu_buffers_free(pcu); err_unclaim_intf: usb_driver_release_interface(&ims_pcu_driver, pcu->data_intf); err_free_mem: kfree(pcu); return error; } static void ims_pcu_disconnect(struct usb_interface *intf) { struct ims_pcu *pcu = usb_get_intfdata(intf); struct usb_host_interface *alt = intf->cur_altsetting; usb_set_intfdata(intf, NULL); /* * See if we are dealing with control or data interface. The cleanup * happens when we unbind primary (control) interface. */ if (alt->desc.bInterfaceClass != USB_CLASS_COMM) return; sysfs_remove_group(&intf->dev.kobj, &ims_pcu_attr_group); ims_pcu_stop_io(pcu); if (pcu->bootloader_mode) ims_pcu_destroy_bootloader_mode(pcu); else ims_pcu_destroy_application_mode(pcu); ims_pcu_buffers_free(pcu); kfree(pcu); } #ifdef CONFIG_PM static int ims_pcu_suspend(struct usb_interface *intf, pm_message_t message) { struct ims_pcu *pcu = usb_get_intfdata(intf); struct usb_host_interface *alt = intf->cur_altsetting; if (alt->desc.bInterfaceClass == USB_CLASS_COMM) ims_pcu_stop_io(pcu); return 0; } static int ims_pcu_resume(struct usb_interface *intf) { struct ims_pcu *pcu = usb_get_intfdata(intf); struct usb_host_interface *alt = intf->cur_altsetting; int retval = 0; if (alt->desc.bInterfaceClass == USB_CLASS_COMM) { retval = ims_pcu_start_io(pcu); if (retval == 0) retval = ims_pcu_line_setup(pcu); } return retval; } #endif static const struct usb_device_id ims_pcu_id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x04d8, 0x0082, USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER), .driver_info = IMS_PCU_APPLICATION_MODE, }, { USB_DEVICE_AND_INTERFACE_INFO(0x04d8, 0x0083, USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER), .driver_info = IMS_PCU_BOOTLOADER_MODE, }, { } }; 
static struct usb_driver ims_pcu_driver = { .name = "ims_pcu", .id_table = ims_pcu_id_table, .probe = ims_pcu_probe, .disconnect = ims_pcu_disconnect, #ifdef CONFIG_PM .suspend = ims_pcu_suspend, .resume = ims_pcu_resume, .reset_resume = ims_pcu_resume, #endif }; module_usb_driver(ims_pcu_driver); MODULE_DESCRIPTION("IMS Passenger Control Unit driver"); MODULE_AUTHOR("Dmitry Torokhov <dmitry.torokhov@gmail.com>"); MODULE_LICENSE("GPL");
static const struct usb_cdc_union_desc * ims_pcu_get_cdc_union_desc(struct usb_interface *intf) { const void *buf = intf->altsetting->extra; size_t buflen = intf->altsetting->extralen; struct usb_cdc_union_desc *union_desc; if (!buf) { dev_err(&intf->dev, "Missing descriptor data\n"); return NULL; } if (!buflen) { dev_err(&intf->dev, "Zero length descriptor\n"); return NULL; } while (buflen > 0) { union_desc = (struct usb_cdc_union_desc *)buf; if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { dev_dbg(&intf->dev, "Found union header\n"); return union_desc; } buflen -= union_desc->bLength; buf += union_desc->bLength; } dev_err(&intf->dev, "Missing CDC union descriptor\n"); return NULL;
static const struct usb_cdc_union_desc * ims_pcu_get_cdc_union_desc(struct usb_interface *intf) { const void *buf = intf->altsetting->extra; size_t buflen = intf->altsetting->extralen; struct usb_cdc_union_desc *union_desc; if (!buf) { dev_err(&intf->dev, "Missing descriptor data\n"); return NULL; } if (!buflen) { dev_err(&intf->dev, "Zero length descriptor\n"); return NULL; } while (buflen >= sizeof(*union_desc)) { union_desc = (struct usb_cdc_union_desc *)buf; if (union_desc->bLength > buflen) { dev_err(&intf->dev, "Too large descriptor\n"); return NULL; } if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { dev_dbg(&intf->dev, "Found union header\n"); if (union_desc->bLength >= sizeof(*union_desc)) return union_desc; dev_err(&intf->dev, "Union descriptor to short (%d vs %zd\n)", union_desc->bLength, sizeof(*union_desc)); return NULL; } buflen -= union_desc->bLength; buf += union_desc->bLength; } dev_err(&intf->dev, "Missing CDC union descriptor\n"); return NULL;
{'added': [(1638, '\twhile (buflen >= sizeof(*union_desc)) {'), (1641, '\t\tif (union_desc->bLength > buflen) {'), (1642, '\t\t\tdev_err(&intf->dev, "Too large descriptor\\n");'), (1643, '\t\t\treturn NULL;'), (1644, '\t\t}'), (1645, ''), (1649, ''), (1650, '\t\t\tif (union_desc->bLength >= sizeof(*union_desc))'), (1651, '\t\t\t\treturn union_desc;'), (1652, ''), (1653, '\t\t\tdev_err(&intf->dev,'), (1654, '\t\t\t\t"Union descriptor to short (%d vs %zd\\n)",'), (1655, '\t\t\t\tunion_desc->bLength, sizeof(*union_desc));'), (1656, '\t\t\treturn NULL;')], 'deleted': [(1638, '\twhile (buflen > 0) {'), (1644, '\t\t\treturn union_desc;')]}
14
2
1,552
9,127
26
145
6
https://github.com/torvalds/linux
CVE-2017-16645
CWE-125
2,409
tlb.c
C
flush_tlb_mm_range
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. 
This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. 
Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. */ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) goto out; if (!current->mm) { leave_mm(smp_processor_id()); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) __flush_tlb_one(start); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { 
struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. 
This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); /* This is an implicit full barrier that synchronizes with switch_mm. */ local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. 
Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. */ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) { /* Synchronize with switch_mm. */ smp_mb(); goto out; } if (!current->mm) { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; /* * Both branches below are implicit full barriers (MOV to CR or * INVLPG) that synchronize with switch_mm. */ if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) { /* * Implicit full barrier (INVLPG) that synchronizes * with switch_mm. */ __flush_tlb_one(start); } else { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. 
*/ smp_mb(); } } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { 
debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) goto out; if (!current->mm) { leave_mm(smp_processor_id()); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); }
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) { /* Synchronize with switch_mm. */ smp_mb(); goto out; } if (!current->mm) { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; /* * Both branches below are implicit full barriers (MOV to CR or * INVLPG) that synchronize with switch_mm. */ if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); }
{'added': [(164, ''), (165, '\t/* This is an implicit full barrier that synchronizes with switch_mm. */'), (167, ''), (194, '\tif (current->active_mm != mm) {'), (195, '\t\t/* Synchronize with switch_mm. */'), (196, '\t\tsmp_mb();'), (197, ''), (199, '\t}'), (203, ''), (204, '\t\t/* Synchronize with switch_mm. */'), (205, '\t\tsmp_mb();'), (206, ''), (213, '\t/*'), (214, '\t * Both branches below are implicit full barriers (MOV to CR or'), (215, '\t * INVLPG) that synchronize with switch_mm.'), (216, '\t */'), (246, '\t\tif (current->mm) {'), (247, '\t\t\t/*'), (248, '\t\t\t * Implicit full barrier (INVLPG) that synchronizes'), (249, '\t\t\t * with switch_mm.'), (250, '\t\t\t */'), (252, '\t\t} else {'), (254, ''), (255, '\t\t\t/* Synchronize with switch_mm. */'), (256, '\t\t\tsmp_mb();'), (257, '\t\t}')], 'deleted': [(191, '\tif (current->active_mm != mm)'), (231, '\t\tif (current->mm)'), (233, '\t\telse')]}
26
3
211
1,177
34
200
9
https://github.com/torvalds/linux
CVE-2016-2069
CWE-362
2,490
pkcs15-gemsafeV1.c
C
gemsafe_get_cert_len
/* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Initially written by David Mattes <david.mattes@boeing.com> */ /* Support for multiple key containers by Lukas Wunner <lukas@wunner.de> */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #define MANU_ID "Gemplus" #define APPLET_NAME "GemSAFE V1" #define DRIVER_SERIAL_NUMBER "v0.9" #define GEMSAFE_APP_PATH "3F001600" #define GEMSAFE_PATH "3F0016000004" /* Apparently, the Applet max read "quanta" is 248 bytes * Gemalto ClassicClient reads files in chunks of 238 bytes */ #define GEMSAFE_READ_QUANTUM 248 #define GEMSAFE_MAX_OBJLEN 28672 int sc_pkcs15emu_gemsafeV1_init_ex(sc_pkcs15_card_t *, struct sc_aid *,sc_pkcs15emu_opt_t *); static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags); static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags); static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int 
type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags); typedef struct cdata_st { char *label; int authority; const char *path; size_t index; size_t count; const char *id; int obj_flags; } cdata; const unsigned int gemsafe_cert_max = 12; cdata gemsafe_cert[] = { {"DS certificate #1", 0, GEMSAFE_PATH, 0, 0, "45", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #2", 0, GEMSAFE_PATH, 0, 0, "46", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #3", 0, GEMSAFE_PATH, 0, 0, "47", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #4", 0, GEMSAFE_PATH, 0, 0, "48", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #5", 0, GEMSAFE_PATH, 0, 0, "49", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #6", 0, GEMSAFE_PATH, 0, 0, "50", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #7", 0, GEMSAFE_PATH, 0, 0, "51", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #8", 0, GEMSAFE_PATH, 0, 0, "52", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #9", 0, GEMSAFE_PATH, 0, 0, "53", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #10", 0, GEMSAFE_PATH, 0, 0, "54", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #11", 0, GEMSAFE_PATH, 0, 0, "55", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #12", 0, GEMSAFE_PATH, 0, 0, "56", SC_PKCS15_CO_FLAG_MODIFIABLE}, }; typedef struct pdata_st { const u8 atr[SC_MAX_ATR_SIZE]; const size_t atr_len; const char *id; const char *label; const char *path; const int ref; const int type; const unsigned int maxlen; const unsigned int minlen; const int flags; const int tries_left; const char pad_char; const int obj_flags; } pindata; const unsigned int gemsafe_pin_max = 2; const pindata gemsafe_pin[] = { /* ATR-specific PIN policies, first match found is used: */ { {0x3B, 0x7D, 0x96, 0x00, 0x00, 0x80, 0x31, 0x80, 0x65, 0xB0, 0x83, 0x11, 0x48, 0xC8, 0x83, 0x00, 0x90, 0x00}, 18, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_ASCII_NUMERIC, 8, 4, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | 
SC_PKCS15_PIN_FLAG_LOCAL, 3, 0x00, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE }, /* default PIN policy comes last: */ { { 0 }, 0, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_BCD, 16, 6, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0xFF, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE } }; typedef struct prdata_st { const char *id; char *label; unsigned int modulus_len; int usage; const char *path; int ref; const char *auth_id; int obj_flags; } prdata; #define USAGE_NONREP SC_PKCS15_PRKEY_USAGE_NONREPUDIATION #define USAGE_KE SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP #define USAGE_AUT SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP | \ SC_PKCS15_PRKEY_USAGE_SIGN prdata gemsafe_prkeys[] = { { "45", "DS key #1", 1024, USAGE_AUT, GEMSAFE_PATH, 0x03, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "46", "DS key #2", 1024, USAGE_AUT, GEMSAFE_PATH, 0x04, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "47", "DS key #3", 1024, USAGE_AUT, GEMSAFE_PATH, 0x05, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "48", "DS key #4", 1024, USAGE_AUT, GEMSAFE_PATH, 0x06, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "49", "DS key #5", 1024, USAGE_AUT, GEMSAFE_PATH, 0x07, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "50", "DS key #6", 1024, USAGE_AUT, GEMSAFE_PATH, 0x08, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "51", "DS key #7", 1024, USAGE_AUT, GEMSAFE_PATH, 0x09, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "52", "DS key #8", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0a, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "53", "DS key #9", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0b, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "54", "DS key #10", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0c, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "55", "DS key #11", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0d, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "56", "DS key #12", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0e, "01", 
SC_PKCS15_CO_FLAG_PRIVATE}, }; static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. 
*/ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. 
*/ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; } static int gemsafe_detect_card( sc_pkcs15_card_t *p15card) { if (strcmp(p15card->card->name, "GemSAFE V1")) return SC_ERROR_WRONG_CARD; return SC_SUCCESS; } static int sc_pkcs15emu_gemsafeV1_init( sc_pkcs15_card_t *p15card) { int r; unsigned int i; struct sc_path path; struct sc_file *file = NULL; struct sc_card *card = p15card->card; struct sc_apdu apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_log(p15card->card->ctx, "Setting pkcs15 parameters"); if (p15card->tokeninfo->label) free(p15card->tokeninfo->label); p15card->tokeninfo->label = malloc(strlen(APPLET_NAME) + 1); if (!p15card->tokeninfo->label) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->label, APPLET_NAME); if (p15card->tokeninfo->serial_number) free(p15card->tokeninfo->serial_number); p15card->tokeninfo->serial_number = malloc(strlen(DRIVER_SERIAL_NUMBER) + 1); if (!p15card->tokeninfo->serial_number) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->serial_number, DRIVER_SERIAL_NUMBER); /* the GemSAFE applet version number */ sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xca, 0xdf, 0x03); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); /* Manual says Le=0x05, but should be 0x08 to return full version number */ apdu.le = 0x08; apdu.lc = 0; apdu.datalen = 0; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 != 0x90 || apdu.sw2 != 0x00) return SC_ERROR_INTERNAL; if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* the manufacturer ID, in this case GemPlus */ if (p15card->tokeninfo->manufacturer_id) free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = malloc(strlen(MANU_ID) + 1); if (!p15card->tokeninfo->manufacturer_id) return SC_ERROR_INTERNAL; 
strcpy(p15card->tokeninfo->manufacturer_id, MANU_ID); /* determine allocated key containers and length of certificates */ r = gemsafe_get_cert_len(card); if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* set certs */ sc_log(p15card->card->ctx, "Setting certificates"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; if (gemsafe_cert[i].label == NULL) continue; sc_format_path(gemsafe_cert[i].path, &path); sc_pkcs15_format_id(gemsafe_cert[i].id, &p15Id); path.index = gemsafe_cert[i].index; path.count = gemsafe_cert[i].count; sc_pkcs15emu_add_cert(p15card, SC_PKCS15_TYPE_CERT_X509, gemsafe_cert[i].authority, &path, &p15Id, gemsafe_cert[i].label, gemsafe_cert[i].obj_flags); } /* set gemsafe_pin */ sc_log(p15card->card->ctx, "Setting PIN"); for (i=0; i < gemsafe_pin_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; sc_pkcs15_format_id(gemsafe_pin[i].id, &p15Id); sc_format_path(gemsafe_pin[i].path, &path); if (gemsafe_pin[i].atr_len == 0 || (gemsafe_pin[i].atr_len == p15card->card->atr.len && memcmp(p15card->card->atr.value, gemsafe_pin[i].atr, p15card->card->atr.len) == 0)) { sc_pkcs15emu_add_pin(p15card, &p15Id, gemsafe_pin[i].label, &path, gemsafe_pin[i].ref, gemsafe_pin[i].type, gemsafe_pin[i].minlen, gemsafe_pin[i].maxlen, gemsafe_pin[i].flags, gemsafe_pin[i].tries_left, gemsafe_pin[i].pad_char, gemsafe_pin[i].obj_flags); break; } }; /* set private keys */ sc_log(p15card->card->ctx, "Setting private keys"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id, authId, *pauthId; struct sc_path path; int key_ref = 0x03; if (gemsafe_prkeys[i].label == NULL) continue; sc_pkcs15_format_id(gemsafe_prkeys[i].id, &p15Id); if (gemsafe_prkeys[i].auth_id) { sc_pkcs15_format_id(gemsafe_prkeys[i].auth_id, &authId); pauthId = &authId; } else pauthId = NULL; sc_format_path(gemsafe_prkeys[i].path, &path); /* * The key ref may be different for different sites; * by adding flags=n where the low order 4 bits can be * the 
key ref we can force it. */ if ( p15card->card->flags & 0x0F) { key_ref = p15card->card->flags & 0x0F; sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL, "Overriding key_ref %d with %d\n", gemsafe_prkeys[i].ref, key_ref); } else key_ref = gemsafe_prkeys[i].ref; sc_pkcs15emu_add_prkey(p15card, &p15Id, gemsafe_prkeys[i].label, SC_PKCS15_TYPE_PRKEY_RSA, gemsafe_prkeys[i].modulus_len, gemsafe_prkeys[i].usage, &path, key_ref, pauthId, gemsafe_prkeys[i].obj_flags); } /* select the application DF */ sc_log(p15card->card->ctx, "Selecting application DF"); sc_format_path(GEMSAFE_APP_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* set the application DF */ if (p15card->file_app) free(p15card->file_app); p15card->file_app = file; return SC_SUCCESS; } int sc_pkcs15emu_gemsafeV1_init_ex( sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK) return sc_pkcs15emu_gemsafeV1_init(p15card); else { int r = gemsafe_detect_card(p15card); if (r) return SC_ERROR_WRONG_CARD; return sc_pkcs15emu_gemsafeV1_init(p15card); } } static sc_pkcs15_df_t * sc_pkcs15emu_get_df(sc_pkcs15_card_t *p15card, unsigned int type) { sc_pkcs15_df_t *df; sc_file_t *file; int created = 0; while (1) { for (df = p15card->df_list; df; df = df->next) { if (df->type == type) { if (created) df->enumerated = 1; return df; } } assert(created == 0); file = sc_file_new(); if (!file) return NULL; sc_format_path("11001101", &file->path); sc_pkcs15_add_df(p15card, type, &file->path); sc_file_free(file); created++; } } static int sc_pkcs15emu_add_object(sc_pkcs15_card_t *p15card, int type, const char *label, void *data, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_object_t *obj; int df_type; obj = calloc(1, sizeof(*obj)); obj->type = type; obj->data = data; if (label) strncpy(obj->label, label, sizeof(obj->label)-1); obj->flags = obj_flags; if (auth_id) obj->auth_id = 
*auth_id; switch (type & SC_PKCS15_TYPE_CLASS_MASK) { case SC_PKCS15_TYPE_AUTH: df_type = SC_PKCS15_AODF; break; case SC_PKCS15_TYPE_PRKEY: df_type = SC_PKCS15_PRKDF; break; case SC_PKCS15_TYPE_PUBKEY: df_type = SC_PKCS15_PUKDF; break; case SC_PKCS15_TYPE_CERT: df_type = SC_PKCS15_CDF; break; default: sc_log(p15card->card->ctx, "Unknown PKCS15 object type %d", type); free(obj); return SC_ERROR_INVALID_ARGUMENTS; } obj->df = sc_pkcs15emu_get_df(p15card, df_type); sc_pkcs15_add_object(p15card, obj); return 0; } static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags) { sc_pkcs15_auth_info_t *info; info = calloc(1, sizeof(*info)); if (!info) LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); info->auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; info->auth_method = SC_AC_CHV; info->auth_id = *id; info->attrs.pin.min_length = min_length; info->attrs.pin.max_length = max_length; info->attrs.pin.stored_length = max_length; info->attrs.pin.type = type; info->attrs.pin.reference = ref; info->attrs.pin.flags = flags; info->attrs.pin.pad_char = pad_char; info->tries_left = tries_left; info->logged_in = SC_PIN_STATE_UNKNOWN; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, SC_PKCS15_TYPE_AUTH_PIN, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags) { sc_pkcs15_cert_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->authority = authority; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const 
sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_prkey_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->modulus_length = modulus_length; info->usage = usage; info->native = 1; info->access_flags = SC_PKCS15_PRKEY_ACCESS_SENSITIVE | SC_PKCS15_PRKEY_ACCESS_ALWAYSSENSITIVE | SC_PKCS15_PRKEY_ACCESS_NEVEREXTRACTABLE | SC_PKCS15_PRKEY_ACCESS_LOCAL; info->key_reference = ref; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, auth_id, obj_flags); } /* SC_IMPLEMENT_DRIVER_VERSION("0.9.4") */
/* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Initially written by David Mattes <david.mattes@boeing.com> */ /* Support for multiple key containers by Lukas Wunner <lukas@wunner.de> */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #define MANU_ID "Gemplus" #define APPLET_NAME "GemSAFE V1" #define DRIVER_SERIAL_NUMBER "v0.9" #define GEMSAFE_APP_PATH "3F001600" #define GEMSAFE_PATH "3F0016000004" /* Apparently, the Applet max read "quanta" is 248 bytes * Gemalto ClassicClient reads files in chunks of 238 bytes */ #define GEMSAFE_READ_QUANTUM 248 #define GEMSAFE_MAX_OBJLEN 28672 int sc_pkcs15emu_gemsafeV1_init_ex(sc_pkcs15_card_t *, struct sc_aid *,sc_pkcs15emu_opt_t *); static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags); static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags); static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int 
type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags); typedef struct cdata_st { char *label; int authority; const char *path; size_t index; size_t count; const char *id; int obj_flags; } cdata; const unsigned int gemsafe_cert_max = 12; cdata gemsafe_cert[] = { {"DS certificate #1", 0, GEMSAFE_PATH, 0, 0, "45", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #2", 0, GEMSAFE_PATH, 0, 0, "46", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #3", 0, GEMSAFE_PATH, 0, 0, "47", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #4", 0, GEMSAFE_PATH, 0, 0, "48", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #5", 0, GEMSAFE_PATH, 0, 0, "49", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #6", 0, GEMSAFE_PATH, 0, 0, "50", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #7", 0, GEMSAFE_PATH, 0, 0, "51", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #8", 0, GEMSAFE_PATH, 0, 0, "52", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #9", 0, GEMSAFE_PATH, 0, 0, "53", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #10", 0, GEMSAFE_PATH, 0, 0, "54", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #11", 0, GEMSAFE_PATH, 0, 0, "55", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #12", 0, GEMSAFE_PATH, 0, 0, "56", SC_PKCS15_CO_FLAG_MODIFIABLE}, }; typedef struct pdata_st { const u8 atr[SC_MAX_ATR_SIZE]; const size_t atr_len; const char *id; const char *label; const char *path; const int ref; const int type; const unsigned int maxlen; const unsigned int minlen; const int flags; const int tries_left; const char pad_char; const int obj_flags; } pindata; const unsigned int gemsafe_pin_max = 2; const pindata gemsafe_pin[] = { /* ATR-specific PIN policies, first match found is used: */ { {0x3B, 0x7D, 0x96, 0x00, 0x00, 0x80, 0x31, 0x80, 0x65, 0xB0, 0x83, 0x11, 0x48, 0xC8, 0x83, 0x00, 0x90, 0x00}, 18, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_ASCII_NUMERIC, 8, 4, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | 
SC_PKCS15_PIN_FLAG_LOCAL, 3, 0x00, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE }, /* default PIN policy comes last: */ { { 0 }, 0, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_BCD, 16, 6, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0xFF, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE } }; typedef struct prdata_st { const char *id; char *label; unsigned int modulus_len; int usage; const char *path; int ref; const char *auth_id; int obj_flags; } prdata; #define USAGE_NONREP SC_PKCS15_PRKEY_USAGE_NONREPUDIATION #define USAGE_KE SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP #define USAGE_AUT SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP | \ SC_PKCS15_PRKEY_USAGE_SIGN prdata gemsafe_prkeys[] = { { "45", "DS key #1", 1024, USAGE_AUT, GEMSAFE_PATH, 0x03, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "46", "DS key #2", 1024, USAGE_AUT, GEMSAFE_PATH, 0x04, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "47", "DS key #3", 1024, USAGE_AUT, GEMSAFE_PATH, 0x05, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "48", "DS key #4", 1024, USAGE_AUT, GEMSAFE_PATH, 0x06, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "49", "DS key #5", 1024, USAGE_AUT, GEMSAFE_PATH, 0x07, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "50", "DS key #6", 1024, USAGE_AUT, GEMSAFE_PATH, 0x08, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "51", "DS key #7", 1024, USAGE_AUT, GEMSAFE_PATH, 0x09, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "52", "DS key #8", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0a, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "53", "DS key #9", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0b, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "54", "DS key #10", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0c, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "55", "DS key #11", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0d, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "56", "DS key #12", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0e, "01", 
SC_PKCS15_CO_FLAG_PRIVATE}, }; static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01 && i < gemsafe_cert_max) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. 
*/ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. 
*/ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; } static int gemsafe_detect_card( sc_pkcs15_card_t *p15card) { if (strcmp(p15card->card->name, "GemSAFE V1")) return SC_ERROR_WRONG_CARD; return SC_SUCCESS; } static int sc_pkcs15emu_gemsafeV1_init( sc_pkcs15_card_t *p15card) { int r; unsigned int i; struct sc_path path; struct sc_file *file = NULL; struct sc_card *card = p15card->card; struct sc_apdu apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_log(p15card->card->ctx, "Setting pkcs15 parameters"); if (p15card->tokeninfo->label) free(p15card->tokeninfo->label); p15card->tokeninfo->label = malloc(strlen(APPLET_NAME) + 1); if (!p15card->tokeninfo->label) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->label, APPLET_NAME); if (p15card->tokeninfo->serial_number) free(p15card->tokeninfo->serial_number); p15card->tokeninfo->serial_number = malloc(strlen(DRIVER_SERIAL_NUMBER) + 1); if (!p15card->tokeninfo->serial_number) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->serial_number, DRIVER_SERIAL_NUMBER); /* the GemSAFE applet version number */ sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xca, 0xdf, 0x03); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); /* Manual says Le=0x05, but should be 0x08 to return full version number */ apdu.le = 0x08; apdu.lc = 0; apdu.datalen = 0; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 != 0x90 || apdu.sw2 != 0x00) return SC_ERROR_INTERNAL; if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* the manufacturer ID, in this case GemPlus */ if (p15card->tokeninfo->manufacturer_id) free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = malloc(strlen(MANU_ID) + 1); if (!p15card->tokeninfo->manufacturer_id) return SC_ERROR_INTERNAL; 
strcpy(p15card->tokeninfo->manufacturer_id, MANU_ID); /* determine allocated key containers and length of certificates */ r = gemsafe_get_cert_len(card); if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* set certs */ sc_log(p15card->card->ctx, "Setting certificates"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; if (gemsafe_cert[i].label == NULL) continue; sc_format_path(gemsafe_cert[i].path, &path); sc_pkcs15_format_id(gemsafe_cert[i].id, &p15Id); path.index = gemsafe_cert[i].index; path.count = gemsafe_cert[i].count; sc_pkcs15emu_add_cert(p15card, SC_PKCS15_TYPE_CERT_X509, gemsafe_cert[i].authority, &path, &p15Id, gemsafe_cert[i].label, gemsafe_cert[i].obj_flags); } /* set gemsafe_pin */ sc_log(p15card->card->ctx, "Setting PIN"); for (i=0; i < gemsafe_pin_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; sc_pkcs15_format_id(gemsafe_pin[i].id, &p15Id); sc_format_path(gemsafe_pin[i].path, &path); if (gemsafe_pin[i].atr_len == 0 || (gemsafe_pin[i].atr_len == p15card->card->atr.len && memcmp(p15card->card->atr.value, gemsafe_pin[i].atr, p15card->card->atr.len) == 0)) { sc_pkcs15emu_add_pin(p15card, &p15Id, gemsafe_pin[i].label, &path, gemsafe_pin[i].ref, gemsafe_pin[i].type, gemsafe_pin[i].minlen, gemsafe_pin[i].maxlen, gemsafe_pin[i].flags, gemsafe_pin[i].tries_left, gemsafe_pin[i].pad_char, gemsafe_pin[i].obj_flags); break; } }; /* set private keys */ sc_log(p15card->card->ctx, "Setting private keys"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id, authId, *pauthId; struct sc_path path; int key_ref = 0x03; if (gemsafe_prkeys[i].label == NULL) continue; sc_pkcs15_format_id(gemsafe_prkeys[i].id, &p15Id); if (gemsafe_prkeys[i].auth_id) { sc_pkcs15_format_id(gemsafe_prkeys[i].auth_id, &authId); pauthId = &authId; } else pauthId = NULL; sc_format_path(gemsafe_prkeys[i].path, &path); /* * The key ref may be different for different sites; * by adding flags=n where the low order 4 bits can be * the 
key ref we can force it. */ if ( p15card->card->flags & 0x0F) { key_ref = p15card->card->flags & 0x0F; sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL, "Overriding key_ref %d with %d\n", gemsafe_prkeys[i].ref, key_ref); } else key_ref = gemsafe_prkeys[i].ref; sc_pkcs15emu_add_prkey(p15card, &p15Id, gemsafe_prkeys[i].label, SC_PKCS15_TYPE_PRKEY_RSA, gemsafe_prkeys[i].modulus_len, gemsafe_prkeys[i].usage, &path, key_ref, pauthId, gemsafe_prkeys[i].obj_flags); } /* select the application DF */ sc_log(p15card->card->ctx, "Selecting application DF"); sc_format_path(GEMSAFE_APP_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* set the application DF */ if (p15card->file_app) free(p15card->file_app); p15card->file_app = file; return SC_SUCCESS; } int sc_pkcs15emu_gemsafeV1_init_ex( sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK) return sc_pkcs15emu_gemsafeV1_init(p15card); else { int r = gemsafe_detect_card(p15card); if (r) return SC_ERROR_WRONG_CARD; return sc_pkcs15emu_gemsafeV1_init(p15card); } } static sc_pkcs15_df_t * sc_pkcs15emu_get_df(sc_pkcs15_card_t *p15card, unsigned int type) { sc_pkcs15_df_t *df; sc_file_t *file; int created = 0; while (1) { for (df = p15card->df_list; df; df = df->next) { if (df->type == type) { if (created) df->enumerated = 1; return df; } } assert(created == 0); file = sc_file_new(); if (!file) return NULL; sc_format_path("11001101", &file->path); sc_pkcs15_add_df(p15card, type, &file->path); sc_file_free(file); created++; } } static int sc_pkcs15emu_add_object(sc_pkcs15_card_t *p15card, int type, const char *label, void *data, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_object_t *obj; int df_type; obj = calloc(1, sizeof(*obj)); obj->type = type; obj->data = data; if (label) strncpy(obj->label, label, sizeof(obj->label)-1); obj->flags = obj_flags; if (auth_id) obj->auth_id = 
*auth_id; switch (type & SC_PKCS15_TYPE_CLASS_MASK) { case SC_PKCS15_TYPE_AUTH: df_type = SC_PKCS15_AODF; break; case SC_PKCS15_TYPE_PRKEY: df_type = SC_PKCS15_PRKDF; break; case SC_PKCS15_TYPE_PUBKEY: df_type = SC_PKCS15_PUKDF; break; case SC_PKCS15_TYPE_CERT: df_type = SC_PKCS15_CDF; break; default: sc_log(p15card->card->ctx, "Unknown PKCS15 object type %d", type); free(obj); return SC_ERROR_INVALID_ARGUMENTS; } obj->df = sc_pkcs15emu_get_df(p15card, df_type); sc_pkcs15_add_object(p15card, obj); return 0; } static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags) { sc_pkcs15_auth_info_t *info; info = calloc(1, sizeof(*info)); if (!info) LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); info->auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; info->auth_method = SC_AC_CHV; info->auth_id = *id; info->attrs.pin.min_length = min_length; info->attrs.pin.max_length = max_length; info->attrs.pin.stored_length = max_length; info->attrs.pin.type = type; info->attrs.pin.reference = ref; info->attrs.pin.flags = flags; info->attrs.pin.pad_char = pad_char; info->tries_left = tries_left; info->logged_in = SC_PIN_STATE_UNKNOWN; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, SC_PKCS15_TYPE_AUTH_PIN, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags) { sc_pkcs15_cert_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->authority = authority; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const 
sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_prkey_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->modulus_length = modulus_length; info->usage = usage; info->native = 1; info->access_flags = SC_PKCS15_PRKEY_ACCESS_SENSITIVE | SC_PKCS15_PRKEY_ACCESS_ALWAYSSENSITIVE | SC_PKCS15_PRKEY_ACCESS_NEVEREXTRACTABLE | SC_PKCS15_PRKEY_ACCESS_LOCAL; info->key_reference = ref; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, auth_id, obj_flags); } /* SC_IMPLEMENT_DRIVER_VERSION("0.9.4") */
static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. 
*/ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. */ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; }
static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01 && i < gemsafe_cert_max) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. 
*/ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. */ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; }
{'added': [(211, '\twhile (ibuf[ind] == 0x01 && i < gemsafe_cert_max) {')], 'deleted': [(211, '\twhile (ibuf[ind] == 0x01) {')]}
1
1
455
3,267
85
594
20
https://github.com/OpenSC/OpenSC
CVE-2018-16391
CWE-119
256
futex.c
C
futex_wait_requeue_pi
/* * Fast Userspace Mutexes (which I call "Futexes!"). * (C) Rusty Russell, IBM 2002 * * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar * (C) Copyright 2003 Red Hat Inc, All Rights Reserved * * Removed page pinning, fix privately mapped COW pages and other cleanups * (C) Copyright 2003, 2004 Jamie Lokier * * Robust futex support started by Ingo Molnar * (C) Copyright 2006 Red Hat Inc, All Rights Reserved * Thanks to Thomas Gleixner for suggestions, analysis and fixes. * * PI-futex support started by Ingo Molnar and Thomas Gleixner * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * * PRIVATE futexes by Eric Dumazet * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com> * * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com> * Copyright (C) IBM Corporation, 2009 * Thanks to Thomas Gleixner for conceptual design and careful reviews. * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. * * "The futexes are also cursed." * "But they come in a choice of three flavours!" * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <linux/poll.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/jhash.h> #include <linux/init.h> #include <linux/futex.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/signal.h> #include <linux/module.h> #include <linux/magic.h> #include <linux/pid.h> #include <linux/nsproxy.h> #include <asm/futex.h> #include "rtmutex_common.h" int __read_mostly futex_cmpxchg_enabled; #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) /* * Priority Inheritance state: */ struct futex_pi_state { /* * list of 'owned' pi_state instances - these have to be * cleaned up in do_exit() if the task exits prematurely: */ struct list_head list; /* * The PI object: */ struct rt_mutex pi_mutex; struct task_struct *owner; atomic_t refcount; union futex_key key; }; /** * struct futex_q - The hashed futex queue entry, one per waiting task * @task: the task waiting on the futex * @lock_ptr: the hash bucket lock * @key: the key the futex is hashed on * @pi_state: optional priority inheritance state * @rt_waiter: rt_waiter storage for use with requeue_pi * @requeue_pi_key: the requeue_pi target futex key * @bitset: bitset for the optional bitmasked wakeup * * We use this hashed waitqueue, instead of a normal wait_queue_t, so * we can wake only the relevant ones (hashed queues may be shared). * * A futex_q has a woken state, just like tasks have TASK_RUNNING. * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. * The order of wakup is always to make the first condition true, then * the second. * * PI futexes are typically woken before they are removed from the hash list via * the rt_mutex code. See unqueue_me_pi(). 
*/ struct futex_q { struct plist_node list; struct task_struct *task; spinlock_t *lock_ptr; union futex_key key; struct futex_pi_state *pi_state; struct rt_mutex_waiter *rt_waiter; union futex_key *requeue_pi_key; u32 bitset; }; /* * Hash buckets are shared by all the futex_keys that hash to the same * location. Each key may have multiple futex_q structures, one for each task * waiting on a futex. */ struct futex_hash_bucket { spinlock_t lock; struct plist_head chain; }; static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS]; /* * We hash on the keys returned from get_futex_key (see below). */ static struct futex_hash_bucket *hash_futex(union futex_key *key) { u32 hash = jhash2((u32*)&key->both.word, (sizeof(key->both.word)+sizeof(key->both.ptr))/4, key->both.offset); return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)]; } /* * Return 1 if two futex_keys are equal, 0 otherwise. */ static inline int match_futex(union futex_key *key1, union futex_key *key2) { return (key1 && key2 && key1->both.word == key2->both.word && key1->both.ptr == key2->both.ptr && key1->both.offset == key2->both.offset); } /* * Take a reference to the resource addressed by a key. * Can be called while holding spinlocks. * */ static void get_futex_key_refs(union futex_key *key) { if (!key->both.ptr) return; switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: atomic_inc(&key->shared.inode->i_count); break; case FUT_OFF_MMSHARED: atomic_inc(&key->private.mm->mm_count); break; } } /* * Drop a reference to the resource addressed by a key. * The hash bucket spinlock must not be held. 
*/ static void drop_futex_key_refs(union futex_key *key) { if (!key->both.ptr) { /* If we're here then we tried to put a key we failed to get */ WARN_ON_ONCE(1); return; } switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: iput(key->shared.inode); break; case FUT_OFF_MMSHARED: mmdrop(key->private.mm); break; } } /** * get_futex_key() - Get parameters which are the keys for a futex * @uaddr: virtual address of the futex * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED * @key: address where result is stored. * * Returns a negative error code or 0 * The key words are stored in *key on success. * * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode, * offset_within_page). For private mappings, it's (uaddr, current->mm). * We can usually work out the index without swapping in the page. * * lock_page() might sleep, the caller should not hold a spinlock. */ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page; int err; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; address -= key->both.offset; /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs * virtual address, we dont even have to find the underlying vma. 
* Note : We do have to check 'uaddr' is a valid user address, * but access_ok() should be faster than find_vma() */ if (!fshared) { if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) return -EFAULT; key->private.mm = mm; key->private.address = address; get_futex_key_refs(key); return 0; } again: err = get_user_pages_fast(address, 1, 1, &page); if (err < 0) return err; page = compound_head(page); lock_page(page); if (!page->mapping) { unlock_page(page); put_page(page); goto again; } /* * Private mappings are handled in a simple way. * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. */ if (PageAnon(page)) { key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; } else { key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.inode = page->mapping->host; key->shared.pgoff = page->index; } get_futex_key_refs(key); unlock_page(page); put_page(page); return 0; } static inline void put_futex_key(int fshared, union futex_key *key) { drop_futex_key_refs(key); } /** * fault_in_user_writeable() - Fault in user address and verify RW access * @uaddr: pointer to faulting user space address * * Slow path to fixup the fault we just took in the atomic write * access to @uaddr. * * We have no generic implementation of a non destructive write to the * user address. We know that we faulted in the atomic pagefault * disabled section so we can as well avoid the #PF overhead by * calling get_user_pages() right away. */ static int fault_in_user_writeable(u32 __user *uaddr) { struct mm_struct *mm = current->mm; int ret; down_read(&mm->mmap_sem); ret = get_user_pages(current, mm, (unsigned long)uaddr, 1, 1, 0, NULL, NULL); up_read(&mm->mmap_sem); return ret < 0 ? 
ret : 0; } /** * futex_top_waiter() - Return the highest priority waiter on a futex * @hb: the hash bucket the futex_q's reside in * @key: the futex key (to distinguish it from other futex futex_q's) * * Must be called with the hb lock held. */ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key) { struct futex_q *this; plist_for_each_entry(this, &hb->chain, list) { if (match_futex(&this->key, key)) return this; } return NULL; } static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval) { u32 curval; pagefault_disable(); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); pagefault_enable(); return curval; } static int get_futex_value_locked(u32 *dest, u32 __user *from) { int ret; pagefault_disable(); ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); pagefault_enable(); return ret ? -EFAULT : 0; } /* * PI code: */ static int refill_pi_state_cache(void) { struct futex_pi_state *pi_state; if (likely(current->pi_state_cache)) return 0; pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); if (!pi_state) return -ENOMEM; INIT_LIST_HEAD(&pi_state->list); /* pi_mutex gets initialized later */ pi_state->owner = NULL; atomic_set(&pi_state->refcount, 1); pi_state->key = FUTEX_KEY_INIT; current->pi_state_cache = pi_state; return 0; } static struct futex_pi_state * alloc_pi_state(void) { struct futex_pi_state *pi_state = current->pi_state_cache; WARN_ON(!pi_state); current->pi_state_cache = NULL; return pi_state; } static void free_pi_state(struct futex_pi_state *pi_state) { if (!atomic_dec_and_test(&pi_state->refcount)) return; /* * If pi_state->owner is NULL, the owner is most probably dying * and has cleaned up the pi_state already */ if (pi_state->owner) { raw_spin_lock_irq(&pi_state->owner->pi_lock); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); } if (current->pi_state_cache) kfree(pi_state); else { /* * 
pi_state->list is already empty. * clear pi_state->owner. * refcount is at 0 - put it back to 1. */ pi_state->owner = NULL; atomic_set(&pi_state->refcount, 1); current->pi_state_cache = pi_state; } } /* * Look up the task based on what TID userspace gave us. * We dont trust it. */ static struct task_struct * futex_find_get_task(pid_t pid) { struct task_struct *p; rcu_read_lock(); p = find_task_by_vpid(pid); if (p) get_task_struct(p); rcu_read_unlock(); return p; } /* * This task is holding PI mutexes at exit time => bad. * Kernel cleans up PI-state, but userspace is likely hosed. * (Robust-futex cleanup is separate and might save the day for userspace.) */ void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; struct futex_hash_bucket *hb; union futex_key key = FUTEX_KEY_INIT; if (!futex_cmpxchg_enabled) return; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: */ raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; hb = hash_futex(&key); raw_spin_unlock_irq(&curr->pi_lock); spin_lock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); /* * We dropped the pi-lock, so re-check whether this * task still owns the PI-state: */ if (head->next != next) { spin_unlock(&hb->lock); continue; } WARN_ON(pi_state->owner != curr); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); pi_state->owner = NULL; raw_spin_unlock_irq(&curr->pi_lock); rt_mutex_unlock(&pi_state->pi_mutex); spin_unlock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); } raw_spin_unlock_irq(&curr->pi_lock); } static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; struct plist_head 
*head; struct task_struct *p; pid_t pid = uval & FUTEX_TID_MASK; head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { if (match_futex(&this->key, key)) { /* * Another waiter already exists - bump up * the refcount and return its pi_state: */ pi_state = this->pi_state; /* * Userspace might have messed up non PI and PI futexes */ if (unlikely(!pi_state)) return -EINVAL; WARN_ON(!atomic_read(&pi_state->refcount)); /* * When pi_state->owner is NULL then the owner died * and another waiter is on the fly. pi_state->owner * is fixed up by the task which acquires * pi_state->rt_mutex. * * We do not check for pid == 0 which can happen when * the owner died and robust_list_exit() cleared the * TID. */ if (pid && pi_state->owner) { /* * Bail out if user space manipulated the * futex value. */ if (pid != task_pid_vnr(pi_state->owner)) return -EINVAL; } atomic_inc(&pi_state->refcount); *ps = pi_state; return 0; } } /* * We are the first waiter - try to look up the real owner and attach * the new pi_state to it, but bail out when TID = 0 */ if (!pid) return -ESRCH; p = futex_find_get_task(pid); if (!p) return -ESRCH; /* * We need to look at the task state flags to figure out, * whether the task is exiting. To protect against the do_exit * change of the task flags, we do this protected by * p->pi_lock: */ raw_spin_lock_irq(&p->pi_lock); if (unlikely(p->flags & PF_EXITING)) { /* * The task is on the way out. When PF_EXITPIDONE is * set, we know that the task has finished the * cleanup: */ int ret = (p->flags & PF_EXITPIDONE) ? 
-ESRCH : -EAGAIN; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); return ret; } pi_state = alloc_pi_state(); /* * Initialize the pi_mutex in locked state and make 'p' * the owner of it: */ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); /* Store the key for possible exit cleanups: */ pi_state->key = *key; WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); *ps = pi_state; return 0; } /** * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex * @uaddr: the pi futex user address * @hb: the pi futex hash bucket * @key: the futex key associated with uaddr and hb * @ps: the pi_state pointer where we store the result of the * lookup * @task: the task to perform the atomic lock work for. This will * be "current" except in the case of requeue pi. * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Returns: * 0 - ready to wait * 1 - acquired the lock * <0 - error * * The hb->lock and futex_key refs shall be held by the caller. */ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { int lock_taken, ret, ownerdied = 0; u32 uval, newval, curval; retry: ret = lock_taken = 0; /* * To avoid races, we attempt to take the lock here again * (by doing a 0 -> TID atomic cmpxchg), while holding all * the locks. It will most likely not succeed. */ newval = task_pid_vnr(task); if (set_waiters) newval |= FUTEX_WAITERS; curval = cmpxchg_futex_value_locked(uaddr, 0, newval); if (unlikely(curval == -EFAULT)) return -EFAULT; /* * Detect deadlocks. */ if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))) return -EDEADLK; /* * Surprise - we got the lock. 
Just return to userspace: */ if (unlikely(!curval)) return 1; uval = curval; /* * Set the FUTEX_WAITERS flag, so the owner will know it has someone * to wake at the next unlock. */ newval = curval | FUTEX_WAITERS; /* * There are two cases, where a futex might have no owner (the * owner TID is 0): OWNER_DIED. We take over the futex in this * case. We also do an unconditional take over, when the owner * of the futex died. * * This is safe as we are protected by the hash bucket lock ! */ if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { /* Keep the OWNER_DIED bit */ newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task); ownerdied = 0; lock_taken = 1; } curval = cmpxchg_futex_value_locked(uaddr, uval, newval); if (unlikely(curval == -EFAULT)) return -EFAULT; if (unlikely(curval != uval)) goto retry; /* * We took the lock due to owner died take over. */ if (unlikely(lock_taken)) return 1; /* * We dont have the lock. Look up the PI state (or create it if * we are the first waiter): */ ret = lookup_pi_state(uval, hb, key, ps); if (unlikely(ret)) { switch (ret) { case -ESRCH: /* * No owner found for this futex. Check if the * OWNER_DIED bit is set to figure out whether * this is a robust futex or not. */ if (get_futex_value_locked(&curval, uaddr)) return -EFAULT; /* * We simply start over in case of a robust * futex. The code above will take the futex * and return happy. */ if (curval & FUTEX_OWNER_DIED) { ownerdied = 1; goto retry; } default: break; } } return ret; } /* * The hash bucket lock must be held when this is called. * Afterwards, the futex_q must not be accessed. */ static void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; /* * We set q->lock_ptr = NULL _before_ we wake up the task. If * a non futex wake up happens on another CPU then the task * might exit and p would dereference a non existing task * struct. Prevent this by holding a reference on p across the * wake up. 
*/ get_task_struct(p); plist_del(&q->list, &q->list.plist); /* * The waiting task can free the futex_q as soon as * q->lock_ptr = NULL is written, without taking any locks. A * memory barrier is required here to prevent the following * store to lock_ptr from getting ahead of the plist_del. */ smp_wmb(); q->lock_ptr = NULL; wake_up_state(p, TASK_NORMAL); put_task_struct(p); } static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) { struct task_struct *new_owner; struct futex_pi_state *pi_state = this->pi_state; u32 curval, newval; if (!pi_state) return -EINVAL; /* * If current does not own the pi_state then the futex is * inconsistent and user space fiddled with the futex value. */ if (pi_state->owner != current) return -EINVAL; raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); /* * This happens when we have stolen the lock and the original * pending owner did not enqueue itself back on the rt_mutex. * Thats not a tragedy. We know that way, that a lock waiter * is on the fly. We make the futex_q waiter the pending owner. */ if (!new_owner) new_owner = this->task; /* * We pass it to the next owner. (The WAITERS bit is always * kept enabled while there is PI state around. We must also * preserve the owner died bit.) 
 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		/* New owner TID, with WAITERS set so unlock enters the kernel. */
		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			/* User space changed the futex word under us. */
			ret = -EINVAL;
		if (ret) {
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	/* Move the pi_state bookkeeping from the old owner to the new one. */
	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

/*
 * Unlock path when there is no waiter: clear the futex word, provided it
 * still contains @uval.  Returns 0 on success, -EAGAIN when the value
 * changed under us, or -EFAULT when the user space access faulted.
 */
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit has not to be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 *
 * The two bucket locks are always taken in ascending address order, so
 * concurrent double_lock_hb() callers cannot deadlock against each other.
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

/* Drop both bucket locks; hb1 == hb2 means only a single lock is held. */
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
*/ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) { struct futex_hash_bucket *hb; struct futex_q *this, *next; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; int ret; if (!bitset) return -EINVAL; ret = get_futex_key(uaddr, fshared, &key); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); spin_lock(&hb->lock); head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; break; } /* Check if one of the bits is set in both bitsets */ if (!(this->bitset & bitset)) continue; wake_futex(this); if (++ret >= nr_wake) break; } } spin_unlock(&hb->lock); put_futex_key(fshared, &key); out: return ret; } /* * Wake up all waiters hashed on the physical page that is mapped * to this virtual address: */ static int futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, int nr_wake, int nr_wake2, int op) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; struct futex_hash_bucket *hb1, *hb2; struct plist_head *head; struct futex_q *this, *next; int ret, op_ret; retry: ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out_put_key1; hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: double_lock_hb(hb1, hb2); op_ret = futex_atomic_op_inuser(op, uaddr2); if (unlikely(op_ret < 0)) { double_unlock_hb(hb1, hb2); #ifndef CONFIG_MMU /* * we don't get EFAULT from MMU faults if we don't have an MMU, * but we might get them from range checking */ ret = op_ret; goto out_put_keys; #endif if (unlikely(op_ret != -EFAULT)) { ret = op_ret; goto out_put_keys; } ret = fault_in_user_writeable(uaddr2); if (ret) goto out_put_keys; if (!fshared) goto retry_private; put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); goto retry; } head = &hb1->chain; plist_for_each_entry_safe(this, next, head, list) { 
if (match_futex (&this->key, &key1)) { wake_futex(this); if (++ret >= nr_wake) break; } } if (op_ret > 0) { head = &hb2->chain; op_ret = 0; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key2)) { wake_futex(this); if (++op_ret >= nr_wake2) break; } } ret += op_ret; } double_unlock_hb(hb1, hb2); out_put_keys: put_futex_key(fshared, &key2); out_put_key1: put_futex_key(fshared, &key1); out: return ret; } /** * requeue_futex() - Requeue a futex_q from one hb to another * @q: the futex_q to requeue * @hb1: the source hash_bucket * @hb2: the target hash_bucket * @key2: the new key for the requeued futex_q */ static inline void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key2) { /* * If key1 and key2 hash to the same bucket, no need to * requeue. */ if (likely(&hb1->chain != &hb2->chain)) { plist_del(&q->list, &hb1->chain); plist_add(&q->list, &hb2->chain); q->lock_ptr = &hb2->lock; #ifdef CONFIG_DEBUG_PI_LIST q->list.plist.spinlock = &hb2->lock; #endif } get_futex_key_refs(key2); q->key = *key2; } /** * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue * @q: the futex_q * @key: the key of the requeue target futex * @hb: the hash_bucket of the requeue target futex * * During futex_requeue, with requeue_pi=1, it is possible to acquire the * target futex if it is uncontended or via a lock steal. Set the futex_q key * to the requeue target futex so the waiter can detect the wakeup on the right * futex, but remove it from the hb and NULL the rt_waiter so it can detect * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock * to protect access to the pi_state to fixup the owner later. Must be called * with both q->lock_ptr and hb->lock held. 
*/ static inline void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, struct futex_hash_bucket *hb) { get_futex_key_refs(key); q->key = *key; WARN_ON(plist_node_empty(&q->list)); plist_del(&q->list, &q->list.plist); WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; q->lock_ptr = &hb->lock; #ifdef CONFIG_DEBUG_PI_LIST q->list.plist.spinlock = &hb->lock; #endif wake_up_state(q->task, TASK_NORMAL); } /** * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter * @pifutex: the user address of the to futex * @hb1: the from futex hash bucket, must be locked by the caller * @hb2: the to futex hash bucket, must be locked by the caller * @key1: the from futex key * @key2: the to futex key * @ps: address to store the pi_state pointer * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Try and get the lock on behalf of the top waiter if we can do it atomically. * Wake the top waiter if we succeed. If the caller specified set_waiters, * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. * hb1 and hb2 must be held by the caller. * * Returns: * 0 - failed to acquire the lock atomicly * 1 - acquired the lock * <0 - error */ static int futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key1, union futex_key *key2, struct futex_pi_state **ps, int set_waiters) { struct futex_q *top_waiter = NULL; u32 curval; int ret; if (get_futex_value_locked(&curval, pifutex)) return -EFAULT; /* * Find the top_waiter and determine if there are additional waiters. * If the caller intends to requeue more than 1 waiter to pifutex, * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, * as we have means to handle the possible fault. If not, don't set * the bit unecessarily as it will force the subsequent unlock to enter * the kernel. */ top_waiter = futex_top_waiter(hb1, key1); /* There are no waiters, nothing for us to do. 
*/ if (!top_waiter) return 0; /* Ensure we requeue to the expected futex. */ if (!match_futex(top_waiter->requeue_pi_key, key2)) return -EINVAL; /* * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in * the contended case or if set_waiters is 1. The pi_state is returned * in ps in contended cases. */ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, set_waiters); if (ret == 1) requeue_pi_wake_futex(top_waiter, key2, hb2); return ret; } /** * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 * uaddr1: source futex user address * uaddr2: target futex user address * nr_wake: number of waiters to wake (must be 1 for requeue_pi) * nr_requeue: number of waiters to requeue (0-INT_MAX) * requeue_pi: if we are attempting to requeue from a non-pi futex to a * pi futex (pi to pi requeue is not supported) * * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire * uaddr2 atomically on behalf of the top waiter. * * Returns: * >=0 - on success, the number of tasks requeued or woken * <0 - on error */ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; int drop_count = 0, task_count = 0, ret; struct futex_pi_state *pi_state = NULL; struct futex_hash_bucket *hb1, *hb2; struct plist_head *head1; struct futex_q *this, *next; u32 curval2; if (requeue_pi) { /* * requeue_pi requires a pi_state, try to allocate it now * without any locks in case it fails. */ if (refill_pi_state_cache()) return -ENOMEM; /* * requeue_pi must wake as many tasks as it can, up to nr_wake * + nr_requeue, since it acquires the rt_mutex prior to * returning to userspace, so as to not leave the rt_mutex with * waiters and no owner. However, second and third wake-ups * cannot be predicted as they involve race conditions with the * first wake and a fault while looking up the pi_state. 
Both * pthread_cond_signal() and pthread_cond_broadcast() should * use nr_wake=1. */ if (nr_wake != 1) return -EINVAL; } retry: if (pi_state != NULL) { /* * We will have to lookup the pi_state again, so free this one * to keep the accounting correct. */ free_pi_state(pi_state); pi_state = NULL; } ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out_put_key1; hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: double_lock_hb(hb1, hb2); if (likely(cmpval != NULL)) { u32 curval; ret = get_futex_value_locked(&curval, uaddr1); if (unlikely(ret)) { double_unlock_hb(hb1, hb2); ret = get_user(curval, uaddr1); if (ret) goto out_put_keys; if (!fshared) goto retry_private; put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); goto retry; } if (curval != *cmpval) { ret = -EAGAIN; goto out_unlock; } } if (requeue_pi && (task_count - nr_wake < nr_requeue)) { /* * Attempt to acquire uaddr2 and wake the top waiter. If we * intend to requeue waiters, force setting the FUTEX_WAITERS * bit. We force this here where we are able to easily handle * faults rather in the requeue loop below. */ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, &key2, &pi_state, nr_requeue); /* * At this point the top_waiter has either taken uaddr2 or is * waiting on it. If the former, then the pi_state will not * exist yet, look it up one more time to ensure we have a * reference to it. */ if (ret == 1) { WARN_ON(pi_state); drop_count++; task_count++; ret = get_futex_value_locked(&curval2, uaddr2); if (!ret) ret = lookup_pi_state(curval2, hb2, &key2, &pi_state); } switch (ret) { case 0: break; case -EFAULT: double_unlock_hb(hb1, hb2); put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); ret = fault_in_user_writeable(uaddr2); if (!ret) goto retry; goto out; case -EAGAIN: /* The owner was exiting, try again. 
*/ double_unlock_hb(hb1, hb2); put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); cond_resched(); goto retry; default: goto out_unlock; } } head1 = &hb1->chain; plist_for_each_entry_safe(this, next, head1, list) { if (task_count - nr_wake >= nr_requeue) break; if (!match_futex(&this->key, &key1)) continue; /* * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops. */ if ((requeue_pi && !this->rt_waiter) || (!requeue_pi && this->rt_waiter)) { ret = -EINVAL; break; } /* * Wake nr_wake waiters. For requeue_pi, if we acquired the * lock, we already woke the top_waiter. If not, it will be * woken by futex_unlock_pi(). */ if (++task_count <= nr_wake && !requeue_pi) { wake_futex(this); continue; } /* Ensure we requeue to the expected futex for requeue_pi. */ if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { ret = -EINVAL; break; } /* * Requeue nr_requeue waiters and possibly one more in the case * of requeue_pi if we couldn't acquire the lock atomically. */ if (requeue_pi) { /* Prepare the waiter to take the rt_mutex. */ atomic_inc(&pi_state->refcount); this->pi_state = pi_state; ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, this->rt_waiter, this->task, 1); if (ret == 1) { /* We got the lock. */ requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; free_pi_state(pi_state); goto out_unlock; } } requeue_futex(this, hb1, hb2, &key2); drop_count++; } out_unlock: double_unlock_hb(hb1, hb2); /* * drop_futex_key_refs() must be called outside the spinlocks. During * the requeue we moved futex_q's from the hash bucket at key1 to the * one at key2 and updated their key pointer. We no longer need to * hold the references to key1. 
 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	/* On success report how many tasks were woken plus requeued. */
	return ret ? ret : task_count;
}

/*
 * Look up the hash bucket for q->key, take a key reference and return
 * with the bucket lock held.  The key must be already stored in q->key.
 */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

/* Undo queue_lock(): drop the bucket lock and the key reference. */
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me(). The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.spinlock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller.
 * A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *   1 - if the futex_q was still queued (and we removed/unqueued it)
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();	/* compiler barrier: force a fresh read of q->lock_ptr */
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		/* PI futexes must be removed via unqueue_me_pi() instead. */
		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);

	drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
*/ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *newowner, int fshared) { u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; struct task_struct *oldowner = pi_state->owner; u32 uval, curval, newval; int ret; /* Owner died? */ if (!pi_state->owner) newtid |= FUTEX_OWNER_DIED; /* * We are here either because we stole the rtmutex from the * pending owner or we are the pending owner which failed to * get the rtmutex. We have to replace the pending owner TID * in the user space variable. This must be atomic as we have * to preserve the owner died bit here. * * Note: We write the user space value _before_ changing the pi_state * because we can fault here. Imagine swapped out pages or a fork * that marked all the anonymous memory readonly for cow. * * Modifying pi_state _before_ the user space value would * leave the pi_state in an inconsistent state when we fault * here, because we need to drop the hash bucket lock to * handle the fault. This might be observed in the PID check * in lookup_pi_state. */ retry: if (get_futex_value_locked(&uval, uaddr)) goto handle_fault; while (1) { newval = (uval & FUTEX_OWNER_DIED) | newtid; curval = cmpxchg_futex_value_locked(uaddr, uval, newval); if (curval == -EFAULT) goto handle_fault; if (curval == uval) break; uval = curval; } /* * We fixed up user space. Now we need to fix the pi_state * itself. */ if (pi_state->owner != NULL) { raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); } pi_state->owner = newowner; raw_spin_lock_irq(&newowner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &newowner->pi_state_list); raw_spin_unlock_irq(&newowner->pi_lock); return 0; /* * To handle the page fault we need to drop the hash bucket * lock here. 
That gives the other task (either the pending * owner itself or the task which stole the rtmutex) the * chance to try the fixup of the pi_state. So once we are * back from handling the fault we need to check the pi_state * after reacquiring the hash bucket lock and before trying to * do another fixup. When the fixup has been done already we * simply return. */ handle_fault: spin_unlock(q->lock_ptr); ret = fault_in_user_writeable(uaddr); spin_lock(q->lock_ptr); /* * Check if someone else fixed it for us: */ if (pi_state->owner != oldowner) return 0; if (ret) return ret; goto retry; } /* * In case we must use restart_block to restart a futex_wait, * we encode in the 'flags' shared capability */ #define FLAGS_SHARED 0x01 #define FLAGS_CLOCKRT 0x02 #define FLAGS_HAS_TIMEOUT 0x04 static long futex_wait_restart(struct restart_block *restart); /** * fixup_owner() - Post lock pi_state and corner case management * @uaddr: user address of the futex * @fshared: whether the futex is shared (1) or not (0) * @q: futex_q (contains pi_state and access to the rt_mutex) * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) * * After attempting to lock an rt_mutex, this function is called to cleanup * the pi_state owner as well as handle race conditions that may allow us to * acquire the lock. Must be called with the hb lock held. * * Returns: * 1 - success, lock taken * 0 - success, lock not taken * <0 - on error (-EFAULT) */ static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q, int locked) { struct task_struct *owner; int ret = 0; if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: */ if (q->pi_state->owner != current) ret = fixup_pi_state_owner(uaddr, q, current, fshared); goto out; } /* * Catch the rare case, where the lock was released when we were on the * way back before we locked the hash bucket. 
 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourself from the
		 * rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late. We can access the rt_mutex_owner without
		 * locking, as the other task is now blocked on the hash bucket
		 * lock. Fix the state up.
		 */
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner, nor the pending owner, of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		/* timeout->task == NULL signals "already expired" below */
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @fshared:	whether the futex is shared (1) or not (0)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Returns:
 *  0 - uaddr contains val and hb has been locked
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
			    struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 */
retry:
	q->key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q->key);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	/* Atomic (pagefault-disabled) read under the hb lock. */
	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(q, *hb);

		/* Fault the page in outside the lock, then retry. */
		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(q, *hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(fshared, &q->key);
	return ret;
}

/*
 * futex_wait() - wait on uaddr until woken, timed out, or signalled.
 * Returns 0 on wakeup, -ETIMEDOUT, -EWOULDBLOCK (value mismatch), or a
 * restart/errno code on signal.
 */
static int futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q;
	int ret;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
	q.rt_waiter = NULL;
	q.requeue_pi_key = NULL;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/* Prepare to wait on uaddr. */
	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	if (!unqueue_me(&q))
		goto out_put_key;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out_put_key;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current)) {
		put_futex_key(fshared, &q.key);
		goto retry;
	}

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out_put_key;

	/*
	 * With an absolute timeout we must restart via the restart
	 * block so the original deadline is preserved.
	 */
	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = FLAGS_HAS_TIMEOUT;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

/*
 * Restart handler for an interrupted futex_wait() with an absolute
 * timeout: re-issue the wait with the parameters saved in the
 * restart block.
 */
static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	int fshared = 0;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = 1;
	return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
				restart->futex.bitset,
				restart->futex.flags & FLAGS_CLOCKRT);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, int fshared,
			 int detect, ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

	q.pi_state = NULL;
	q.rt_waiter = NULL;
	q.requeue_pi_key = NULL;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			put_futex_key(fshared, &q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, fshared, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that.  If it
	 * acquired the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(&q, hb);

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(&q, hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!fshared)
		goto retry_private;

	put_futex_key(fshared, &q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, int fshared)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	u32 uval;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
		return -EPERM;

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED))
		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);

	if (unlikely(uval == -EFAULT))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == task_pid_vnr(current)))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex (&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

out:
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was original enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex.  If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller.  Must be
 * called with the hb lock held.
* * Returns * 0 - no early wakeup detected * <0 - -ETIMEDOUT or -ERESTARTNOINTR */ static inline int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, struct futex_q *q, union futex_key *key2, struct hrtimer_sleeper *timeout) { int ret = 0; /* * With the hb lock held, we avoid races while we process the wakeup. * We only need to hold hb (and not hb2) to ensure atomicity as the * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. * It can't be requeued from uaddr2 to something else since we don't * support a PI aware source futex for requeue. */ if (!match_futex(&q->key, key2)) { WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); /* * We were woken prior to requeue by a timeout or a signal. * Unqueue the futex_q and determine which it was. */ plist_del(&q->list, &q->list.plist); /* Handle spurious wakeups gracefully */ ret = -EWOULDBLOCK; if (timeout && !timeout->task) ret = -ETIMEDOUT; else if (signal_pending(current)) ret = -ERESTARTNOINTR; } return ret; } /** * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 * @uaddr: the futex we initially wait on (non-pi) * @fshared: whether the futexes are shared (1) or not (0). They must be * the same type, no requeueing from private to shared, etc. * @val: the expected value of uaddr * @abs_time: absolute timeout * @bitset: 32 bit wakeup bitset set by userspace, defaults to all * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) * @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and * complete the acquisition of the rt_mutex prior to returning to userspace. * This ensures the rt_mutex maintains an owner when it has waiters; without * one, the pi logic wouldn't know which task to boost/deboost, if there was a * need to. 
* * We call schedule in futex_wait_queue_me() when we enqueue and return there * via the following: * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() * 2) wakeup on uaddr2 after a requeue * 3) signal * 4) timeout * * If 3, cleanup and return -ERESTARTNOINTR. * * If 2, we may then block on trying to take the rt_mutex and return via: * 5) successful lock * 6) signal * 7) timeout * 8) other lock acquisition failure * * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). * * If 4 or 7, we cleanup and return with -ETIMEDOUT. * * Returns: * 0 - On success * <0 - On error */ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, u32 val, ktime_t *abs_time, u32 bitset, int clockrt, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2; struct futex_q q; int res, ret; if (!bitset) return -EINVAL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } /* * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ debug_rt_mutex_init_waiter(&rt_waiter); rt_waiter.task = NULL; key2 = FUTEX_KEY_INIT; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out; q.pi_state = NULL; q.bitset = bitset; q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; /* Prepare to wait on uaddr. */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out_key2; /* Queue the futex_q, drop the hb lock, wait for wakeup. 
*/ futex_wait_queue_me(hb, &q, to); spin_lock(&hb->lock); ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); spin_unlock(&hb->lock); if (ret) goto out_put_keys; /* * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup * race with the atomic proxy lock acquition by the requeue code. */ /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current, fshared); spin_unlock(q.lock_ptr); } } else { /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. */ WARN_ON(!&q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr2, fshared, &q, !ret); /* * If fixup_owner() returned an error, proprogate that. If it * acquired the lock, clear -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } /* * If fixup_pi_state_owner() faulted and was unable to handle the * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { if (rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. 
Save the overhead of the restart and return * -EWOULDBLOCK directly. */ ret = -EWOULDBLOCK; } out_put_keys: put_futex_key(fshared, &q.key); out_key2: put_futex_key(fshared, &key2); out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; } /* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. * * Implementation: user-space maintains a per-thread list of locks it * is holding. Upon do_exit(), the kernel carefully walks this list, * and marks all locks that are owned by this thread with the * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is * always manipulated with the lock held, so the list is private and * per-thread. Userspace also maintains a per-thread 'list_op_pending' * field, to allow the kernel to clean up if the thread dies after * acquiring the lock, but just before it could have added itself to * the list. There can only be one such pending lock. */ /** * sys_set_robust_list() - Set the robust-futex list head of a task * @head: pointer to the list-head * @len: length of the list-head, as userspace expects */ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, size_t, len) { if (!futex_cmpxchg_enabled) return -ENOSYS; /* * The kernel knows only one size for now: */ if (unlikely(len != sizeof(*head))) return -EINVAL; current->robust_list = head; return 0; } /** * sys_get_robust_list() - Get the robust-futex list head of a task * @pid: pid of the process [zero for current task] * @head_ptr: pointer to a list-head pointer, the kernel fills it in * @len_ptr: pointer to a length field, the kernel fills in the header size */ SYSCALL_DEFINE3(get_robust_list, int, pid, struct robust_list_head __user * __user *, head_ptr, size_t __user *, len_ptr) { struct robust_list_head __user *head; unsigned long ret; const struct cred *cred = current_cred(), *pcred; if (!futex_cmpxchg_enabled) return -ENOSYS; if (!pid) head = current->robust_list; else { struct 
task_struct *p;

		ret = -ESRCH;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		pcred = __task_cred(p);
		/* Only same-uid tasks or CAP_SYS_PTRACE may inspect. */
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		rcu_read_unlock();
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, nval, mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

		if (nval == -EFAULT)
			return -1;

		/* Somebody changed the value concurrently - retry. */
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

/*
 * do_futex() - top-level dispatch for all futex operations.
 * Decodes the cmd from op and routes to the appropriate handler.
 */
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int clockrt, ret = -ENOSYS;
	int cmd = op & FUTEX_CMD_MASK;
	int fshared = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		fshared = 1;

	clockrt = op & FUTEX_CLOCK_REALTIME;
	if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
		return -ENOSYS;

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through - FUTEX_WAIT is WAIT_BITSET with full mask */
	case FUTEX_WAIT_BITSET:
		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
		break;
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through - FUTEX_WAKE is WAKE_BITSET with full mask */
	case FUTEX_WAKE_BITSET:
		ret = futex_wake(uaddr, fshared, val, val3);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
				    0);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
		break;
	case FUTEX_UNLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_unlock_pi(uaddr, fshared);
		break;
	case FUTEX_TRYLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
		break;
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
					    clockrt, uaddr2);
		break;
	case FUTEX_CMP_REQUEUE_PI:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
				    1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}

/*
 * sys_futex: decode the timespec/val2 overloads of utime and hand
 * off to do_futex().
 */
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		/* FUTEX_WAIT takes a relative timeout - make it absolute. */
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

/*
 * Boot-time initialization: probe for a working
 * futex_atomic_cmpxchg_inatomic() and set up the hash buckets.
 */
static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non functional ones will return
	 * -ENOSYS.
	 */
	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
	if (curval == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);
/* * Fast Userspace Mutexes (which I call "Futexes!"). * (C) Rusty Russell, IBM 2002 * * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar * (C) Copyright 2003 Red Hat Inc, All Rights Reserved * * Removed page pinning, fix privately mapped COW pages and other cleanups * (C) Copyright 2003, 2004 Jamie Lokier * * Robust futex support started by Ingo Molnar * (C) Copyright 2006 Red Hat Inc, All Rights Reserved * Thanks to Thomas Gleixner for suggestions, analysis and fixes. * * PI-futex support started by Ingo Molnar and Thomas Gleixner * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * * PRIVATE futexes by Eric Dumazet * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com> * * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com> * Copyright (C) IBM Corporation, 2009 * Thanks to Thomas Gleixner for conceptual design and careful reviews. * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. * * "The futexes are also cursed." * "But they come in a choice of three flavours!" * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

/* Set at boot when the arch has a working futex cmpxchg (see futex_init). */
int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 *
 * Returns a negative error code or 0
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we dont even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	if (err < 0)
		return err;

	page = compound_head(page);
	lock_page(page);
	if (!page->mapping) {
		/* Page was truncated/migrated under us - retry. */
		unlock_page(page);
		put_page(page);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}

static inline
void put_futex_key(int fshared, union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, (unsigned long)uaddr,
			     1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

/*
 * Atomic (pagefault-disabled) cmpxchg on a user futex word.
 */
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

/*
 * Atomic (pagefault-disabled) read of a user futex word.
 */
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */

/* Pre-allocate a pi_state so the locking slowpath never needs GFP_KERNEL. */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

/* Hand out the cached pi_state; refill_pi_state_cache() must have run. */
static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

/* Drop a reference; on the last one, detach from the owner and recycle. */
static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 *
pi_state->list is already empty. * clear pi_state->owner. * refcount is at 0 - put it back to 1. */ pi_state->owner = NULL; atomic_set(&pi_state->refcount, 1); current->pi_state_cache = pi_state; } } /* * Look up the task based on what TID userspace gave us. * We dont trust it. */ static struct task_struct * futex_find_get_task(pid_t pid) { struct task_struct *p; rcu_read_lock(); p = find_task_by_vpid(pid); if (p) get_task_struct(p); rcu_read_unlock(); return p; } /* * This task is holding PI mutexes at exit time => bad. * Kernel cleans up PI-state, but userspace is likely hosed. * (Robust-futex cleanup is separate and might save the day for userspace.) */ void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; struct futex_hash_bucket *hb; union futex_key key = FUTEX_KEY_INIT; if (!futex_cmpxchg_enabled) return; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: */ raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; hb = hash_futex(&key); raw_spin_unlock_irq(&curr->pi_lock); spin_lock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); /* * We dropped the pi-lock, so re-check whether this * task still owns the PI-state: */ if (head->next != next) { spin_unlock(&hb->lock); continue; } WARN_ON(pi_state->owner != curr); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); pi_state->owner = NULL; raw_spin_unlock_irq(&curr->pi_lock); rt_mutex_unlock(&pi_state->pi_mutex); spin_unlock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); } raw_spin_unlock_irq(&curr->pi_lock); } static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; struct plist_head 
*head; struct task_struct *p; pid_t pid = uval & FUTEX_TID_MASK; head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { if (match_futex(&this->key, key)) { /* * Another waiter already exists - bump up * the refcount and return its pi_state: */ pi_state = this->pi_state; /* * Userspace might have messed up non PI and PI futexes */ if (unlikely(!pi_state)) return -EINVAL; WARN_ON(!atomic_read(&pi_state->refcount)); /* * When pi_state->owner is NULL then the owner died * and another waiter is on the fly. pi_state->owner * is fixed up by the task which acquires * pi_state->rt_mutex. * * We do not check for pid == 0 which can happen when * the owner died and robust_list_exit() cleared the * TID. */ if (pid && pi_state->owner) { /* * Bail out if user space manipulated the * futex value. */ if (pid != task_pid_vnr(pi_state->owner)) return -EINVAL; } atomic_inc(&pi_state->refcount); *ps = pi_state; return 0; } } /* * We are the first waiter - try to look up the real owner and attach * the new pi_state to it, but bail out when TID = 0 */ if (!pid) return -ESRCH; p = futex_find_get_task(pid); if (!p) return -ESRCH; /* * We need to look at the task state flags to figure out, * whether the task is exiting. To protect against the do_exit * change of the task flags, we do this protected by * p->pi_lock: */ raw_spin_lock_irq(&p->pi_lock); if (unlikely(p->flags & PF_EXITING)) { /* * The task is on the way out. When PF_EXITPIDONE is * set, we know that the task has finished the * cleanup: */ int ret = (p->flags & PF_EXITPIDONE) ? 
-ESRCH : -EAGAIN; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); return ret; } pi_state = alloc_pi_state(); /* * Initialize the pi_mutex in locked state and make 'p' * the owner of it: */ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); /* Store the key for possible exit cleanups: */ pi_state->key = *key; WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); *ps = pi_state; return 0; } /** * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex * @uaddr: the pi futex user address * @hb: the pi futex hash bucket * @key: the futex key associated with uaddr and hb * @ps: the pi_state pointer where we store the result of the * lookup * @task: the task to perform the atomic lock work for. This will * be "current" except in the case of requeue pi. * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Returns: * 0 - ready to wait * 1 - acquired the lock * <0 - error * * The hb->lock and futex_key refs shall be held by the caller. */ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { int lock_taken, ret, ownerdied = 0; u32 uval, newval, curval; retry: ret = lock_taken = 0; /* * To avoid races, we attempt to take the lock here again * (by doing a 0 -> TID atomic cmpxchg), while holding all * the locks. It will most likely not succeed. */ newval = task_pid_vnr(task); if (set_waiters) newval |= FUTEX_WAITERS; curval = cmpxchg_futex_value_locked(uaddr, 0, newval); if (unlikely(curval == -EFAULT)) return -EFAULT; /* * Detect deadlocks. */ if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))) return -EDEADLK; /* * Surprise - we got the lock. 
Just return to userspace: */ if (unlikely(!curval)) return 1; uval = curval; /* * Set the FUTEX_WAITERS flag, so the owner will know it has someone * to wake at the next unlock. */ newval = curval | FUTEX_WAITERS; /* * There are two cases, where a futex might have no owner (the * owner TID is 0): OWNER_DIED. We take over the futex in this * case. We also do an unconditional take over, when the owner * of the futex died. * * This is safe as we are protected by the hash bucket lock ! */ if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { /* Keep the OWNER_DIED bit */ newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task); ownerdied = 0; lock_taken = 1; } curval = cmpxchg_futex_value_locked(uaddr, uval, newval); if (unlikely(curval == -EFAULT)) return -EFAULT; if (unlikely(curval != uval)) goto retry; /* * We took the lock due to owner died take over. */ if (unlikely(lock_taken)) return 1; /* * We dont have the lock. Look up the PI state (or create it if * we are the first waiter): */ ret = lookup_pi_state(uval, hb, key, ps); if (unlikely(ret)) { switch (ret) { case -ESRCH: /* * No owner found for this futex. Check if the * OWNER_DIED bit is set to figure out whether * this is a robust futex or not. */ if (get_futex_value_locked(&curval, uaddr)) return -EFAULT; /* * We simply start over in case of a robust * futex. The code above will take the futex * and return happy. */ if (curval & FUTEX_OWNER_DIED) { ownerdied = 1; goto retry; } default: break; } } return ret; } /* * The hash bucket lock must be held when this is called. * Afterwards, the futex_q must not be accessed. */ static void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; /* * We set q->lock_ptr = NULL _before_ we wake up the task. If * a non futex wake up happens on another CPU then the task * might exit and p would dereference a non existing task * struct. Prevent this by holding a reference on p across the * wake up. 
*/ get_task_struct(p); plist_del(&q->list, &q->list.plist); /* * The waiting task can free the futex_q as soon as * q->lock_ptr = NULL is written, without taking any locks. A * memory barrier is required here to prevent the following * store to lock_ptr from getting ahead of the plist_del. */ smp_wmb(); q->lock_ptr = NULL; wake_up_state(p, TASK_NORMAL); put_task_struct(p); } static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) { struct task_struct *new_owner; struct futex_pi_state *pi_state = this->pi_state; u32 curval, newval; if (!pi_state) return -EINVAL; /* * If current does not own the pi_state then the futex is * inconsistent and user space fiddled with the futex value. */ if (pi_state->owner != current) return -EINVAL; raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); /* * This happens when we have stolen the lock and the original * pending owner did not enqueue itself back on the rt_mutex. * Thats not a tragedy. We know that way, that a lock waiter * is on the fly. We make the futex_q waiter the pending owner. */ if (!new_owner) new_owner = this->task; /* * We pass it to the next owner. (The WAITERS bit is always * kept enabled while there is PI state around. We must also * preserve the owner died bit.) 
 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		/*
		 * Hand the futex word over to the new owner: new TID
		 * with FUTEX_WAITERS set, since PI state still exists.
		 */
		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			/* Userspace changed the futex word under us. */
			ret = -EINVAL;
		if (ret) {
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	/* Move the pi_state from the old owner's list to the new owner's. */
	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

/*
 * Fast unlock when there is no waiter: atomically clear the futex
 * word. Returns 0 on success, -EFAULT on a user access fault, or
 * -EAGAIN when the value changed (a waiter showed up concurrently).
 */
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit has not to be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 *
 * Bucket locks are always taken in ascending address order so that
 * two-bucket operations (wake_op, requeue) cannot deadlock.
 */
static inline void double_lock_hb(struct futex_hash_bucket *hb1,
				  struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	/* Both keys may hash to the same bucket; don't unlock it twice. */
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
*/ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) { struct futex_hash_bucket *hb; struct futex_q *this, *next; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; int ret; if (!bitset) return -EINVAL; ret = get_futex_key(uaddr, fshared, &key); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); spin_lock(&hb->lock); head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; break; } /* Check if one of the bits is set in both bitsets */ if (!(this->bitset & bitset)) continue; wake_futex(this); if (++ret >= nr_wake) break; } } spin_unlock(&hb->lock); put_futex_key(fshared, &key); out: return ret; } /* * Wake up all waiters hashed on the physical page that is mapped * to this virtual address: */ static int futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, int nr_wake, int nr_wake2, int op) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; struct futex_hash_bucket *hb1, *hb2; struct plist_head *head; struct futex_q *this, *next; int ret, op_ret; retry: ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out_put_key1; hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: double_lock_hb(hb1, hb2); op_ret = futex_atomic_op_inuser(op, uaddr2); if (unlikely(op_ret < 0)) { double_unlock_hb(hb1, hb2); #ifndef CONFIG_MMU /* * we don't get EFAULT from MMU faults if we don't have an MMU, * but we might get them from range checking */ ret = op_ret; goto out_put_keys; #endif if (unlikely(op_ret != -EFAULT)) { ret = op_ret; goto out_put_keys; } ret = fault_in_user_writeable(uaddr2); if (ret) goto out_put_keys; if (!fshared) goto retry_private; put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); goto retry; } head = &hb1->chain; plist_for_each_entry_safe(this, next, head, list) { 
if (match_futex (&this->key, &key1)) { wake_futex(this); if (++ret >= nr_wake) break; } } if (op_ret > 0) { head = &hb2->chain; op_ret = 0; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key2)) { wake_futex(this); if (++op_ret >= nr_wake2) break; } } ret += op_ret; } double_unlock_hb(hb1, hb2); out_put_keys: put_futex_key(fshared, &key2); out_put_key1: put_futex_key(fshared, &key1); out: return ret; } /** * requeue_futex() - Requeue a futex_q from one hb to another * @q: the futex_q to requeue * @hb1: the source hash_bucket * @hb2: the target hash_bucket * @key2: the new key for the requeued futex_q */ static inline void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key2) { /* * If key1 and key2 hash to the same bucket, no need to * requeue. */ if (likely(&hb1->chain != &hb2->chain)) { plist_del(&q->list, &hb1->chain); plist_add(&q->list, &hb2->chain); q->lock_ptr = &hb2->lock; #ifdef CONFIG_DEBUG_PI_LIST q->list.plist.spinlock = &hb2->lock; #endif } get_futex_key_refs(key2); q->key = *key2; } /** * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue * @q: the futex_q * @key: the key of the requeue target futex * @hb: the hash_bucket of the requeue target futex * * During futex_requeue, with requeue_pi=1, it is possible to acquire the * target futex if it is uncontended or via a lock steal. Set the futex_q key * to the requeue target futex so the waiter can detect the wakeup on the right * futex, but remove it from the hb and NULL the rt_waiter so it can detect * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock * to protect access to the pi_state to fixup the owner later. Must be called * with both q->lock_ptr and hb->lock held. 
*/ static inline void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, struct futex_hash_bucket *hb) { get_futex_key_refs(key); q->key = *key; WARN_ON(plist_node_empty(&q->list)); plist_del(&q->list, &q->list.plist); WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; q->lock_ptr = &hb->lock; #ifdef CONFIG_DEBUG_PI_LIST q->list.plist.spinlock = &hb->lock; #endif wake_up_state(q->task, TASK_NORMAL); } /** * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter * @pifutex: the user address of the to futex * @hb1: the from futex hash bucket, must be locked by the caller * @hb2: the to futex hash bucket, must be locked by the caller * @key1: the from futex key * @key2: the to futex key * @ps: address to store the pi_state pointer * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Try and get the lock on behalf of the top waiter if we can do it atomically. * Wake the top waiter if we succeed. If the caller specified set_waiters, * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. * hb1 and hb2 must be held by the caller. * * Returns: * 0 - failed to acquire the lock atomicly * 1 - acquired the lock * <0 - error */ static int futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key1, union futex_key *key2, struct futex_pi_state **ps, int set_waiters) { struct futex_q *top_waiter = NULL; u32 curval; int ret; if (get_futex_value_locked(&curval, pifutex)) return -EFAULT; /* * Find the top_waiter and determine if there are additional waiters. * If the caller intends to requeue more than 1 waiter to pifutex, * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, * as we have means to handle the possible fault. If not, don't set * the bit unecessarily as it will force the subsequent unlock to enter * the kernel. */ top_waiter = futex_top_waiter(hb1, key1); /* There are no waiters, nothing for us to do. 
*/ if (!top_waiter) return 0; /* Ensure we requeue to the expected futex. */ if (!match_futex(top_waiter->requeue_pi_key, key2)) return -EINVAL; /* * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in * the contended case or if set_waiters is 1. The pi_state is returned * in ps in contended cases. */ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, set_waiters); if (ret == 1) requeue_pi_wake_futex(top_waiter, key2, hb2); return ret; } /** * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 * uaddr1: source futex user address * uaddr2: target futex user address * nr_wake: number of waiters to wake (must be 1 for requeue_pi) * nr_requeue: number of waiters to requeue (0-INT_MAX) * requeue_pi: if we are attempting to requeue from a non-pi futex to a * pi futex (pi to pi requeue is not supported) * * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire * uaddr2 atomically on behalf of the top waiter. * * Returns: * >=0 - on success, the number of tasks requeued or woken * <0 - on error */ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; int drop_count = 0, task_count = 0, ret; struct futex_pi_state *pi_state = NULL; struct futex_hash_bucket *hb1, *hb2; struct plist_head *head1; struct futex_q *this, *next; u32 curval2; if (requeue_pi) { /* * requeue_pi requires a pi_state, try to allocate it now * without any locks in case it fails. */ if (refill_pi_state_cache()) return -ENOMEM; /* * requeue_pi must wake as many tasks as it can, up to nr_wake * + nr_requeue, since it acquires the rt_mutex prior to * returning to userspace, so as to not leave the rt_mutex with * waiters and no owner. However, second and third wake-ups * cannot be predicted as they involve race conditions with the * first wake and a fault while looking up the pi_state. 
Both * pthread_cond_signal() and pthread_cond_broadcast() should * use nr_wake=1. */ if (nr_wake != 1) return -EINVAL; } retry: if (pi_state != NULL) { /* * We will have to lookup the pi_state again, so free this one * to keep the accounting correct. */ free_pi_state(pi_state); pi_state = NULL; } ret = get_futex_key(uaddr1, fshared, &key1); if (unlikely(ret != 0)) goto out; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out_put_key1; hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); retry_private: double_lock_hb(hb1, hb2); if (likely(cmpval != NULL)) { u32 curval; ret = get_futex_value_locked(&curval, uaddr1); if (unlikely(ret)) { double_unlock_hb(hb1, hb2); ret = get_user(curval, uaddr1); if (ret) goto out_put_keys; if (!fshared) goto retry_private; put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); goto retry; } if (curval != *cmpval) { ret = -EAGAIN; goto out_unlock; } } if (requeue_pi && (task_count - nr_wake < nr_requeue)) { /* * Attempt to acquire uaddr2 and wake the top waiter. If we * intend to requeue waiters, force setting the FUTEX_WAITERS * bit. We force this here where we are able to easily handle * faults rather in the requeue loop below. */ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, &key2, &pi_state, nr_requeue); /* * At this point the top_waiter has either taken uaddr2 or is * waiting on it. If the former, then the pi_state will not * exist yet, look it up one more time to ensure we have a * reference to it. */ if (ret == 1) { WARN_ON(pi_state); drop_count++; task_count++; ret = get_futex_value_locked(&curval2, uaddr2); if (!ret) ret = lookup_pi_state(curval2, hb2, &key2, &pi_state); } switch (ret) { case 0: break; case -EFAULT: double_unlock_hb(hb1, hb2); put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); ret = fault_in_user_writeable(uaddr2); if (!ret) goto retry; goto out; case -EAGAIN: /* The owner was exiting, try again. 
*/ double_unlock_hb(hb1, hb2); put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); cond_resched(); goto retry; default: goto out_unlock; } } head1 = &hb1->chain; plist_for_each_entry_safe(this, next, head1, list) { if (task_count - nr_wake >= nr_requeue) break; if (!match_futex(&this->key, &key1)) continue; /* * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops. */ if ((requeue_pi && !this->rt_waiter) || (!requeue_pi && this->rt_waiter)) { ret = -EINVAL; break; } /* * Wake nr_wake waiters. For requeue_pi, if we acquired the * lock, we already woke the top_waiter. If not, it will be * woken by futex_unlock_pi(). */ if (++task_count <= nr_wake && !requeue_pi) { wake_futex(this); continue; } /* Ensure we requeue to the expected futex for requeue_pi. */ if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { ret = -EINVAL; break; } /* * Requeue nr_requeue waiters and possibly one more in the case * of requeue_pi if we couldn't acquire the lock atomically. */ if (requeue_pi) { /* Prepare the waiter to take the rt_mutex. */ atomic_inc(&pi_state->refcount); this->pi_state = pi_state; ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, this->rt_waiter, this->task, 1); if (ret == 1) { /* We got the lock. */ requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; free_pi_state(pi_state); goto out_unlock; } } requeue_futex(this, hb1, hb2, &key2); drop_count++; } out_unlock: double_unlock_hb(hb1, hb2); /* * drop_futex_key_refs() must be called outside the spinlocks. During * the requeue we moved futex_q's from the hash bucket at key1 to the * one at key2 and updated their key pointer. We no longer need to * hold the references to key1. 
 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	/* On success report how many tasks were woken or requeued. */
	return ret ? ret : task_count;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);
	/*
	 * Record the bucket lock in q->lock_ptr before taking it, so
	 * unqueue paths can find the right lock later.
	 */
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

/* Release the bucket lock taken by queue_lock() without queueing. */
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me().  The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.spinlock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
* * Returns: * 1 - if the futex_q was still queued (and we removed unqueued it) * 0 - if the futex_q was already removed by the waking thread */ static int unqueue_me(struct futex_q *q) { spinlock_t *lock_ptr; int ret = 0; /* In the common case we don't take the spinlock, which is nice. */ retry: lock_ptr = q->lock_ptr; barrier(); if (lock_ptr != NULL) { spin_lock(lock_ptr); /* * q->lock_ptr can change between reading it and * spin_lock(), causing us to take the wrong lock. This * corrects the race condition. * * Reasoning goes like this: if we have the wrong lock, * q->lock_ptr must have changed (maybe several times) * between reading it and the spin_lock(). It can * change again after the spin_lock() but only if it was * already changed before the spin_lock(). It cannot, * however, change back to the original value. Therefore * we can detect whether we acquired the correct lock. */ if (unlikely(lock_ptr != q->lock_ptr)) { spin_unlock(lock_ptr); goto retry; } WARN_ON(plist_node_empty(&q->list)); plist_del(&q->list, &q->list.plist); BUG_ON(q->pi_state); spin_unlock(lock_ptr); ret = 1; } drop_futex_key_refs(&q->key); return ret; } /* * PI futexes can not be requeued and must remove themself from the * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry * and dropped here. */ static void unqueue_me_pi(struct futex_q *q) { WARN_ON(plist_node_empty(&q->list)); plist_del(&q->list, &q->list.plist); BUG_ON(!q->pi_state); free_pi_state(q->pi_state); q->pi_state = NULL; spin_unlock(q->lock_ptr); } /* * Fixup the pi_state owner with the new owner. * * Must be called with hash bucket lock held and mm->sem held for non * private futexes. */ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *newowner, int fshared) { u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; struct task_struct *oldowner = pi_state->owner; u32 uval, curval, newval; int ret; /* Owner died? 
*/ if (!pi_state->owner) newtid |= FUTEX_OWNER_DIED; /* * We are here either because we stole the rtmutex from the * pending owner or we are the pending owner which failed to * get the rtmutex. We have to replace the pending owner TID * in the user space variable. This must be atomic as we have * to preserve the owner died bit here. * * Note: We write the user space value _before_ changing the pi_state * because we can fault here. Imagine swapped out pages or a fork * that marked all the anonymous memory readonly for cow. * * Modifying pi_state _before_ the user space value would * leave the pi_state in an inconsistent state when we fault * here, because we need to drop the hash bucket lock to * handle the fault. This might be observed in the PID check * in lookup_pi_state. */ retry: if (get_futex_value_locked(&uval, uaddr)) goto handle_fault; while (1) { newval = (uval & FUTEX_OWNER_DIED) | newtid; curval = cmpxchg_futex_value_locked(uaddr, uval, newval); if (curval == -EFAULT) goto handle_fault; if (curval == uval) break; uval = curval; } /* * We fixed up user space. Now we need to fix the pi_state * itself. */ if (pi_state->owner != NULL) { raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); raw_spin_unlock_irq(&pi_state->owner->pi_lock); } pi_state->owner = newowner; raw_spin_lock_irq(&newowner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &newowner->pi_state_list); raw_spin_unlock_irq(&newowner->pi_lock); return 0; /* * To handle the page fault we need to drop the hash bucket * lock here. That gives the other task (either the pending * owner itself or the task which stole the rtmutex) the * chance to try the fixup of the pi_state. So once we are * back from handling the fault we need to check the pi_state * after reacquiring the hash bucket lock and before trying to * do another fixup. When the fixup has been done already we * simply return. 
*/ handle_fault: spin_unlock(q->lock_ptr); ret = fault_in_user_writeable(uaddr); spin_lock(q->lock_ptr); /* * Check if someone else fixed it for us: */ if (pi_state->owner != oldowner) return 0; if (ret) return ret; goto retry; } /* * In case we must use restart_block to restart a futex_wait, * we encode in the 'flags' shared capability */ #define FLAGS_SHARED 0x01 #define FLAGS_CLOCKRT 0x02 #define FLAGS_HAS_TIMEOUT 0x04 static long futex_wait_restart(struct restart_block *restart); /** * fixup_owner() - Post lock pi_state and corner case management * @uaddr: user address of the futex * @fshared: whether the futex is shared (1) or not (0) * @q: futex_q (contains pi_state and access to the rt_mutex) * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) * * After attempting to lock an rt_mutex, this function is called to cleanup * the pi_state owner as well as handle race conditions that may allow us to * acquire the lock. Must be called with the hb lock held. * * Returns: * 1 - success, lock taken * 0 - success, lock not taken * <0 - on error (-EFAULT) */ static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q, int locked) { struct task_struct *owner; int ret = 0; if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: */ if (q->pi_state->owner != current) ret = fixup_pi_state_owner(uaddr, q, current, fshared); goto out; } /* * Catch the rare case, where the lock was released when we were on the * way back before we locked the hash bucket. */ if (q->pi_state->owner == current) { /* * Try to get the rt_mutex now. This might fail as some other * task acquired the rt_mutex after we removed ourself from the * rt_mutex waiters list. */ if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { locked = 1; goto out; } /* * pi_state is incorrect, some other task did a lock steal and * we returned due to timeout or signal without taking the * rt_mutex. Too late. 
We can access the rt_mutex_owner without * locking, as the other task is now blocked on the hash bucket * lock. Fix the state up. */ owner = rt_mutex_owner(&q->pi_state->pi_mutex); ret = fixup_pi_state_owner(uaddr, q, owner, fshared); goto out; } /* * Paranoia check. If we did not take the lock, then we should not be * the owner, nor the pending owner, of the rt_mutex. */ if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " "pi-state %p\n", ret, q->pi_state->pi_mutex.owner, q->pi_state->owner); out: return ret ? ret : locked; } /** * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal * @hb: the futex hash bucket, must be locked by the caller * @q: the futex_q to queue up on * @timeout: the prepared hrtimer_sleeper, or null for no timeout */ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, struct hrtimer_sleeper *timeout) { /* * The task state is guaranteed to be set before another task can * wake it. set_current_state() is implemented using set_mb() and * queue_me() calls spin_unlock() upon completion, both serializing * access to the hash list and forcing another memory barrier. */ set_current_state(TASK_INTERRUPTIBLE); queue_me(q, hb); /* Arm the timer */ if (timeout) { hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); if (!hrtimer_active(&timeout->timer)) timeout->task = NULL; } /* * If we have been removed from the hash list, then another task * has tried to wake us, and we can skip the call to schedule(). */ if (likely(!plist_node_empty(&q->list))) { /* * If the timer has already expired, current will already be * flagged for rescheduling. Only call schedule if there * is no timeout, or if it has yet to expire. 
 */
		if (!timeout || timeout->task)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @fshared:	whether the futex is shared (1) or not (0)
 * @q:	the associated futex_q
 * @hb:	storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
 * compare it with the expected value.  Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Returns:
 *  0 - uaddr contains val and hb has been locked
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
*/ retry: q->key = FUTEX_KEY_INIT; ret = get_futex_key(uaddr, fshared, &q->key); if (unlikely(ret != 0)) return ret; retry_private: *hb = queue_lock(q); ret = get_futex_value_locked(&uval, uaddr); if (ret) { queue_unlock(q, *hb); ret = get_user(uval, uaddr); if (ret) goto out; if (!fshared) goto retry_private; put_futex_key(fshared, &q->key); goto retry; } if (uval != val) { queue_unlock(q, *hb); ret = -EWOULDBLOCK; } out: if (ret) put_futex_key(fshared, &q->key); return ret; } static int futex_wait(u32 __user *uaddr, int fshared, u32 val, ktime_t *abs_time, u32 bitset, int clockrt) { struct hrtimer_sleeper timeout, *to = NULL; struct restart_block *restart; struct futex_hash_bucket *hb; struct futex_q q; int ret; if (!bitset) return -EINVAL; q.pi_state = NULL; q.bitset = bitset; q.rt_waiter = NULL; q.requeue_pi_key = NULL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } retry: /* * Prepare to wait on uaddr. On success, holds hb lock and increments * q.key refs. */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out; /* queue_me and wait for wakeup, timeout, or a signal. */ futex_wait_queue_me(hb, &q, to); /* If we were woken (and unqueued), we succeeded, whatever. */ ret = 0; /* unqueue_me() drops q.key ref */ if (!unqueue_me(&q)) goto out; ret = -ETIMEDOUT; if (to && !to->task) goto out; /* * We expect signal_pending(current), but we might be the * victim of a spurious wakeup as well. 
*/ if (!signal_pending(current)) goto retry; ret = -ERESTARTSYS; if (!abs_time) goto out; restart = &current_thread_info()->restart_block; restart->fn = futex_wait_restart; restart->futex.uaddr = (u32 *)uaddr; restart->futex.val = val; restart->futex.time = abs_time->tv64; restart->futex.bitset = bitset; restart->futex.flags = FLAGS_HAS_TIMEOUT; if (fshared) restart->futex.flags |= FLAGS_SHARED; if (clockrt) restart->futex.flags |= FLAGS_CLOCKRT; ret = -ERESTART_RESTARTBLOCK; out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; } static long futex_wait_restart(struct restart_block *restart) { u32 __user *uaddr = (u32 __user *)restart->futex.uaddr; int fshared = 0; ktime_t t, *tp = NULL; if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { t.tv64 = restart->futex.time; tp = &t; } restart->fn = do_no_restart_syscall; if (restart->futex.flags & FLAGS_SHARED) fshared = 1; return (long)futex_wait(uaddr, fshared, restart->futex.val, tp, restart->futex.bitset, restart->futex.flags & FLAGS_CLOCKRT); } /* * Userspace tried a 0 -> TID atomic transition of the futex value * and failed. The kernel side here does the whole locking operation: * if there are waiters then it will block, it does PI, etc. (Due to * races the kernel might see a 0 value of the futex too.) 
*/ static int futex_lock_pi(u32 __user *uaddr, int fshared, int detect, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; struct futex_hash_bucket *hb; struct futex_q q; int res, ret; if (refill_pi_state_cache()) return -ENOMEM; if (time) { to = &timeout; hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires(&to->timer, *time); } q.pi_state = NULL; q.rt_waiter = NULL; q.requeue_pi_key = NULL; retry: q.key = FUTEX_KEY_INIT; ret = get_futex_key(uaddr, fshared, &q.key); if (unlikely(ret != 0)) goto out; retry_private: hb = queue_lock(&q); ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); if (unlikely(ret)) { switch (ret) { case 1: /* We got the lock. */ ret = 0; goto out_unlock_put_key; case -EFAULT: goto uaddr_faulted; case -EAGAIN: /* * Task is exiting and we just wait for the * exit to complete. */ queue_unlock(&q, hb); put_futex_key(fshared, &q.key); cond_resched(); goto retry; default: goto out_unlock_put_key; } } /* * Only actually queue now that the atomic ops are done: */ queue_me(&q, hb); WARN_ON(!q.pi_state); /* * Block on the PI mutex: */ if (!trylock) ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); else { ret = rt_mutex_trylock(&q.pi_state->pi_mutex); /* Fixup the trylock return value: */ ret = ret ? 0 : -EWOULDBLOCK; } spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr, fshared, &q, !ret); /* * If fixup_owner() returned an error, proprogate that. If it acquired * the lock, clear our -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* * If fixup_owner() faulted and was unable to handle the fault, unlock * it and return the fault to userspace. 
*/ if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) rt_mutex_unlock(&q.pi_state->pi_mutex); /* Unqueue and drop the lock */ unqueue_me_pi(&q); goto out_put_key; out_unlock_put_key: queue_unlock(&q, hb); out_put_key: put_futex_key(fshared, &q.key); out: if (to) destroy_hrtimer_on_stack(&to->timer); return ret != -EINTR ? ret : -ERESTARTNOINTR; uaddr_faulted: queue_unlock(&q, hb); ret = fault_in_user_writeable(uaddr); if (ret) goto out_put_key; if (!fshared) goto retry_private; put_futex_key(fshared, &q.key); goto retry; } /* * Userspace attempted a TID -> 0 atomic transition, and failed. * This is the in-kernel slowpath: we look up the PI state (if any), * and do the rt-mutex unlock. */ static int futex_unlock_pi(u32 __user *uaddr, int fshared) { struct futex_hash_bucket *hb; struct futex_q *this, *next; u32 uval; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; int ret; retry: if (get_user(uval, uaddr)) return -EFAULT; /* * We release only a lock we actually own: */ if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) return -EPERM; ret = get_futex_key(uaddr, fshared, &key); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); spin_lock(&hb->lock); /* * To avoid races, try to do the TID -> 0 atomic transition * again. 
If it succeeds then we can return without waking * anyone else up: */ if (!(uval & FUTEX_OWNER_DIED)) uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); if (unlikely(uval == -EFAULT)) goto pi_faulted; /* * Rare case: we managed to release the lock atomically, * no need to wake anyone else up: */ if (unlikely(uval == task_pid_vnr(current))) goto out_unlock; /* * Ok, other tasks may need to be woken up - check waiters * and do the wakeup if necessary: */ head = &hb->chain; plist_for_each_entry_safe(this, next, head, list) { if (!match_futex (&this->key, &key)) continue; ret = wake_futex_pi(uaddr, uval, this); /* * The atomic access to the futex value * generated a pagefault, so retry the * user-access and the wakeup: */ if (ret == -EFAULT) goto pi_faulted; goto out_unlock; } /* * No waiters - kernel unlocks the futex: */ if (!(uval & FUTEX_OWNER_DIED)) { ret = unlock_futex_pi(uaddr, uval); if (ret == -EFAULT) goto pi_faulted; } out_unlock: spin_unlock(&hb->lock); put_futex_key(fshared, &key); out: return ret; pi_faulted: spin_unlock(&hb->lock); put_futex_key(fshared, &key); ret = fault_in_user_writeable(uaddr); if (!ret) goto retry; return ret; } /** * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex * @hb: the hash_bucket futex_q was original enqueued on * @q: the futex_q woken while waiting to be requeued * @key2: the futex_key of the requeue target futex * @timeout: the timeout associated with the wait (NULL if none) * * Detect if the task was woken on the initial futex as opposed to the requeue * target futex. If so, determine if it was a timeout or a signal that caused * the wakeup and return the appropriate error code to the caller. Must be * called with the hb lock held. 
* * Returns * 0 - no early wakeup detected * <0 - -ETIMEDOUT or -ERESTARTNOINTR */ static inline int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, struct futex_q *q, union futex_key *key2, struct hrtimer_sleeper *timeout) { int ret = 0; /* * With the hb lock held, we avoid races while we process the wakeup. * We only need to hold hb (and not hb2) to ensure atomicity as the * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. * It can't be requeued from uaddr2 to something else since we don't * support a PI aware source futex for requeue. */ if (!match_futex(&q->key, key2)) { WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); /* * We were woken prior to requeue by a timeout or a signal. * Unqueue the futex_q and determine which it was. */ plist_del(&q->list, &q->list.plist); /* Handle spurious wakeups gracefully */ ret = -EWOULDBLOCK; if (timeout && !timeout->task) ret = -ETIMEDOUT; else if (signal_pending(current)) ret = -ERESTARTNOINTR; } return ret; } /** * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 * @uaddr: the futex we initially wait on (non-pi) * @fshared: whether the futexes are shared (1) or not (0). They must be * the same type, no requeueing from private to shared, etc. * @val: the expected value of uaddr * @abs_time: absolute timeout * @bitset: 32 bit wakeup bitset set by userspace, defaults to all * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) * @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and * complete the acquisition of the rt_mutex prior to returning to userspace. * This ensures the rt_mutex maintains an owner when it has waiters; without * one, the pi logic wouldn't know which task to boost/deboost, if there was a * need to. 
* * We call schedule in futex_wait_queue_me() when we enqueue and return there * via the following: * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() * 2) wakeup on uaddr2 after a requeue * 3) signal * 4) timeout * * If 3, cleanup and return -ERESTARTNOINTR. * * If 2, we may then block on trying to take the rt_mutex and return via: * 5) successful lock * 6) signal * 7) timeout * 8) other lock acquisition failure * * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). * * If 4 or 7, we cleanup and return with -ETIMEDOUT. * * Returns: * 0 - On success * <0 - On error */ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, u32 val, ktime_t *abs_time, u32 bitset, int clockrt, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2; struct futex_q q; int res, ret; if (!bitset) return -EINVAL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } /* * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ debug_rt_mutex_init_waiter(&rt_waiter); rt_waiter.task = NULL; key2 = FUTEX_KEY_INIT; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out; q.pi_state = NULL; q.bitset = bitset; q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; /* * Prepare to wait on uaddr. On success, increments q.key (key1) ref * count. */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out_key2; /* Queue the futex_q, drop the hb lock, wait for wakeup. 
*/ futex_wait_queue_me(hb, &q, to); spin_lock(&hb->lock); ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); spin_unlock(&hb->lock); if (ret) goto out_put_keys; /* * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup * race with the atomic proxy lock acquisition by the requeue code. The * futex_requeue dropped our key1 reference and incremented our key2 * reference count. */ /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current, fshared); spin_unlock(q.lock_ptr); } } else { /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. */ WARN_ON(!&q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr2, fshared, &q, !ret); /* * If fixup_owner() returned an error, proprogate that. If it * acquired the lock, clear -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } /* * If fixup_pi_state_owner() faulted and was unable to handle the * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { if (rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. 
We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. Save the overhead of the restart and return * -EWOULDBLOCK directly. */ ret = -EWOULDBLOCK; } out_put_keys: put_futex_key(fshared, &q.key); out_key2: put_futex_key(fshared, &key2); out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; } /* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. * * Implementation: user-space maintains a per-thread list of locks it * is holding. Upon do_exit(), the kernel carefully walks this list, * and marks all locks that are owned by this thread with the * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is * always manipulated with the lock held, so the list is private and * per-thread. Userspace also maintains a per-thread 'list_op_pending' * field, to allow the kernel to clean up if the thread dies after * acquiring the lock, but just before it could have added itself to * the list. There can only be one such pending lock. 
*/ /** * sys_set_robust_list() - Set the robust-futex list head of a task * @head: pointer to the list-head * @len: length of the list-head, as userspace expects */ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, size_t, len) { if (!futex_cmpxchg_enabled) return -ENOSYS; /* * The kernel knows only one size for now: */ if (unlikely(len != sizeof(*head))) return -EINVAL; current->robust_list = head; return 0; } /** * sys_get_robust_list() - Get the robust-futex list head of a task * @pid: pid of the process [zero for current task] * @head_ptr: pointer to a list-head pointer, the kernel fills it in * @len_ptr: pointer to a length field, the kernel fills in the header size */ SYSCALL_DEFINE3(get_robust_list, int, pid, struct robust_list_head __user * __user *, head_ptr, size_t __user *, len_ptr) { struct robust_list_head __user *head; unsigned long ret; const struct cred *cred = current_cred(), *pcred; if (!futex_cmpxchg_enabled) return -ENOSYS; if (!pid) head = current->robust_list; else { struct task_struct *p; ret = -ESRCH; rcu_read_lock(); p = find_task_by_vpid(pid); if (!p) goto err_unlock; ret = -EPERM; pcred = __task_cred(p); if (cred->euid != pcred->euid && cred->euid != pcred->uid && !capable(CAP_SYS_PTRACE)) goto err_unlock; head = p->robust_list; rcu_read_unlock(); } if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(head, head_ptr); err_unlock: rcu_read_unlock(); return ret; } /* * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) { u32 uval, nval, mval; retry: if (get_user(uval, uaddr)) return -1; if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { /* * Ok, this dying thread is truly holding a futex * of interest. Set the OWNER_DIED bit atomically * via cmpxchg, and if the value had FUTEX_WAITERS * set, wake up a waiter (if any). 
(We have to do a * futex_wake() even if OWNER_DIED is already set - * to handle the rare but possible case of recursive * thread-death.) The rest of the cleanup is done in * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); if (nval == -EFAULT) return -1; if (nval != uval) goto retry; /* * Wake robust non-PI futexes here. The wakeup of * PI futexes happens in exit_pi_state(): */ if (!pi && (uval & FUTEX_WAITERS)) futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); } return 0; } /* * Fetch a robust-list pointer. Bit 0 signals PI futexes: */ static inline int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, int *pi) { unsigned long uentry; if (get_user(uentry, (unsigned long __user *)head)) return -EFAULT; *entry = (void __user *)(uentry & ~1UL); *pi = uentry & 1; return 0; } /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. 
*/ void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; unsigned long futex_offset; int rc; if (!futex_cmpxchg_enabled) return; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ if (fetch_robust_entry(&entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: */ if (get_user(futex_offset, &head->futex_offset)) return; /* * Fetch any possibly pending lock-add first, and handle it * if it exists: */ if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) return; next_entry = NULL; /* avoid warning with gcc */ while (entry != &head->list) { /* * Fetch the next entry in the list before calling * handle_futex_death: */ rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); /* * A pending lock might already be on the list, so * don't process it twice: */ if (entry != pending) if (handle_futex_death((void __user *)entry + futex_offset, curr, pi)) return; if (rc) return; entry = next_entry; pi = next_pi; /* * Avoid excessively long or circular lists: */ if (!--limit) break; cond_resched(); } if (pending) handle_futex_death((void __user *)pending + futex_offset, curr, pip); } long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { int clockrt, ret = -ENOSYS; int cmd = op & FUTEX_CMD_MASK; int fshared = 0; if (!(op & FUTEX_PRIVATE_FLAG)) fshared = 1; clockrt = op & FUTEX_CLOCK_REALTIME; if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) return -ENOSYS; switch (cmd) { case FUTEX_WAIT: val3 = FUTEX_BITSET_MATCH_ANY; case FUTEX_WAIT_BITSET: ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt); break; case FUTEX_WAKE: val3 = FUTEX_BITSET_MATCH_ANY; case FUTEX_WAKE_BITSET: ret = futex_wake(uaddr, fshared, val, val3); break; case FUTEX_REQUEUE: ret = 
futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0); break; case FUTEX_CMP_REQUEUE: ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3, 0); break; case FUTEX_WAKE_OP: ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); break; case FUTEX_LOCK_PI: if (futex_cmpxchg_enabled) ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); break; case FUTEX_UNLOCK_PI: if (futex_cmpxchg_enabled) ret = futex_unlock_pi(uaddr, fshared); break; case FUTEX_TRYLOCK_PI: if (futex_cmpxchg_enabled) ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); break; case FUTEX_WAIT_REQUEUE_PI: val3 = FUTEX_BITSET_MATCH_ANY; ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3, clockrt, uaddr2); break; case FUTEX_CMP_REQUEUE_PI: ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3, 1); break; default: ret = -ENOSYS; } return ret; } SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, struct timespec __user *, utime, u32 __user *, uaddr2, u32, val3) { struct timespec ts; ktime_t t, *tp = NULL; u32 val2 = 0; int cmd = op & FUTEX_CMD_MASK; if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || cmd == FUTEX_WAIT_BITSET || cmd == FUTEX_WAIT_REQUEUE_PI)) { if (copy_from_user(&ts, utime, sizeof(ts)) != 0) return -EFAULT; if (!timespec_valid(&ts)) return -EINVAL; t = timespec_to_ktime(ts); if (cmd == FUTEX_WAIT) t = ktime_add_safe(ktime_get(), t); tp = &t; } /* * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*. * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. */ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) val2 = (u32) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } static int __init futex_init(void) { u32 curval; int i; /* * This will fail and we want it. Some arch implementations do * runtime detection of the futex_atomic_cmpxchg_inatomic() * functionality. We want to know that before we call in any * of the complex code paths. 
Also we want to prevent * registration of robust lists in that case. NULL is * guaranteed to fault and we get -EFAULT on functional * implementation, the non functional ones will return * -ENOSYS. */ curval = cmpxchg_futex_value_locked(NULL, 0, 0); if (curval == -EFAULT) futex_cmpxchg_enabled = 1; for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); spin_lock_init(&futex_queues[i].lock); } return 0; } __initcall(futex_init);
static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, u32 val, ktime_t *abs_time, u32 bitset, int clockrt, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2; struct futex_q q; int res, ret; if (!bitset) return -EINVAL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } /* * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ debug_rt_mutex_init_waiter(&rt_waiter); rt_waiter.task = NULL; key2 = FUTEX_KEY_INIT; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out; q.pi_state = NULL; q.bitset = bitset; q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; /* Prepare to wait on uaddr. */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out_key2; /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); spin_lock(&hb->lock); ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); spin_unlock(&hb->lock); if (ret) goto out_put_keys; /* * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup * race with the atomic proxy lock acquition by the requeue code. */ /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. 
*/ if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current, fshared); spin_unlock(q.lock_ptr); } } else { /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. */ WARN_ON(!&q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr2, fshared, &q, !ret); /* * If fixup_owner() returned an error, proprogate that. If it * acquired the lock, clear -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } /* * If fixup_pi_state_owner() faulted and was unable to handle the * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { if (rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. Save the overhead of the restart and return * -EWOULDBLOCK directly. */ ret = -EWOULDBLOCK; } out_put_keys: put_futex_key(fshared, &q.key); out_key2: put_futex_key(fshared, &key2); out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; }
static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, u32 val, ktime_t *abs_time, u32 bitset, int clockrt, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2; struct futex_q q; int res, ret; if (!bitset) return -EINVAL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } /* * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ debug_rt_mutex_init_waiter(&rt_waiter); rt_waiter.task = NULL; key2 = FUTEX_KEY_INIT; ret = get_futex_key(uaddr2, fshared, &key2); if (unlikely(ret != 0)) goto out; q.pi_state = NULL; q.bitset = bitset; q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; /* * Prepare to wait on uaddr. On success, increments q.key (key1) ref * count. */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out_key2; /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); spin_lock(&hb->lock); ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); spin_unlock(&hb->lock); if (ret) goto out_put_keys; /* * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup * race with the atomic proxy lock acquisition by the requeue code. The * futex_requeue dropped our key1 reference and incremented our key2 * reference count. */ /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. 
*/ if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current, fshared); spin_unlock(q.lock_ptr); } } else { /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. */ WARN_ON(!&q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ res = fixup_owner(uaddr2, fshared, &q, !ret); /* * If fixup_owner() returned an error, proprogate that. If it * acquired the lock, clear -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0; /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } /* * If fixup_pi_state_owner() faulted and was unable to handle the * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { if (rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. Save the overhead of the restart and return * -EWOULDBLOCK directly. */ ret = -EWOULDBLOCK; } out_put_keys: put_futex_key(fshared, &q.key); out_key2: put_futex_key(fshared, &key2); out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; }
{'added': [(1811, '\t/*'), (1812, '\t * Prepare to wait on uaddr. On success, holds hb lock and increments'), (1813, '\t * q.key refs.'), (1814, '\t */'), (1824, '\t/* unqueue_me() drops q.key ref */'), (1826, '\t\tgoto out;'), (1829, '\t\tgoto out;'), (1835, '\tif (!signal_pending(current))'), (1840, '\t\tgoto out;'), (2235, '\t/*'), (2236, '\t * Prepare to wait on uaddr. On success, increments q.key (key1) ref'), (2237, '\t * count.'), (2238, '\t */'), (2256, '\t * race with the atomic proxy lock acquisition by the requeue code. The'), (2257, '\t * futex_requeue dropped our key1 reference and incremented our key2'), (2258, '\t * reference count.')], 'deleted': [(1366, '\tget_futex_key_refs(&q->key);'), (1378, '\tdrop_futex_key_refs(&q->key);'), (1483, ''), (1484, '\tdrop_futex_key_refs(&q->key);'), (1815, '\t/* Prepare to wait on uaddr. */'), (1826, '\t\tgoto out_put_key;'), (1829, '\t\tgoto out_put_key;'), (1835, '\tif (!signal_pending(current)) {'), (1836, '\t\tput_futex_key(fshared, &q.key);'), (1838, '\t}'), (1842, '\t\tgoto out_put_key;'), (1859, 'out_put_key:'), (1860, '\tput_futex_key(fshared, &q.key);'), (2239, '\t/* Prepare to wait on uaddr. */'), (2257, '\t * race with the atomic proxy lock acquition by the requeue code.')]}
16
15
1,378
8,411
75
491
16
https://github.com/torvalds/linux
CVE-2014-0205
CWE-119
1,495
ntlm_message.c
C
ntlm_print_message_fields
/** * WinPR: Windows Portable Runtime * NTLM Security Package (Message) * * Copyright 2011-2014 Marc-Andre Moreau <marcandre.moreau@gmail.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "ntlm.h" #include "../sspi.h" #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/stream.h> #include <winpr/sysinfo.h> #include "ntlm_compute.h" #include "ntlm_message.h" #include "../log.h" #define TAG WINPR_TAG("sspi.NTLM") static const char NTLM_SIGNATURE[8] = { 'N', 'T', 'L', 'M', 'S', 'S', 'P', '\0' }; static const char* const NTLM_NEGOTIATE_STRINGS[] = { "NTLMSSP_NEGOTIATE_56", "NTLMSSP_NEGOTIATE_KEY_EXCH", "NTLMSSP_NEGOTIATE_128", "NTLMSSP_RESERVED1", "NTLMSSP_RESERVED2", "NTLMSSP_RESERVED3", "NTLMSSP_NEGOTIATE_VERSION", "NTLMSSP_RESERVED4", "NTLMSSP_NEGOTIATE_TARGET_INFO", "NTLMSSP_REQUEST_NON_NT_SESSION_KEY", "NTLMSSP_RESERVED5", "NTLMSSP_NEGOTIATE_IDENTIFY", "NTLMSSP_NEGOTIATE_EXTENDED_SESSION_SECURITY", "NTLMSSP_RESERVED6", "NTLMSSP_TARGET_TYPE_SERVER", "NTLMSSP_TARGET_TYPE_DOMAIN", "NTLMSSP_NEGOTIATE_ALWAYS_SIGN", "NTLMSSP_RESERVED7", "NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED", "NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED", "NTLMSSP_NEGOTIATE_ANONYMOUS", "NTLMSSP_RESERVED8", "NTLMSSP_NEGOTIATE_NTLM", "NTLMSSP_RESERVED9", "NTLMSSP_NEGOTIATE_LM_KEY", "NTLMSSP_NEGOTIATE_DATAGRAM", "NTLMSSP_NEGOTIATE_SEAL", "NTLMSSP_NEGOTIATE_SIGN", "NTLMSSP_RESERVED10", "NTLMSSP_REQUEST_TARGET", "NTLMSSP_NEGOTIATE_OEM", 
"NTLMSSP_NEGOTIATE_UNICODE" }; void ntlm_print_negotiate_flags(UINT32 flags) { int i; const char* str; WLog_INFO(TAG, "negotiateFlags \"0x%08"PRIX32"\"", flags); for (i = 31; i >= 0; i--) { if ((flags >> i) & 1) { str = NTLM_NEGOTIATE_STRINGS[(31 - i)]; WLog_INFO(TAG, "\t%s (%d),", str, (31 - i)); } } } int ntlm_read_message_header(wStream* s, NTLM_MESSAGE_HEADER* header) { if (Stream_GetRemainingLength(s) < 12) return -1; Stream_Read(s, header->Signature, 8); Stream_Read_UINT32(s, header->MessageType); if (strncmp((char*) header->Signature, NTLM_SIGNATURE, 8) != 0) return -1; return 1; } void ntlm_write_message_header(wStream* s, NTLM_MESSAGE_HEADER* header) { Stream_Write(s, header->Signature, sizeof(NTLM_SIGNATURE)); Stream_Write_UINT32(s, header->MessageType); } void ntlm_populate_message_header(NTLM_MESSAGE_HEADER* header, UINT32 MessageType) { CopyMemory(header->Signature, NTLM_SIGNATURE, sizeof(NTLM_SIGNATURE)); header->MessageType = MessageType; } int ntlm_read_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (Stream_GetRemainingLength(s) < 8) return -1; Stream_Read_UINT16(s, fields->Len); /* Len (2 bytes) */ Stream_Read_UINT16(s, fields->MaxLen); /* MaxLen (2 bytes) */ Stream_Read_UINT32(s, fields->BufferOffset); /* BufferOffset (4 bytes) */ return 1; } void ntlm_write_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (fields->MaxLen < 1) fields->MaxLen = fields->Len; Stream_Write_UINT16(s, fields->Len); /* Len (2 bytes) */ Stream_Write_UINT16(s, fields->MaxLen); /* MaxLen (2 bytes) */ Stream_Write_UINT32(s, fields->BufferOffset); /* BufferOffset (4 bytes) */ } int ntlm_read_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (fields->Len > 0) { if ((fields->BufferOffset + fields->Len) > Stream_Length(s)) return -1; fields->Buffer = (PBYTE) malloc(fields->Len); if (!fields->Buffer) return -1; Stream_SetPosition(s, fields->BufferOffset); Stream_Read(s, fields->Buffer, fields->Len); } return 1; } void 
ntlm_write_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (fields->Len > 0) { Stream_SetPosition(s, fields->BufferOffset); Stream_Write(s, fields->Buffer, fields->Len); } } void ntlm_free_message_fields_buffer(NTLM_MESSAGE_FIELDS* fields) { if (fields) { if (fields->Buffer) { free(fields->Buffer); fields->Len = 0; fields->MaxLen = 0; fields->Buffer = NULL; fields->BufferOffset = 0; } } } void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name) { WLog_DBG(TAG, "%s (Len: %"PRIu16" MaxLen: %"PRIu16" BufferOffset: %"PRIu32")", name, fields->Len, fields->MaxLen, fields->BufferOffset); if (fields->Len > 0) winpr_HexDump(TAG, WLOG_DEBUG, fields->Buffer, fields->Len); } SECURITY_STATUS ntlm_read_NegotiateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; NTLM_NEGOTIATE_MESSAGE* message; message = &context->NEGOTIATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_NEGOTIATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; if (ntlm_read_message_header(s, (NTLM_MESSAGE_HEADER*) message) < 0) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->MessageType != MESSAGE_TYPE_NEGOTIATE) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ if (!((message->NegotiateFlags & NTLMSSP_REQUEST_TARGET) && (message->NegotiateFlags & NTLMSSP_NEGOTIATE_NTLM) && (message->NegotiateFlags & NTLMSSP_NEGOTIATE_UNICODE))) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } context->NegotiateFlags = message->NegotiateFlags; /* only set if NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED is set */ if (ntlm_read_message_fields(s, &(message->DomainName)) < 0) /* DomainNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } /* only set if NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED is set */ if (ntlm_read_message_fields(s, &(message->Workstation)) < 0) /* WorkstationFields (8 bytes) 
*/ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) { if (ntlm_read_version_info(s, &(message->Version)) < 0) /* Version (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } } length = Stream_GetPosition(s); buffer->cbBuffer = length; if (!sspi_SecBufferAlloc(&context->NegotiateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->NegotiateMessage.pvBuffer, buffer->pvBuffer, buffer->cbBuffer); context->NegotiateMessage.BufferType = buffer->BufferType; #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "NEGOTIATE_MESSAGE (length = %"PRIu32")", context->NegotiateMessage.cbBuffer); winpr_HexDump(TAG, WLOG_DEBUG, context->NegotiateMessage.pvBuffer, context->NegotiateMessage.cbBuffer); ntlm_print_negotiate_flags(message->NegotiateFlags); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); #endif context->state = NTLM_STATE_CHALLENGE; Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS ntlm_write_NegotiateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; NTLM_NEGOTIATE_MESSAGE* message; message = &context->NEGOTIATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_NEGOTIATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; ntlm_populate_message_header((NTLM_MESSAGE_HEADER*) message, MESSAGE_TYPE_NEGOTIATE); if (context->NTLMv2) { message->NegotiateFlags |= NTLMSSP_NEGOTIATE_56; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_VERSION; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_LM_KEY; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_OEM; } message->NegotiateFlags |= NTLMSSP_NEGOTIATE_KEY_EXCH; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_128; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_EXTENDED_SESSION_SECURITY; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_NTLM; 
message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SIGN; message->NegotiateFlags |= NTLMSSP_REQUEST_TARGET; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_UNICODE; if (context->confidentiality) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SEAL; if (context->SendVersionInfo) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_VERSION; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_get_version_info(&(message->Version)); context->NegotiateFlags = message->NegotiateFlags; /* Message Header (12 bytes) */ ntlm_write_message_header(s, (NTLM_MESSAGE_HEADER*) message); Stream_Write_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ /* only set if NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED is set */ /* DomainNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->DomainName)); /* only set if NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED is set */ /* WorkstationFields (8 bytes) */ ntlm_write_message_fields(s, &(message->Workstation)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_write_version_info(s, &(message->Version)); length = Stream_GetPosition(s); buffer->cbBuffer = length; if (!sspi_SecBufferAlloc(&context->NegotiateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->NegotiateMessage.pvBuffer, buffer->pvBuffer, buffer->cbBuffer); context->NegotiateMessage.BufferType = buffer->BufferType; #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "NEGOTIATE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, Stream_Buffer(s), length); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); #endif context->state = NTLM_STATE_CHALLENGE; Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS ntlm_read_ChallengeMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; int length; PBYTE StartOffset; PBYTE PayloadOffset; NTLM_AV_PAIR* AvTimestamp; NTLM_CHALLENGE_MESSAGE* message; ntlm_generate_client_challenge(context); message = 
&context->CHALLENGE_MESSAGE; ZeroMemory(message, sizeof(NTLM_CHALLENGE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; StartOffset = Stream_Pointer(s); if (ntlm_read_message_header(s, (NTLM_MESSAGE_HEADER*) message) < 0) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->MessageType != MESSAGE_TYPE_CHALLENGE) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->TargetName)) < 0) /* TargetNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (Stream_GetRemainingLength(s) < 4) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ context->NegotiateFlags = message->NegotiateFlags; if (Stream_GetRemainingLength(s) < 8) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read(s, message->ServerChallenge, 8); /* ServerChallenge (8 bytes) */ CopyMemory(context->ServerChallenge, message->ServerChallenge, 8); if (Stream_GetRemainingLength(s) < 8) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read(s, message->Reserved, 8); /* Reserved (8 bytes), should be ignored */ if (ntlm_read_message_fields(s, &(message->TargetInfo)) < 0) /* TargetInfoFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (context->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) { if (ntlm_read_version_info(s, &(message->Version)) < 0) /* Version (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } } /* Payload (variable) */ PayloadOffset = Stream_Pointer(s); if (message->TargetName.Len > 0) { if (ntlm_read_message_fields_buffer(s, &(message->TargetName)) < 0) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } } if (message->TargetInfo.Len > 0) { if (ntlm_read_message_fields_buffer(s, &(message->TargetInfo)) < 0) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } context->ChallengeTargetInfo.pvBuffer = 
message->TargetInfo.Buffer; context->ChallengeTargetInfo.cbBuffer = message->TargetInfo.Len; AvTimestamp = ntlm_av_pair_get((NTLM_AV_PAIR*) message->TargetInfo.Buffer, MsvAvTimestamp); if (AvTimestamp) { if (context->NTLMv2) context->UseMIC = TRUE; CopyMemory(context->ChallengeTimestamp, ntlm_av_pair_get_value_pointer(AvTimestamp), 8); } } length = (PayloadOffset - StartOffset) + message->TargetName.Len + message->TargetInfo.Len; if (!sspi_SecBufferAlloc(&context->ChallengeMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->ChallengeMessage.pvBuffer, StartOffset, length); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "CHALLENGE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, context->ChallengeMessage.pvBuffer, context->ChallengeMessage.cbBuffer); ntlm_print_negotiate_flags(context->NegotiateFlags); if (context->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); ntlm_print_message_fields(&(message->TargetName), "TargetName"); ntlm_print_message_fields(&(message->TargetInfo), "TargetInfo"); if (context->ChallengeTargetInfo.cbBuffer > 0) { WLog_DBG(TAG, "ChallengeTargetInfo (%"PRIu32"):", context->ChallengeTargetInfo.cbBuffer); ntlm_print_av_pair_list(context->ChallengeTargetInfo.pvBuffer); } #endif /* AV_PAIRs */ if (context->NTLMv2) { if (ntlm_construct_authenticate_target_info(context) < 0) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } sspi_SecBufferFree(&context->ChallengeTargetInfo); context->ChallengeTargetInfo.pvBuffer = context->AuthenticateTargetInfo.pvBuffer; context->ChallengeTargetInfo.cbBuffer = context->AuthenticateTargetInfo.cbBuffer; } ntlm_generate_timestamp(context); /* Timestamp */ if (ntlm_compute_lm_v2_response(context) < 0) /* LmChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_compute_ntlm_v2_response(context) < 0) /* NtChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } 
ntlm_generate_key_exchange_key(context); /* KeyExchangeKey */ ntlm_generate_random_session_key(context); /* RandomSessionKey */ ntlm_generate_exported_session_key(context); /* ExportedSessionKey */ ntlm_encrypt_random_session_key(context); /* EncryptedRandomSessionKey */ /* Generate signing keys */ ntlm_generate_client_signing_key(context); ntlm_generate_server_signing_key(context); /* Generate sealing keys */ ntlm_generate_client_sealing_key(context); ntlm_generate_server_sealing_key(context); /* Initialize RC4 seal state using client sealing key */ ntlm_init_rc4_seal_states(context); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "ClientChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientChallenge, 8); WLog_DBG(TAG, "ServerChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerChallenge, 8); WLog_DBG(TAG, "SessionBaseKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->SessionBaseKey, 16); WLog_DBG(TAG, "KeyExchangeKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->KeyExchangeKey, 16); WLog_DBG(TAG, "ExportedSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ExportedSessionKey, 16); WLog_DBG(TAG, "RandomSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->RandomSessionKey, 16); WLog_DBG(TAG, "ClientSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSigningKey, 16); WLog_DBG(TAG, "ClientSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSealingKey, 16); WLog_DBG(TAG, "ServerSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSigningKey, 16); WLog_DBG(TAG, "ServerSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSealingKey, 16); WLog_DBG(TAG, "Timestamp"); winpr_HexDump(TAG, WLOG_DEBUG, context->Timestamp, 8); #endif context->state = NTLM_STATE_AUTHENTICATE; ntlm_free_message_fields_buffer(&(message->TargetName)); Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS ntlm_write_ChallengeMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; UINT32 PayloadOffset; NTLM_CHALLENGE_MESSAGE* message; 
message = &context->CHALLENGE_MESSAGE; ZeroMemory(message, sizeof(NTLM_CHALLENGE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; ntlm_get_version_info(&(message->Version)); /* Version */ ntlm_generate_server_challenge(context); /* Server Challenge */ ntlm_generate_timestamp(context); /* Timestamp */ if (ntlm_construct_challenge_target_info(context) < 0) /* TargetInfo */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(message->ServerChallenge, context->ServerChallenge, 8); /* ServerChallenge */ message->NegotiateFlags = context->NegotiateFlags; ntlm_populate_message_header((NTLM_MESSAGE_HEADER*) message, MESSAGE_TYPE_CHALLENGE); /* Message Header (12 bytes) */ ntlm_write_message_header(s, (NTLM_MESSAGE_HEADER*) message); if (message->NegotiateFlags & NTLMSSP_REQUEST_TARGET) { message->TargetName.Len = (UINT16) context->TargetName.cbBuffer; message->TargetName.Buffer = (PBYTE) context->TargetName.pvBuffer; } message->NegotiateFlags |= NTLMSSP_NEGOTIATE_TARGET_INFO; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_TARGET_INFO) { message->TargetInfo.Len = (UINT16) context->ChallengeTargetInfo.cbBuffer; message->TargetInfo.Buffer = (PBYTE) context->ChallengeTargetInfo.pvBuffer; } PayloadOffset = 48; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) PayloadOffset += 8; message->TargetName.BufferOffset = PayloadOffset; message->TargetInfo.BufferOffset = message->TargetName.BufferOffset + message->TargetName.Len; /* TargetNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->TargetName)); Stream_Write_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ Stream_Write(s, message->ServerChallenge, 8); /* ServerChallenge (8 bytes) */ Stream_Write(s, message->Reserved, 8); /* Reserved (8 bytes), should be ignored */ /* TargetInfoFields (8 bytes) */ ntlm_write_message_fields(s, &(message->TargetInfo)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) 
ntlm_write_version_info(s, &(message->Version)); /* Version (8 bytes) */ /* Payload (variable) */ if (message->NegotiateFlags & NTLMSSP_REQUEST_TARGET) ntlm_write_message_fields_buffer(s, &(message->TargetName)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_TARGET_INFO) ntlm_write_message_fields_buffer(s, &(message->TargetInfo)); length = Stream_GetPosition(s); buffer->cbBuffer = length; if (!sspi_SecBufferAlloc(&context->ChallengeMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->ChallengeMessage.pvBuffer, Stream_Buffer(s), length); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "CHALLENGE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, context->ChallengeMessage.pvBuffer, context->ChallengeMessage.cbBuffer); ntlm_print_negotiate_flags(message->NegotiateFlags); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); ntlm_print_message_fields(&(message->TargetName), "TargetName"); ntlm_print_message_fields(&(message->TargetInfo), "TargetInfo"); #endif context->state = NTLM_STATE_AUTHENTICATE; Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS ntlm_read_AuthenticateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; UINT32 flags; NTLM_AV_PAIR* AvFlags; UINT32 PayloadBufferOffset; NTLM_AUTHENTICATE_MESSAGE* message; SSPI_CREDENTIALS* credentials = context->credentials; flags = 0; AvFlags = NULL; message = &context->AUTHENTICATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_AUTHENTICATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; if (ntlm_read_message_header(s, (NTLM_MESSAGE_HEADER*) message) < 0) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->MessageType != MESSAGE_TYPE_AUTHENTICATE) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->LmChallengeResponse)) < 0) /* LmChallengeResponseFields (8 bytes) 
*/ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->NtChallengeResponse)) < 0) /* NtChallengeResponseFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->DomainName)) < 0) /* DomainNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->UserName)) < 0) /* UserNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->Workstation)) < 0) /* WorkstationFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->EncryptedRandomSessionKey)) < 0) /* EncryptedRandomSessionKeyFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ context->NegotiateKeyExchange = (message->NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) ? 
TRUE : FALSE; if ((context->NegotiateKeyExchange && !message->EncryptedRandomSessionKey.Len) || (!context->NegotiateKeyExchange && message->EncryptedRandomSessionKey.Len)) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) { if (ntlm_read_version_info(s, &(message->Version)) < 0) /* Version (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } } PayloadBufferOffset = Stream_GetPosition(s); if (ntlm_read_message_fields_buffer(s, &(message->DomainName)) < 0) /* DomainName */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->UserName)) < 0) /* UserName */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->Workstation)) < 0) /* Workstation */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->LmChallengeResponse)) < 0) /* LmChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->NtChallengeResponse)) < 0) /* NtChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (message->NtChallengeResponse.Len > 0) { wStream* snt = Stream_New(message->NtChallengeResponse.Buffer, message->NtChallengeResponse.Len); if (!snt) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_ntlm_v2_response(snt, &(context->NTLMv2Response)) < 0) { Stream_Free(s, FALSE); Stream_Free(snt, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Free(snt, FALSE); context->NtChallengeResponse.pvBuffer = message->NtChallengeResponse.Buffer; context->NtChallengeResponse.cbBuffer = message->NtChallengeResponse.Len; sspi_SecBufferFree(&(context->ChallengeTargetInfo)); context->ChallengeTargetInfo.pvBuffer = (void*) context->NTLMv2Response.Challenge.AvPairs; context->ChallengeTargetInfo.cbBuffer = message->NtChallengeResponse.Len - (28 + 16); 
CopyMemory(context->ClientChallenge, context->NTLMv2Response.Challenge.ClientChallenge, 8); AvFlags = ntlm_av_pair_get(context->NTLMv2Response.Challenge.AvPairs, MsvAvFlags); if (AvFlags) Data_Read_UINT32(ntlm_av_pair_get_value_pointer(AvFlags), flags); } if (ntlm_read_message_fields_buffer(s, &(message->EncryptedRandomSessionKey)) < 0) /* EncryptedRandomSessionKey */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (message->EncryptedRandomSessionKey.Len > 0) { if (message->EncryptedRandomSessionKey.Len != 16) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } CopyMemory(context->EncryptedRandomSessionKey, message->EncryptedRandomSessionKey.Buffer, 16); } length = Stream_GetPosition(s); if (!sspi_SecBufferAlloc(&context->AuthenticateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->AuthenticateMessage.pvBuffer, Stream_Buffer(s), length); buffer->cbBuffer = length; Stream_SetPosition(s, PayloadBufferOffset); if (flags & MSV_AV_FLAGS_MESSAGE_INTEGRITY_CHECK) { context->MessageIntegrityCheckOffset = (UINT32) Stream_GetPosition(s); if (Stream_GetRemainingLength(s) < 16) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read(s, message->MessageIntegrityCheck, 16); } #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "AUTHENTICATE_MESSAGE (length = %"PRIu32")", context->AuthenticateMessage.cbBuffer); winpr_HexDump(TAG, WLOG_DEBUG, context->AuthenticateMessage.pvBuffer, context->AuthenticateMessage.cbBuffer); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); ntlm_print_message_fields(&(message->DomainName), "DomainName"); ntlm_print_message_fields(&(message->UserName), "UserName"); ntlm_print_message_fields(&(message->Workstation), "Workstation"); ntlm_print_message_fields(&(message->LmChallengeResponse), "LmChallengeResponse"); ntlm_print_message_fields(&(message->NtChallengeResponse), "NtChallengeResponse"); 
ntlm_print_message_fields(&(message->EncryptedRandomSessionKey), "EncryptedRandomSessionKey"); ntlm_print_av_pair_list(context->NTLMv2Response.Challenge.AvPairs); if (flags & MSV_AV_FLAGS_MESSAGE_INTEGRITY_CHECK) { WLog_DBG(TAG, "MessageIntegrityCheck:"); winpr_HexDump(TAG, WLOG_DEBUG, message->MessageIntegrityCheck, 16); } #endif if (message->UserName.Len > 0) { credentials->identity.User = (UINT16*) malloc(message->UserName.Len); if (!credentials->identity.User) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(credentials->identity.User, message->UserName.Buffer, message->UserName.Len); credentials->identity.UserLength = message->UserName.Len / 2; } if (message->DomainName.Len > 0) { credentials->identity.Domain = (UINT16*) malloc(message->DomainName.Len); if (!credentials->identity.Domain) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(credentials->identity.Domain, message->DomainName.Buffer, message->DomainName.Len); credentials->identity.DomainLength = message->DomainName.Len / 2; } Stream_Free(s, FALSE); /* Computations beyond this point require the NTLM hash of the password */ context->state = NTLM_STATE_COMPLETION; return SEC_I_COMPLETE_NEEDED; } /** * Send NTLMSSP AUTHENTICATE_MESSAGE.\n * AUTHENTICATE_MESSAGE @msdn{cc236643} * @param NTLM context * @param buffer */ SECURITY_STATUS ntlm_write_AuthenticateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; UINT32 PayloadBufferOffset; NTLM_AUTHENTICATE_MESSAGE* message; SSPI_CREDENTIALS* credentials = context->credentials; message = &context->AUTHENTICATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_AUTHENTICATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; if (context->NTLMv2) { message->NegotiateFlags |= NTLMSSP_NEGOTIATE_56; if (context->SendVersionInfo) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_VERSION; } if (context->UseMIC) message->NegotiateFlags |= 
NTLMSSP_NEGOTIATE_TARGET_INFO; if (context->SendWorkstationName) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED; if (context->confidentiality) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SEAL; if (context->CHALLENGE_MESSAGE.NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_KEY_EXCH; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_128; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_EXTENDED_SESSION_SECURITY; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_NTLM; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SIGN; message->NegotiateFlags |= NTLMSSP_REQUEST_TARGET; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_UNICODE; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_get_version_info(&(message->Version)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED) { message->Workstation.Len = context->Workstation.Length; message->Workstation.Buffer = (BYTE*) context->Workstation.Buffer; } if (credentials->identity.DomainLength > 0) { message->NegotiateFlags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED; message->DomainName.Len = (UINT16) credentials->identity.DomainLength * 2; message->DomainName.Buffer = (BYTE*) credentials->identity.Domain; } message->UserName.Len = (UINT16) credentials->identity.UserLength * 2; message->UserName.Buffer = (BYTE*) credentials->identity.User; message->LmChallengeResponse.Len = (UINT16) context->LmChallengeResponse.cbBuffer; message->LmChallengeResponse.Buffer = (BYTE*) context->LmChallengeResponse.pvBuffer; message->NtChallengeResponse.Len = (UINT16) context->NtChallengeResponse.cbBuffer; message->NtChallengeResponse.Buffer = (BYTE*) context->NtChallengeResponse.pvBuffer; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) { message->EncryptedRandomSessionKey.Len = 16; message->EncryptedRandomSessionKey.Buffer = context->EncryptedRandomSessionKey; } PayloadBufferOffset = 64; if (message->NegotiateFlags & 
NTLMSSP_NEGOTIATE_VERSION) PayloadBufferOffset += 8; /* Version (8 bytes) */ if (context->UseMIC) PayloadBufferOffset += 16; /* Message Integrity Check (16 bytes) */ message->DomainName.BufferOffset = PayloadBufferOffset; message->UserName.BufferOffset = message->DomainName.BufferOffset + message->DomainName.Len; message->Workstation.BufferOffset = message->UserName.BufferOffset + message->UserName.Len; message->LmChallengeResponse.BufferOffset = message->Workstation.BufferOffset + message->Workstation.Len; message->NtChallengeResponse.BufferOffset = message->LmChallengeResponse.BufferOffset + message->LmChallengeResponse.Len; message->EncryptedRandomSessionKey.BufferOffset = message->NtChallengeResponse.BufferOffset + message->NtChallengeResponse.Len; ntlm_populate_message_header((NTLM_MESSAGE_HEADER*) message, MESSAGE_TYPE_AUTHENTICATE); ntlm_write_message_header(s, (NTLM_MESSAGE_HEADER*) message); /* Message Header (12 bytes) */ ntlm_write_message_fields(s, & (message->LmChallengeResponse)); /* LmChallengeResponseFields (8 bytes) */ ntlm_write_message_fields(s, & (message->NtChallengeResponse)); /* NtChallengeResponseFields (8 bytes) */ ntlm_write_message_fields(s, &(message->DomainName)); /* DomainNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->UserName)); /* UserNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->Workstation)); /* WorkstationFields (8 bytes) */ ntlm_write_message_fields(s, & (message->EncryptedRandomSessionKey)); /* EncryptedRandomSessionKeyFields (8 bytes) */ Stream_Write_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_write_version_info(s, &(message->Version)); /* Version (8 bytes) */ if (context->UseMIC) { context->MessageIntegrityCheckOffset = (UINT32) Stream_GetPosition(s); Stream_Zero(s, 16); /* Message Integrity Check (16 bytes) */ } if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED) 
ntlm_write_message_fields_buffer(s, &(message->DomainName)); /* DomainName */ ntlm_write_message_fields_buffer(s, &(message->UserName)); /* UserName */ if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED) ntlm_write_message_fields_buffer(s, &(message->Workstation)); /* Workstation */ ntlm_write_message_fields_buffer(s, &(message->LmChallengeResponse)); /* LmChallengeResponse */ ntlm_write_message_fields_buffer(s, &(message->NtChallengeResponse)); /* NtChallengeResponse */ if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) ntlm_write_message_fields_buffer(s, &(message->EncryptedRandomSessionKey)); /* EncryptedRandomSessionKey */ length = Stream_GetPosition(s); if (!sspi_SecBufferAlloc(&context->AuthenticateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->AuthenticateMessage.pvBuffer, Stream_Buffer(s), length); buffer->cbBuffer = length; if (context->UseMIC) { /* Message Integrity Check */ ntlm_compute_message_integrity_check(context, message->MessageIntegrityCheck, 16); Stream_SetPosition(s, context->MessageIntegrityCheckOffset); Stream_Write(s, message->MessageIntegrityCheck, 16); Stream_SetPosition(s, length); } #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "AUTHENTICATE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, Stream_Buffer(s), length); ntlm_print_negotiate_flags(message->NegotiateFlags); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); if (context->AuthenticateTargetInfo.cbBuffer > 0) { WLog_DBG(TAG, "AuthenticateTargetInfo (%"PRIu32"):", context->AuthenticateTargetInfo.cbBuffer); ntlm_print_av_pair_list(context->AuthenticateTargetInfo.pvBuffer); } ntlm_print_message_fields(&(message->DomainName), "DomainName"); ntlm_print_message_fields(&(message->UserName), "UserName"); ntlm_print_message_fields(&(message->Workstation), "Workstation"); ntlm_print_message_fields(&(message->LmChallengeResponse), "LmChallengeResponse"); 
ntlm_print_message_fields(&(message->NtChallengeResponse), "NtChallengeResponse"); ntlm_print_message_fields(&(message->EncryptedRandomSessionKey), "EncryptedRandomSessionKey"); if (context->UseMIC) { WLog_DBG(TAG, "MessageIntegrityCheck (length = 16)"); winpr_HexDump(TAG, WLOG_DEBUG, message->MessageIntegrityCheck, 16); } #endif context->state = NTLM_STATE_FINAL; Stream_Free(s, FALSE); return SEC_I_COMPLETE_NEEDED; } SECURITY_STATUS ntlm_server_AuthenticateComplete(NTLM_CONTEXT* context) { UINT32 flags = 0; NTLM_AV_PAIR* AvFlags = NULL; NTLM_AUTHENTICATE_MESSAGE* message; BYTE messageIntegrityCheck[16]; if (context->state != NTLM_STATE_COMPLETION) return SEC_E_OUT_OF_SEQUENCE; message = &context->AUTHENTICATE_MESSAGE; AvFlags = ntlm_av_pair_get(context->NTLMv2Response.Challenge.AvPairs, MsvAvFlags); if (AvFlags) Data_Read_UINT32(ntlm_av_pair_get_value_pointer(AvFlags), flags); if (ntlm_compute_lm_v2_response(context) < 0) /* LmChallengeResponse */ return SEC_E_INTERNAL_ERROR; if (ntlm_compute_ntlm_v2_response(context) < 0) /* NtChallengeResponse */ return SEC_E_INTERNAL_ERROR; /* KeyExchangeKey */ ntlm_generate_key_exchange_key(context); /* EncryptedRandomSessionKey */ ntlm_decrypt_random_session_key(context); /* ExportedSessionKey */ ntlm_generate_exported_session_key(context); if (flags & MSV_AV_FLAGS_MESSAGE_INTEGRITY_CHECK) { ZeroMemory(&((PBYTE) context->AuthenticateMessage.pvBuffer)[context->MessageIntegrityCheckOffset], 16); ntlm_compute_message_integrity_check(context, messageIntegrityCheck, sizeof(messageIntegrityCheck)); CopyMemory(&((PBYTE) context->AuthenticateMessage.pvBuffer)[context->MessageIntegrityCheckOffset], message->MessageIntegrityCheck, 16); if (memcmp(messageIntegrityCheck, message->MessageIntegrityCheck, 16) != 0) { WLog_ERR(TAG, "Message Integrity Check (MIC) verification failed!"); WLog_ERR(TAG, "Expected MIC:"); winpr_HexDump(TAG, WLOG_ERROR, messageIntegrityCheck, 16); WLog_ERR(TAG, "Actual MIC:"); winpr_HexDump(TAG, WLOG_ERROR, 
message->MessageIntegrityCheck, 16); return SEC_E_MESSAGE_ALTERED; } } /* Generate signing keys */ ntlm_generate_client_signing_key(context); ntlm_generate_server_signing_key(context); /* Generate sealing keys */ ntlm_generate_client_sealing_key(context); ntlm_generate_server_sealing_key(context); /* Initialize RC4 seal state */ ntlm_init_rc4_seal_states(context); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "ClientChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientChallenge, 8); WLog_DBG(TAG, "ServerChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerChallenge, 8); WLog_DBG(TAG, "SessionBaseKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->SessionBaseKey, 16); WLog_DBG(TAG, "KeyExchangeKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->KeyExchangeKey, 16); WLog_DBG(TAG, "ExportedSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ExportedSessionKey, 16); WLog_DBG(TAG, "RandomSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->RandomSessionKey, 16); WLog_DBG(TAG, "ClientSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSigningKey, 16); WLog_DBG(TAG, "ClientSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSealingKey, 16); WLog_DBG(TAG, "ServerSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSigningKey, 16); WLog_DBG(TAG, "ServerSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSealingKey, 16); WLog_DBG(TAG, "Timestamp"); winpr_HexDump(TAG, WLOG_DEBUG, context->Timestamp, 8); #endif context->state = NTLM_STATE_FINAL; ntlm_free_message_fields_buffer(&(message->DomainName)); ntlm_free_message_fields_buffer(&(message->UserName)); ntlm_free_message_fields_buffer(&(message->Workstation)); ntlm_free_message_fields_buffer(&(message->LmChallengeResponse)); ntlm_free_message_fields_buffer(&(message->NtChallengeResponse)); ntlm_free_message_fields_buffer(&(message->EncryptedRandomSessionKey)); return SEC_E_OK; }
/** * WinPR: Windows Portable Runtime * NTLM Security Package (Message) * * Copyright 2011-2014 Marc-Andre Moreau <marcandre.moreau@gmail.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "ntlm.h" #include "../sspi.h" #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/stream.h> #include <winpr/sysinfo.h> #include "ntlm_compute.h" #include "ntlm_message.h" #include "../log.h" #define TAG WINPR_TAG("sspi.NTLM") static const char NTLM_SIGNATURE[8] = { 'N', 'T', 'L', 'M', 'S', 'S', 'P', '\0' }; static const char* const NTLM_NEGOTIATE_STRINGS[] = { "NTLMSSP_NEGOTIATE_56", "NTLMSSP_NEGOTIATE_KEY_EXCH", "NTLMSSP_NEGOTIATE_128", "NTLMSSP_RESERVED1", "NTLMSSP_RESERVED2", "NTLMSSP_RESERVED3", "NTLMSSP_NEGOTIATE_VERSION", "NTLMSSP_RESERVED4", "NTLMSSP_NEGOTIATE_TARGET_INFO", "NTLMSSP_REQUEST_NON_NT_SESSION_KEY", "NTLMSSP_RESERVED5", "NTLMSSP_NEGOTIATE_IDENTIFY", "NTLMSSP_NEGOTIATE_EXTENDED_SESSION_SECURITY", "NTLMSSP_RESERVED6", "NTLMSSP_TARGET_TYPE_SERVER", "NTLMSSP_TARGET_TYPE_DOMAIN", "NTLMSSP_NEGOTIATE_ALWAYS_SIGN", "NTLMSSP_RESERVED7", "NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED", "NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED", "NTLMSSP_NEGOTIATE_ANONYMOUS", "NTLMSSP_RESERVED8", "NTLMSSP_NEGOTIATE_NTLM", "NTLMSSP_RESERVED9", "NTLMSSP_NEGOTIATE_LM_KEY", "NTLMSSP_NEGOTIATE_DATAGRAM", "NTLMSSP_NEGOTIATE_SEAL", "NTLMSSP_NEGOTIATE_SIGN", "NTLMSSP_RESERVED10", "NTLMSSP_REQUEST_TARGET", "NTLMSSP_NEGOTIATE_OEM", 
"NTLMSSP_NEGOTIATE_UNICODE" }; static void ntlm_print_negotiate_flags(UINT32 flags) { int i; const char* str; WLog_INFO(TAG, "negotiateFlags \"0x%08"PRIX32"\"", flags); for (i = 31; i >= 0; i--) { if ((flags >> i) & 1) { str = NTLM_NEGOTIATE_STRINGS[(31 - i)]; WLog_INFO(TAG, "\t%s (%d),", str, (31 - i)); } } } static int ntlm_read_message_header(wStream* s, NTLM_MESSAGE_HEADER* header) { if (Stream_GetRemainingLength(s) < 12) return -1; Stream_Read(s, header->Signature, 8); Stream_Read_UINT32(s, header->MessageType); if (strncmp((char*) header->Signature, NTLM_SIGNATURE, 8) != 0) return -1; return 1; } static void ntlm_write_message_header(wStream* s, NTLM_MESSAGE_HEADER* header) { Stream_Write(s, header->Signature, sizeof(NTLM_SIGNATURE)); Stream_Write_UINT32(s, header->MessageType); } static void ntlm_populate_message_header(NTLM_MESSAGE_HEADER* header, UINT32 MessageType) { CopyMemory(header->Signature, NTLM_SIGNATURE, sizeof(NTLM_SIGNATURE)); header->MessageType = MessageType; } static int ntlm_read_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (Stream_GetRemainingLength(s) < 8) return -1; Stream_Read_UINT16(s, fields->Len); /* Len (2 bytes) */ Stream_Read_UINT16(s, fields->MaxLen); /* MaxLen (2 bytes) */ Stream_Read_UINT32(s, fields->BufferOffset); /* BufferOffset (4 bytes) */ return 1; } static void ntlm_write_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (fields->MaxLen < 1) fields->MaxLen = fields->Len; Stream_Write_UINT16(s, fields->Len); /* Len (2 bytes) */ Stream_Write_UINT16(s, fields->MaxLen); /* MaxLen (2 bytes) */ Stream_Write_UINT32(s, fields->BufferOffset); /* BufferOffset (4 bytes) */ } static int ntlm_read_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (fields->Len > 0) { const UINT64 offset = (UINT64)fields->BufferOffset + (UINT64)fields->Len; if (offset > Stream_Length(s)) return -1; fields->Buffer = (PBYTE) malloc(fields->Len); if (!fields->Buffer) return -1; Stream_SetPosition(s, 
fields->BufferOffset); Stream_Read(s, fields->Buffer, fields->Len); } return 1; } static void ntlm_write_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields) { if (fields->Len > 0) { Stream_SetPosition(s, fields->BufferOffset); Stream_Write(s, fields->Buffer, fields->Len); } } static void ntlm_free_message_fields_buffer(NTLM_MESSAGE_FIELDS* fields) { if (fields) { if (fields->Buffer) { free(fields->Buffer); fields->Len = 0; fields->MaxLen = 0; fields->Buffer = NULL; fields->BufferOffset = 0; } } } static void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name) { WLog_DBG(TAG, "%s (Len: %"PRIu16" MaxLen: %"PRIu16" BufferOffset: %"PRIu32")", name, fields->Len, fields->MaxLen, fields->BufferOffset); if (fields->Len > 0) winpr_HexDump(TAG, WLOG_DEBUG, fields->Buffer, fields->Len); } SECURITY_STATUS ntlm_read_NegotiateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; NTLM_NEGOTIATE_MESSAGE* message; message = &context->NEGOTIATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_NEGOTIATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; if (ntlm_read_message_header(s, (NTLM_MESSAGE_HEADER*) message) < 0) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->MessageType != MESSAGE_TYPE_NEGOTIATE) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ if (!((message->NegotiateFlags & NTLMSSP_REQUEST_TARGET) && (message->NegotiateFlags & NTLMSSP_NEGOTIATE_NTLM) && (message->NegotiateFlags & NTLMSSP_NEGOTIATE_UNICODE))) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } context->NegotiateFlags = message->NegotiateFlags; /* only set if NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED is set */ if (ntlm_read_message_fields(s, &(message->DomainName)) < 0) /* DomainNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } /* only set if 
NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED is set */ if (ntlm_read_message_fields(s, &(message->Workstation)) < 0) /* WorkstationFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) { if (ntlm_read_version_info(s, &(message->Version)) < 0) /* Version (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } } length = Stream_GetPosition(s); buffer->cbBuffer = length; if (!sspi_SecBufferAlloc(&context->NegotiateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->NegotiateMessage.pvBuffer, buffer->pvBuffer, buffer->cbBuffer); context->NegotiateMessage.BufferType = buffer->BufferType; #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "NEGOTIATE_MESSAGE (length = %"PRIu32")", context->NegotiateMessage.cbBuffer); winpr_HexDump(TAG, WLOG_DEBUG, context->NegotiateMessage.pvBuffer, context->NegotiateMessage.cbBuffer); ntlm_print_negotiate_flags(message->NegotiateFlags); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); #endif context->state = NTLM_STATE_CHALLENGE; Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS ntlm_write_NegotiateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; NTLM_NEGOTIATE_MESSAGE* message; message = &context->NEGOTIATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_NEGOTIATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; ntlm_populate_message_header((NTLM_MESSAGE_HEADER*) message, MESSAGE_TYPE_NEGOTIATE); if (context->NTLMv2) { message->NegotiateFlags |= NTLMSSP_NEGOTIATE_56; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_VERSION; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_LM_KEY; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_OEM; } message->NegotiateFlags |= NTLMSSP_NEGOTIATE_KEY_EXCH; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_128; message->NegotiateFlags |= 
NTLMSSP_NEGOTIATE_EXTENDED_SESSION_SECURITY; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_NTLM; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SIGN; message->NegotiateFlags |= NTLMSSP_REQUEST_TARGET; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_UNICODE; if (context->confidentiality) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SEAL; if (context->SendVersionInfo) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_VERSION; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_get_version_info(&(message->Version)); context->NegotiateFlags = message->NegotiateFlags; /* Message Header (12 bytes) */ ntlm_write_message_header(s, (NTLM_MESSAGE_HEADER*) message); Stream_Write_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ /* only set if NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED is set */ /* DomainNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->DomainName)); /* only set if NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED is set */ /* WorkstationFields (8 bytes) */ ntlm_write_message_fields(s, &(message->Workstation)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_write_version_info(s, &(message->Version)); length = Stream_GetPosition(s); buffer->cbBuffer = length; if (!sspi_SecBufferAlloc(&context->NegotiateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->NegotiateMessage.pvBuffer, buffer->pvBuffer, buffer->cbBuffer); context->NegotiateMessage.BufferType = buffer->BufferType; #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "NEGOTIATE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, Stream_Buffer(s), length); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); #endif context->state = NTLM_STATE_CHALLENGE; Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS ntlm_read_ChallengeMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; int length; PBYTE StartOffset; 
PBYTE PayloadOffset; NTLM_AV_PAIR* AvTimestamp; NTLM_CHALLENGE_MESSAGE* message; ntlm_generate_client_challenge(context); message = &context->CHALLENGE_MESSAGE; ZeroMemory(message, sizeof(NTLM_CHALLENGE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; StartOffset = Stream_Pointer(s); if (ntlm_read_message_header(s, (NTLM_MESSAGE_HEADER*) message) < 0) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->MessageType != MESSAGE_TYPE_CHALLENGE) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->TargetName)) < 0) /* TargetNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (Stream_GetRemainingLength(s) < 4) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ context->NegotiateFlags = message->NegotiateFlags; if (Stream_GetRemainingLength(s) < 8) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read(s, message->ServerChallenge, 8); /* ServerChallenge (8 bytes) */ CopyMemory(context->ServerChallenge, message->ServerChallenge, 8); if (Stream_GetRemainingLength(s) < 8) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read(s, message->Reserved, 8); /* Reserved (8 bytes), should be ignored */ if (ntlm_read_message_fields(s, &(message->TargetInfo)) < 0) /* TargetInfoFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (context->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) { if (ntlm_read_version_info(s, &(message->Version)) < 0) /* Version (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } } /* Payload (variable) */ PayloadOffset = Stream_Pointer(s); if (message->TargetName.Len > 0) { if (ntlm_read_message_fields_buffer(s, &(message->TargetName)) < 0) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } } if (message->TargetInfo.Len > 0) { if (ntlm_read_message_fields_buffer(s, 
&(message->TargetInfo)) < 0) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } context->ChallengeTargetInfo.pvBuffer = message->TargetInfo.Buffer; context->ChallengeTargetInfo.cbBuffer = message->TargetInfo.Len; AvTimestamp = ntlm_av_pair_get((NTLM_AV_PAIR*) message->TargetInfo.Buffer, MsvAvTimestamp); if (AvTimestamp) { if (context->NTLMv2) context->UseMIC = TRUE; CopyMemory(context->ChallengeTimestamp, ntlm_av_pair_get_value_pointer(AvTimestamp), 8); } } length = (PayloadOffset - StartOffset) + message->TargetName.Len + message->TargetInfo.Len; if (!sspi_SecBufferAlloc(&context->ChallengeMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->ChallengeMessage.pvBuffer, StartOffset, length); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "CHALLENGE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, context->ChallengeMessage.pvBuffer, context->ChallengeMessage.cbBuffer); ntlm_print_negotiate_flags(context->NegotiateFlags); if (context->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); ntlm_print_message_fields(&(message->TargetName), "TargetName"); ntlm_print_message_fields(&(message->TargetInfo), "TargetInfo"); if (context->ChallengeTargetInfo.cbBuffer > 0) { WLog_DBG(TAG, "ChallengeTargetInfo (%"PRIu32"):", context->ChallengeTargetInfo.cbBuffer); ntlm_print_av_pair_list(context->ChallengeTargetInfo.pvBuffer); } #endif /* AV_PAIRs */ if (context->NTLMv2) { if (ntlm_construct_authenticate_target_info(context) < 0) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } sspi_SecBufferFree(&context->ChallengeTargetInfo); context->ChallengeTargetInfo.pvBuffer = context->AuthenticateTargetInfo.pvBuffer; context->ChallengeTargetInfo.cbBuffer = context->AuthenticateTargetInfo.cbBuffer; } ntlm_generate_timestamp(context); /* Timestamp */ if (ntlm_compute_lm_v2_response(context) < 0) /* LmChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if 
(ntlm_compute_ntlm_v2_response(context) < 0) /* NtChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } ntlm_generate_key_exchange_key(context); /* KeyExchangeKey */ ntlm_generate_random_session_key(context); /* RandomSessionKey */ ntlm_generate_exported_session_key(context); /* ExportedSessionKey */ ntlm_encrypt_random_session_key(context); /* EncryptedRandomSessionKey */ /* Generate signing keys */ ntlm_generate_client_signing_key(context); ntlm_generate_server_signing_key(context); /* Generate sealing keys */ ntlm_generate_client_sealing_key(context); ntlm_generate_server_sealing_key(context); /* Initialize RC4 seal state using client sealing key */ ntlm_init_rc4_seal_states(context); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "ClientChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientChallenge, 8); WLog_DBG(TAG, "ServerChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerChallenge, 8); WLog_DBG(TAG, "SessionBaseKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->SessionBaseKey, 16); WLog_DBG(TAG, "KeyExchangeKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->KeyExchangeKey, 16); WLog_DBG(TAG, "ExportedSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ExportedSessionKey, 16); WLog_DBG(TAG, "RandomSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->RandomSessionKey, 16); WLog_DBG(TAG, "ClientSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSigningKey, 16); WLog_DBG(TAG, "ClientSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSealingKey, 16); WLog_DBG(TAG, "ServerSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSigningKey, 16); WLog_DBG(TAG, "ServerSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSealingKey, 16); WLog_DBG(TAG, "Timestamp"); winpr_HexDump(TAG, WLOG_DEBUG, context->Timestamp, 8); #endif context->state = NTLM_STATE_AUTHENTICATE; ntlm_free_message_fields_buffer(&(message->TargetName)); Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS 
ntlm_write_ChallengeMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; UINT32 PayloadOffset; NTLM_CHALLENGE_MESSAGE* message; message = &context->CHALLENGE_MESSAGE; ZeroMemory(message, sizeof(NTLM_CHALLENGE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; ntlm_get_version_info(&(message->Version)); /* Version */ ntlm_generate_server_challenge(context); /* Server Challenge */ ntlm_generate_timestamp(context); /* Timestamp */ if (ntlm_construct_challenge_target_info(context) < 0) /* TargetInfo */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(message->ServerChallenge, context->ServerChallenge, 8); /* ServerChallenge */ message->NegotiateFlags = context->NegotiateFlags; ntlm_populate_message_header((NTLM_MESSAGE_HEADER*) message, MESSAGE_TYPE_CHALLENGE); /* Message Header (12 bytes) */ ntlm_write_message_header(s, (NTLM_MESSAGE_HEADER*) message); if (message->NegotiateFlags & NTLMSSP_REQUEST_TARGET) { message->TargetName.Len = (UINT16) context->TargetName.cbBuffer; message->TargetName.Buffer = (PBYTE) context->TargetName.pvBuffer; } message->NegotiateFlags |= NTLMSSP_NEGOTIATE_TARGET_INFO; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_TARGET_INFO) { message->TargetInfo.Len = (UINT16) context->ChallengeTargetInfo.cbBuffer; message->TargetInfo.Buffer = (PBYTE) context->ChallengeTargetInfo.pvBuffer; } PayloadOffset = 48; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) PayloadOffset += 8; message->TargetName.BufferOffset = PayloadOffset; message->TargetInfo.BufferOffset = message->TargetName.BufferOffset + message->TargetName.Len; /* TargetNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->TargetName)); Stream_Write_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ Stream_Write(s, message->ServerChallenge, 8); /* ServerChallenge (8 bytes) */ Stream_Write(s, message->Reserved, 8); /* Reserved (8 bytes), should be ignored */ /* 
TargetInfoFields (8 bytes) */ ntlm_write_message_fields(s, &(message->TargetInfo)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_write_version_info(s, &(message->Version)); /* Version (8 bytes) */ /* Payload (variable) */ if (message->NegotiateFlags & NTLMSSP_REQUEST_TARGET) ntlm_write_message_fields_buffer(s, &(message->TargetName)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_TARGET_INFO) ntlm_write_message_fields_buffer(s, &(message->TargetInfo)); length = Stream_GetPosition(s); buffer->cbBuffer = length; if (!sspi_SecBufferAlloc(&context->ChallengeMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->ChallengeMessage.pvBuffer, Stream_Buffer(s), length); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "CHALLENGE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, context->ChallengeMessage.pvBuffer, context->ChallengeMessage.cbBuffer); ntlm_print_negotiate_flags(message->NegotiateFlags); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); ntlm_print_message_fields(&(message->TargetName), "TargetName"); ntlm_print_message_fields(&(message->TargetInfo), "TargetInfo"); #endif context->state = NTLM_STATE_AUTHENTICATE; Stream_Free(s, FALSE); return SEC_I_CONTINUE_NEEDED; } SECURITY_STATUS ntlm_read_AuthenticateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; UINT32 flags; NTLM_AV_PAIR* AvFlags; UINT32 PayloadBufferOffset; NTLM_AUTHENTICATE_MESSAGE* message; SSPI_CREDENTIALS* credentials = context->credentials; flags = 0; AvFlags = NULL; message = &context->AUTHENTICATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_AUTHENTICATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; if (ntlm_read_message_header(s, (NTLM_MESSAGE_HEADER*) message) < 0) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->MessageType != MESSAGE_TYPE_AUTHENTICATE) { Stream_Free(s, FALSE); 
return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->LmChallengeResponse)) < 0) /* LmChallengeResponseFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->NtChallengeResponse)) < 0) /* NtChallengeResponseFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->DomainName)) < 0) /* DomainNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->UserName)) < 0) /* UserNameFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->Workstation)) < 0) /* WorkstationFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (ntlm_read_message_fields(s, &(message->EncryptedRandomSessionKey)) < 0) /* EncryptedRandomSessionKeyFields (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ context->NegotiateKeyExchange = (message->NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) ? 
TRUE : FALSE; if ((context->NegotiateKeyExchange && !message->EncryptedRandomSessionKey.Len) || (!context->NegotiateKeyExchange && message->EncryptedRandomSessionKey.Len)) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) { if (ntlm_read_version_info(s, &(message->Version)) < 0) /* Version (8 bytes) */ { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } } PayloadBufferOffset = Stream_GetPosition(s); if (ntlm_read_message_fields_buffer(s, &(message->DomainName)) < 0) /* DomainName */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->UserName)) < 0) /* UserName */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->Workstation)) < 0) /* Workstation */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->LmChallengeResponse)) < 0) /* LmChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_message_fields_buffer(s, &(message->NtChallengeResponse)) < 0) /* NtChallengeResponse */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (message->NtChallengeResponse.Len > 0) { wStream* snt = Stream_New(message->NtChallengeResponse.Buffer, message->NtChallengeResponse.Len); if (!snt) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (ntlm_read_ntlm_v2_response(snt, &(context->NTLMv2Response)) < 0) { Stream_Free(s, FALSE); Stream_Free(snt, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Free(snt, FALSE); context->NtChallengeResponse.pvBuffer = message->NtChallengeResponse.Buffer; context->NtChallengeResponse.cbBuffer = message->NtChallengeResponse.Len; sspi_SecBufferFree(&(context->ChallengeTargetInfo)); context->ChallengeTargetInfo.pvBuffer = (void*) context->NTLMv2Response.Challenge.AvPairs; context->ChallengeTargetInfo.cbBuffer = message->NtChallengeResponse.Len - (28 + 16); 
CopyMemory(context->ClientChallenge, context->NTLMv2Response.Challenge.ClientChallenge, 8); AvFlags = ntlm_av_pair_get(context->NTLMv2Response.Challenge.AvPairs, MsvAvFlags); if (AvFlags) Data_Read_UINT32(ntlm_av_pair_get_value_pointer(AvFlags), flags); } if (ntlm_read_message_fields_buffer(s, &(message->EncryptedRandomSessionKey)) < 0) /* EncryptedRandomSessionKey */ { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } if (message->EncryptedRandomSessionKey.Len > 0) { if (message->EncryptedRandomSessionKey.Len != 16) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } CopyMemory(context->EncryptedRandomSessionKey, message->EncryptedRandomSessionKey.Buffer, 16); } length = Stream_GetPosition(s); if (!sspi_SecBufferAlloc(&context->AuthenticateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->AuthenticateMessage.pvBuffer, Stream_Buffer(s), length); buffer->cbBuffer = length; Stream_SetPosition(s, PayloadBufferOffset); if (flags & MSV_AV_FLAGS_MESSAGE_INTEGRITY_CHECK) { context->MessageIntegrityCheckOffset = (UINT32) Stream_GetPosition(s); if (Stream_GetRemainingLength(s) < 16) { Stream_Free(s, FALSE); return SEC_E_INVALID_TOKEN; } Stream_Read(s, message->MessageIntegrityCheck, 16); } #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "AUTHENTICATE_MESSAGE (length = %"PRIu32")", context->AuthenticateMessage.cbBuffer); winpr_HexDump(TAG, WLOG_DEBUG, context->AuthenticateMessage.pvBuffer, context->AuthenticateMessage.cbBuffer); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); ntlm_print_message_fields(&(message->DomainName), "DomainName"); ntlm_print_message_fields(&(message->UserName), "UserName"); ntlm_print_message_fields(&(message->Workstation), "Workstation"); ntlm_print_message_fields(&(message->LmChallengeResponse), "LmChallengeResponse"); ntlm_print_message_fields(&(message->NtChallengeResponse), "NtChallengeResponse"); 
ntlm_print_message_fields(&(message->EncryptedRandomSessionKey), "EncryptedRandomSessionKey"); ntlm_print_av_pair_list(context->NTLMv2Response.Challenge.AvPairs); if (flags & MSV_AV_FLAGS_MESSAGE_INTEGRITY_CHECK) { WLog_DBG(TAG, "MessageIntegrityCheck:"); winpr_HexDump(TAG, WLOG_DEBUG, message->MessageIntegrityCheck, 16); } #endif if (message->UserName.Len > 0) { credentials->identity.User = (UINT16*) malloc(message->UserName.Len); if (!credentials->identity.User) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(credentials->identity.User, message->UserName.Buffer, message->UserName.Len); credentials->identity.UserLength = message->UserName.Len / 2; } if (message->DomainName.Len > 0) { credentials->identity.Domain = (UINT16*) malloc(message->DomainName.Len); if (!credentials->identity.Domain) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(credentials->identity.Domain, message->DomainName.Buffer, message->DomainName.Len); credentials->identity.DomainLength = message->DomainName.Len / 2; } Stream_Free(s, FALSE); /* Computations beyond this point require the NTLM hash of the password */ context->state = NTLM_STATE_COMPLETION; return SEC_I_COMPLETE_NEEDED; } /** * Send NTLMSSP AUTHENTICATE_MESSAGE.\n * AUTHENTICATE_MESSAGE @msdn{cc236643} * @param NTLM context * @param buffer */ SECURITY_STATUS ntlm_write_AuthenticateMessage(NTLM_CONTEXT* context, PSecBuffer buffer) { wStream* s; size_t length; UINT32 PayloadBufferOffset; NTLM_AUTHENTICATE_MESSAGE* message; SSPI_CREDENTIALS* credentials = context->credentials; message = &context->AUTHENTICATE_MESSAGE; ZeroMemory(message, sizeof(NTLM_AUTHENTICATE_MESSAGE)); s = Stream_New((BYTE*) buffer->pvBuffer, buffer->cbBuffer); if (!s) return SEC_E_INTERNAL_ERROR; if (context->NTLMv2) { message->NegotiateFlags |= NTLMSSP_NEGOTIATE_56; if (context->SendVersionInfo) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_VERSION; } if (context->UseMIC) message->NegotiateFlags |= 
NTLMSSP_NEGOTIATE_TARGET_INFO; if (context->SendWorkstationName) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED; if (context->confidentiality) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SEAL; if (context->CHALLENGE_MESSAGE.NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) message->NegotiateFlags |= NTLMSSP_NEGOTIATE_KEY_EXCH; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_128; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_EXTENDED_SESSION_SECURITY; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_NTLM; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_SIGN; message->NegotiateFlags |= NTLMSSP_REQUEST_TARGET; message->NegotiateFlags |= NTLMSSP_NEGOTIATE_UNICODE; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_get_version_info(&(message->Version)); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED) { message->Workstation.Len = context->Workstation.Length; message->Workstation.Buffer = (BYTE*) context->Workstation.Buffer; } if (credentials->identity.DomainLength > 0) { message->NegotiateFlags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED; message->DomainName.Len = (UINT16) credentials->identity.DomainLength * 2; message->DomainName.Buffer = (BYTE*) credentials->identity.Domain; } message->UserName.Len = (UINT16) credentials->identity.UserLength * 2; message->UserName.Buffer = (BYTE*) credentials->identity.User; message->LmChallengeResponse.Len = (UINT16) context->LmChallengeResponse.cbBuffer; message->LmChallengeResponse.Buffer = (BYTE*) context->LmChallengeResponse.pvBuffer; message->NtChallengeResponse.Len = (UINT16) context->NtChallengeResponse.cbBuffer; message->NtChallengeResponse.Buffer = (BYTE*) context->NtChallengeResponse.pvBuffer; if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) { message->EncryptedRandomSessionKey.Len = 16; message->EncryptedRandomSessionKey.Buffer = context->EncryptedRandomSessionKey; } PayloadBufferOffset = 64; if (message->NegotiateFlags & 
NTLMSSP_NEGOTIATE_VERSION) PayloadBufferOffset += 8; /* Version (8 bytes) */ if (context->UseMIC) PayloadBufferOffset += 16; /* Message Integrity Check (16 bytes) */ message->DomainName.BufferOffset = PayloadBufferOffset; message->UserName.BufferOffset = message->DomainName.BufferOffset + message->DomainName.Len; message->Workstation.BufferOffset = message->UserName.BufferOffset + message->UserName.Len; message->LmChallengeResponse.BufferOffset = message->Workstation.BufferOffset + message->Workstation.Len; message->NtChallengeResponse.BufferOffset = message->LmChallengeResponse.BufferOffset + message->LmChallengeResponse.Len; message->EncryptedRandomSessionKey.BufferOffset = message->NtChallengeResponse.BufferOffset + message->NtChallengeResponse.Len; ntlm_populate_message_header((NTLM_MESSAGE_HEADER*) message, MESSAGE_TYPE_AUTHENTICATE); ntlm_write_message_header(s, (NTLM_MESSAGE_HEADER*) message); /* Message Header (12 bytes) */ ntlm_write_message_fields(s, & (message->LmChallengeResponse)); /* LmChallengeResponseFields (8 bytes) */ ntlm_write_message_fields(s, & (message->NtChallengeResponse)); /* NtChallengeResponseFields (8 bytes) */ ntlm_write_message_fields(s, &(message->DomainName)); /* DomainNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->UserName)); /* UserNameFields (8 bytes) */ ntlm_write_message_fields(s, &(message->Workstation)); /* WorkstationFields (8 bytes) */ ntlm_write_message_fields(s, & (message->EncryptedRandomSessionKey)); /* EncryptedRandomSessionKeyFields (8 bytes) */ Stream_Write_UINT32(s, message->NegotiateFlags); /* NegotiateFlags (4 bytes) */ if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_write_version_info(s, &(message->Version)); /* Version (8 bytes) */ if (context->UseMIC) { context->MessageIntegrityCheckOffset = (UINT32) Stream_GetPosition(s); Stream_Zero(s, 16); /* Message Integrity Check (16 bytes) */ } if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED) 
ntlm_write_message_fields_buffer(s, &(message->DomainName)); /* DomainName */ ntlm_write_message_fields_buffer(s, &(message->UserName)); /* UserName */ if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED) ntlm_write_message_fields_buffer(s, &(message->Workstation)); /* Workstation */ ntlm_write_message_fields_buffer(s, &(message->LmChallengeResponse)); /* LmChallengeResponse */ ntlm_write_message_fields_buffer(s, &(message->NtChallengeResponse)); /* NtChallengeResponse */ if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_KEY_EXCH) ntlm_write_message_fields_buffer(s, &(message->EncryptedRandomSessionKey)); /* EncryptedRandomSessionKey */ length = Stream_GetPosition(s); if (!sspi_SecBufferAlloc(&context->AuthenticateMessage, length)) { Stream_Free(s, FALSE); return SEC_E_INTERNAL_ERROR; } CopyMemory(context->AuthenticateMessage.pvBuffer, Stream_Buffer(s), length); buffer->cbBuffer = length; if (context->UseMIC) { /* Message Integrity Check */ ntlm_compute_message_integrity_check(context, message->MessageIntegrityCheck, 16); Stream_SetPosition(s, context->MessageIntegrityCheckOffset); Stream_Write(s, message->MessageIntegrityCheck, 16); Stream_SetPosition(s, length); } #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "AUTHENTICATE_MESSAGE (length = %d)", length); winpr_HexDump(TAG, WLOG_DEBUG, Stream_Buffer(s), length); ntlm_print_negotiate_flags(message->NegotiateFlags); if (message->NegotiateFlags & NTLMSSP_NEGOTIATE_VERSION) ntlm_print_version_info(&(message->Version)); if (context->AuthenticateTargetInfo.cbBuffer > 0) { WLog_DBG(TAG, "AuthenticateTargetInfo (%"PRIu32"):", context->AuthenticateTargetInfo.cbBuffer); ntlm_print_av_pair_list(context->AuthenticateTargetInfo.pvBuffer); } ntlm_print_message_fields(&(message->DomainName), "DomainName"); ntlm_print_message_fields(&(message->UserName), "UserName"); ntlm_print_message_fields(&(message->Workstation), "Workstation"); ntlm_print_message_fields(&(message->LmChallengeResponse), "LmChallengeResponse"); 
ntlm_print_message_fields(&(message->NtChallengeResponse), "NtChallengeResponse"); ntlm_print_message_fields(&(message->EncryptedRandomSessionKey), "EncryptedRandomSessionKey"); if (context->UseMIC) { WLog_DBG(TAG, "MessageIntegrityCheck (length = 16)"); winpr_HexDump(TAG, WLOG_DEBUG, message->MessageIntegrityCheck, 16); } #endif context->state = NTLM_STATE_FINAL; Stream_Free(s, FALSE); return SEC_I_COMPLETE_NEEDED; } SECURITY_STATUS ntlm_server_AuthenticateComplete(NTLM_CONTEXT* context) { UINT32 flags = 0; NTLM_AV_PAIR* AvFlags = NULL; NTLM_AUTHENTICATE_MESSAGE* message; BYTE messageIntegrityCheck[16]; if (context->state != NTLM_STATE_COMPLETION) return SEC_E_OUT_OF_SEQUENCE; message = &context->AUTHENTICATE_MESSAGE; AvFlags = ntlm_av_pair_get(context->NTLMv2Response.Challenge.AvPairs, MsvAvFlags); if (AvFlags) Data_Read_UINT32(ntlm_av_pair_get_value_pointer(AvFlags), flags); if (ntlm_compute_lm_v2_response(context) < 0) /* LmChallengeResponse */ return SEC_E_INTERNAL_ERROR; if (ntlm_compute_ntlm_v2_response(context) < 0) /* NtChallengeResponse */ return SEC_E_INTERNAL_ERROR; /* KeyExchangeKey */ ntlm_generate_key_exchange_key(context); /* EncryptedRandomSessionKey */ ntlm_decrypt_random_session_key(context); /* ExportedSessionKey */ ntlm_generate_exported_session_key(context); if (flags & MSV_AV_FLAGS_MESSAGE_INTEGRITY_CHECK) { ZeroMemory(&((PBYTE) context->AuthenticateMessage.pvBuffer)[context->MessageIntegrityCheckOffset], 16); ntlm_compute_message_integrity_check(context, messageIntegrityCheck, sizeof(messageIntegrityCheck)); CopyMemory(&((PBYTE) context->AuthenticateMessage.pvBuffer)[context->MessageIntegrityCheckOffset], message->MessageIntegrityCheck, 16); if (memcmp(messageIntegrityCheck, message->MessageIntegrityCheck, 16) != 0) { WLog_ERR(TAG, "Message Integrity Check (MIC) verification failed!"); WLog_ERR(TAG, "Expected MIC:"); winpr_HexDump(TAG, WLOG_ERROR, messageIntegrityCheck, 16); WLog_ERR(TAG, "Actual MIC:"); winpr_HexDump(TAG, WLOG_ERROR, 
message->MessageIntegrityCheck, 16); return SEC_E_MESSAGE_ALTERED; } } /* Generate signing keys */ ntlm_generate_client_signing_key(context); ntlm_generate_server_signing_key(context); /* Generate sealing keys */ ntlm_generate_client_sealing_key(context); ntlm_generate_server_sealing_key(context); /* Initialize RC4 seal state */ ntlm_init_rc4_seal_states(context); #ifdef WITH_DEBUG_NTLM WLog_DBG(TAG, "ClientChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientChallenge, 8); WLog_DBG(TAG, "ServerChallenge"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerChallenge, 8); WLog_DBG(TAG, "SessionBaseKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->SessionBaseKey, 16); WLog_DBG(TAG, "KeyExchangeKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->KeyExchangeKey, 16); WLog_DBG(TAG, "ExportedSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ExportedSessionKey, 16); WLog_DBG(TAG, "RandomSessionKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->RandomSessionKey, 16); WLog_DBG(TAG, "ClientSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSigningKey, 16); WLog_DBG(TAG, "ClientSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ClientSealingKey, 16); WLog_DBG(TAG, "ServerSigningKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSigningKey, 16); WLog_DBG(TAG, "ServerSealingKey"); winpr_HexDump(TAG, WLOG_DEBUG, context->ServerSealingKey, 16); WLog_DBG(TAG, "Timestamp"); winpr_HexDump(TAG, WLOG_DEBUG, context->Timestamp, 8); #endif context->state = NTLM_STATE_FINAL; ntlm_free_message_fields_buffer(&(message->DomainName)); ntlm_free_message_fields_buffer(&(message->UserName)); ntlm_free_message_fields_buffer(&(message->Workstation)); ntlm_free_message_fields_buffer(&(message->LmChallengeResponse)); ntlm_free_message_fields_buffer(&(message->NtChallengeResponse)); ntlm_free_message_fields_buffer(&(message->EncryptedRandomSessionKey)); return SEC_E_OK; }
void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name) { WLog_DBG(TAG, "%s (Len: %"PRIu16" MaxLen: %"PRIu16" BufferOffset: %"PRIu32")", name, fields->Len, fields->MaxLen, fields->BufferOffset); if (fields->Len > 0) winpr_HexDump(TAG, WLOG_DEBUG, fields->Buffer, fields->Len); }
static void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name) { WLog_DBG(TAG, "%s (Len: %"PRIu16" MaxLen: %"PRIu16" BufferOffset: %"PRIu32")", name, fields->Len, fields->MaxLen, fields->BufferOffset); if (fields->Len > 0) winpr_HexDump(TAG, WLOG_DEBUG, fields->Buffer, fields->Len); }
{'added': [(77, 'static void ntlm_print_negotiate_flags(UINT32 flags)'), (93, 'static int ntlm_read_message_header(wStream* s, NTLM_MESSAGE_HEADER* header)'), (107, 'static void ntlm_write_message_header(wStream* s, NTLM_MESSAGE_HEADER* header)'), (113, 'static void ntlm_populate_message_header(NTLM_MESSAGE_HEADER* header, UINT32 MessageType)'), (119, 'static int ntlm_read_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (130, 'static void ntlm_write_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (140, 'static int ntlm_read_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (144, '\t\tconst UINT64 offset = (UINT64)fields->BufferOffset + (UINT64)fields->Len;'), (145, ''), (146, '\t\tif (offset > Stream_Length(s))'), (161, 'static void ntlm_write_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (170, 'static void ntlm_free_message_fields_buffer(NTLM_MESSAGE_FIELDS* fields)'), (185, 'static void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name)')], 'deleted': [(77, 'void ntlm_print_negotiate_flags(UINT32 flags)'), (93, 'int ntlm_read_message_header(wStream* s, NTLM_MESSAGE_HEADER* header)'), (107, 'void ntlm_write_message_header(wStream* s, NTLM_MESSAGE_HEADER* header)'), (113, 'void ntlm_populate_message_header(NTLM_MESSAGE_HEADER* header, UINT32 MessageType)'), (119, 'int ntlm_read_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (130, 'void ntlm_write_message_fields(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (140, 'int ntlm_read_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (144, '\t\tif ((fields->BufferOffset + fields->Len) > Stream_Length(s))'), (159, 'void ntlm_write_message_fields_buffer(wStream* s, NTLM_MESSAGE_FIELDS* fields)'), (168, 'void ntlm_free_message_fields_buffer(NTLM_MESSAGE_FIELDS* fields)'), (183, 'void ntlm_print_message_fields(NTLM_MESSAGE_FIELDS* fields, const char* name)')]}
13
11
918
6,106
7
63
2
https://github.com/FreeRDP/FreeRDP
CVE-2018-8789
CWE-125
511
util.c
C
extract_sockaddr
/* * Copyright 2011-2013 Con Kolivas * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <stdarg.h> #include <string.h> #include <jansson.h> #ifdef HAVE_LIBCURL #include <curl/curl.h> #endif #include <time.h> #include <errno.h> #include <unistd.h> #include <sys/types.h> #ifndef WIN32 #include <fcntl.h> # ifdef __linux # include <sys/prctl.h> # endif # include <sys/socket.h> # include <netinet/in.h> # include <netinet/tcp.h> # include <netdb.h> #else # include <winsock2.h> # include <ws2tcpip.h> # include <mmsystem.h> #endif #include "miner.h" #include "elist.h" #include "compat.h" #include "util.h" #define DEFAULT_SOCKWAIT 60 bool successful_connect = false; static void keep_sockalive(SOCKETTYPE fd) { const int tcp_one = 1; #ifndef WIN32 const int tcp_keepidle = 45; const int tcp_keepintvl = 30; int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); if (!opt_delaynet) #ifndef __linux setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); #else /* __linux */ setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __linux */ #ifdef __APPLE_CC__ setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __APPLE_CC__ */ } struct tq_ent { void *data; 
struct list_head q_node; }; #ifdef HAVE_LIBCURL struct timeval nettime; struct data_buffer { void *buf; size_t len; }; struct upload_buffer { const void *buf; size_t len; }; struct header_info { char *lp_path; int rolltime; char *reason; char *stratum_url; bool hadrolltime; bool canroll; bool hadexpire; }; static void databuf_free(struct data_buffer *db) { if (!db) return; free(db->buf); memset(db, 0, sizeof(*db)); } static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb, void *user_data) { struct data_buffer *db = user_data; size_t len = size * nmemb; size_t oldlen, newlen; void *newmem; static const unsigned char zero = 0; oldlen = db->len; newlen = oldlen + len; newmem = realloc(db->buf, newlen + 1); if (!newmem) return 0; db->buf = newmem; db->len = newlen; memcpy(db->buf + oldlen, ptr, len); memcpy(db->buf + newlen, &zero, 1); /* null terminate */ return len; } static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct upload_buffer *ub = user_data; unsigned int len = size * nmemb; if (len > ub->len) len = ub->len; if (len) { memcpy(ptr, ub->buf, len); ub->buf += len; ub->len -= len; } return len; } static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct header_info *hi = user_data; size_t remlen, slen, ptrlen = size * nmemb; char *rem, *val = NULL, *key = NULL; void *tmp; val = calloc(1, ptrlen); key = calloc(1, ptrlen); if (!key || !val) goto out; tmp = memchr(ptr, ':', ptrlen); if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */ goto out; slen = tmp - ptr; if ((slen + 1) == ptrlen) /* skip key w/ no value */ goto out; memcpy(key, ptr, slen); /* store & nul term key */ key[slen] = 0; rem = ptr + slen + 1; /* trim value's leading whitespace */ remlen = ptrlen - slen - 1; while ((remlen > 0) && (isspace(*rem))) { remlen--; rem++; } memcpy(val, rem, remlen); /* store value, trim trailing ws */ val[remlen] = 0; while ((*val) && (isspace(val[strlen(val) - 1]))) val[strlen(val) 
- 1] = 0; if (!*val) /* skip blank value */ goto out; if (opt_protocol) applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val); if (!strcasecmp("X-Roll-Ntime", key)) { hi->hadrolltime = true; if (!strncasecmp("N", val, 1)) applog(LOG_DEBUG, "X-Roll-Ntime: N found"); else { hi->canroll = true; /* Check to see if expire= is supported and if not, set * the rolltime to the default scantime */ if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) { sscanf(val + 7, "%d", &hi->rolltime); hi->hadexpire = true; } else hi->rolltime = opt_scantime; applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); } } if (!strcasecmp("X-Long-Polling", key)) { hi->lp_path = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Reject-Reason", key)) { hi->reason = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Stratum", key)) { hi->stratum_url = val; val = NULL; } out: free(key); free(val); return ptrlen; } static void last_nettime(struct timeval *last) { rd_lock(&netacc_lock); last->tv_sec = nettime.tv_sec; last->tv_usec = nettime.tv_usec; rd_unlock(&netacc_lock); } static void set_nettime(void) { wr_lock(&netacc_lock); cgtime(&nettime); wr_unlock(&netacc_lock); } #if CURL_HAS_KEEPALIVE static void keep_curlalive(CURL *curl) { const int tcp_keepidle = 45; const int tcp_keepintvl = 30; const long int keepalive = 1; curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, keepalive); curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, tcp_keepidle); curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, tcp_keepintvl); } #else static void keep_curlalive(CURL *curl) { SOCKETTYPE sock; curl_easy_getinfo(curl, CURLINFO_LASTSOCKET, (long *)&sock); keep_sockalive(sock); } #endif static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type, __maybe_unused char *data, size_t size, void *userdata) { struct pool *pool = (struct pool *)userdata; switch(type) { case CURLINFO_HEADER_IN: case CURLINFO_DATA_IN: case CURLINFO_SSL_DATA_IN: pool->cgminer_pool_stats.net_bytes_received 
+= size; break; case CURLINFO_HEADER_OUT: case CURLINFO_DATA_OUT: case CURLINFO_SSL_DATA_OUT: pool->cgminer_pool_stats.net_bytes_sent += size; break; case CURLINFO_TEXT: default: break; } return 0; } json_t *json_web_config(const char *url) { struct data_buffer all_data = {NULL, 0}; char curl_err_str[CURL_ERROR_SIZE]; long timeout = 60; json_error_t err; json_t *val; CURL *curl; int rc; memset(&err, 0, sizeof(err)); curl = curl_easy_init(); if (unlikely(!curl)) quithere(1, "CURL initialisation failed"); curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); val = NULL; rc = curl_easy_perform(curl); curl_easy_cleanup(curl); if (rc) { applog(LOG_ERR, "HTTP config request of '%s' failed: %s", url, curl_err_str); goto c_out; } if (!all_data.buf) { applog(LOG_ERR, "Empty config data received from '%s'", url); goto c_out; } val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_ERR, "JSON config decode of '%s' failed(%d): %s", url, err.line, err.text); } databuf_free(&all_data); c_out: return val; } json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, const char *rpc_req, bool probe, bool longpoll, int *rolltime, struct pool *pool, bool share) { long timeout = longpoll ? 
(60 * 60) : 60; struct data_buffer all_data = {NULL, 0}; struct header_info hi = {NULL, 0, NULL, NULL, false, false, false}; char len_hdr[64], user_agent_hdr[128]; char curl_err_str[CURL_ERROR_SIZE]; struct curl_slist *headers = NULL; struct upload_buffer upload_data; json_t *val, *err_val, *res_val; bool probing = false; double byte_count; json_error_t err; int rc; memset(&err, 0, sizeof(err)); /* it is assumed that 'curl' is freshly [re]initialized at this pt */ if (probe) probing = !pool->probed; curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); // CURLOPT_VERBOSE won't write to stderr if we use CURLOPT_DEBUGFUNCTION curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); /* Shares are staggered already and delays in submission can be costly * so do not delay them */ if (!opt_delaynet || share) curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb); curl_easy_setopt(curl, CURLOPT_READDATA, &upload_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb); curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hi); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); if (pool->rpc_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, pool->rpc_proxytype); } else if (opt_socks_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); } if (userpass) { curl_easy_setopt(curl, 
CURLOPT_USERPWD, userpass); curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); } if (longpoll) keep_curlalive(curl); curl_easy_setopt(curl, CURLOPT_POST, 1); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req); upload_data.buf = rpc_req; upload_data.len = strlen(rpc_req); sprintf(len_hdr, "Content-Length: %lu", (unsigned long) upload_data.len); sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE_STRING); headers = curl_slist_append(headers, "Content-type: application/json"); headers = curl_slist_append(headers, "X-Mining-Extensions: longpoll midstate rollntime submitold"); if (likely(global_hashrate)) { char ghashrate[255]; sprintf(ghashrate, "X-Mining-Hashrate: %llu", global_hashrate); headers = curl_slist_append(headers, ghashrate); } headers = curl_slist_append(headers, len_hdr); headers = curl_slist_append(headers, user_agent_hdr); headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); if (opt_delaynet) { /* Don't delay share submission, but still track the nettime */ if (!share) { long long now_msecs, last_msecs; struct timeval now, last; cgtime(&now); last_nettime(&last); now_msecs = (long long)now.tv_sec * 1000; now_msecs += now.tv_usec / 1000; last_msecs = (long long)last.tv_sec * 1000; last_msecs += last.tv_usec / 1000; if (now_msecs > last_msecs && now_msecs - last_msecs < 250) { struct timespec rgtp; rgtp.tv_sec = 0; rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000; nanosleep(&rgtp, NULL); } } set_nettime(); } rc = curl_easy_perform(curl); if (rc) { applog(LOG_INFO, "HTTP request failed: %s", curl_err_str); goto err_out; } if (!all_data.buf) { applog(LOG_DEBUG, "Empty data received in json_rpc_call."); goto err_out; } pool->cgminer_pool_stats.times_sent++; if (curl_easy_getinfo(curl, CURLINFO_SIZE_UPLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_sent += byte_count; pool->cgminer_pool_stats.times_received++; if 
(curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_received += byte_count; if (probing) { pool->probed = true; /* If X-Long-Polling was found, activate long polling */ if (hi.lp_path) { if (pool->hdr_path != NULL) free(pool->hdr_path); pool->hdr_path = hi.lp_path; } else pool->hdr_path = NULL; if (hi.stratum_url) { pool->stratum_url = hi.stratum_url; hi.stratum_url = NULL; } } else { if (hi.lp_path) { free(hi.lp_path); hi.lp_path = NULL; } if (hi.stratum_url) { free(hi.stratum_url); hi.stratum_url = NULL; } } *rolltime = hi.rolltime; pool->cgminer_pool_stats.rolltime = hi.rolltime; pool->cgminer_pool_stats.hadrolltime = hi.hadrolltime; pool->cgminer_pool_stats.canroll = hi.canroll; pool->cgminer_pool_stats.hadexpire = hi.hadexpire; val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol response:\n%s", (char *)(all_data.buf)); goto err_out; } if (opt_protocol) { char *s = json_dumps(val, JSON_INDENT(3)); applog(LOG_DEBUG, "JSON protocol response:\n%s", s); free(s); } /* JSON-RPC valid response returns a non-null 'result', * and a null 'error'. 
*/ res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val ||(err_val && !json_is_null(err_val))) { char *s; if (err_val) s = json_dumps(err_val, JSON_INDENT(3)); else s = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC call failed: %s", s); free(s); goto err_out; } if (hi.reason) { json_object_set_new(val, "reject-reason", json_string(hi.reason)); free(hi.reason); hi.reason = NULL; } successful_connect = true; databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); return val; err_out: databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); if (!successful_connect) applog(LOG_DEBUG, "Failed to connect in json_rpc_call"); curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); return NULL; } #define PROXY_HTTP CURLPROXY_HTTP #define PROXY_HTTP_1_0 CURLPROXY_HTTP_1_0 #define PROXY_SOCKS4 CURLPROXY_SOCKS4 #define PROXY_SOCKS5 CURLPROXY_SOCKS5 #define PROXY_SOCKS4A CURLPROXY_SOCKS4A #define PROXY_SOCKS5H CURLPROXY_SOCKS5_HOSTNAME #else /* HAVE_LIBCURL */ #define PROXY_HTTP 0 #define PROXY_HTTP_1_0 1 #define PROXY_SOCKS4 2 #define PROXY_SOCKS5 3 #define PROXY_SOCKS4A 4 #define PROXY_SOCKS5H 5 #endif /* HAVE_LIBCURL */ static struct { const char *name; proxytypes_t proxytype; } proxynames[] = { { "http:", PROXY_HTTP }, { "http0:", PROXY_HTTP_1_0 }, { "socks4:", PROXY_SOCKS4 }, { "socks5:", PROXY_SOCKS5 }, { "socks4a:", PROXY_SOCKS4A }, { "socks5h:", PROXY_SOCKS5H }, { NULL, 0 } }; const char *proxytype(proxytypes_t proxytype) { int i; for (i = 0; proxynames[i].name; i++) if (proxynames[i].proxytype == proxytype) return proxynames[i].name; return "invalid"; } char *get_proxy(char *url, struct pool *pool) { pool->rpc_proxy = NULL; char *split; int plen, len, i; for (i = 0; proxynames[i].name; i++) { plen = strlen(proxynames[i].name); if (strncmp(url, proxynames[i].name, plen) == 0) { if (!(split = strchr(url, '|'))) return url; *split = '\0'; len = split - url; pool->rpc_proxy = malloc(1 
+ len - plen); if (!(pool->rpc_proxy)) quithere(1, "Failed to malloc rpc_proxy"); strcpy(pool->rpc_proxy, url + plen); extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); pool->rpc_proxytype = proxynames[i].proxytype; url = split + 1; break; } } return url; } /* Adequate size s==len*2 + 1 must be alloced to use this variant */ void __bin2hex(char *s, const unsigned char *p, size_t len) { int i; static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; for (i = 0; i < (int)len; i++) { *s++ = hex[p[i] >> 4]; *s++ = hex[p[i] & 0xF]; } *s++ = '\0'; } /* Returns a malloced array string of a binary value of arbitrary length. The * array is rounded up to a 4 byte size to appease architectures that need * aligned array sizes */ char *bin2hex(const unsigned char *p, size_t len) { ssize_t slen; char *s; slen = len * 2 + 1; if (slen % 4) slen += 4 - (slen % 4); s = calloc(slen, 1); if (unlikely(!s)) quithere(1, "Failed to calloc"); __bin2hex(s, p, len); return s; } static const int hex2bin_tbl[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, }; /* Does the reverse of bin2hex but does not allocate any ram */ bool hex2bin(unsigned char *p, const char *hexstr, size_t len) { int nibble1, nibble2; unsigned char idx; bool ret = false; while (*hexstr && len) { if (unlikely(!hexstr[1])) { applog(LOG_ERR, "hex2bin str truncated"); return ret; } idx = *hexstr++; nibble1 = hex2bin_tbl[idx]; idx = *hexstr++; nibble2 = hex2bin_tbl[idx]; if (unlikely((nibble1 < 0) || (nibble2 < 0))) { applog(LOG_ERR, "hex2bin scan failed"); return ret; } *p++ = (((unsigned char)nibble1) << 4) | ((unsigned char)nibble2); --len; } if (likely(len == 0 && *hexstr == 0)) ret = true; return ret; } static const int b58tobin_tbl[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 }; /* b58bin should always be at least 25 bytes long and already checked to be * valid. 
*/ void b58tobin(unsigned char *b58bin, const char *b58) { uint32_t c, bin32[7]; int len, i, j; uint64_t t; memset(bin32, 0, 7 * sizeof(uint32_t)); len = strlen(b58); for (i = 0; i < len; i++) { c = b58[i]; c = b58tobin_tbl[c]; for (j = 6; j >= 0; j--) { t = ((uint64_t)bin32[j]) * 58 + c; c = (t & 0x3f00000000ull) >> 32; bin32[j] = t & 0xffffffffull; } } *(b58bin++) = bin32[0] & 0xff; for (i = 1; i < 7; i++) { *((uint32_t *)b58bin) = htobe32(bin32[i]); b58bin += sizeof(uint32_t); } } void address_to_pubkeyhash(unsigned char *pkh, const char *addr) { unsigned char b58bin[25]; memset(b58bin, 0, 25); b58tobin(b58bin, addr); pkh[0] = 0x76; pkh[1] = 0xa9; pkh[2] = 0x14; memcpy(&pkh[3], &b58bin[1], 20); pkh[23] = 0x88; pkh[24] = 0xac; } /* For encoding nHeight into coinbase, return how many bytes were used */ int ser_number(unsigned char *s, int32_t val) { int32_t *i32 = (int32_t *)&s[1]; int len; if (val < 128) len = 1; else if (val < 16512) len = 2; else if (val < 2113664) len = 3; else len = 4; *i32 = htole32(val); s[0] = len++; return len; } /* For encoding variable length strings */ unsigned char *ser_string(char *s, int *slen) { size_t len = strlen(s); unsigned char *ret; ret = malloc(1 + len + 8); // Leave room for largest size if (unlikely(!ret)) quit(1, "Failed to malloc ret in ser_string"); if (len < 253) { ret[0] = len; memcpy(ret + 1, s, len); *slen = len + 1; } else if (len < 0x10000) { uint16_t *u16 = (uint16_t *)&ret[1]; ret[0] = 253; *u16 = htobe16(len); memcpy(ret + 3, s, len); *slen = len + 3; } else { /* size_t is only 32 bit on many platforms anyway */ uint32_t *u32 = (uint32_t *)&ret[1]; ret[0] = 254; *u32 = htobe32(len); memcpy(ret + 5, s, len); *slen = len + 5; } return ret; } bool fulltest(const unsigned char *hash, const unsigned char *target) { uint32_t *hash32 = (uint32_t *)hash; uint32_t *target32 = (uint32_t *)target; bool rc = true; int i; for (i = 28 / 4; i >= 0; i--) { uint32_t h32tmp = le32toh(hash32[i]); uint32_t t32tmp = 
le32toh(target32[i]); if (h32tmp > t32tmp) { rc = false; break; } if (h32tmp < t32tmp) { rc = true; break; } } if (opt_debug) { unsigned char hash_swap[32], target_swap[32]; char *hash_str, *target_str; swab256(hash_swap, hash); swab256(target_swap, target); hash_str = bin2hex(hash_swap, 32); target_str = bin2hex(target_swap, 32); applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s", hash_str, target_str, rc ? "YES (hash <= target)" : "no (false positive; hash > target)"); free(hash_str); free(target_str); } return rc; } struct thread_q *tq_new(void) { struct thread_q *tq; tq = calloc(1, sizeof(*tq)); if (!tq) return NULL; INIT_LIST_HEAD(&tq->q); pthread_mutex_init(&tq->mutex, NULL); pthread_cond_init(&tq->cond, NULL); return tq; } void tq_free(struct thread_q *tq) { struct tq_ent *ent, *iter; if (!tq) return; list_for_each_entry_safe(ent, iter, &tq->q, q_node) { list_del(&ent->q_node); free(ent); } pthread_cond_destroy(&tq->cond); pthread_mutex_destroy(&tq->mutex); memset(tq, 0, sizeof(*tq)); /* poison */ free(tq); } static void tq_freezethaw(struct thread_q *tq, bool frozen) { mutex_lock(&tq->mutex); tq->frozen = frozen; pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); } void tq_freeze(struct thread_q *tq) { tq_freezethaw(tq, true); } void tq_thaw(struct thread_q *tq) { tq_freezethaw(tq, false); } bool tq_push(struct thread_q *tq, void *data) { struct tq_ent *ent; bool rc = true; ent = calloc(1, sizeof(*ent)); if (!ent) return false; ent->data = data; INIT_LIST_HEAD(&ent->q_node); mutex_lock(&tq->mutex); if (!tq->frozen) { list_add_tail(&ent->q_node, &tq->q); } else { free(ent); rc = false; } pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); return rc; } void *tq_pop(struct thread_q *tq, const struct timespec *abstime) { struct tq_ent *ent; void *rval = NULL; int rc; mutex_lock(&tq->mutex); if (!list_empty(&tq->q)) goto pop; if (abstime) rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime); else rc = pthread_cond_wait(&tq->cond, 
&tq->mutex);
	if (rc)
		goto out;
	if (list_empty(&tq->q))
		goto out;
pop:
	/* Detach the head entry, free the list node, hand back its payload.
	 * NOTE(review): the head of this function lies above this chunk. */
	ent = list_entry(tq->q.next, struct tq_ent, q_node);
	rval = ent->data;
	list_del(&ent->q_node);
	free(ent);
out:
	mutex_unlock(&tq->mutex);
	return rval;
}

/* Initialise the thread's completion semaphore then spawn the thread */
int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
{
	cgsem_init(&thr->sem);
	return pthread_create(&thr->pth, attr, start, arg);
}

/* Cancel a running thread (if any) and destroy its semaphore */
void thr_info_cancel(struct thr_info *thr)
{
	if (!thr)
		return;
	if (PTH(thr) != 0L) {
		pthread_cancel(thr->pth);
		PTH(thr) = 0L;
	}
	cgsem_destroy(&thr->sem);
}

/* b = a - b */
void subtime(struct timeval *a, struct timeval *b)
{
	timersub(a, b, b);
}

/* b = a + b */
void addtime(struct timeval *a, struct timeval *b)
{
	timeradd(a, b, b);
}

/* True when a is later than b */
bool time_more(struct timeval *a, struct timeval *b)
{
	return timercmp(a, b, >);
}

/* True when a is earlier than b */
bool time_less(struct timeval *a, struct timeval *b)
{
	return timercmp(a, b, <);
}

void copy_time(struct timeval *dest, const struct timeval *src)
{
	memcpy(dest, src, sizeof(struct timeval));
}

/* timespec -> timeval, truncating to microsecond resolution */
void timespec_to_val(struct timeval *val, const struct timespec *spec)
{
	val->tv_sec = spec->tv_sec;
	val->tv_usec = spec->tv_nsec / 1000;
}

/* timeval -> timespec */
void timeval_to_spec(struct timespec *spec, const struct timeval *val)
{
	spec->tv_sec = val->tv_sec;
	spec->tv_nsec = val->tv_usec * 1000;
}

/* Microseconds -> timeval */
void us_to_timeval(struct timeval *val, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	val->tv_sec = tvdiv.quot;
	val->tv_usec = tvdiv.rem;
}

/* Microseconds -> timespec */
void us_to_timespec(struct timespec *spec, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000;
}

/* Milliseconds -> timespec */
void ms_to_timespec(struct timespec *spec, int64_t ms)
{
	lldiv_t tvdiv = lldiv(ms, 1000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000000;
}

/* Milliseconds -> timeval */
void ms_to_timeval(struct timeval *val, int64_t ms)
{
	lldiv_t tvdiv = lldiv(ms, 1000);

	val->tv_sec = tvdiv.quot;
	val->tv_usec = tvdiv.rem * 1000;
}

/* a += b, normalising tv_nsec into [0, 1e9) */
void timeraddspec(struct timespec *a, const struct timespec *b)
{
	a->tv_sec += b->tv_sec;
	a->tv_nsec += b->tv_nsec;
	if (a->tv_nsec >= 1000000000) {
		a->tv_nsec -= 1000000000;
		a->tv_sec++;
	}
}

/* timespec -> whole milliseconds (truncating) */
static int __maybe_unused timespec_to_ms(struct timespec *ts)
{
	return ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
}

/* Subtract b from a */
static void __maybe_unused timersubspec(struct timespec *a, const struct timespec *b)
{
	a->tv_sec -= b->tv_sec;
	a->tv_nsec -= b->tv_nsec;
	if (a->tv_nsec < 0) {
		a->tv_nsec += 1000000000;
		a->tv_sec--;
	}
}

/* These are cgminer specific sleep functions that use an absolute nanosecond
 * resolution timer to avoid poor usleep accuracy and overruns. */
#ifdef WIN32
/* Windows start time is since 1601 LOL so convert it to unix epoch 1970. */
#define EPOCHFILETIME (116444736000000000LL)

/* Return the system time as an lldiv_t in decimicroseconds. */
static void decius_time(lldiv_t *lidiv)
{
	FILETIME ft;
	LARGE_INTEGER li;

	GetSystemTimeAsFileTime(&ft);
	li.LowPart = ft.dwLowDateTime;
	li.HighPart = ft.dwHighDateTime;
	li.QuadPart -= EPOCHFILETIME;

	/* SystemTime is in decimicroseconds so divide by an unusual number */
	*lidiv = lldiv(li.QuadPart, 10000000);
}

/* This is a cgminer gettimeofday wrapper. Since we always call gettimeofday
 * with tz set to NULL, and windows' default resolution is only 15ms, this
 * gives us higher resolution times on windows. */
void cgtime(struct timeval *tv)
{
	lldiv_t lidiv;

	decius_time(&lidiv);
	tv->tv_sec = lidiv.quot;
	tv->tv_usec = lidiv.rem / 10;
}
#else /* WIN32 */
void cgtime(struct timeval *tv)
{
	gettimeofday(tv, NULL);
}

int cgtimer_to_ms(cgtimer_t *cgt)
{
	return timespec_to_ms(cgt);
}

/* Subtracts b from a and stores it in res.
*/ void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) { res->tv_sec = a->tv_sec - b->tv_sec; res->tv_nsec = a->tv_nsec - b->tv_nsec; if (res->tv_nsec < 0) { res->tv_nsec += 1000000000; res->tv_sec--; } } #endif /* WIN32 */ #ifdef CLOCK_MONOTONIC /* Essentially just linux */ void cgtimer_time(cgtimer_t *ts_start) { clock_gettime(CLOCK_MONOTONIC, ts_start); } static void nanosleep_abstime(struct timespec *ts_end) { int ret; do { ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL); } while (ret == EINTR); } /* Reentrant version of cgsleep functions allow start time to be set separately * from the beginning of the actual sleep, allowing scheduling delays to be * counted in the sleep. */ void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { struct timespec ts_end; ms_to_timespec(&ts_end, ms); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { struct timespec ts_end; us_to_timespec(&ts_end, us); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } #else /* CLOCK_MONOTONIC */ #ifdef __MACH__ #include <mach/clock.h> #include <mach/mach.h> void cgtimer_time(cgtimer_t *ts_start) { clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts_start->tv_sec = mts.tv_sec; ts_start->tv_nsec = mts.tv_nsec; } #elif !defined(WIN32) /* __MACH__ - Everything not linux/macosx/win32 */ void cgtimer_time(cgtimer_t *ts_start) { struct timeval tv; cgtime(&tv); ts_start->tv_sec = tv->tv_sec; ts_start->tv_nsec = tv->tv_usec * 1000; } #endif /* __MACH__ */ #ifdef WIN32 /* For windows we use the SystemTime stored as a LARGE_INTEGER as the cgtimer_t * typedef, allowing us to have sub-microsecond resolution for times, do simple * arithmetic for timer calculations, and use windows' own hTimers to get * accurate absolute timeouts. 
*/ int cgtimer_to_ms(cgtimer_t *cgt) { return (int)(cgt->QuadPart / 10000LL); } /* Subtracts b from a and stores it in res. */ void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) { res->QuadPart = a->QuadPart - b->QuadPart; } /* Note that cgtimer time is NOT offset by the unix epoch since we use absolute * timeouts with hTimers. */ void cgtimer_time(cgtimer_t *ts_start) { FILETIME ft; GetSystemTimeAsFileTime(&ft); ts_start->LowPart = ft.dwLowDateTime; ts_start->HighPart = ft.dwHighDateTime; } static void liSleep(LARGE_INTEGER *li, int timeout) { HANDLE hTimer; DWORD ret; if (unlikely(timeout <= 0)) return; hTimer = CreateWaitableTimer(NULL, TRUE, NULL); if (unlikely(!hTimer)) quit(1, "Failed to create hTimer in liSleep"); ret = SetWaitableTimer(hTimer, li, 0, NULL, NULL, 0); if (unlikely(!ret)) quit(1, "Failed to SetWaitableTimer in liSleep"); /* We still use a timeout as a sanity check in case the system time * is changed while we're running */ ret = WaitForSingleObject(hTimer, timeout); if (unlikely(ret != WAIT_OBJECT_0 && ret != WAIT_TIMEOUT)) quit(1, "Failed to WaitForSingleObject in liSleep"); CloseHandle(hTimer); } void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { LARGE_INTEGER li; li.QuadPart = ts_start->QuadPart + (int64_t)ms * 10000LL; liSleep(&li, ms); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { LARGE_INTEGER li; int ms; li.QuadPart = ts_start->QuadPart + us * 10LL; ms = us / 1000; if (!ms) ms = 1; liSleep(&li, ms); } #else /* WIN32 */ static void cgsleep_spec(struct timespec *ts_diff, const struct timespec *ts_start) { struct timespec now; timeraddspec(ts_diff, ts_start); cgtimer_time(&now); timersubspec(ts_diff, &now); if (unlikely(ts_diff->tv_sec < 0)) return; nanosleep(ts_diff, NULL); } void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { struct timespec ts_diff; ms_to_timespec(&ts_diff, ms); cgsleep_spec(&ts_diff, ts_start); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { struct timespec ts_diff; us_to_timespec(&ts_diff, us); 
cgsleep_spec(&ts_diff, ts_start); } #endif /* WIN32 */ #endif /* CLOCK_MONOTONIC */ void cgsleep_ms(int ms) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_ms_r(&ts_start, ms); } void cgsleep_us(int64_t us) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_us_r(&ts_start, us); } /* Returns the microseconds difference between end and start times as a double */ double us_tdiff(struct timeval *end, struct timeval *start) { /* Sanity check. We should only be using this for small differences so * limit the max to 60 seconds. */ if (unlikely(end->tv_sec - start->tv_sec > 60)) return 60000000; return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec); } /* Returns the milliseconds difference between end and start times */ int ms_tdiff(struct timeval *end, struct timeval *start) { /* Like us_tdiff, limit to 1 hour. */ if (unlikely(end->tv_sec - start->tv_sec > 3600)) return 3600000; return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000; } /* Returns the seconds difference between end and start times as a double */ double tdiff(struct timeval *end, struct timeval *start) { return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0; } bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; *sockaddr_url = url; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return 
false; sprintf(url_address, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); *sockaddr_port = strdup(port); *sockaddr_url = strdup(url_address); return true; } enum send_ret { SEND_OK, SEND_SELECTFAIL, SEND_SENDFAIL, SEND_INACTIVE }; /* Send a single command across a socket, appending \n to it. This should all * be done under stratum lock except when first establishing the socket */ static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) { SOCKETTYPE sock = pool->sock; ssize_t ssent = 0; strcat(s, "\n"); len++; while (len > 0 ) { struct timeval timeout = {1, 0}; ssize_t sent; fd_set wd; retry: FD_ZERO(&wd); FD_SET(sock, &wd); if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) { if (interrupted()) goto retry; return SEND_SELECTFAIL; } #ifdef __APPLE__ sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); #elif WIN32 sent = send(pool->sock, s + ssent, len, 0); #else sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); #endif if (sent < 0) { if (!sock_blocks()) return SEND_SENDFAIL; sent = 0; } ssent += sent; len -= sent; } pool->cgminer_pool_stats.times_sent++; pool->cgminer_pool_stats.bytes_sent += ssent; pool->cgminer_pool_stats.net_bytes_sent += ssent; return SEND_OK; } bool stratum_send(struct pool *pool, char *s, ssize_t len) { enum send_ret ret = SEND_INACTIVE; if (opt_protocol) applog(LOG_DEBUG, "SEND: %s", s); mutex_lock(&pool->stratum_lock); if (pool->stratum_active) ret = __stratum_send(pool, s, len); mutex_unlock(&pool->stratum_lock); /* This is to avoid doing applog under stratum_lock */ switch (ret) { default: case SEND_OK: break; case SEND_SELECTFAIL: applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no); suspend_stratum(pool); break; case SEND_SENDFAIL: applog(LOG_DEBUG, "Failed to send in stratum_send"); suspend_stratum(pool); break; case SEND_INACTIVE: applog(LOG_DEBUG, 
"Stratum send failed due to no pool stratum_active");
			break;
	}
	return (ret == SEND_OK);
}

/* Wait up to 'wait' seconds for the stratum socket to become readable */
static bool socket_full(struct pool *pool, int wait)
{
	SOCKETTYPE sock = pool->sock;
	struct timeval timeout;
	fd_set rd;

	if (unlikely(wait < 0))
		wait = 0;
	FD_ZERO(&rd);
	FD_SET(sock, &rd);
	timeout.tv_usec = 0;
	timeout.tv_sec = wait;
	if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0)
		return true;
	return false;
}

/* Check to see if Santa's been good to you */
bool sock_full(struct pool *pool)
{
	/* Buffered data counts as available before polling the socket */
	if (strlen(pool->sockbuf))
		return true;

	return (socket_full(pool, 0));
}

/* Empty the pool's receive buffer */
static void clear_sockbuf(struct pool *pool)
{
	strcpy(pool->sockbuf, "");
}

/* Drain any pending data off the socket under lock, then clear the buffer */
static void clear_sock(struct pool *pool)
{
	ssize_t n;

	mutex_lock(&pool->stratum_lock);
	do {
		if (pool->sock)
			n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0);
		else
			n = 0;
	} while (n > 0);
	mutex_unlock(&pool->stratum_lock);

	clear_sockbuf(pool);
}

/* Realloc memory to new size and zero any extra memory added */
void _recalloc(void **ptr, size_t old, size_t new, const char *file, const char *func, const int line)
{
	if (new == old)
		return;
	*ptr = realloc(*ptr, new);
	if (unlikely(!*ptr))
		quitfrom(1, file, func, line, "Failed to realloc");
	if (new > old)
		memset(*ptr + old, 0, new - old);
}

/* Make sure the pool sockbuf is large enough to cope with any coinbase size
 * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE
 * and zeroing the new memory */
static void recalloc_sock(struct pool *pool, size_t len)
{
	size_t old, new;

	old = strlen(pool->sockbuf);
	new = old + len + 1;
	if (new < pool->sockbuf_size)
		return;
	new = new + (RBUFSIZE - (new % RBUFSIZE));
	// Avoid potentially recursive locking
	// applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new);
	pool->sockbuf = realloc(pool->sockbuf, new);
	if (!pool->sockbuf)
		quithere(1, "Failed to realloc pool sockbuf");
	memset(pool->sockbuf + old, 0, new - old);
	pool->sockbuf_size = new;
}

/* Peeks at a socket to find the first end of line and then reads just that
 * from the socket and returns that as a malloced char */
char *recv_line(struct pool *pool)
{
	char *tok, *sret = NULL;
	ssize_t len, buflen;
	int waited = 0;

	if (!strstr(pool->sockbuf, "\n")) {
		struct timeval rstart, now;

		cgtime(&rstart);
		if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
			applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
			goto out;
		}

		/* Accumulate reads into sockbuf until a newline arrives or the
		 * overall DEFAULT_SOCKWAIT budget is spent */
		do {
			char s[RBUFSIZE];
			size_t slen;
			ssize_t n;

			memset(s, 0, RBUFSIZE);
			n = recv(pool->sock, s, RECVSIZE, 0);
			if (!n) {
				applog(LOG_DEBUG, "Socket closed waiting in recv_line");
				suspend_stratum(pool);
				break;
			}
			cgtime(&now);
			waited = tdiff(&now, &rstart);
			if (n < 0) {
				if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
					applog(LOG_DEBUG, "Failed to recv sock in recv_line");
					suspend_stratum(pool);
					break;
				}
			} else {
				slen = strlen(s);
				recalloc_sock(pool, slen);
				strcat(pool->sockbuf, s);
			}
		} while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
	}

	buflen = strlen(pool->sockbuf);
	tok = strtok(pool->sockbuf, "\n");
	if (!tok) {
		applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
		goto out;
	}
	sret = strdup(tok);
	len = strlen(sret);

	/* Copy what's left in the buffer after the \n, including the
	 * terminating \0 */
	if (buflen > len + 1)
		memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1);
	else
		strcpy(pool->sockbuf, "");

	pool->cgminer_pool_stats.times_received++;
	pool->cgminer_pool_stats.bytes_received += len;
	pool->cgminer_pool_stats.net_bytes_received += len;
out:
	if (!sret)
		clear_sock(pool);
	else if (opt_protocol)
		applog(LOG_DEBUG, "RECVD: %s", sret);
	return sret;
}

/* Extracts a string value from a json array with error checking. To be used
 * when the value of the string returned is only examined and not to be stored.
* See json_array_string below */ static char *__json_array_string(json_t *val, unsigned int entry) { json_t *arr_entry; if (json_is_null(val)) return NULL; if (!json_is_array(val)) return NULL; if (entry > json_array_size(val)) return NULL; arr_entry = json_array_get(val, entry); if (!json_is_string(arr_entry)) return NULL; return (char *)json_string_value(arr_entry); } /* Creates a freshly malloced dup of __json_array_string */ static char *json_array_string(json_t *val, unsigned int entry) { char *buf = __json_array_string(val, entry); if (buf) return strdup(buf); return NULL; } static char *blank_merkle = "0000000000000000000000000000000000000000000000000000000000000000"; static bool parse_notify(struct pool *pool, json_t *val) { char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime, header[228]; unsigned char *cb1 = NULL, *cb2 = NULL; size_t cb1_len, cb2_len, alloc_len; bool clean, ret = false; int merkles, i; json_t *arr; arr = json_array_get(val, 4); if (!arr || !json_is_array(arr)) goto out; merkles = json_array_size(arr); job_id = json_array_string(val, 0); prev_hash = __json_array_string(val, 1); coinbase1 = json_array_string(val, 2); coinbase2 = json_array_string(val, 3); bbversion = __json_array_string(val, 5); nbit = __json_array_string(val, 6); ntime = __json_array_string(val, 7); clean = json_is_true(json_array_get(val, 8)); if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) { /* Annoying but we must not leak memory */ if (job_id) free(job_id); if (coinbase1) free(coinbase1); if (coinbase2) free(coinbase2); goto out; } cg_wlock(&pool->data_lock); free(pool->swork.job_id); pool->swork.job_id = job_id; snprintf(pool->prev_hash, 65, "%s", prev_hash); cb1_len = strlen(coinbase1) / 2; cb2_len = strlen(coinbase2) / 2; snprintf(pool->bbversion, 9, "%s", bbversion); snprintf(pool->nbit, 9, "%s", nbit); snprintf(pool->ntime, 9, "%s", ntime); pool->swork.clean = clean; alloc_len = pool->coinbase_len = 
cb1_len + pool->n1_len + pool->n2size + cb2_len; pool->nonce2_offset = cb1_len + pool->n1_len; for (i = 0; i < pool->merkles; i++) free(pool->swork.merkle_bin[i]); if (merkles) { pool->swork.merkle_bin = realloc(pool->swork.merkle_bin, sizeof(char *) * merkles + 1); for (i = 0; i < merkles; i++) { char *merkle = json_array_string(arr, i); pool->swork.merkle_bin[i] = malloc(32); if (unlikely(!pool->swork.merkle_bin[i])) quit(1, "Failed to malloc pool swork merkle_bin"); if (opt_protocol) applog(LOG_DEBUG, "merkle %d: %s", i, merkle); ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32); free(merkle); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify"); goto out_unlock; } } } pool->merkles = merkles; if (clean) pool->nonce2 = 0; #if 0 header_len = strlen(pool->bbversion) + strlen(pool->prev_hash); /* merkle_hash */ 32 + strlen(pool->ntime) + strlen(pool->nbit) + /* nonce */ 8 + /* workpadding */ 96; #endif snprintf(header, 225, "%s%s%s%s%s%s%s", pool->bbversion, pool->prev_hash, blank_merkle, pool->ntime, pool->nbit, "00000000", /* nonce */ workpadding); ret = hex2bin(pool->header_bin, header, 112); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify"); goto out_unlock; } cb1 = alloca(cb1_len); ret = hex2bin(cb1, coinbase1, cb1_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify"); goto out_unlock; } cb2 = alloca(cb2_len); ret = hex2bin(cb2, coinbase2, cb2_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in parse_notify"); goto out_unlock; } free(pool->coinbase); align_len(&alloc_len); pool->coinbase = calloc(alloc_len, 1); if (unlikely(!pool->coinbase)) quit(1, "Failed to calloc pool coinbase in parse_notify"); memcpy(pool->coinbase, cb1, cb1_len); memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len); memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len); if (opt_debug) { char 
*cb = bin2hex(pool->coinbase, pool->coinbase_len); applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb); free(cb); } out_unlock: cg_wunlock(&pool->data_lock); if (opt_protocol) { applog(LOG_DEBUG, "job_id: %s", job_id); applog(LOG_DEBUG, "prev_hash: %s", prev_hash); applog(LOG_DEBUG, "coinbase1: %s", coinbase1); applog(LOG_DEBUG, "coinbase2: %s", coinbase2); applog(LOG_DEBUG, "bbversion: %s", bbversion); applog(LOG_DEBUG, "nbit: %s", nbit); applog(LOG_DEBUG, "ntime: %s", ntime); applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no"); } free(coinbase1); free(coinbase2); /* A notify message is the closest stratum gets to a getwork */ pool->getwork_requested++; total_getworks++; if (pool == current_pool()) opt_work_update = true; out: return ret; } static bool parse_diff(struct pool *pool, json_t *val) { double old_diff, diff; diff = json_number_value(json_array_get(val, 0)); if (diff == 0) return false; cg_wlock(&pool->data_lock); old_diff = pool->sdiff; pool->sdiff = diff; cg_wunlock(&pool->data_lock); if (old_diff != diff) { int idiff = diff; if ((double)idiff == diff) applog(LOG_NOTICE, "Pool %d difficulty changed to %d", pool->pool_no, idiff); else applog(LOG_NOTICE, "Pool %d difficulty changed to %.1f", pool->pool_no, diff); } else applog(LOG_DEBUG, "Pool %d difficulty set to %f", pool->pool_no, diff); return true; } static void __suspend_stratum(struct pool *pool) { clear_sockbuf(pool); pool->stratum_active = pool->stratum_notify = false; if (pool->sock) CLOSESOCKET(pool->sock); pool->sock = 0; } static bool parse_reconnect(struct pool *pool, json_t *val) { char *sockaddr_url, *stratum_port, *tmp; char *url, *port, address[256]; memset(address, 0, 255); url = (char *)json_string_value(json_array_get(val, 0)); if (!url) url = pool->sockaddr_url; else { char *dot_pool, *dot_reconnect; dot_pool = strchr(pool->sockaddr_url, '.'); if (!dot_pool) { applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'", pool->sockaddr_url); return false; 
		}
		dot_reconnect = strchr(url, '.');
		if (!dot_reconnect) {
			applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'",
			       url);
			return false;
		}
		/* Only allow redirects whose domain suffix matches the pool */
		if (strcmp(dot_pool, dot_reconnect)) {
			applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'",
			       pool->sockaddr_url);
			return false;
		}
	}
	port = (char *)json_string_value(json_array_get(val, 1));
	if (!port)
		port = pool->stratum_port;

	sprintf(address, "%s:%s", url, port);

	if (!extract_sockaddr(address, &sockaddr_url, &stratum_port))
		return false;

	applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address);

	clear_pool_work(pool);

	/* Swap in the new address under lock, freeing the old strings */
	mutex_lock(&pool->stratum_lock);
	__suspend_stratum(pool);
	tmp = pool->sockaddr_url;
	pool->sockaddr_url = sockaddr_url;
	pool->stratum_url = pool->sockaddr_url;
	free(tmp);
	tmp = pool->stratum_port;
	pool->stratum_port = stratum_port;
	free(tmp);
	mutex_unlock(&pool->stratum_lock);

	if (!restart_stratum(pool)) {
		pool_failed(pool);
		return false;
	}

	return true;
}

/* Answer a client.get_version request with our package/version string */
static bool send_version(struct pool *pool, json_t *val)
{
	char s[RBUFSIZE];
	int id = json_integer_value(json_object_get(val, "id"));

	if (!id)
		return false;

	sprintf(s, "{\"id\": %d, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", id);
	if (!stratum_send(pool, s, strlen(s)))
		return false;

	return true;
}

/* Log a client.show_message notification sent by the pool */
static bool show_message(struct pool *pool, json_t *val)
{
	char *msg;

	if (!json_is_array(val))
		return false;
	msg = (char *)json_string_value(json_array_get(val, 0));
	if (!msg)
		return false;
	applog(LOG_NOTICE, "Pool %d message: %s", pool->pool_no, msg);
	return true;
}

/* Parse one JSON-RPC line from the pool and dispatch known stratum methods.
 * Returns true when the line was a method we handled successfully. */
bool parse_method(struct pool *pool, char *s)
{
	json_t *val = NULL, *method, *err_val, *params;
	json_error_t err;
	bool ret = false;
	char *buf;

	if (!s)
		goto out;

	val = JSON_LOADS(s, &err);
	if (!val) {
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	method = json_object_get(val, "method");
	if (!method)
		goto out_decref;
	err_val = json_object_get(val, "error");
	params = json_object_get(val, "params");

	if (err_val && !json_is_null(err_val)) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss);

		free(ss);

		goto out_decref;
	}

	buf = (char *)json_string_value(method);
	if (!buf)
		goto out_decref;

	if (!strncasecmp(buf, "mining.notify", 13)) {
		if (parse_notify(pool, params))
			pool->stratum_notify = ret = true;
		else
			pool->stratum_notify = ret = false;
		goto out_decref;
	}

	if (!strncasecmp(buf, "mining.set_difficulty", 21)) {
		ret = parse_diff(pool, params);
		goto out_decref;
	}

	if (!strncasecmp(buf, "client.reconnect", 16)) {
		ret = parse_reconnect(pool, params);
		goto out_decref;
	}

	if (!strncasecmp(buf, "client.get_version", 18)) {
		ret = send_version(pool, val);
		goto out_decref;
	}

	if (!strncasecmp(buf, "client.show_message", 19)) {
		ret = show_message(pool, params);
		goto out_decref;
	}
out_decref:
	json_decref(val);
out:
	return ret;
}

/* Authorise the worker credentials on an established stratum connection */
bool auth_stratum(struct pool *pool)
{
	json_t *val = NULL, *res_val, *err_val;
	char s[RBUFSIZE], *sret = NULL;
	json_error_t err;
	bool ret = false;

	sprintf(s, "{\"id\": %d, \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}",
		swork_id++, pool->rpc_user, pool->rpc_pass);

	if (!stratum_send(pool, s, strlen(s)))
		return ret;

	/* Parse all data in the queue and anything left should be auth */
	while (42) {
		sret = recv_line(pool);
		if (!sret)
			return ret;
		if (parse_method(pool, sret))
			free(sret);
		else
			break;
	}

	val = JSON_LOADS(sret, &err);
	free(sret);
	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");

	if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");
		applog(LOG_INFO, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss);
		free(ss);

		suspend_stratum(pool);

		goto out;
	}

	ret = true;
	applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no);
	pool->probed = true;
	successful_connect = true;
out:
	json_decref(val);
	return ret;
}

/* Read a single byte from a socket; returns -1 on recv failure */
static int recv_byte(int sockd)
{
	char c;

	if (recv(sockd, &c, 1, 0) != -1)
		return c;

	return -1;
}

/* Establish a CONNECT tunnel through an HTTP proxy (1.0 or 1.1) */
static bool http_negotiate(struct pool *pool, int sockd, bool http0)
{
	char buf[1024];
	int i, len;

	if (http0) {
		snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.0\r\n\r\n",
			 pool->sockaddr_url, pool->stratum_port);
	} else {
		snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.1\r\nHost: %s:%s\r\n\r\n",
			 pool->sockaddr_url, pool->stratum_port,
			 pool->sockaddr_url, pool->stratum_port);
	}
	applog(LOG_DEBUG, "Sending proxy %s:%s - %s",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
	send(sockd, buf, strlen(buf), 0);
	len = recv(sockd, buf, 12, 0);
	if (len <= 0) {
		applog(LOG_WARNING, "Couldn't read from proxy %s:%s after sending CONNECT",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
		return false;
	}
	buf[len] = '\0';
	applog(LOG_DEBUG, "Received from proxy %s:%s - %s",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
	if (strcmp(buf, "HTTP/1.1 200") && strcmp(buf, "HTTP/1.0 200")) {
		applog(LOG_WARNING, "HTTP Error from proxy %s:%s - %s",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
		return false;
	}

	/* Ignore unwanted headers till we get desired response */
	for (i = 0; i < 4; i++) {
		buf[i] = recv_byte(sockd);
		if (buf[i] == (char)-1) {
			applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s",
			       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
			return false;
		}
	}
	/* Slide a 4-byte window until the blank line ending the headers */
	while (strncmp(buf, "\r\n\r\n", 4)) {
		for (i = 0; i < 3; i++)
			buf[i] = buf[i + 1];
		buf[3] = recv_byte(sockd);
		if (buf[3] == (char)-1) {
			applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s",
			       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
			return false;
		}
	}

	applog(LOG_DEBUG, "Success negotiating with %s:%s HTTP proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
	return true;
}

/* Negotiate a TCP connection through a SOCKS5 proxy */
static bool socks5_negotiate(struct pool *pool, int sockd)
{
	unsigned char atyp, uclen;
	unsigned
short port;
	char buf[515];
	int i, len;

	/* Greeting: version 5, one auth method, no authentication */
	buf[0] = 0x05;
	buf[1] = 0x01;
	buf[2] = 0x00;
	applog(LOG_DEBUG, "Attempting to negotiate with %s:%s SOCKS5 proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
	send(sockd, buf, 3, 0);
	if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != buf[2]) {
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}

	/* CONNECT request with a domain-name (0x03) destination */
	buf[0] = 0x05;
	buf[1] = 0x01;
	buf[2] = 0x00;
	buf[3] = 0x03;
	len = (strlen(pool->sockaddr_url));
	if (len > 255)
		len = 255;
	uclen = len;
	buf[4] = (uclen & 0xff);
	memcpy(buf + 5, pool->sockaddr_url, len);
	port = atoi(pool->stratum_port);
	buf[5 + len] = (port >> 8);
	buf[6 + len] = (port & 0xff);
	send(sockd, buf, (7 + len), 0);
	if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != 0x00) {
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}

	/* Drain the bound-address portion of the reply */
	recv_byte(sockd);
	atyp = recv_byte(sockd);
	if (atyp == 0x01) {
		for (i = 0; i < 4; i++)
			recv_byte(sockd);
	} else if (atyp == 0x03) {
		len = recv_byte(sockd);
		for (i = 0; i < len; i++)
			recv_byte(sockd);
	} else {
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}
	for (i = 0; i < 2; i++)
		recv_byte(sockd);

	applog(LOG_DEBUG, "Success negotiating with %s:%s SOCKS5 proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
	return true;
}

/* Negotiate a TCP connection through a SOCKS4 (or SOCKS4a) proxy */
static bool socks4_negotiate(struct pool *pool, int sockd, bool socks4a)
{
	unsigned short port;
	in_addr_t inp;
	char buf[515];
	int i, len;

	buf[0] = 0x04;
	buf[1] = 0x01;
	port = atoi(pool->stratum_port);
	buf[2] = port >> 8;
	buf[3] = port & 0xff;
	sprintf(&buf[8], "CGMINER");

	/* See if we've been given an IP address directly to avoid needing to
	 * resolve it. */
	inp = inet_addr(pool->sockaddr_url);
	inp = ntohl(inp);
	if ((int)inp != -1)
		socks4a = false;
	else {
		/* Try to extract the IP address ourselves first */
		struct addrinfo servinfobase, *servinfo, hints;

		servinfo = &servinfobase;
		memset(&hints, 0, sizeof(struct addrinfo));
		hints.ai_family = AF_INET; /* IPV4 only */
		if (!getaddrinfo(pool->sockaddr_url, NULL, &hints, &servinfo)) {
			struct sockaddr_in *saddr_in = (struct sockaddr_in *)servinfo->ai_addr;

			inp = ntohl(saddr_in->sin_addr.s_addr);
			socks4a = false;
			freeaddrinfo(servinfo);
		}
	}

	if (!socks4a) {
		if ((int)inp == -1) {
			applog(LOG_WARNING, "Invalid IP address specified for socks4 proxy: %s",
			       pool->sockaddr_url);
			return false;
		}
		buf[4] = (inp >> 24) & 0xFF;
		buf[5] = (inp >> 16) & 0xFF;
		buf[6] = (inp >> 8) & 0xFF;
		buf[7] = (inp >> 0) & 0xFF;
		send(sockd, buf, 16, 0);
	} else {
		/* This appears to not be working but hopefully most will be
		 * able to resolve IP addresses themselves. */
		buf[4] = 0;
		buf[5] = 0;
		buf[6] = 0;
		buf[7] = 1;
		len = strlen(pool->sockaddr_url);
		if (len > 255)
			len = 255;
		memcpy(&buf[16], pool->sockaddr_url, len);
		len += 16;
		buf[len++] = '\0';
		send(sockd, buf, len, 0);
	}

	if (recv_byte(sockd) != 0x00 || recv_byte(sockd) != 0x5a) {
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS4 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
		return false;
	}

	for (i = 0; i < 6; i++)
		recv_byte(sockd);

	return true;
}

/* Put a socket into non-blocking mode */
static void noblock_socket(SOCKETTYPE fd)
{
#ifndef WIN32
	int flags = fcntl(fd, F_GETFL, 0);

	fcntl(fd, F_SETFL, O_NONBLOCK | flags);
#else
	u_long flags = 1;

	ioctlsocket(fd, FIONBIO, &flags);
#endif
}

/* Restore a socket to blocking mode */
static void block_socket(SOCKETTYPE fd)
{
#ifndef WIN32
	int flags = fcntl(fd, F_GETFL, 0);

	fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
#else
	u_long flags = 0;

	ioctlsocket(fd, FIONBIO, &flags);
#endif
}

/* True when the last socket error indicates a connect in progress */
static bool sock_connecting(void)
{
#ifndef WIN32
	return errno == EINPROGRESS;
#else
	return WSAGetLastError() == WSAEWOULDBLOCK;
#endif
}

/* Resolve the pool (or its proxy), connect with a per-address timeout,
 * run any proxy negotiation, and install the socket on the pool */
static bool setup_stratum_socket(struct pool *pool)
{
	struct addrinfo servinfobase, *servinfo, *hints, *p;
	char *sockaddr_url, *sockaddr_port;
	int sockd;

	mutex_lock(&pool->stratum_lock);
	pool->stratum_active = false;
	if (pool->sock)
		CLOSESOCKET(pool->sock);
	pool->sock = 0;
	mutex_unlock(&pool->stratum_lock);

	hints = &pool->stratum_hints;
	memset(hints, 0, sizeof(struct addrinfo));
	hints->ai_family = AF_UNSPEC;
	hints->ai_socktype = SOCK_STREAM;
	servinfo = &servinfobase;

	if (!pool->rpc_proxy && opt_socks_proxy) {
		pool->rpc_proxy = opt_socks_proxy;
		extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port);
		pool->rpc_proxytype = PROXY_SOCKS5;
	}

	if (pool->rpc_proxy) {
		sockaddr_url = pool->sockaddr_proxy_url;
		sockaddr_port = pool->sockaddr_proxy_port;
	} else {
		sockaddr_url = pool->sockaddr_url;
		sockaddr_port = pool->stratum_port;
	}
	if (getaddrinfo(sockaddr_url, sockaddr_port, hints, &servinfo) != 0) {
		if (!pool->probed) {
			applog(LOG_WARNING, "Failed to resolve (?wrong URL) %s:%s",
			       sockaddr_url, sockaddr_port);
			pool->probed = true;
		} else {
			applog(LOG_INFO, "Failed to getaddrinfo for %s:%s",
			       sockaddr_url, sockaddr_port);
		}
		return false;
	}

	for (p = servinfo; p != NULL; p = p->ai_next) {
		sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol);
		if (sockd == -1) {
			applog(LOG_DEBUG, "Failed socket");
			continue;
		}

		/* Iterate non blocking over entries returned by getaddrinfo
		 * to cope with round robin DNS entries, finding the first one
		 * we can connect to quickly. */
		noblock_socket(sockd);
		if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) {
			struct timeval tv_timeout = {1, 0};
			int selret;
			fd_set rw;

			if (!sock_connecting()) {
				CLOSESOCKET(sockd);
				applog(LOG_DEBUG, "Failed sock connect");
				continue;
			}
retry:
			FD_ZERO(&rw);
			FD_SET(sockd, &rw);
			selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout);
			if (selret > 0 && FD_ISSET(sockd, &rw)) {
				socklen_t len;
				int err, n;

				/* Writable after connect: check SO_ERROR to
				 * confirm the connect actually succeeded */
				len = sizeof(err);
				n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
				if (!n && !err) {
					applog(LOG_DEBUG, "Succeeded delayed connect");
					block_socket(sockd);
					break;
				}
			}
			if (selret < 0 && interrupted())
				goto retry;
			CLOSESOCKET(sockd);
			applog(LOG_DEBUG, "Select timeout/failed connect");
			continue;
		}
		applog(LOG_WARNING, "Succeeded immediate connect");
		block_socket(sockd);
		break;
	}
	if (p == NULL) {
		applog(LOG_INFO, "Failed to connect to stratum on %s:%s",
		       sockaddr_url, sockaddr_port);
		freeaddrinfo(servinfo);
		return false;
	}
	freeaddrinfo(servinfo);

	if (pool->rpc_proxy) {
		switch (pool->rpc_proxytype) {
			case PROXY_HTTP_1_0:
				if (!http_negotiate(pool, sockd, true))
					return false;
				break;
			case PROXY_HTTP:
				if (!http_negotiate(pool, sockd, false))
					return false;
				break;
			case PROXY_SOCKS5:
			case PROXY_SOCKS5H:
				if (!socks5_negotiate(pool, sockd))
					return false;
				break;
			case PROXY_SOCKS4:
				if (!socks4_negotiate(pool, sockd, false))
					return false;
				break;
			case PROXY_SOCKS4A:
				if (!socks4_negotiate(pool, sockd, true))
					return false;
				break;
			default:
				applog(LOG_WARNING, "Unsupported proxy type for %s:%s",
				       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
				return false;
				break;
		}
	}

	if (!pool->sockbuf) {
		pool->sockbuf = calloc(RBUFSIZE, 1);
		if (!pool->sockbuf)
			quithere(1, "Failed to calloc pool sockbuf");
		pool->sockbuf_size = RBUFSIZE;
	}

	pool->sock = sockd;
	keep_sockalive(sockd);
	return true;
}

/* Scan the mining.subscribe result for a mining.notify subscription and
 * return its session id as a malloced string, or NULL if absent */
static char *get_sessionid(json_t *val)
{
	char *ret = NULL;
	json_t *arr_val;
	int arrsize, i;

	arr_val = json_array_get(val, 0);
	if (!arr_val || !json_is_array(arr_val))
		goto out;
	arrsize =
json_array_size(arr_val); for (i = 0; i < arrsize; i++) { json_t *arr = json_array_get(arr_val, i); char *notify; if (!arr | !json_is_array(arr)) break; notify = __json_array_string(arr, 0); if (!notify) continue; if (!strncasecmp(notify, "mining.notify", 13)) { ret = json_array_string(arr, 1); break; } } out: return ret; } void suspend_stratum(struct pool *pool) { applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); mutex_unlock(&pool->stratum_lock); } bool initiate_stratum(struct pool *pool) { bool ret = false, recvd = false, noresume = false, sockd = false; char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid; json_t *val = NULL, *res_val, *err_val; json_error_t err; int n2size; resend: if (!setup_stratum_socket(pool)) { sockd = false; goto out; } sockd = true; if (recvd) { /* Get rid of any crap lying around if we're resending */ clear_sock(pool); sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++); } else { if (pool->sessionid) sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid); else sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++); } if (__stratum_send(pool, s, strlen(s)) != SEND_OK) { applog(LOG_DEBUG, "Failed to send s in initiate_stratum"); goto out; } if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum"); goto out; } sret = recv_line(pool); if (!sret) goto out; recvd = true; val = JSON_LOADS(sret, &err); free(sret); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_null(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss 
= strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC decode failed: %s", ss); free(ss); goto out; } sessionid = get_sessionid(res_val); if (!sessionid) applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum"); nonce1 = json_array_string(res_val, 1); if (!nonce1) { applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum"); free(sessionid); goto out; } n2size = json_integer_value(json_array_get(res_val, 2)); if (!n2size) { applog(LOG_INFO, "Failed to get n2size in initiate_stratum"); free(sessionid); free(nonce1); goto out; } cg_wlock(&pool->data_lock); pool->sessionid = sessionid; pool->nonce1 = nonce1; pool->n1_len = strlen(nonce1) / 2; free(pool->nonce1bin); pool->nonce1bin = calloc(pool->n1_len, 1); if (unlikely(!pool->nonce1bin)) quithere(1, "Failed to calloc pool->nonce1bin"); hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len); pool->n2size = n2size; cg_wunlock(&pool->data_lock); if (sessionid) applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid); ret = true; out: if (ret) { if (!pool->stratum_url) pool->stratum_url = pool->sockaddr_url; pool->stratum_active = true; pool->sdiff = 1; if (opt_protocol) { applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d", pool->pool_no, pool->nonce1, pool->n2size); } } else { if (recvd && !noresume) { /* Reset the sessionid used for stratum resuming in case the pool * does not support it, or does not know how to respond to the * presence of the sessionid parameter. 
*/ cg_wlock(&pool->data_lock); free(pool->sessionid); free(pool->nonce1); pool->sessionid = pool->nonce1 = NULL; cg_wunlock(&pool->data_lock); applog(LOG_DEBUG, "Failed to resume stratum, trying afresh"); noresume = true; json_decref(val); goto resend; } applog(LOG_DEBUG, "Initiate stratum failed"); if (sockd) suspend_stratum(pool); } json_decref(val); return ret; } bool restart_stratum(struct pool *pool) { if (pool->stratum_active) suspend_stratum(pool); if (!initiate_stratum(pool)) return false; if (!auth_stratum(pool)) return false; return true; } void dev_error(struct cgpu_info *dev, enum dev_reason reason) { dev->device_last_not_well = time(NULL); dev->device_not_well_reason = reason; switch (reason) { case REASON_THREAD_FAIL_INIT: dev->thread_fail_init_count++; break; case REASON_THREAD_ZERO_HASH: dev->thread_zero_hash_count++; break; case REASON_THREAD_FAIL_QUEUE: dev->thread_fail_queue_count++; break; case REASON_DEV_SICK_IDLE_60: dev->dev_sick_idle_60_count++; break; case REASON_DEV_DEAD_IDLE_600: dev->dev_dead_idle_600_count++; break; case REASON_DEV_NOSTART: dev->dev_nostart_count++; break; case REASON_DEV_OVER_HEAT: dev->dev_over_heat_count++; break; case REASON_DEV_THERMAL_CUTOFF: dev->dev_thermal_cutoff_count++; break; case REASON_DEV_COMMS_ERROR: dev->dev_comms_error_count++; break; case REASON_DEV_THROTTLE: dev->dev_throttle_count++; break; } } /* Realloc an existing string to fit an extra string s, appending s to it. 
*/ void *realloc_strcat(char *ptr, char *s) { size_t old = 0, len = strlen(s); char *ret; if (!len) return ptr; if (ptr) old = strlen(ptr); len += old + 1; align_len(&len); ret = malloc(len); if (unlikely(!ret)) quithere(1, "Failed to malloc"); if (ptr) { sprintf(ret, "%s%s", ptr, s); free(ptr); } else sprintf(ret, "%s", s); return ret; } /* Make a text readable version of a string using 0xNN for < ' ' or > '~' * Including 0x00 at the end * You must free the result yourself */ void *str_text(char *ptr) { unsigned char *uptr; char *ret, *txt; if (ptr == NULL) { ret = strdup("(null)"); if (unlikely(!ret)) quithere(1, "Failed to malloc null"); } uptr = (unsigned char *)ptr; ret = txt = malloc(strlen(ptr)*4+5); // Guaranteed >= needed if (unlikely(!txt)) quithere(1, "Failed to malloc txt"); do { if (*uptr < ' ' || *uptr > '~') { sprintf(txt, "0x%02x", *uptr); txt += 4; } else *(txt++) = *uptr; } while (*(uptr++)); *txt = '\0'; return ret; } void RenameThread(const char* name) { char buf[16]; snprintf(buf, sizeof(buf), "cg@%s", name); #if defined(PR_SET_NAME) // Only the first 15 characters are used (16 - NUL terminator) prctl(PR_SET_NAME, buf, 0, 0, 0); #elif (defined(__FreeBSD__) || defined(__OpenBSD__)) pthread_set_name_np(pthread_self(), buf); #elif defined(MAC_OSX) pthread_setname_np(buf); #else // Prevent warnings (void)buf; #endif } /* cgminer specific wrappers for true unnamed semaphore usage on platforms * that support them and for apple which does not. We use a single byte across * a pipe to emulate semaphore behaviour there. */ #ifdef __APPLE__ void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) { int flags, fd, i; if (pipe(cgsem->pipefd) == -1) quitfrom(1, file, func, line, "Failed pipe errno=%d", errno); /* Make the pipes FD_CLOEXEC to allow them to close should we call * execv on restart. 
*/ for (i = 0; i < 2; i++) { fd = cgsem->pipefd[i]; flags = fcntl(fd, F_GETFD, 0); flags |= FD_CLOEXEC; if (fcntl(fd, F_SETFD, flags) == -1) quitfrom(1, file, func, line, "Failed to fcntl errno=%d", errno); } } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { const char buf = 1; int ret; retry: ret = write(cgsem->pipefd[1], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to write errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { char buf; int ret; retry: ret = read(cgsem->pipefd[0], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to read errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void cgsem_destroy(cgsem_t *cgsem) { close(cgsem->pipefd[1]); close(cgsem->pipefd[0]); } /* This is similar to sem_timedwait but takes a millisecond value */ int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timeval timeout; int ret, fd; fd_set rd; char buf; retry: fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); ms_to_timeval(&timeout, ms); ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) { ret = read(fd, &buf, 1); return 0; } if (likely(!ret)) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); /* We don't reach here */ return 0; } /* Reset semaphore count back to zero */ void cgsem_reset(cgsem_t *cgsem) { int ret, fd; fd_set rd; char buf; fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); do { struct timeval timeout = {0, 0}; ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) ret = read(fd, &buf, 1); else if (unlikely(ret < 0 && interrupted())) ret = 1; } while (ret > 0); } #else void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, 
const int line) { int ret; if ((ret = sem_init(cgsem, 0, 0))) quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { if (unlikely(sem_post(cgsem))) quitfrom(1, file, func, line, "Failed to sem_post errno=%d cgsem=0x%p", errno, cgsem); } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { retry: if (unlikely(sem_wait(cgsem))) { if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_wait errno=%d cgsem=0x%p", errno, cgsem); } } int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timespec abs_timeout, ts_now; struct timeval tv_now; int ret; cgtime(&tv_now); timeval_to_spec(&ts_now, &tv_now); ms_to_timespec(&abs_timeout, ms); retry: timeraddspec(&abs_timeout, &ts_now); ret = sem_timedwait(cgsem, &abs_timeout); if (ret) { if (likely(sock_timeout())) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); } return 0; } void cgsem_reset(cgsem_t *cgsem) { int ret; do { ret = sem_trywait(cgsem); if (unlikely(ret < 0 && interrupted())) ret = 0; } while (!ret); } void cgsem_destroy(cgsem_t *cgsem) { sem_destroy(cgsem); } #endif /* Provide a completion_timeout helper function for unreliable functions that * may die due to driver issues etc that time out if the function fails and * can then reliably return. 
*/ struct cg_completion { cgsem_t cgsem; void (*fn)(void *fnarg); void *fnarg; }; void *completion_thread(void *arg) { struct cg_completion *cgc = (struct cg_completion *)arg; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); cgc->fn(cgc->fnarg); cgsem_post(&cgc->cgsem); return NULL; } bool cg_completion_timeout(void *fn, void *fnarg, int timeout) { struct cg_completion *cgc; pthread_t pthread; bool ret = false; cgc = malloc(sizeof(struct cg_completion)); if (unlikely(!cgc)) return ret; cgsem_init(&cgc->cgsem); cgc->fn = fn; cgc->fnarg = fnarg; pthread_create(&pthread, NULL, completion_thread, (void *)cgc); ret = cgsem_mswait(&cgc->cgsem, timeout); if (!ret) { pthread_join(pthread, NULL); free(cgc); } else pthread_cancel(pthread); return !ret; } void _cg_memcpy(void *dest, const void *src, unsigned int n, const char *file, const char *func, const int line) { if (unlikely(n < 1 || n > (1ul << 31))) { applog(LOG_ERR, "ERR: Asked to memcpy %u bytes from %s %s():%d", n, file, func, line); return; } memcpy(dest, src, n); }
/* * Copyright 2011-2014 Con Kolivas * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <stdarg.h> #include <string.h> #include <jansson.h> #ifdef HAVE_LIBCURL #include <curl/curl.h> #endif #include <time.h> #include <errno.h> #include <unistd.h> #include <sys/types.h> #ifndef WIN32 #include <fcntl.h> # ifdef __linux # include <sys/prctl.h> # endif # include <sys/socket.h> # include <netinet/in.h> # include <netinet/tcp.h> # include <netdb.h> #else # include <winsock2.h> # include <ws2tcpip.h> # include <mmsystem.h> #endif #include "miner.h" #include "elist.h" #include "compat.h" #include "util.h" #define DEFAULT_SOCKWAIT 60 bool successful_connect = false; static void keep_sockalive(SOCKETTYPE fd) { const int tcp_one = 1; #ifndef WIN32 const int tcp_keepidle = 45; const int tcp_keepintvl = 30; int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); if (!opt_delaynet) #ifndef __linux setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); #else /* __linux */ setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __linux */ #ifdef __APPLE_CC__ setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __APPLE_CC__ */ } struct tq_ent { void *data; 
struct list_head q_node; }; #ifdef HAVE_LIBCURL struct timeval nettime; struct data_buffer { void *buf; size_t len; }; struct upload_buffer { const void *buf; size_t len; }; struct header_info { char *lp_path; int rolltime; char *reason; char *stratum_url; bool hadrolltime; bool canroll; bool hadexpire; }; static void databuf_free(struct data_buffer *db) { if (!db) return; free(db->buf); memset(db, 0, sizeof(*db)); } static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb, void *user_data) { struct data_buffer *db = user_data; size_t len = size * nmemb; size_t oldlen, newlen; void *newmem; static const unsigned char zero = 0; oldlen = db->len; newlen = oldlen + len; newmem = realloc(db->buf, newlen + 1); if (!newmem) return 0; db->buf = newmem; db->len = newlen; memcpy(db->buf + oldlen, ptr, len); memcpy(db->buf + newlen, &zero, 1); /* null terminate */ return len; } static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct upload_buffer *ub = user_data; unsigned int len = size * nmemb; if (len > ub->len) len = ub->len; if (len) { memcpy(ptr, ub->buf, len); ub->buf += len; ub->len -= len; } return len; } static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct header_info *hi = user_data; size_t remlen, slen, ptrlen = size * nmemb; char *rem, *val = NULL, *key = NULL; void *tmp; val = calloc(1, ptrlen); key = calloc(1, ptrlen); if (!key || !val) goto out; tmp = memchr(ptr, ':', ptrlen); if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */ goto out; slen = tmp - ptr; if ((slen + 1) == ptrlen) /* skip key w/ no value */ goto out; memcpy(key, ptr, slen); /* store & nul term key */ key[slen] = 0; rem = ptr + slen + 1; /* trim value's leading whitespace */ remlen = ptrlen - slen - 1; while ((remlen > 0) && (isspace(*rem))) { remlen--; rem++; } memcpy(val, rem, remlen); /* store value, trim trailing ws */ val[remlen] = 0; while ((*val) && (isspace(val[strlen(val) - 1]))) val[strlen(val) 
- 1] = 0; if (!*val) /* skip blank value */ goto out; if (opt_protocol) applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val); if (!strcasecmp("X-Roll-Ntime", key)) { hi->hadrolltime = true; if (!strncasecmp("N", val, 1)) applog(LOG_DEBUG, "X-Roll-Ntime: N found"); else { hi->canroll = true; /* Check to see if expire= is supported and if not, set * the rolltime to the default scantime */ if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) { sscanf(val + 7, "%d", &hi->rolltime); hi->hadexpire = true; } else hi->rolltime = opt_scantime; applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); } } if (!strcasecmp("X-Long-Polling", key)) { hi->lp_path = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Reject-Reason", key)) { hi->reason = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Stratum", key)) { hi->stratum_url = val; val = NULL; } out: free(key); free(val); return ptrlen; } static void last_nettime(struct timeval *last) { rd_lock(&netacc_lock); last->tv_sec = nettime.tv_sec; last->tv_usec = nettime.tv_usec; rd_unlock(&netacc_lock); } static void set_nettime(void) { wr_lock(&netacc_lock); cgtime(&nettime); wr_unlock(&netacc_lock); } #if CURL_HAS_KEEPALIVE static void keep_curlalive(CURL *curl) { const int tcp_keepidle = 45; const int tcp_keepintvl = 30; const long int keepalive = 1; curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, keepalive); curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, tcp_keepidle); curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, tcp_keepintvl); } #else static void keep_curlalive(CURL *curl) { SOCKETTYPE sock; curl_easy_getinfo(curl, CURLINFO_LASTSOCKET, (long *)&sock); keep_sockalive(sock); } #endif static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type, __maybe_unused char *data, size_t size, void *userdata) { struct pool *pool = (struct pool *)userdata; switch(type) { case CURLINFO_HEADER_IN: case CURLINFO_DATA_IN: case CURLINFO_SSL_DATA_IN: pool->cgminer_pool_stats.net_bytes_received 
+= size; break; case CURLINFO_HEADER_OUT: case CURLINFO_DATA_OUT: case CURLINFO_SSL_DATA_OUT: pool->cgminer_pool_stats.net_bytes_sent += size; break; case CURLINFO_TEXT: default: break; } return 0; } json_t *json_web_config(const char *url) { struct data_buffer all_data = {NULL, 0}; char curl_err_str[CURL_ERROR_SIZE]; long timeout = 60; json_error_t err; json_t *val; CURL *curl; int rc; memset(&err, 0, sizeof(err)); curl = curl_easy_init(); if (unlikely(!curl)) quithere(1, "CURL initialisation failed"); curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); val = NULL; rc = curl_easy_perform(curl); curl_easy_cleanup(curl); if (rc) { applog(LOG_ERR, "HTTP config request of '%s' failed: %s", url, curl_err_str); goto c_out; } if (!all_data.buf) { applog(LOG_ERR, "Empty config data received from '%s'", url); goto c_out; } val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_ERR, "JSON config decode of '%s' failed(%d): %s", url, err.line, err.text); } databuf_free(&all_data); c_out: return val; } json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, const char *rpc_req, bool probe, bool longpoll, int *rolltime, struct pool *pool, bool share) { long timeout = longpoll ? 
(60 * 60) : 60; struct data_buffer all_data = {NULL, 0}; struct header_info hi = {NULL, 0, NULL, NULL, false, false, false}; char len_hdr[64], user_agent_hdr[128]; char curl_err_str[CURL_ERROR_SIZE]; struct curl_slist *headers = NULL; struct upload_buffer upload_data; json_t *val, *err_val, *res_val; bool probing = false; double byte_count; json_error_t err; int rc; memset(&err, 0, sizeof(err)); /* it is assumed that 'curl' is freshly [re]initialized at this pt */ if (probe) probing = !pool->probed; curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); // CURLOPT_VERBOSE won't write to stderr if we use CURLOPT_DEBUGFUNCTION curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); /* Shares are staggered already and delays in submission can be costly * so do not delay them */ if (!opt_delaynet || share) curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb); curl_easy_setopt(curl, CURLOPT_READDATA, &upload_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb); curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hi); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); if (pool->rpc_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, pool->rpc_proxytype); } else if (opt_socks_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); } if (userpass) { curl_easy_setopt(curl, 
CURLOPT_USERPWD, userpass); curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); } if (longpoll) keep_curlalive(curl); curl_easy_setopt(curl, CURLOPT_POST, 1); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req); upload_data.buf = rpc_req; upload_data.len = strlen(rpc_req); sprintf(len_hdr, "Content-Length: %lu", (unsigned long) upload_data.len); sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE_STRING); headers = curl_slist_append(headers, "Content-type: application/json"); headers = curl_slist_append(headers, "X-Mining-Extensions: longpoll midstate rollntime submitold"); if (likely(global_hashrate)) { char ghashrate[255]; sprintf(ghashrate, "X-Mining-Hashrate: %llu", global_hashrate); headers = curl_slist_append(headers, ghashrate); } headers = curl_slist_append(headers, len_hdr); headers = curl_slist_append(headers, user_agent_hdr); headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); if (opt_delaynet) { /* Don't delay share submission, but still track the nettime */ if (!share) { long long now_msecs, last_msecs; struct timeval now, last; cgtime(&now); last_nettime(&last); now_msecs = (long long)now.tv_sec * 1000; now_msecs += now.tv_usec / 1000; last_msecs = (long long)last.tv_sec * 1000; last_msecs += last.tv_usec / 1000; if (now_msecs > last_msecs && now_msecs - last_msecs < 250) { struct timespec rgtp; rgtp.tv_sec = 0; rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000; nanosleep(&rgtp, NULL); } } set_nettime(); } rc = curl_easy_perform(curl); if (rc) { applog(LOG_INFO, "HTTP request failed: %s", curl_err_str); goto err_out; } if (!all_data.buf) { applog(LOG_DEBUG, "Empty data received in json_rpc_call."); goto err_out; } pool->cgminer_pool_stats.times_sent++; if (curl_easy_getinfo(curl, CURLINFO_SIZE_UPLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_sent += byte_count; pool->cgminer_pool_stats.times_received++; if 
(curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_received += byte_count; if (probing) { pool->probed = true; /* If X-Long-Polling was found, activate long polling */ if (hi.lp_path) { if (pool->hdr_path != NULL) free(pool->hdr_path); pool->hdr_path = hi.lp_path; } else pool->hdr_path = NULL; if (hi.stratum_url) { pool->stratum_url = hi.stratum_url; hi.stratum_url = NULL; } } else { if (hi.lp_path) { free(hi.lp_path); hi.lp_path = NULL; } if (hi.stratum_url) { free(hi.stratum_url); hi.stratum_url = NULL; } } *rolltime = hi.rolltime; pool->cgminer_pool_stats.rolltime = hi.rolltime; pool->cgminer_pool_stats.hadrolltime = hi.hadrolltime; pool->cgminer_pool_stats.canroll = hi.canroll; pool->cgminer_pool_stats.hadexpire = hi.hadexpire; val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol response:\n%s", (char *)(all_data.buf)); goto err_out; } if (opt_protocol) { char *s = json_dumps(val, JSON_INDENT(3)); applog(LOG_DEBUG, "JSON protocol response:\n%s", s); free(s); } /* JSON-RPC valid response returns a non-null 'result', * and a null 'error'. 
*/ res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val ||(err_val && !json_is_null(err_val))) { char *s; if (err_val) s = json_dumps(err_val, JSON_INDENT(3)); else s = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC call failed: %s", s); free(s); goto err_out; } if (hi.reason) { json_object_set_new(val, "reject-reason", json_string(hi.reason)); free(hi.reason); hi.reason = NULL; } successful_connect = true; databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); return val; err_out: databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); if (!successful_connect) applog(LOG_DEBUG, "Failed to connect in json_rpc_call"); curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); return NULL; } #define PROXY_HTTP CURLPROXY_HTTP #define PROXY_HTTP_1_0 CURLPROXY_HTTP_1_0 #define PROXY_SOCKS4 CURLPROXY_SOCKS4 #define PROXY_SOCKS5 CURLPROXY_SOCKS5 #define PROXY_SOCKS4A CURLPROXY_SOCKS4A #define PROXY_SOCKS5H CURLPROXY_SOCKS5_HOSTNAME #else /* HAVE_LIBCURL */ #define PROXY_HTTP 0 #define PROXY_HTTP_1_0 1 #define PROXY_SOCKS4 2 #define PROXY_SOCKS5 3 #define PROXY_SOCKS4A 4 #define PROXY_SOCKS5H 5 #endif /* HAVE_LIBCURL */ static struct { const char *name; proxytypes_t proxytype; } proxynames[] = { { "http:", PROXY_HTTP }, { "http0:", PROXY_HTTP_1_0 }, { "socks4:", PROXY_SOCKS4 }, { "socks5:", PROXY_SOCKS5 }, { "socks4a:", PROXY_SOCKS4A }, { "socks5h:", PROXY_SOCKS5H }, { NULL, 0 } }; const char *proxytype(proxytypes_t proxytype) { int i; for (i = 0; proxynames[i].name; i++) if (proxynames[i].proxytype == proxytype) return proxynames[i].name; return "invalid"; } char *get_proxy(char *url, struct pool *pool) { pool->rpc_proxy = NULL; char *split; int plen, len, i; for (i = 0; proxynames[i].name; i++) { plen = strlen(proxynames[i].name); if (strncmp(url, proxynames[i].name, plen) == 0) { if (!(split = strchr(url, '|'))) return url; *split = '\0'; len = split - url; pool->rpc_proxy = malloc(1 
+ len - plen); if (!(pool->rpc_proxy)) quithere(1, "Failed to malloc rpc_proxy"); strcpy(pool->rpc_proxy, url + plen); extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); pool->rpc_proxytype = proxynames[i].proxytype; url = split + 1; break; } } return url; } /* Adequate size s==len*2 + 1 must be alloced to use this variant */ void __bin2hex(char *s, const unsigned char *p, size_t len) { int i; static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; for (i = 0; i < (int)len; i++) { *s++ = hex[p[i] >> 4]; *s++ = hex[p[i] & 0xF]; } *s++ = '\0'; } /* Returns a malloced array string of a binary value of arbitrary length. The * array is rounded up to a 4 byte size to appease architectures that need * aligned array sizes */ char *bin2hex(const unsigned char *p, size_t len) { ssize_t slen; char *s; slen = len * 2 + 1; if (slen % 4) slen += 4 - (slen % 4); s = calloc(slen, 1); if (unlikely(!s)) quithere(1, "Failed to calloc"); __bin2hex(s, p, len); return s; } static const int hex2bin_tbl[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, }; /* Does the reverse of bin2hex but does not allocate any ram */ bool hex2bin(unsigned char *p, const char *hexstr, size_t len) { int nibble1, nibble2; unsigned char idx; bool ret = false; while (*hexstr && len) { if (unlikely(!hexstr[1])) { applog(LOG_ERR, "hex2bin str truncated"); return ret; } idx = *hexstr++; nibble1 = hex2bin_tbl[idx]; idx = *hexstr++; nibble2 = hex2bin_tbl[idx]; if (unlikely((nibble1 < 0) || (nibble2 < 0))) { applog(LOG_ERR, "hex2bin scan failed"); return ret; } *p++ = (((unsigned char)nibble1) << 4) | ((unsigned char)nibble2); --len; } if (likely(len == 0 && *hexstr == 0)) ret = true; return ret; } static bool _valid_hex(char *s, const char *file, const char *func, const int line) { bool ret = false; int i, len; if (unlikely(!s)) { applog(LOG_ERR, "Null string passed to valid_hex from"IN_FMT_FFL, file, func, line); return ret; } len = strlen(s); if (unlikely(!len)) { applog(LOG_ERR, "Zero length string passed to valid_hex from"IN_FMT_FFL, file, func, line); return ret; } for (i = 0; i < len; i++) { unsigned char idx = s[i]; if (unlikely(hex2bin_tbl[idx] < 0)) { applog(LOG_ERR, "Invalid char %x passed to valid_hex from"IN_FMT_FFL, idx, file, func, line); return ret; } } ret = true; return ret; } #define valid_hex(s) _valid_hex(s, __FILE__, __func__, __LINE__) static const int b58tobin_tbl[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 }; /* b58bin should always be at least 25 bytes long and already 
checked to be * valid. */ void b58tobin(unsigned char *b58bin, const char *b58) { uint32_t c, bin32[7]; int len, i, j; uint64_t t; memset(bin32, 0, 7 * sizeof(uint32_t)); len = strlen(b58); for (i = 0; i < len; i++) { c = b58[i]; c = b58tobin_tbl[c]; for (j = 6; j >= 0; j--) { t = ((uint64_t)bin32[j]) * 58 + c; c = (t & 0x3f00000000ull) >> 32; bin32[j] = t & 0xffffffffull; } } *(b58bin++) = bin32[0] & 0xff; for (i = 1; i < 7; i++) { *((uint32_t *)b58bin) = htobe32(bin32[i]); b58bin += sizeof(uint32_t); } } void address_to_pubkeyhash(unsigned char *pkh, const char *addr) { unsigned char b58bin[25]; memset(b58bin, 0, 25); b58tobin(b58bin, addr); pkh[0] = 0x76; pkh[1] = 0xa9; pkh[2] = 0x14; memcpy(&pkh[3], &b58bin[1], 20); pkh[23] = 0x88; pkh[24] = 0xac; } /* For encoding nHeight into coinbase, return how many bytes were used */ int ser_number(unsigned char *s, int32_t val) { int32_t *i32 = (int32_t *)&s[1]; int len; if (val < 128) len = 1; else if (val < 16512) len = 2; else if (val < 2113664) len = 3; else len = 4; *i32 = htole32(val); s[0] = len++; return len; } /* For encoding variable length strings */ unsigned char *ser_string(char *s, int *slen) { size_t len = strlen(s); unsigned char *ret; ret = malloc(1 + len + 8); // Leave room for largest size if (unlikely(!ret)) quit(1, "Failed to malloc ret in ser_string"); if (len < 253) { ret[0] = len; memcpy(ret + 1, s, len); *slen = len + 1; } else if (len < 0x10000) { uint16_t *u16 = (uint16_t *)&ret[1]; ret[0] = 253; *u16 = htobe16(len); memcpy(ret + 3, s, len); *slen = len + 3; } else { /* size_t is only 32 bit on many platforms anyway */ uint32_t *u32 = (uint32_t *)&ret[1]; ret[0] = 254; *u32 = htobe32(len); memcpy(ret + 5, s, len); *slen = len + 5; } return ret; } bool fulltest(const unsigned char *hash, const unsigned char *target) { uint32_t *hash32 = (uint32_t *)hash; uint32_t *target32 = (uint32_t *)target; bool rc = true; int i; for (i = 28 / 4; i >= 0; i--) { uint32_t h32tmp = le32toh(hash32[i]); uint32_t 
t32tmp = le32toh(target32[i]); if (h32tmp > t32tmp) { rc = false; break; } if (h32tmp < t32tmp) { rc = true; break; } } if (opt_debug) { unsigned char hash_swap[32], target_swap[32]; char *hash_str, *target_str; swab256(hash_swap, hash); swab256(target_swap, target); hash_str = bin2hex(hash_swap, 32); target_str = bin2hex(target_swap, 32); applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s", hash_str, target_str, rc ? "YES (hash <= target)" : "no (false positive; hash > target)"); free(hash_str); free(target_str); } return rc; } struct thread_q *tq_new(void) { struct thread_q *tq; tq = calloc(1, sizeof(*tq)); if (!tq) return NULL; INIT_LIST_HEAD(&tq->q); pthread_mutex_init(&tq->mutex, NULL); pthread_cond_init(&tq->cond, NULL); return tq; } void tq_free(struct thread_q *tq) { struct tq_ent *ent, *iter; if (!tq) return; list_for_each_entry_safe(ent, iter, &tq->q, q_node) { list_del(&ent->q_node); free(ent); } pthread_cond_destroy(&tq->cond); pthread_mutex_destroy(&tq->mutex); memset(tq, 0, sizeof(*tq)); /* poison */ free(tq); } static void tq_freezethaw(struct thread_q *tq, bool frozen) { mutex_lock(&tq->mutex); tq->frozen = frozen; pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); } void tq_freeze(struct thread_q *tq) { tq_freezethaw(tq, true); } void tq_thaw(struct thread_q *tq) { tq_freezethaw(tq, false); } bool tq_push(struct thread_q *tq, void *data) { struct tq_ent *ent; bool rc = true; ent = calloc(1, sizeof(*ent)); if (!ent) return false; ent->data = data; INIT_LIST_HEAD(&ent->q_node); mutex_lock(&tq->mutex); if (!tq->frozen) { list_add_tail(&ent->q_node, &tq->q); } else { free(ent); rc = false; } pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); return rc; } void *tq_pop(struct thread_q *tq, const struct timespec *abstime) { struct tq_ent *ent; void *rval = NULL; int rc; mutex_lock(&tq->mutex); if (!list_empty(&tq->q)) goto pop; if (abstime) rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime); else rc = pthread_cond_wait(&tq->cond, 
&tq->mutex); if (rc) goto out; if (list_empty(&tq->q)) goto out; pop: ent = list_entry(tq->q.next, struct tq_ent, q_node); rval = ent->data; list_del(&ent->q_node); free(ent); out: mutex_unlock(&tq->mutex); return rval; } int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg) { cgsem_init(&thr->sem); return pthread_create(&thr->pth, attr, start, arg); } void thr_info_cancel(struct thr_info *thr) { if (!thr) return; if (PTH(thr) != 0L) { pthread_cancel(thr->pth); PTH(thr) = 0L; } cgsem_destroy(&thr->sem); } void subtime(struct timeval *a, struct timeval *b) { timersub(a, b, b); } void addtime(struct timeval *a, struct timeval *b) { timeradd(a, b, b); } bool time_more(struct timeval *a, struct timeval *b) { return timercmp(a, b, >); } bool time_less(struct timeval *a, struct timeval *b) { return timercmp(a, b, <); } void copy_time(struct timeval *dest, const struct timeval *src) { memcpy(dest, src, sizeof(struct timeval)); } void timespec_to_val(struct timeval *val, const struct timespec *spec) { val->tv_sec = spec->tv_sec; val->tv_usec = spec->tv_nsec / 1000; } void timeval_to_spec(struct timespec *spec, const struct timeval *val) { spec->tv_sec = val->tv_sec; spec->tv_nsec = val->tv_usec * 1000; } void us_to_timeval(struct timeval *val, int64_t us) { lldiv_t tvdiv = lldiv(us, 1000000); val->tv_sec = tvdiv.quot; val->tv_usec = tvdiv.rem; } void us_to_timespec(struct timespec *spec, int64_t us) { lldiv_t tvdiv = lldiv(us, 1000000); spec->tv_sec = tvdiv.quot; spec->tv_nsec = tvdiv.rem * 1000; } void ms_to_timespec(struct timespec *spec, int64_t ms) { lldiv_t tvdiv = lldiv(ms, 1000); spec->tv_sec = tvdiv.quot; spec->tv_nsec = tvdiv.rem * 1000000; } void ms_to_timeval(struct timeval *val, int64_t ms) { lldiv_t tvdiv = lldiv(ms, 1000); val->tv_sec = tvdiv.quot; val->tv_usec = tvdiv.rem * 1000; } void timeraddspec(struct timespec *a, const struct timespec *b) { a->tv_sec += b->tv_sec; a->tv_nsec += b->tv_nsec; if (a->tv_nsec >= 
1000000000) { a->tv_nsec -= 1000000000; a->tv_sec++; } } static int __maybe_unused timespec_to_ms(struct timespec *ts) { return ts->tv_sec * 1000 + ts->tv_nsec / 1000000; } /* Subtract b from a */ static void __maybe_unused timersubspec(struct timespec *a, const struct timespec *b) { a->tv_sec -= b->tv_sec; a->tv_nsec -= b->tv_nsec; if (a->tv_nsec < 0) { a->tv_nsec += 1000000000; a->tv_sec--; } } /* These are cgminer specific sleep functions that use an absolute nanosecond * resolution timer to avoid poor usleep accuracy and overruns. */ #ifdef WIN32 /* Windows start time is since 1601 LOL so convert it to unix epoch 1970. */ #define EPOCHFILETIME (116444736000000000LL) /* Return the system time as an lldiv_t in decimicroseconds. */ static void decius_time(lldiv_t *lidiv) { FILETIME ft; LARGE_INTEGER li; GetSystemTimeAsFileTime(&ft); li.LowPart = ft.dwLowDateTime; li.HighPart = ft.dwHighDateTime; li.QuadPart -= EPOCHFILETIME; /* SystemTime is in decimicroseconds so divide by an unusual number */ *lidiv = lldiv(li.QuadPart, 10000000); } /* This is a cgminer gettimeofday wrapper. Since we always call gettimeofday * with tz set to NULL, and windows' default resolution is only 15ms, this * gives us higher resolution times on windows. */ void cgtime(struct timeval *tv) { lldiv_t lidiv; decius_time(&lidiv); tv->tv_sec = lidiv.quot; tv->tv_usec = lidiv.rem / 10; } #else /* WIN32 */ void cgtime(struct timeval *tv) { gettimeofday(tv, NULL); } int cgtimer_to_ms(cgtimer_t *cgt) { return timespec_to_ms(cgt); } /* Subtracts b from a and stores it in res. 
*/ void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) { res->tv_sec = a->tv_sec - b->tv_sec; res->tv_nsec = a->tv_nsec - b->tv_nsec; if (res->tv_nsec < 0) { res->tv_nsec += 1000000000; res->tv_sec--; } } #endif /* WIN32 */ #ifdef CLOCK_MONOTONIC /* Essentially just linux */ void cgtimer_time(cgtimer_t *ts_start) { clock_gettime(CLOCK_MONOTONIC, ts_start); } static void nanosleep_abstime(struct timespec *ts_end) { int ret; do { ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL); } while (ret == EINTR); } /* Reentrant version of cgsleep functions allow start time to be set separately * from the beginning of the actual sleep, allowing scheduling delays to be * counted in the sleep. */ void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { struct timespec ts_end; ms_to_timespec(&ts_end, ms); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { struct timespec ts_end; us_to_timespec(&ts_end, us); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } #else /* CLOCK_MONOTONIC */ #ifdef __MACH__ #include <mach/clock.h> #include <mach/mach.h> void cgtimer_time(cgtimer_t *ts_start) { clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts_start->tv_sec = mts.tv_sec; ts_start->tv_nsec = mts.tv_nsec; } #elif !defined(WIN32) /* __MACH__ - Everything not linux/macosx/win32 */ void cgtimer_time(cgtimer_t *ts_start) { struct timeval tv; cgtime(&tv); ts_start->tv_sec = tv->tv_sec; ts_start->tv_nsec = tv->tv_usec * 1000; } #endif /* __MACH__ */ #ifdef WIN32 /* For windows we use the SystemTime stored as a LARGE_INTEGER as the cgtimer_t * typedef, allowing us to have sub-microsecond resolution for times, do simple * arithmetic for timer calculations, and use windows' own hTimers to get * accurate absolute timeouts. 
*/ int cgtimer_to_ms(cgtimer_t *cgt) { return (int)(cgt->QuadPart / 10000LL); } /* Subtracts b from a and stores it in res. */ void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) { res->QuadPart = a->QuadPart - b->QuadPart; } /* Note that cgtimer time is NOT offset by the unix epoch since we use absolute * timeouts with hTimers. */ void cgtimer_time(cgtimer_t *ts_start) { FILETIME ft; GetSystemTimeAsFileTime(&ft); ts_start->LowPart = ft.dwLowDateTime; ts_start->HighPart = ft.dwHighDateTime; } static void liSleep(LARGE_INTEGER *li, int timeout) { HANDLE hTimer; DWORD ret; if (unlikely(timeout <= 0)) return; hTimer = CreateWaitableTimer(NULL, TRUE, NULL); if (unlikely(!hTimer)) quit(1, "Failed to create hTimer in liSleep"); ret = SetWaitableTimer(hTimer, li, 0, NULL, NULL, 0); if (unlikely(!ret)) quit(1, "Failed to SetWaitableTimer in liSleep"); /* We still use a timeout as a sanity check in case the system time * is changed while we're running */ ret = WaitForSingleObject(hTimer, timeout); if (unlikely(ret != WAIT_OBJECT_0 && ret != WAIT_TIMEOUT)) quit(1, "Failed to WaitForSingleObject in liSleep"); CloseHandle(hTimer); } void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { LARGE_INTEGER li; li.QuadPart = ts_start->QuadPart + (int64_t)ms * 10000LL; liSleep(&li, ms); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { LARGE_INTEGER li; int ms; li.QuadPart = ts_start->QuadPart + us * 10LL; ms = us / 1000; if (!ms) ms = 1; liSleep(&li, ms); } #else /* WIN32 */ static void cgsleep_spec(struct timespec *ts_diff, const struct timespec *ts_start) { struct timespec now; timeraddspec(ts_diff, ts_start); cgtimer_time(&now); timersubspec(ts_diff, &now); if (unlikely(ts_diff->tv_sec < 0)) return; nanosleep(ts_diff, NULL); } void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { struct timespec ts_diff; ms_to_timespec(&ts_diff, ms); cgsleep_spec(&ts_diff, ts_start); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { struct timespec ts_diff; us_to_timespec(&ts_diff, us); 
cgsleep_spec(&ts_diff, ts_start); } #endif /* WIN32 */ #endif /* CLOCK_MONOTONIC */ void cgsleep_ms(int ms) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_ms_r(&ts_start, ms); } void cgsleep_us(int64_t us) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_us_r(&ts_start, us); } /* Returns the microseconds difference between end and start times as a double */ double us_tdiff(struct timeval *end, struct timeval *start) { /* Sanity check. We should only be using this for small differences so * limit the max to 60 seconds. */ if (unlikely(end->tv_sec - start->tv_sec > 60)) return 60000000; return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec); } /* Returns the milliseconds difference between end and start times */ int ms_tdiff(struct timeval *end, struct timeval *start) { /* Like us_tdiff, limit to 1 hour. */ if (unlikely(end->tv_sec - start->tv_sec > 3600)) return 3600000; return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000; } /* Returns the seconds difference between end and start times as a double */ double tdiff(struct timeval *end, struct timeval *start) { return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0; } bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; *sockaddr_url = url; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return 
false; snprintf(url_address, 254, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); *sockaddr_port = strdup(port); *sockaddr_url = strdup(url_address); return true; } enum send_ret { SEND_OK, SEND_SELECTFAIL, SEND_SENDFAIL, SEND_INACTIVE }; /* Send a single command across a socket, appending \n to it. This should all * be done under stratum lock except when first establishing the socket */ static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) { SOCKETTYPE sock = pool->sock; ssize_t ssent = 0; strcat(s, "\n"); len++; while (len > 0 ) { struct timeval timeout = {1, 0}; ssize_t sent; fd_set wd; retry: FD_ZERO(&wd); FD_SET(sock, &wd); if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) { if (interrupted()) goto retry; return SEND_SELECTFAIL; } #ifdef __APPLE__ sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); #elif WIN32 sent = send(pool->sock, s + ssent, len, 0); #else sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); #endif if (sent < 0) { if (!sock_blocks()) return SEND_SENDFAIL; sent = 0; } ssent += sent; len -= sent; } pool->cgminer_pool_stats.times_sent++; pool->cgminer_pool_stats.bytes_sent += ssent; pool->cgminer_pool_stats.net_bytes_sent += ssent; return SEND_OK; } bool stratum_send(struct pool *pool, char *s, ssize_t len) { enum send_ret ret = SEND_INACTIVE; if (opt_protocol) applog(LOG_DEBUG, "SEND: %s", s); mutex_lock(&pool->stratum_lock); if (pool->stratum_active) ret = __stratum_send(pool, s, len); mutex_unlock(&pool->stratum_lock); /* This is to avoid doing applog under stratum_lock */ switch (ret) { default: case SEND_OK: break; case SEND_SELECTFAIL: applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no); suspend_stratum(pool); break; case SEND_SENDFAIL: applog(LOG_DEBUG, "Failed to send in stratum_send"); suspend_stratum(pool); break; case SEND_INACTIVE: 
applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active"); break; } return (ret == SEND_OK); } static bool socket_full(struct pool *pool, int wait) { SOCKETTYPE sock = pool->sock; struct timeval timeout; fd_set rd; if (unlikely(wait < 0)) wait = 0; FD_ZERO(&rd); FD_SET(sock, &rd); timeout.tv_usec = 0; timeout.tv_sec = wait; if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0) return true; return false; } /* Check to see if Santa's been good to you */ bool sock_full(struct pool *pool) { if (strlen(pool->sockbuf)) return true; return (socket_full(pool, 0)); } static void clear_sockbuf(struct pool *pool) { strcpy(pool->sockbuf, ""); } static void clear_sock(struct pool *pool) { ssize_t n; mutex_lock(&pool->stratum_lock); do { if (pool->sock) n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0); else n = 0; } while (n > 0); mutex_unlock(&pool->stratum_lock); clear_sockbuf(pool); } /* Realloc memory to new size and zero any extra memory added */ void _recalloc(void **ptr, size_t old, size_t new, const char *file, const char *func, const int line) { if (new == old) return; *ptr = realloc(*ptr, new); if (unlikely(!*ptr)) quitfrom(1, file, func, line, "Failed to realloc"); if (new > old) memset(*ptr + old, 0, new - old); } /* Make sure the pool sockbuf is large enough to cope with any coinbase size * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE * and zeroing the new memory */ static void recalloc_sock(struct pool *pool, size_t len) { size_t old, new; old = strlen(pool->sockbuf); new = old + len + 1; if (new < pool->sockbuf_size) return; new = new + (RBUFSIZE - (new % RBUFSIZE)); // Avoid potentially recursive locking // applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new); pool->sockbuf = realloc(pool->sockbuf, new); if (!pool->sockbuf) quithere(1, "Failed to realloc pool sockbuf"); memset(pool->sockbuf + old, 0, new - old); pool->sockbuf_size = new; } /* Peeks at a socket to find the first end of line and then reads just that * 
from the socket and returns that as a malloced char */ char *recv_line(struct pool *pool) { char *tok, *sret = NULL; ssize_t len, buflen; int waited = 0; if (!strstr(pool->sockbuf, "\n")) { struct timeval rstart, now; cgtime(&rstart); if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for data on socket_full"); goto out; } do { char s[RBUFSIZE]; size_t slen; ssize_t n; memset(s, 0, RBUFSIZE); n = recv(pool->sock, s, RECVSIZE, 0); if (!n) { applog(LOG_DEBUG, "Socket closed waiting in recv_line"); suspend_stratum(pool); break; } cgtime(&now); waited = tdiff(&now, &rstart); if (n < 0) { if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) { applog(LOG_DEBUG, "Failed to recv sock in recv_line"); suspend_stratum(pool); break; } } else { slen = strlen(s); recalloc_sock(pool, slen); strcat(pool->sockbuf, s); } } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n")); } buflen = strlen(pool->sockbuf); tok = strtok(pool->sockbuf, "\n"); if (!tok) { applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line"); goto out; } sret = strdup(tok); len = strlen(sret); /* Copy what's left in the buffer after the \n, including the * terminating \0 */ if (buflen > len + 1) memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1); else strcpy(pool->sockbuf, ""); pool->cgminer_pool_stats.times_received++; pool->cgminer_pool_stats.bytes_received += len; pool->cgminer_pool_stats.net_bytes_received += len; out: if (!sret) clear_sock(pool); else if (opt_protocol) applog(LOG_DEBUG, "RECVD: %s", sret); return sret; } /* Extracts a string value from a json array with error checking. To be used * when the value of the string returned is only examined and not to be stored. 
* See json_array_string below */
static char *__json_array_string(json_t *val, unsigned int entry)
{
	json_t *arr_entry;

	if (json_is_null(val))
		return NULL;
	if (!json_is_array(val))
		return NULL;
	/* Fix: the index must be strictly less than the array size.  The
	 * old "entry > size" test let entry == size slip through, relying
	 * on json_array_get returning NULL and the string check below to
	 * catch it. */
	if (entry >= json_array_size(val))
		return NULL;
	arr_entry = json_array_get(val, entry);
	if (!json_is_string(arr_entry))
		return NULL;

	return (char *)json_string_value(arr_entry);
}

/* Creates a freshly malloced dup of __json_array_string */
static char *json_array_string(json_t *val, unsigned int entry)
{
	char *buf = __json_array_string(val, entry);

	if (buf)
		return strdup(buf);
	return NULL;
}

static char *blank_merkle = "0000000000000000000000000000000000000000000000000000000000000000";

static bool parse_notify(struct pool *pool, json_t *val)
{
	char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit,
	     *ntime, header[228];
	unsigned char *cb1 = NULL, *cb2 = NULL;
	size_t cb1_len, cb2_len, alloc_len;
	bool clean, ret = false;
	int merkles, i;
	json_t *arr;

	arr = json_array_get(val, 4);
	if (!arr || !json_is_array(arr))
		goto out;

	merkles = json_array_size(arr);
	job_id = json_array_string(val, 0);
	prev_hash = __json_array_string(val, 1);
	coinbase1 = json_array_string(val, 2);
	coinbase2 = json_array_string(val, 3);
	bbversion = __json_array_string(val, 5);
	nbit = __json_array_string(val, 6);
	ntime = __json_array_string(val, 7);
	clean = json_is_true(json_array_get(val, 8));

	if (!valid_hex(job_id) || !valid_hex(prev_hash) ||
	    !valid_hex(coinbase1) || !valid_hex(coinbase2) ||
	    !valid_hex(bbversion) || !valid_hex(nbit) ||
	    !valid_hex(ntime)) {
		/* Annoying but we must not leak memory */
		free(job_id);
		free(coinbase1);
		free(coinbase2);
		goto out;
	}

	cg_wlock(&pool->data_lock);
	free(pool->swork.job_id);
	pool->swork.job_id = job_id;
	snprintf(pool->prev_hash, 65, "%s", prev_hash);
	cb1_len = strlen(coinbase1) / 2;
	cb2_len = strlen(coinbase2) / 2;
	snprintf(pool->bbversion, 9, "%s", bbversion);
	snprintf(pool->nbit, 9, "%s", nbit);
	snprintf(pool->ntime, 9, "%s", ntime);
	pool->swork.clean = clean;
alloc_len = pool->coinbase_len = cb1_len + pool->n1_len + pool->n2size + cb2_len; pool->nonce2_offset = cb1_len + pool->n1_len; for (i = 0; i < pool->merkles; i++) free(pool->swork.merkle_bin[i]); if (merkles) { pool->swork.merkle_bin = realloc(pool->swork.merkle_bin, sizeof(char *) * merkles + 1); for (i = 0; i < merkles; i++) { char *merkle = json_array_string(arr, i); pool->swork.merkle_bin[i] = malloc(32); if (unlikely(!pool->swork.merkle_bin[i])) quit(1, "Failed to malloc pool swork merkle_bin"); if (opt_protocol) applog(LOG_DEBUG, "merkle %d: %s", i, merkle); ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32); free(merkle); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify"); goto out_unlock; } } } pool->merkles = merkles; if (clean) pool->nonce2 = 0; #if 0 header_len = strlen(pool->bbversion) + strlen(pool->prev_hash); /* merkle_hash */ 32 + strlen(pool->ntime) + strlen(pool->nbit) + /* nonce */ 8 + /* workpadding */ 96; #endif snprintf(header, 225, "%s%s%s%s%s%s%s", pool->bbversion, pool->prev_hash, blank_merkle, pool->ntime, pool->nbit, "00000000", /* nonce */ workpadding); ret = hex2bin(pool->header_bin, header, 112); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify"); goto out_unlock; } cb1 = alloca(cb1_len); ret = hex2bin(cb1, coinbase1, cb1_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify"); goto out_unlock; } cb2 = alloca(cb2_len); ret = hex2bin(cb2, coinbase2, cb2_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in parse_notify"); goto out_unlock; } free(pool->coinbase); align_len(&alloc_len); pool->coinbase = calloc(alloc_len, 1); if (unlikely(!pool->coinbase)) quit(1, "Failed to calloc pool coinbase in parse_notify"); memcpy(pool->coinbase, cb1, cb1_len); memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len); memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, 
cb2_len); if (opt_debug) { char *cb = bin2hex(pool->coinbase, pool->coinbase_len); applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb); free(cb); } out_unlock: cg_wunlock(&pool->data_lock); if (opt_protocol) { applog(LOG_DEBUG, "job_id: %s", job_id); applog(LOG_DEBUG, "prev_hash: %s", prev_hash); applog(LOG_DEBUG, "coinbase1: %s", coinbase1); applog(LOG_DEBUG, "coinbase2: %s", coinbase2); applog(LOG_DEBUG, "bbversion: %s", bbversion); applog(LOG_DEBUG, "nbit: %s", nbit); applog(LOG_DEBUG, "ntime: %s", ntime); applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no"); } free(coinbase1); free(coinbase2); /* A notify message is the closest stratum gets to a getwork */ pool->getwork_requested++; total_getworks++; if (pool == current_pool()) opt_work_update = true; out: return ret; } static bool parse_diff(struct pool *pool, json_t *val) { double old_diff, diff; diff = json_number_value(json_array_get(val, 0)); if (diff == 0) return false; cg_wlock(&pool->data_lock); old_diff = pool->sdiff; pool->sdiff = diff; cg_wunlock(&pool->data_lock); if (old_diff != diff) { int idiff = diff; if ((double)idiff == diff) applog(LOG_NOTICE, "Pool %d difficulty changed to %d", pool->pool_no, idiff); else applog(LOG_NOTICE, "Pool %d difficulty changed to %.1f", pool->pool_no, diff); } else applog(LOG_DEBUG, "Pool %d difficulty set to %f", pool->pool_no, diff); return true; } static void __suspend_stratum(struct pool *pool) { clear_sockbuf(pool); pool->stratum_active = pool->stratum_notify = false; if (pool->sock) CLOSESOCKET(pool->sock); pool->sock = 0; } static bool parse_reconnect(struct pool *pool, json_t *val) { char *sockaddr_url, *stratum_port, *tmp; char *url, *port, address[256]; memset(address, 0, 255); url = (char *)json_string_value(json_array_get(val, 0)); if (!url) url = pool->sockaddr_url; else { char *dot_pool, *dot_reconnect; dot_pool = strchr(pool->sockaddr_url, '.'); if (!dot_pool) { applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'", 
pool->sockaddr_url); return false; } dot_reconnect = strchr(url, '.'); if (!dot_reconnect) { applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", url); return false; } if (strcmp(dot_pool, dot_reconnect)) { applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", pool->sockaddr_url); return false; } } port = (char *)json_string_value(json_array_get(val, 1)); if (!port) port = pool->stratum_port; snprintf(address, 254, "%s:%s", url, port); if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) return false; applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address); clear_pool_work(pool); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); tmp = pool->sockaddr_url; pool->sockaddr_url = sockaddr_url; pool->stratum_url = pool->sockaddr_url; free(tmp); tmp = pool->stratum_port; pool->stratum_port = stratum_port; free(tmp); mutex_unlock(&pool->stratum_lock); if (!restart_stratum(pool)) { pool_failed(pool); return false; } return true; } static bool send_version(struct pool *pool, json_t *val) { char s[RBUFSIZE]; int id = json_integer_value(json_object_get(val, "id")); if (!id) return false; sprintf(s, "{\"id\": %d, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", id); if (!stratum_send(pool, s, strlen(s))) return false; return true; } static bool show_message(struct pool *pool, json_t *val) { char *msg; if (!json_is_array(val)) return false; msg = (char *)json_string_value(json_array_get(val, 0)); if (!msg) return false; applog(LOG_NOTICE, "Pool %d message: %s", pool->pool_no, msg); return true; } bool parse_method(struct pool *pool, char *s) { json_t *val = NULL, *method, *err_val, *params; json_error_t err; bool ret = false; char *buf; if (!s) goto out; val = JSON_LOADS(s, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } method = json_object_get(val, "method"); if (!method) goto out_decref; err_val = 
json_object_get(val, "error"); params = json_object_get(val, "params"); if (err_val && !json_is_null(err_val)) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss); free(ss); goto out_decref; } buf = (char *)json_string_value(method); if (!buf) goto out_decref; if (!strncasecmp(buf, "mining.notify", 13)) { if (parse_notify(pool, params)) pool->stratum_notify = ret = true; else pool->stratum_notify = ret = false; goto out_decref; } if (!strncasecmp(buf, "mining.set_difficulty", 21)) { ret = parse_diff(pool, params); goto out_decref; } if (!strncasecmp(buf, "client.reconnect", 16)) { ret = parse_reconnect(pool, params); goto out_decref; } if (!strncasecmp(buf, "client.get_version", 18)) { ret = send_version(pool, val); goto out_decref; } if (!strncasecmp(buf, "client.show_message", 19)) { ret = show_message(pool, params); goto out_decref; } out_decref: json_decref(val); out: return ret; } bool auth_stratum(struct pool *pool) { json_t *val = NULL, *res_val, *err_val; char s[RBUFSIZE], *sret = NULL; json_error_t err; bool ret = false; sprintf(s, "{\"id\": %d, \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}", swork_id++, pool->rpc_user, pool->rpc_pass); if (!stratum_send(pool, s, strlen(s))) return ret; /* Parse all data in the queue and anything left should be auth */ while (42) { sret = recv_line(pool); if (!sret) return ret; if (parse_method(pool, sret)) free(sret); else break; } val = JSON_LOADS(sret, &err); free(sret); res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss); free(ss); suspend_stratum(pool); goto out; } ret = true; applog(LOG_INFO, "Stratum 
authorisation success for pool %d", pool->pool_no); pool->probed = true; successful_connect = true; out: json_decref(val); return ret; } static int recv_byte(int sockd) { char c; if (recv(sockd, &c, 1, 0) != -1) return c; return -1; } static bool http_negotiate(struct pool *pool, int sockd, bool http0) { char buf[1024]; int i, len; if (http0) { snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.0\r\n\r\n", pool->sockaddr_url, pool->stratum_port); } else { snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.1\r\nHost: %s:%s\r\n\r\n", pool->sockaddr_url, pool->stratum_port, pool->sockaddr_url, pool->stratum_port); } applog(LOG_DEBUG, "Sending proxy %s:%s - %s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); send(sockd, buf, strlen(buf), 0); len = recv(sockd, buf, 12, 0); if (len <= 0) { applog(LOG_WARNING, "Couldn't read from proxy %s:%s after sending CONNECT", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } buf[len] = '\0'; applog(LOG_DEBUG, "Received from proxy %s:%s - %s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); if (strcmp(buf, "HTTP/1.1 200") && strcmp(buf, "HTTP/1.0 200")) { applog(LOG_WARNING, "HTTP Error from proxy %s:%s - %s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); return false; } /* Ignore unwanted headers till we get desired response */ for (i = 0; i < 4; i++) { buf[i] = recv_byte(sockd); if (buf[i] == (char)-1) { applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } } while (strncmp(buf, "\r\n\r\n", 4)) { for (i = 0; i < 3; i++) buf[i] = buf[i + 1]; buf[3] = recv_byte(sockd); if (buf[3] == (char)-1) { applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } } applog(LOG_DEBUG, "Success negotiating with %s:%s HTTP proxy", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return true; } static bool socks5_negotiate(struct pool *pool, int sockd) { 
unsigned char atyp, uclen; unsigned short port; char buf[515]; int i, len; buf[0] = 0x05; buf[1] = 0x01; buf[2] = 0x00; applog(LOG_DEBUG, "Attempting to negotiate with %s:%s SOCKS5 proxy", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); send(sockd, buf, 3, 0); if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != buf[2]) { applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); return false; } buf[0] = 0x05; buf[1] = 0x01; buf[2] = 0x00; buf[3] = 0x03; len = (strlen(pool->sockaddr_url)); if (len > 255) len = 255; uclen = len; buf[4] = (uclen & 0xff); memcpy(buf + 5, pool->sockaddr_url, len); port = atoi(pool->stratum_port); buf[5 + len] = (port >> 8); buf[6 + len] = (port & 0xff); send(sockd, buf, (7 + len), 0); if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != 0x00) { applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); return false; } recv_byte(sockd); atyp = recv_byte(sockd); if (atyp == 0x01) { for (i = 0; i < 4; i++) recv_byte(sockd); } else if (atyp == 0x03) { len = recv_byte(sockd); for (i = 0; i < len; i++) recv_byte(sockd); } else { applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); return false; } for (i = 0; i < 2; i++) recv_byte(sockd); applog(LOG_DEBUG, "Success negotiating with %s:%s SOCKS5 proxy", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return true; } static bool socks4_negotiate(struct pool *pool, int sockd, bool socks4a) { unsigned short port; in_addr_t inp; char buf[515]; int i, len; buf[0] = 0x04; buf[1] = 0x01; port = atoi(pool->stratum_port); buf[2] = port >> 8; buf[3] = port & 0xff; sprintf(&buf[8], "CGMINER"); /* See if we've been given an IP address directly to avoid needing to * resolve it. 
*/ inp = inet_addr(pool->sockaddr_url); inp = ntohl(inp); if ((int)inp != -1) socks4a = false; else { /* Try to extract the IP address ourselves first */ struct addrinfo servinfobase, *servinfo, hints; servinfo = &servinfobase; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_INET; /* IPV4 only */ if (!getaddrinfo(pool->sockaddr_url, NULL, &hints, &servinfo)) { struct sockaddr_in *saddr_in = (struct sockaddr_in *)servinfo->ai_addr; inp = ntohl(saddr_in->sin_addr.s_addr); socks4a = false; freeaddrinfo(servinfo); } } if (!socks4a) { if ((int)inp == -1) { applog(LOG_WARNING, "Invalid IP address specified for socks4 proxy: %s", pool->sockaddr_url); return false; } buf[4] = (inp >> 24) & 0xFF; buf[5] = (inp >> 16) & 0xFF; buf[6] = (inp >> 8) & 0xFF; buf[7] = (inp >> 0) & 0xFF; send(sockd, buf, 16, 0); } else { /* This appears to not be working but hopefully most will be * able to resolve IP addresses themselves. */ buf[4] = 0; buf[5] = 0; buf[6] = 0; buf[7] = 1; len = strlen(pool->sockaddr_url); if (len > 255) len = 255; memcpy(&buf[16], pool->sockaddr_url, len); len += 16; buf[len++] = '\0'; send(sockd, buf, len, 0); } if (recv_byte(sockd) != 0x00 || recv_byte(sockd) != 0x5a) { applog(LOG_WARNING, "Bad response from %s:%s SOCKS4 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } for (i = 0; i < 6; i++) recv_byte(sockd); return true; } static void noblock_socket(SOCKETTYPE fd) { #ifndef WIN32 int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif } static void block_socket(SOCKETTYPE fd) { #ifndef WIN32 int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); #else u_long flags = 0; ioctlsocket(fd, FIONBIO, &flags); #endif } static bool sock_connecting(void) { #ifndef WIN32 return errno == EINPROGRESS; #else return WSAGetLastError() == WSAEWOULDBLOCK; #endif } static bool setup_stratum_socket(struct pool *pool) { struct 
addrinfo servinfobase, *servinfo, *hints, *p; char *sockaddr_url, *sockaddr_port; int sockd; mutex_lock(&pool->stratum_lock); pool->stratum_active = false; if (pool->sock) CLOSESOCKET(pool->sock); pool->sock = 0; mutex_unlock(&pool->stratum_lock); hints = &pool->stratum_hints; memset(hints, 0, sizeof(struct addrinfo)); hints->ai_family = AF_UNSPEC; hints->ai_socktype = SOCK_STREAM; servinfo = &servinfobase; if (!pool->rpc_proxy && opt_socks_proxy) { pool->rpc_proxy = opt_socks_proxy; extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); pool->rpc_proxytype = PROXY_SOCKS5; } if (pool->rpc_proxy) { sockaddr_url = pool->sockaddr_proxy_url; sockaddr_port = pool->sockaddr_proxy_port; } else { sockaddr_url = pool->sockaddr_url; sockaddr_port = pool->stratum_port; } if (getaddrinfo(sockaddr_url, sockaddr_port, hints, &servinfo) != 0) { if (!pool->probed) { applog(LOG_WARNING, "Failed to resolve (?wrong URL) %s:%s", sockaddr_url, sockaddr_port); pool->probed = true; } else { applog(LOG_INFO, "Failed to getaddrinfo for %s:%s", sockaddr_url, sockaddr_port); } return false; } for (p = servinfo; p != NULL; p = p->ai_next) { sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); if (sockd == -1) { applog(LOG_DEBUG, "Failed socket"); continue; } /* Iterate non blocking over entries returned by getaddrinfo * to cope with round robin DNS entries, finding the first one * we can connect to quickly. 
*/ noblock_socket(sockd); if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) { struct timeval tv_timeout = {1, 0}; int selret; fd_set rw; if (!sock_connecting()) { CLOSESOCKET(sockd); applog(LOG_DEBUG, "Failed sock connect"); continue; } retry: FD_ZERO(&rw); FD_SET(sockd, &rw); selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout); if (selret > 0 && FD_ISSET(sockd, &rw)) { socklen_t len; int err, n; len = sizeof(err); n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); if (!n && !err) { applog(LOG_DEBUG, "Succeeded delayed connect"); block_socket(sockd); break; } } if (selret < 0 && interrupted()) goto retry; CLOSESOCKET(sockd); applog(LOG_DEBUG, "Select timeout/failed connect"); continue; } applog(LOG_WARNING, "Succeeded immediate connect"); block_socket(sockd); break; } if (p == NULL) { applog(LOG_INFO, "Failed to connect to stratum on %s:%s", sockaddr_url, sockaddr_port); freeaddrinfo(servinfo); return false; } freeaddrinfo(servinfo); if (pool->rpc_proxy) { switch (pool->rpc_proxytype) { case PROXY_HTTP_1_0: if (!http_negotiate(pool, sockd, true)) return false; break; case PROXY_HTTP: if (!http_negotiate(pool, sockd, false)) return false; break; case PROXY_SOCKS5: case PROXY_SOCKS5H: if (!socks5_negotiate(pool, sockd)) return false; break; case PROXY_SOCKS4: if (!socks4_negotiate(pool, sockd, false)) return false; break; case PROXY_SOCKS4A: if (!socks4_negotiate(pool, sockd, true)) return false; break; default: applog(LOG_WARNING, "Unsupported proxy type for %s:%s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; break; } } if (!pool->sockbuf) { pool->sockbuf = calloc(RBUFSIZE, 1); if (!pool->sockbuf) quithere(1, "Failed to calloc pool sockbuf"); pool->sockbuf_size = RBUFSIZE; } pool->sock = sockd; keep_sockalive(sockd); return true; } static char *get_sessionid(json_t *val) { char *ret = NULL; json_t *arr_val; int arrsize, i; arr_val = json_array_get(val, 0); if (!arr_val || !json_is_array(arr_val)) goto out; arrsize = 
json_array_size(arr_val); for (i = 0; i < arrsize; i++) { json_t *arr = json_array_get(arr_val, i); char *notify; if (!arr | !json_is_array(arr)) break; notify = __json_array_string(arr, 0); if (!notify) continue; if (!strncasecmp(notify, "mining.notify", 13)) { ret = json_array_string(arr, 1); break; } } out: return ret; } void suspend_stratum(struct pool *pool) { applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); mutex_unlock(&pool->stratum_lock); } bool initiate_stratum(struct pool *pool) { bool ret = false, recvd = false, noresume = false, sockd = false; char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid; json_t *val = NULL, *res_val, *err_val; json_error_t err; int n2size; resend: if (!setup_stratum_socket(pool)) { sockd = false; goto out; } sockd = true; if (recvd) { /* Get rid of any crap lying around if we're resending */ clear_sock(pool); sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++); } else { if (pool->sessionid) sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid); else sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++); } if (__stratum_send(pool, s, strlen(s)) != SEND_OK) { applog(LOG_DEBUG, "Failed to send s in initiate_stratum"); goto out; } if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum"); goto out; } sret = recv_line(pool); if (!sret) goto out; recvd = true; val = JSON_LOADS(sret, &err); free(sret); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_null(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss 
= strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC decode failed: %s", ss); free(ss); goto out; } sessionid = get_sessionid(res_val); if (!sessionid) applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum"); nonce1 = json_array_string(res_val, 1); if (!valid_hex(nonce1)) { applog(LOG_INFO, "Failed to get valid nonce1 in initiate_stratum"); free(sessionid); goto out; } n2size = json_integer_value(json_array_get(res_val, 2)); if (n2size < 2 || n2size > 16) { applog(LOG_INFO, "Failed to get valid n2size in initiate_stratum"); free(sessionid); free(nonce1); goto out; } cg_wlock(&pool->data_lock); pool->sessionid = sessionid; pool->nonce1 = nonce1; pool->n1_len = strlen(nonce1) / 2; free(pool->nonce1bin); pool->nonce1bin = calloc(pool->n1_len, 1); if (unlikely(!pool->nonce1bin)) quithere(1, "Failed to calloc pool->nonce1bin"); hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len); pool->n2size = n2size; cg_wunlock(&pool->data_lock); if (sessionid) applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid); ret = true; out: if (ret) { if (!pool->stratum_url) pool->stratum_url = pool->sockaddr_url; pool->stratum_active = true; pool->sdiff = 1; if (opt_protocol) { applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d", pool->pool_no, pool->nonce1, pool->n2size); } } else { if (recvd && !noresume) { /* Reset the sessionid used for stratum resuming in case the pool * does not support it, or does not know how to respond to the * presence of the sessionid parameter. 
*/ cg_wlock(&pool->data_lock); free(pool->sessionid); free(pool->nonce1); pool->sessionid = pool->nonce1 = NULL; cg_wunlock(&pool->data_lock); applog(LOG_DEBUG, "Failed to resume stratum, trying afresh"); noresume = true; json_decref(val); goto resend; } applog(LOG_DEBUG, "Initiate stratum failed"); if (sockd) suspend_stratum(pool); } json_decref(val); return ret; } bool restart_stratum(struct pool *pool) { if (pool->stratum_active) suspend_stratum(pool); if (!initiate_stratum(pool)) return false; if (!auth_stratum(pool)) return false; return true; } void dev_error(struct cgpu_info *dev, enum dev_reason reason) { dev->device_last_not_well = time(NULL); dev->device_not_well_reason = reason; switch (reason) { case REASON_THREAD_FAIL_INIT: dev->thread_fail_init_count++; break; case REASON_THREAD_ZERO_HASH: dev->thread_zero_hash_count++; break; case REASON_THREAD_FAIL_QUEUE: dev->thread_fail_queue_count++; break; case REASON_DEV_SICK_IDLE_60: dev->dev_sick_idle_60_count++; break; case REASON_DEV_DEAD_IDLE_600: dev->dev_dead_idle_600_count++; break; case REASON_DEV_NOSTART: dev->dev_nostart_count++; break; case REASON_DEV_OVER_HEAT: dev->dev_over_heat_count++; break; case REASON_DEV_THERMAL_CUTOFF: dev->dev_thermal_cutoff_count++; break; case REASON_DEV_COMMS_ERROR: dev->dev_comms_error_count++; break; case REASON_DEV_THROTTLE: dev->dev_throttle_count++; break; } } /* Realloc an existing string to fit an extra string s, appending s to it. 
*/ void *realloc_strcat(char *ptr, char *s) { size_t old = 0, len = strlen(s); char *ret; if (!len) return ptr; if (ptr) old = strlen(ptr); len += old + 1; align_len(&len); ret = malloc(len); if (unlikely(!ret)) quithere(1, "Failed to malloc"); if (ptr) { sprintf(ret, "%s%s", ptr, s); free(ptr); } else sprintf(ret, "%s", s); return ret; } /* Make a text readable version of a string using 0xNN for < ' ' or > '~' * Including 0x00 at the end * You must free the result yourself */ void *str_text(char *ptr) { unsigned char *uptr; char *ret, *txt; if (ptr == NULL) { ret = strdup("(null)"); if (unlikely(!ret)) quithere(1, "Failed to malloc null"); } uptr = (unsigned char *)ptr; ret = txt = malloc(strlen(ptr)*4+5); // Guaranteed >= needed if (unlikely(!txt)) quithere(1, "Failed to malloc txt"); do { if (*uptr < ' ' || *uptr > '~') { sprintf(txt, "0x%02x", *uptr); txt += 4; } else *(txt++) = *uptr; } while (*(uptr++)); *txt = '\0'; return ret; } void RenameThread(const char* name) { char buf[16]; snprintf(buf, sizeof(buf), "cg@%s", name); #if defined(PR_SET_NAME) // Only the first 15 characters are used (16 - NUL terminator) prctl(PR_SET_NAME, buf, 0, 0, 0); #elif (defined(__FreeBSD__) || defined(__OpenBSD__)) pthread_set_name_np(pthread_self(), buf); #elif defined(MAC_OSX) pthread_setname_np(buf); #else // Prevent warnings (void)buf; #endif } /* cgminer specific wrappers for true unnamed semaphore usage on platforms * that support them and for apple which does not. We use a single byte across * a pipe to emulate semaphore behaviour there. */ #ifdef __APPLE__ void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) { int flags, fd, i; if (pipe(cgsem->pipefd) == -1) quitfrom(1, file, func, line, "Failed pipe errno=%d", errno); /* Make the pipes FD_CLOEXEC to allow them to close should we call * execv on restart. 
*/ for (i = 0; i < 2; i++) { fd = cgsem->pipefd[i]; flags = fcntl(fd, F_GETFD, 0); flags |= FD_CLOEXEC; if (fcntl(fd, F_SETFD, flags) == -1) quitfrom(1, file, func, line, "Failed to fcntl errno=%d", errno); } } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { const char buf = 1; int ret; retry: ret = write(cgsem->pipefd[1], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to write errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { char buf; int ret; retry: ret = read(cgsem->pipefd[0], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to read errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void cgsem_destroy(cgsem_t *cgsem) { close(cgsem->pipefd[1]); close(cgsem->pipefd[0]); } /* This is similar to sem_timedwait but takes a millisecond value */ int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timeval timeout; int ret, fd; fd_set rd; char buf; retry: fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); ms_to_timeval(&timeout, ms); ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) { ret = read(fd, &buf, 1); return 0; } if (likely(!ret)) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); /* We don't reach here */ return 0; } /* Reset semaphore count back to zero */ void cgsem_reset(cgsem_t *cgsem) { int ret, fd; fd_set rd; char buf; fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); do { struct timeval timeout = {0, 0}; ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) ret = read(fd, &buf, 1); else if (unlikely(ret < 0 && interrupted())) ret = 1; } while (ret > 0); } #else void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, 
const int line) { int ret; if ((ret = sem_init(cgsem, 0, 0))) quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { if (unlikely(sem_post(cgsem))) quitfrom(1, file, func, line, "Failed to sem_post errno=%d cgsem=0x%p", errno, cgsem); } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { retry: if (unlikely(sem_wait(cgsem))) { if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_wait errno=%d cgsem=0x%p", errno, cgsem); } } int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timespec abs_timeout, ts_now; struct timeval tv_now; int ret; cgtime(&tv_now); timeval_to_spec(&ts_now, &tv_now); ms_to_timespec(&abs_timeout, ms); retry: timeraddspec(&abs_timeout, &ts_now); ret = sem_timedwait(cgsem, &abs_timeout); if (ret) { if (likely(sock_timeout())) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); } return 0; } void cgsem_reset(cgsem_t *cgsem) { int ret; do { ret = sem_trywait(cgsem); if (unlikely(ret < 0 && interrupted())) ret = 0; } while (!ret); } void cgsem_destroy(cgsem_t *cgsem) { sem_destroy(cgsem); } #endif /* Provide a completion_timeout helper function for unreliable functions that * may die due to driver issues etc that time out if the function fails and * can then reliably return. 
*/ struct cg_completion { cgsem_t cgsem; void (*fn)(void *fnarg); void *fnarg; }; void *completion_thread(void *arg) { struct cg_completion *cgc = (struct cg_completion *)arg; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); cgc->fn(cgc->fnarg); cgsem_post(&cgc->cgsem); return NULL; } bool cg_completion_timeout(void *fn, void *fnarg, int timeout) { struct cg_completion *cgc; pthread_t pthread; bool ret = false; cgc = malloc(sizeof(struct cg_completion)); if (unlikely(!cgc)) return ret; cgsem_init(&cgc->cgsem); cgc->fn = fn; cgc->fnarg = fnarg; pthread_create(&pthread, NULL, completion_thread, (void *)cgc); ret = cgsem_mswait(&cgc->cgsem, timeout); if (!ret) { pthread_join(pthread, NULL); free(cgc); } else pthread_cancel(pthread); return !ret; } void _cg_memcpy(void *dest, const void *src, unsigned int n, const char *file, const char *func, const int line) { if (unlikely(n < 1 || n > (1ul << 31))) { applog(LOG_ERR, "ERR: Asked to memcpy %u bytes from %s %s():%d", n, file, func, line); return; } memcpy(dest, src, n); }
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; *sockaddr_url = url; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return false; sprintf(url_address, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); *sockaddr_port = strdup(port); *sockaddr_url = strdup(url_address); return true; }
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; *sockaddr_url = url; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return false; snprintf(url_address, 254, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); *sockaddr_port = strdup(port); *sockaddr_url = strdup(url_address); return true; }
{'added': [(2, ' * Copyright 2011-2014 Con Kolivas'), (723, 'static bool _valid_hex(char *s, const char *file, const char *func, const int line)'), (724, '{'), (725, '\tbool ret = false;'), (726, '\tint i, len;'), (727, ''), (728, '\tif (unlikely(!s)) {'), (729, '\t\tapplog(LOG_ERR, "Null string passed to valid_hex from"IN_FMT_FFL, file, func, line);'), (730, '\t\treturn ret;'), (731, '\t}'), (732, '\tlen = strlen(s);'), (733, '\tif (unlikely(!len)) {'), (734, '\t\tapplog(LOG_ERR, "Zero length string passed to valid_hex from"IN_FMT_FFL, file, func, line);'), (735, '\t\treturn ret;'), (736, '\t}'), (737, '\tfor (i = 0; i < len; i++) {'), (738, '\t\tunsigned char idx = s[i];'), (739, ''), (740, '\t\tif (unlikely(hex2bin_tbl[idx] < 0)) {'), (741, '\t\t\tapplog(LOG_ERR, "Invalid char %x passed to valid_hex from"IN_FMT_FFL, idx, file, func, line);'), (742, '\t\t\treturn ret;'), (743, '\t\t}'), (744, '\t}'), (745, '\tret = true;'), (746, '\treturn ret;'), (747, '}'), (748, ''), (749, '#define valid_hex(s) _valid_hex(s, __FILE__, __func__, __LINE__)'), (750, ''), (1406, '\tsnprintf(url_address, 254, "%.*s", url_len, url_begin);'), (1716, '\tif (!valid_hex(job_id) || !valid_hex(prev_hash) || !valid_hex(coinbase1) ||'), (1717, '\t !valid_hex(coinbase2) || !valid_hex(bbversion) || !valid_hex(nbit) ||'), (1718, '\t !valid_hex(ntime)) {'), (1720, '\t\tfree(job_id);'), (1721, '\t\tfree(coinbase1);'), (1722, '\t\tfree(coinbase2);'), (1910, '\tsnprintf(address, 254, "%s:%s", url, port);'), (2581, '\tif (!valid_hex(nonce1)) {'), (2582, '\t\tapplog(LOG_INFO, "Failed to get valid nonce1 in initiate_stratum");'), (2587, '\tif (n2size < 2 || n2size > 16) {'), (2588, '\t\tapplog(LOG_INFO, "Failed to get valid n2size in initiate_stratum");')], 'deleted': [(2, ' * Copyright 2011-2013 Con Kolivas'), (1378, '\tsprintf(url_address, "%.*s", url_len, url_begin);'), (1688, '\tif (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) {'), (1690, '\t\tif (job_id)'), 
(1691, '\t\t\tfree(job_id);'), (1692, '\t\tif (coinbase1)'), (1693, '\t\t\tfree(coinbase1);'), (1694, '\t\tif (coinbase2)'), (1695, '\t\t\tfree(coinbase2);'), (1883, '\tsprintf(address, "%s:%s", url, port);'), (2554, '\tif (!nonce1) {'), (2555, '\t\tapplog(LOG_INFO, "Failed to get nonce1 in initiate_stratum");'), (2560, '\tif (!n2size) {'), (2561, '\t\tapplog(LOG_INFO, "Failed to get n2size in initiate_stratum");')]}
41
14
2,371
16,204
40
261
10
https://github.com/ckolivas/cgminer
CVE-2014-4501
CWE-119
2,885
iommu.c
C
kvm_pin_pages
/* * Copyright (c) 2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2006-2008 Intel Corporation * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: Allen M. Kay <allen.m.kay@intel.com> * Author: Weidong Han <weidong.han@intel.com> * Author: Ben-Ami Yassour <benami@il.ibm.com> */ #include <linux/list.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/dmar.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> static bool allow_unsafe_assigned_interrupts; module_param_named(allow_unsafe_assigned_interrupts, allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(allow_unsafe_assigned_interrupts, "Enable device assignment on platforms without interrupt remapping support."); static int kvm_iommu_unmap_memslots(struct kvm *kvm); static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, unsigned long size) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(slot, gfn); end_gfn = gfn + (size >> PAGE_SHIFT); gfn += 1; if (is_error_noslot_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(slot, gfn++); return pfn; } static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) { 
unsigned long i; for (i = 0; i < npages; ++i) kvm_release_pfn_clean(pfn + i); } int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { gfn_t gfn, end_gfn; pfn_t pfn; int r = 0; struct iommu_domain *domain = kvm->arch.iommu_domain; int flags; /* check if iommu exists and in use */ if (!domain) return 0; gfn = slot->base_gfn; end_gfn = gfn + slot->npages; flags = IOMMU_READ; if (!(slot->flags & KVM_MEM_READONLY)) flags |= IOMMU_WRITE; if (!kvm->arch.iommu_noncoherent) flags |= IOMMU_CACHE; while (gfn < end_gfn) { unsigned long page_size; /* Check if already mapped */ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { gfn += 1; continue; } /* Get the page size we could use to map */ page_size = kvm_host_page_size(kvm, gfn); /* Make sure the page_size does not exceed the memslot */ while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) page_size >>= 1; /* Make sure gfn is aligned to the page size we want to map */ while ((gfn << PAGE_SHIFT) & (page_size - 1)) page_size >>= 1; /* Make sure hva is aligned to the page size we want to map */ while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1)) page_size >>= 1; /* * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. 
*/ pfn = kvm_pin_pages(slot, gfn, page_size); if (is_error_noslot_pfn(pfn)) { gfn += 1; continue; } /* Map into IO address space */ r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), page_size, flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); kvm_unpin_pages(kvm, pfn, page_size); goto unmap_pages; } gfn += page_size >> PAGE_SHIFT; } return 0; unmap_pages: kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); return r; } static int kvm_iommu_map_memslots(struct kvm *kvm) { int idx, r = 0; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; if (kvm->arch.iommu_noncoherent) kvm_arch_register_noncoherent_dma(kvm); idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { r = kvm_iommu_map_pages(kvm, memslot); if (r) break; } srcu_read_unlock(&kvm->srcu, idx); return r; } int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct pci_dev *pdev = NULL; struct iommu_domain *domain = kvm->arch.iommu_domain; int r; bool noncoherent; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; r = iommu_attach_device(domain, &pdev->dev); if (r) { dev_err(&pdev->dev, "kvm assign device failed ret %d", r); return r; } noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY); /* Check if need to update IOMMU page table for guest memory */ if (noncoherent != kvm->arch.iommu_noncoherent) { kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_noncoherent = noncoherent; r = kvm_iommu_map_memslots(kvm); if (r) goto out_unmap; } pci_set_dev_assigned(pdev); dev_info(&pdev->dev, "kvm assign device\n"); return 0; out_unmap: kvm_iommu_unmap_memslots(kvm); return r; } int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use 
*/ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); pci_clear_dev_assigned(pdev); dev_info(&pdev->dev, "kvm deassign device\n"); return 0; } int kvm_iommu_map_guest(struct kvm *kvm) { int r; if (!iommu_present(&pci_bus_type)) { printk(KERN_ERR "%s: iommu not found\n", __func__); return -ENODEV; } mutex_lock(&kvm->slots_lock); kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); if (!kvm->arch.iommu_domain) { r = -ENOMEM; goto out_unlock; } if (!allow_unsafe_assigned_interrupts && !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) { printk(KERN_WARNING "%s: No interrupt remapping support," " disallowing device assignment." " Re-enble with \"allow_unsafe_assigned_interrupts=1\"" " module option.\n", __func__); iommu_domain_free(kvm->arch.iommu_domain); kvm->arch.iommu_domain = NULL; r = -EPERM; goto out_unlock; } r = kvm_iommu_map_memslots(kvm); if (r) kvm_iommu_unmap_memslots(kvm); out_unlock: mutex_unlock(&kvm->slots_lock); return r; } static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) { struct iommu_domain *domain; gfn_t end_gfn, gfn; pfn_t pfn; u64 phys; domain = kvm->arch.iommu_domain; end_gfn = base_gfn + npages; gfn = base_gfn; /* check if iommu exists and in use */ if (!domain) return; while (gfn < end_gfn) { unsigned long unmap_pages; size_t size; /* Get physical address */ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); if (!phys) { gfn++; continue; } pfn = phys >> PAGE_SHIFT; /* Unmap address from IO address space */ size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); unmap_pages = 1ULL << get_order(size); /* Unpin all pages we just unmapped to not leak any memory */ kvm_unpin_pages(kvm, pfn, unmap_pages); gfn += unmap_pages; } } void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); } static int kvm_iommu_unmap_memslots(struct kvm *kvm) { int idx; struct 
kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) kvm_iommu_unmap_pages(kvm, memslot); srcu_read_unlock(&kvm->srcu, idx); if (kvm->arch.iommu_noncoherent) kvm_arch_unregister_noncoherent_dma(kvm); return 0; } int kvm_iommu_unmap_guest(struct kvm *kvm) { struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; mutex_lock(&kvm->slots_lock); kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_domain = NULL; kvm->arch.iommu_noncoherent = false; mutex_unlock(&kvm->slots_lock); iommu_domain_free(domain); return 0; }
/* * Copyright (c) 2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2006-2008 Intel Corporation * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: Allen M. Kay <allen.m.kay@intel.com> * Author: Weidong Han <weidong.han@intel.com> * Author: Ben-Ami Yassour <benami@il.ibm.com> */ #include <linux/list.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/dmar.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> static bool allow_unsafe_assigned_interrupts; module_param_named(allow_unsafe_assigned_interrupts, allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(allow_unsafe_assigned_interrupts, "Enable device assignment on platforms without interrupt remapping support."); static int kvm_iommu_unmap_memslots(struct kvm *kvm); static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, unsigned long npages) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(slot, gfn); end_gfn = gfn + npages; gfn += 1; if (is_error_noslot_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(slot, gfn++); return pfn; } static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) { unsigned 
long i; for (i = 0; i < npages; ++i) kvm_release_pfn_clean(pfn + i); } int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { gfn_t gfn, end_gfn; pfn_t pfn; int r = 0; struct iommu_domain *domain = kvm->arch.iommu_domain; int flags; /* check if iommu exists and in use */ if (!domain) return 0; gfn = slot->base_gfn; end_gfn = gfn + slot->npages; flags = IOMMU_READ; if (!(slot->flags & KVM_MEM_READONLY)) flags |= IOMMU_WRITE; if (!kvm->arch.iommu_noncoherent) flags |= IOMMU_CACHE; while (gfn < end_gfn) { unsigned long page_size; /* Check if already mapped */ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { gfn += 1; continue; } /* Get the page size we could use to map */ page_size = kvm_host_page_size(kvm, gfn); /* Make sure the page_size does not exceed the memslot */ while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) page_size >>= 1; /* Make sure gfn is aligned to the page size we want to map */ while ((gfn << PAGE_SHIFT) & (page_size - 1)) page_size >>= 1; /* Make sure hva is aligned to the page size we want to map */ while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1)) page_size >>= 1; /* * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. 
*/ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); if (is_error_noslot_pfn(pfn)) { gfn += 1; continue; } /* Map into IO address space */ r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), page_size, flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); goto unmap_pages; } gfn += page_size >> PAGE_SHIFT; } return 0; unmap_pages: kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); return r; } static int kvm_iommu_map_memslots(struct kvm *kvm) { int idx, r = 0; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; if (kvm->arch.iommu_noncoherent) kvm_arch_register_noncoherent_dma(kvm); idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { r = kvm_iommu_map_pages(kvm, memslot); if (r) break; } srcu_read_unlock(&kvm->srcu, idx); return r; } int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct pci_dev *pdev = NULL; struct iommu_domain *domain = kvm->arch.iommu_domain; int r; bool noncoherent; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; r = iommu_attach_device(domain, &pdev->dev); if (r) { dev_err(&pdev->dev, "kvm assign device failed ret %d", r); return r; } noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY); /* Check if need to update IOMMU page table for guest memory */ if (noncoherent != kvm->arch.iommu_noncoherent) { kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_noncoherent = noncoherent; r = kvm_iommu_map_memslots(kvm); if (r) goto out_unmap; } pci_set_dev_assigned(pdev); dev_info(&pdev->dev, "kvm assign device\n"); return 0; out_unmap: kvm_iommu_unmap_memslots(kvm); return r; } int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check 
if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); pci_clear_dev_assigned(pdev); dev_info(&pdev->dev, "kvm deassign device\n"); return 0; } int kvm_iommu_map_guest(struct kvm *kvm) { int r; if (!iommu_present(&pci_bus_type)) { printk(KERN_ERR "%s: iommu not found\n", __func__); return -ENODEV; } mutex_lock(&kvm->slots_lock); kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); if (!kvm->arch.iommu_domain) { r = -ENOMEM; goto out_unlock; } if (!allow_unsafe_assigned_interrupts && !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) { printk(KERN_WARNING "%s: No interrupt remapping support," " disallowing device assignment." " Re-enble with \"allow_unsafe_assigned_interrupts=1\"" " module option.\n", __func__); iommu_domain_free(kvm->arch.iommu_domain); kvm->arch.iommu_domain = NULL; r = -EPERM; goto out_unlock; } r = kvm_iommu_map_memslots(kvm); if (r) kvm_iommu_unmap_memslots(kvm); out_unlock: mutex_unlock(&kvm->slots_lock); return r; } static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) { struct iommu_domain *domain; gfn_t end_gfn, gfn; pfn_t pfn; u64 phys; domain = kvm->arch.iommu_domain; end_gfn = base_gfn + npages; gfn = base_gfn; /* check if iommu exists and in use */ if (!domain) return; while (gfn < end_gfn) { unsigned long unmap_pages; size_t size; /* Get physical address */ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); if (!phys) { gfn++; continue; } pfn = phys >> PAGE_SHIFT; /* Unmap address from IO address space */ size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); unmap_pages = 1ULL << get_order(size); /* Unpin all pages we just unmapped to not leak any memory */ kvm_unpin_pages(kvm, pfn, unmap_pages); gfn += unmap_pages; } } void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); } static int kvm_iommu_unmap_memslots(struct 
kvm *kvm) { int idx; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) kvm_iommu_unmap_pages(kvm, memslot); srcu_read_unlock(&kvm->srcu, idx); if (kvm->arch.iommu_noncoherent) kvm_arch_unregister_noncoherent_dma(kvm); return 0; } int kvm_iommu_unmap_guest(struct kvm *kvm) { struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; mutex_lock(&kvm->slots_lock); kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_domain = NULL; kvm->arch.iommu_noncoherent = false; mutex_unlock(&kvm->slots_lock); iommu_domain_free(domain); return 0; }
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, unsigned long size) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(slot, gfn); end_gfn = gfn + (size >> PAGE_SHIFT); gfn += 1; if (is_error_noslot_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(slot, gfn++); return pfn; }
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, unsigned long npages) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(slot, gfn); end_gfn = gfn + npages; gfn += 1; if (is_error_noslot_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(slot, gfn++); return pfn; }
{'added': [(46, '\t\t\t unsigned long npages)'), (52, '\tend_gfn = gfn + npages;'), (122, '\t\tpfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);'), (134, '\t\t\tkvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);')], 'deleted': [(46, '\t\t\t unsigned long size)'), (52, '\tend_gfn = gfn + (size >> PAGE_SHIFT);'), (122, '\t\tpfn = kvm_pin_pages(slot, gfn, page_size);'), (134, '\t\t\tkvm_unpin_pages(kvm, pfn, page_size);')]}
4
4
238
1,316
14
73
3
https://github.com/torvalds/linux
CVE-2014-8369
CWE-119
440
scsi-disk.c
C
scsi_read_data
/* * SCSI Device emulation * * Copyright (c) 2006 CodeSourcery. * Based on code by Fabrice Bellard * * Written by Paul Brook * Modifications: * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case * when the allocation length of CDB is smaller * than 36. * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the * MODE SENSE response. * * This code is licensed under the LGPL. * * Note that this file only handles the SCSI architecture model and device * commands. Emulation of interface/link layer protocols is handled by * the host adapter emulator. */ //#define DEBUG_SCSI #ifdef DEBUG_SCSI #define DPRINTF(fmt, ...) \ do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0) #else #define DPRINTF(fmt, ...) do {} while(0) #endif #define BADF(fmt, ...) \ do { fprintf(stderr, "scsi-disk: " fmt , ## __VA_ARGS__); } while (0) #include "qemu-common.h" #include "qemu-error.h" #include "scsi.h" #include "scsi-defs.h" #include "sysemu.h" #include "blockdev.h" #include "block_int.h" #define SCSI_DMA_BUF_SIZE 131072 #define SCSI_MAX_INQUIRY_LEN 256 #define SCSI_REQ_STATUS_RETRY 0x01 #define SCSI_REQ_STATUS_RETRY_TYPE_MASK 0x06 #define SCSI_REQ_STATUS_RETRY_READ 0x00 #define SCSI_REQ_STATUS_RETRY_WRITE 0x02 #define SCSI_REQ_STATUS_RETRY_FLUSH 0x04 typedef struct SCSIDiskState SCSIDiskState; typedef struct SCSIDiskReq { SCSIRequest req; /* Both sector and sector_count are in terms of qemu 512 byte blocks. */ uint64_t sector; uint32_t sector_count; struct iovec iov; QEMUIOVector qiov; uint32_t status; BlockAcctCookie acct; } SCSIDiskReq; struct SCSIDiskState { SCSIDevice qdev; BlockDriverState *bs; /* The qemu block layer uses a fixed 512 byte sector size. This is the number of 512 byte blocks in a single scsi sector. 
*/ int cluster_size; uint32_t removable; uint64_t max_lba; QEMUBH *bh; char *version; char *serial; bool tray_open; bool tray_locked; }; static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type); static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf); static void scsi_free_request(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); qemu_vfree(r->iov.iov_base); } /* Helper function for command completion with sense. */ static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) { DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n", r->req.tag, sense.key, sense.asc, sense.ascq); scsi_req_build_sense(&r->req, sense); scsi_req_complete(&r->req, CHECK_CONDITION); } /* Cancel a pending data transfer. */ static void scsi_cancel_io(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); DPRINTF("Cancel tag=0x%x\n", req->tag); if (r->req.aiocb) { bdrv_aio_cancel(r->req.aiocb); } r->req.aiocb = NULL; } static void scsi_read_complete(void * opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); int n; if (r->req.aiocb != NULL) { r->req.aiocb = NULL; bdrv_acct_done(s->bs, &r->acct); } if (ret) { if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_READ)) { return; } } DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->iov.iov_len); n = r->iov.iov_len / 512; r->sector += n; r->sector_count -= n; scsi_req_data(&r->req, r->iov.iov_len); } static void scsi_flush_complete(void * opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); if (r->req.aiocb != NULL) { r->req.aiocb = NULL; bdrv_acct_done(s->bs, &r->acct); } if (ret < 0) { if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) { return; } } scsi_req_complete(&r->req, GOOD); } /* Read more data from scsi device into buffer. 
*/ static void scsi_read_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF("Read buf_len=%zd\n", r->iov.iov_len); r->sector_count = 0; scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF("Read sector_count=%d\n", r->sector_count); if (r->sector_count == 0) { /* This also clears the sense buffer for REQUEST SENSE. */ scsi_req_complete(&r->req, GOOD); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { DPRINTF("Data transfer direction invalid\n"); scsi_read_complete(r, -EINVAL); return; } n = r->sector_count; if (n > SCSI_DMA_BUF_SIZE / 512) n = SCSI_DMA_BUF_SIZE / 512; if (s->tray_open) { scsi_read_complete(r, -ENOMEDIUM); } r->iov.iov_len = n * 512; qemu_iovec_init_external(&r->qiov, &r->iov, 1); bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ); r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n, scsi_read_complete, r); if (r->req.aiocb == NULL) { scsi_read_complete(r, -EIO); } } static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type) { int is_read = (type == SCSI_REQ_STATUS_RETRY_READ); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); BlockErrorAction action = bdrv_get_on_error(s->bs, is_read); if (action == BLOCK_ERR_IGNORE) { bdrv_mon_event(s->bs, BDRV_ACTION_IGNORE, is_read); return 0; } if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC) || action == BLOCK_ERR_STOP_ANY) { type &= SCSI_REQ_STATUS_RETRY_TYPE_MASK; r->status |= SCSI_REQ_STATUS_RETRY | type; bdrv_mon_event(s->bs, BDRV_ACTION_STOP, is_read); vm_stop(VMSTOP_DISKFULL); } else { switch (error) { case ENOMEM: scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); break; case EINVAL: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); break; default: scsi_check_condition(r, SENSE_CODE(IO_ERROR)); break; } 
bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read); } return 1; } static void scsi_write_complete(void * opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t len; uint32_t n; if (r->req.aiocb != NULL) { r->req.aiocb = NULL; bdrv_acct_done(s->bs, &r->acct); } if (ret) { if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_WRITE)) { return; } } n = r->iov.iov_len / 512; r->sector += n; r->sector_count -= n; if (r->sector_count == 0) { scsi_req_complete(&r->req, GOOD); } else { len = r->sector_count * 512; if (len > SCSI_DMA_BUF_SIZE) { len = SCSI_DMA_BUF_SIZE; } r->iov.iov_len = len; DPRINTF("Write complete tag=0x%x more=%d\n", r->req.tag, len); scsi_req_data(&r->req, len); } } static void scsi_write_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { DPRINTF("Data transfer direction invalid\n"); scsi_write_complete(r, -EINVAL); return; } n = r->iov.iov_len / 512; if (n) { if (s->tray_open) { scsi_write_complete(r, -ENOMEDIUM); } qemu_iovec_init_external(&r->qiov, &r->iov, 1); bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE); r->req.aiocb = bdrv_aio_writev(s->bs, r->sector, &r->qiov, n, scsi_write_complete, r); if (r->req.aiocb == NULL) { scsi_write_complete(r, -ENOMEM); } } else { /* Invoke completion routine to fetch data from host. 
*/ scsi_write_complete(r, 0); } } static void scsi_dma_restart_bh(void *opaque) { SCSIDiskState *s = opaque; SCSIRequest *req; SCSIDiskReq *r; qemu_bh_delete(s->bh); s->bh = NULL; QTAILQ_FOREACH(req, &s->qdev.requests, next) { r = DO_UPCAST(SCSIDiskReq, req, req); if (r->status & SCSI_REQ_STATUS_RETRY) { int status = r->status; int ret; r->status &= ~(SCSI_REQ_STATUS_RETRY | SCSI_REQ_STATUS_RETRY_TYPE_MASK); switch (status & SCSI_REQ_STATUS_RETRY_TYPE_MASK) { case SCSI_REQ_STATUS_RETRY_READ: scsi_read_data(&r->req); break; case SCSI_REQ_STATUS_RETRY_WRITE: scsi_write_data(&r->req); break; case SCSI_REQ_STATUS_RETRY_FLUSH: ret = scsi_disk_emulate_command(r, r->iov.iov_base); if (ret == 0) { scsi_req_complete(&r->req, GOOD); } } } } } static void scsi_dma_restart_cb(void *opaque, int running, int reason) { SCSIDiskState *s = opaque; if (!running) return; if (!s->bh) { s->bh = qemu_bh_new(scsi_dma_restart_bh, s); qemu_bh_schedule(s->bh); } } /* Return a pointer to the data buffer. */ static uint8_t *scsi_get_buf(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); return (uint8_t *)r->iov.iov_base; } static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int buflen = 0; if (req->cmd.buf[1] & 0x2) { /* Command support data - optional, not implemented */ BADF("optional INQUIRY command support request not implemented\n"); return -1; } if (req->cmd.buf[1] & 0x1) { /* Vital product data */ uint8_t page_code = req->cmd.buf[2]; if (req->cmd.xfer < 4) { BADF("Error: Inquiry (EVPD[%02X]) buffer size %zd is " "less than 4\n", page_code, req->cmd.xfer); return -1; } if (s->qdev.type == TYPE_ROM) { outbuf[buflen++] = 5; } else { outbuf[buflen++] = 0; } outbuf[buflen++] = page_code ; // this page outbuf[buflen++] = 0x00; switch (page_code) { case 0x00: /* Supported page codes, mandatory */ { int pages; DPRINTF("Inquiry EVPD[Supported pages] " "buffer size %zd\n", req->cmd.xfer); pages 
= buflen++; outbuf[buflen++] = 0x00; // list of supported pages (this page) if (s->serial) outbuf[buflen++] = 0x80; // unit serial number outbuf[buflen++] = 0x83; // device identification if (s->qdev.type == TYPE_DISK) { outbuf[buflen++] = 0xb0; // block limits outbuf[buflen++] = 0xb2; // thin provisioning } outbuf[pages] = buflen - pages - 1; // number of pages break; } case 0x80: /* Device serial number, optional */ { int l; if (!s->serial) { DPRINTF("Inquiry (EVPD[Serial number] not supported\n"); return -1; } l = strlen(s->serial); if (l > req->cmd.xfer) l = req->cmd.xfer; if (l > 20) l = 20; DPRINTF("Inquiry EVPD[Serial number] " "buffer size %zd\n", req->cmd.xfer); outbuf[buflen++] = l; memcpy(outbuf+buflen, s->serial, l); buflen += l; break; } case 0x83: /* Device identification page, mandatory */ { int max_len = 255 - 8; int id_len = strlen(bdrv_get_device_name(s->bs)); if (id_len > max_len) id_len = max_len; DPRINTF("Inquiry EVPD[Device identification] " "buffer size %zd\n", req->cmd.xfer); outbuf[buflen++] = 4 + id_len; outbuf[buflen++] = 0x2; // ASCII outbuf[buflen++] = 0; // not officially assigned outbuf[buflen++] = 0; // reserved outbuf[buflen++] = id_len; // length of data following memcpy(outbuf+buflen, bdrv_get_device_name(s->bs), id_len); buflen += id_len; break; } case 0xb0: /* block limits */ { unsigned int unmap_sectors = s->qdev.conf.discard_granularity / s->qdev.blocksize; unsigned int min_io_size = s->qdev.conf.min_io_size / s->qdev.blocksize; unsigned int opt_io_size = s->qdev.conf.opt_io_size / s->qdev.blocksize; if (s->qdev.type == TYPE_ROM) { DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n", page_code); return -1; } /* required VPD size with unmap support */ outbuf[3] = buflen = 0x3c; memset(outbuf + 4, 0, buflen - 4); /* optimal transfer length granularity */ outbuf[6] = (min_io_size >> 8) & 0xff; outbuf[7] = min_io_size & 0xff; /* optimal transfer length */ outbuf[12] = (opt_io_size >> 24) & 0xff; outbuf[13] = (opt_io_size >> 
16) & 0xff; outbuf[14] = (opt_io_size >> 8) & 0xff; outbuf[15] = opt_io_size & 0xff; /* optimal unmap granularity */ outbuf[28] = (unmap_sectors >> 24) & 0xff; outbuf[29] = (unmap_sectors >> 16) & 0xff; outbuf[30] = (unmap_sectors >> 8) & 0xff; outbuf[31] = unmap_sectors & 0xff; break; } case 0xb2: /* thin provisioning */ { outbuf[3] = buflen = 8; outbuf[4] = 0; outbuf[5] = 0x40; /* write same with unmap supported */ outbuf[6] = 0; outbuf[7] = 0; break; } default: BADF("Error: unsupported Inquiry (EVPD[%02X]) " "buffer size %zd\n", page_code, req->cmd.xfer); return -1; } /* done with EVPD */ return buflen; } /* Standard INQUIRY data */ if (req->cmd.buf[2] != 0) { BADF("Error: Inquiry (STANDARD) page or code " "is non-zero [%02X]\n", req->cmd.buf[2]); return -1; } /* PAGE CODE == 0 */ if (req->cmd.xfer < 5) { BADF("Error: Inquiry (STANDARD) buffer size %zd " "is less than 5\n", req->cmd.xfer); return -1; } buflen = req->cmd.xfer; if (buflen > SCSI_MAX_INQUIRY_LEN) buflen = SCSI_MAX_INQUIRY_LEN; memset(outbuf, 0, buflen); outbuf[0] = s->qdev.type & 0x1f; if (s->qdev.type == TYPE_ROM) { outbuf[1] = 0x80; memcpy(&outbuf[16], "QEMU CD-ROM ", 16); } else { outbuf[1] = s->removable ? 0x80 : 0; memcpy(&outbuf[16], "QEMU HARDDISK ", 16); } memcpy(&outbuf[8], "QEMU ", 8); memset(&outbuf[32], 0, 4); memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); /* * We claim conformance to SPC-3, which is required for guests * to ask for modern features like READ CAPACITY(16) or the * block characteristics VPD page by default. Not all of SPC-3 * is actually implemented, but we're good enough. */ outbuf[2] = 5; outbuf[3] = 2; /* Format 2 */ if (buflen > 36) { outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ } else { /* If the allocation length of CDB is too small, the additional length is not adjusted */ outbuf[4] = 36 - 5; } /* Sync data transfer and TCQ. */ outbuf[7] = 0x10 | (req->bus->tcq ? 
0x02 : 0); return buflen; } static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, int page_control) { BlockDriverState *bdrv = s->bs; int cylinders, heads, secs; uint8_t *p = *p_outbuf; /* * If Changeable Values are requested, a mask denoting those mode parameters * that are changeable shall be returned. As we currently don't support * parameter changes via MODE_SELECT all bits are returned set to zero. * The buffer was already menset to zero by the caller of this function. */ switch (page) { case 4: /* Rigid disk device geometry page. */ if (s->qdev.type == TYPE_ROM) { return -1; } p[0] = 4; p[1] = 0x16; if (page_control == 1) { /* Changeable Values */ break; } /* if a geometry hint is available, use it */ bdrv_get_geometry_hint(bdrv, &cylinders, &heads, &secs); p[2] = (cylinders >> 16) & 0xff; p[3] = (cylinders >> 8) & 0xff; p[4] = cylinders & 0xff; p[5] = heads & 0xff; /* Write precomp start cylinder, disabled */ p[6] = (cylinders >> 16) & 0xff; p[7] = (cylinders >> 8) & 0xff; p[8] = cylinders & 0xff; /* Reduced current start cylinder, disabled */ p[9] = (cylinders >> 16) & 0xff; p[10] = (cylinders >> 8) & 0xff; p[11] = cylinders & 0xff; /* Device step rate [ns], 200ns */ p[12] = 0; p[13] = 200; /* Landing zone cylinder */ p[14] = 0xff; p[15] = 0xff; p[16] = 0xff; /* Medium rotation rate [rpm], 5400 rpm */ p[20] = (5400 >> 8) & 0xff; p[21] = 5400 & 0xff; break; case 5: /* Flexible disk device geometry page. 
*/ if (s->qdev.type == TYPE_ROM) { return -1; } p[0] = 5; p[1] = 0x1e; if (page_control == 1) { /* Changeable Values */ break; } /* Transfer rate [kbit/s], 5Mbit/s */ p[2] = 5000 >> 8; p[3] = 5000 & 0xff; /* if a geometry hint is available, use it */ bdrv_get_geometry_hint(bdrv, &cylinders, &heads, &secs); p[4] = heads & 0xff; p[5] = secs & 0xff; p[6] = s->cluster_size * 2; p[8] = (cylinders >> 8) & 0xff; p[9] = cylinders & 0xff; /* Write precomp start cylinder, disabled */ p[10] = (cylinders >> 8) & 0xff; p[11] = cylinders & 0xff; /* Reduced current start cylinder, disabled */ p[12] = (cylinders >> 8) & 0xff; p[13] = cylinders & 0xff; /* Device step rate [100us], 100us */ p[14] = 0; p[15] = 1; /* Device step pulse width [us], 1us */ p[16] = 1; /* Device head settle delay [100us], 100us */ p[17] = 0; p[18] = 1; /* Motor on delay [0.1s], 0.1s */ p[19] = 1; /* Motor off delay [0.1s], 0.1s */ p[20] = 1; /* Medium rotation rate [rpm], 5400 rpm */ p[28] = (5400 >> 8) & 0xff; p[29] = 5400 & 0xff; break; case 8: /* Caching page. */ p[0] = 8; p[1] = 0x12; if (page_control == 1) { /* Changeable Values */ break; } if (bdrv_enable_write_cache(s->bs)) { p[2] = 4; /* WCE */ } break; case 0x2a: /* CD Capabilities and Mechanical Status page. */ if (s->qdev.type != TYPE_ROM) { return -1; } p[0] = 0x2a; p[1] = 0x14; if (page_control == 1) { /* Changeable Values */ break; } p[2] = 3; // CD-R & CD-RW read p[3] = 0; // Writing not supported p[4] = 0x7f; /* Audio, composite, digital out, mode 2 form 1&2, multi session */ p[5] = 0xff; /* CD DA, DA accurate, RW supported, RW corrected, C2 errors, ISRC, UPC, Bar code */ p[6] = 0x2d | (s->tray_locked ? 
2 : 0); /* Locking supported, jumper present, eject, tray */ p[7] = 0; /* no volume & mute control, no changer */ p[8] = (50 * 176) >> 8; // 50x read speed p[9] = (50 * 176) & 0xff; p[10] = 0 >> 8; // No volume p[11] = 0 & 0xff; p[12] = 2048 >> 8; // 2M buffer p[13] = 2048 & 0xff; p[14] = (16 * 176) >> 8; // 16x read speed current p[15] = (16 * 176) & 0xff; p[18] = (16 * 176) >> 8; // 16x write speed p[19] = (16 * 176) & 0xff; p[20] = (16 * 176) >> 8; // 16x write speed current p[21] = (16 * 176) & 0xff; break; default: return -1; } *p_outbuf += p[1] + 2; return p[1] + 2; } static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint64_t nb_sectors; int page, dbd, buflen, ret, page_control; uint8_t *p; uint8_t dev_specific_param; dbd = r->req.cmd.buf[1] & 0x8; page = r->req.cmd.buf[2] & 0x3f; page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n", (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control); memset(outbuf, 0, r->req.cmd.xfer); p = outbuf; if (bdrv_is_read_only(s->bs)) { dev_specific_param = 0x80; /* Readonly. */ } else { dev_specific_param = 0x00; } if (r->req.cmd.buf[0] == MODE_SENSE) { p[1] = 0; /* Default media type. */ p[2] = dev_specific_param; p[3] = 0; /* Block descriptor length. */ p += 4; } else { /* MODE_SENSE_10 */ p[2] = 0; /* Default media type. */ p[3] = dev_specific_param; p[6] = p[7] = 0; /* Block descriptor length. 
*/ p += 8; } bdrv_get_geometry(s->bs, &nb_sectors); if (!dbd && nb_sectors) { if (r->req.cmd.buf[0] == MODE_SENSE) { outbuf[3] = 8; /* Block descriptor length */ } else { /* MODE_SENSE_10 */ outbuf[7] = 8; /* Block descriptor length */ } nb_sectors /= s->cluster_size; if (nb_sectors > 0xffffff) nb_sectors = 0; p[0] = 0; /* media density code */ p[1] = (nb_sectors >> 16) & 0xff; p[2] = (nb_sectors >> 8) & 0xff; p[3] = nb_sectors & 0xff; p[4] = 0; /* reserved */ p[5] = 0; /* bytes 5-7 are the sector size in bytes */ p[6] = s->cluster_size * 2; p[7] = 0; p += 8; } if (page_control == 3) { /* Saved Values */ scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); return -1; } if (page == 0x3f) { for (page = 0; page <= 0x3e; page++) { mode_sense_page(s, page, &p, page_control); } } else { ret = mode_sense_page(s, page, &p, page_control); if (ret == -1) { return -1; } } buflen = p - outbuf; /* * The mode data length field specifies the length in bytes of the * following data that is available to be transferred. The mode data * length does not include itself. 
*/ if (r->req.cmd.buf[0] == MODE_SENSE) { outbuf[0] = buflen - 1; } else { /* MODE_SENSE_10 */ outbuf[0] = ((buflen - 2) >> 8) & 0xff; outbuf[1] = (buflen - 2) & 0xff; } if (buflen > r->req.cmd.xfer) buflen = r->req.cmd.xfer; return buflen; } static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int start_track, format, msf, toclen; uint64_t nb_sectors; msf = req->cmd.buf[1] & 2; format = req->cmd.buf[2] & 0xf; start_track = req->cmd.buf[6]; bdrv_get_geometry(s->bs, &nb_sectors); DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); nb_sectors /= s->cluster_size; switch (format) { case 0: toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); break; case 1: /* multi session : only a single session defined */ toclen = 12; memset(outbuf, 0, 12); outbuf[1] = 0x0a; outbuf[2] = 0x01; outbuf[3] = 0x01; break; case 2: toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); break; default: return -1; } if (toclen > req->cmd.xfer) toclen = req->cmd.xfer; return toclen; } static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) { SCSIRequest *req = &r->req; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); bool start = req->cmd.buf[4] & 1; bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ if (s->qdev.type == TYPE_ROM && loej) { if (!start && !s->tray_open && s->tray_locked) { scsi_check_condition(r, bdrv_is_inserted(s->bs) ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); return -1; } bdrv_eject(s->bs, !start); s->tray_open = !start; } return 0; } static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf) { SCSIRequest *req = &r->req; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); uint64_t nb_sectors; int buflen = 0; switch (req->cmd.buf[0]) { case TEST_UNIT_READY: if (s->tray_open || !bdrv_is_inserted(s->bs)) goto not_ready; break; case INQUIRY: buflen = scsi_disk_emulate_inquiry(req, outbuf); if (buflen < 0) goto illegal_request; break; case MODE_SENSE: case MODE_SENSE_10: buflen = scsi_disk_emulate_mode_sense(r, outbuf); if (buflen < 0) goto illegal_request; break; case READ_TOC: buflen = scsi_disk_emulate_read_toc(req, outbuf); if (buflen < 0) goto illegal_request; break; case RESERVE: if (req->cmd.buf[1] & 1) goto illegal_request; break; case RESERVE_10: if (req->cmd.buf[1] & 3) goto illegal_request; break; case RELEASE: if (req->cmd.buf[1] & 1) goto illegal_request; break; case RELEASE_10: if (req->cmd.buf[1] & 3) goto illegal_request; break; case START_STOP: if (scsi_disk_emulate_start_stop(r) < 0) { return -1; } break; case ALLOW_MEDIUM_REMOVAL: s->tray_locked = req->cmd.buf[4] & 1; bdrv_lock_medium(s->bs, req->cmd.buf[4] & 1); break; case READ_CAPACITY_10: /* The normal LEN field for this command is zero. */ memset(outbuf, 0, 8); bdrv_get_geometry(s->bs, &nb_sectors); if (!nb_sectors) goto not_ready; nb_sectors /= s->cluster_size; /* Returned value is the address of the last sector. */ nb_sectors--; /* Remember the new size for read/write sanity checking. */ s->max_lba = nb_sectors; /* Clip to 2TB, instead of returning capacity modulo 2TB. 
*/ if (nb_sectors > UINT32_MAX) nb_sectors = UINT32_MAX; outbuf[0] = (nb_sectors >> 24) & 0xff; outbuf[1] = (nb_sectors >> 16) & 0xff; outbuf[2] = (nb_sectors >> 8) & 0xff; outbuf[3] = nb_sectors & 0xff; outbuf[4] = 0; outbuf[5] = 0; outbuf[6] = s->cluster_size * 2; outbuf[7] = 0; buflen = 8; break; case GET_CONFIGURATION: memset(outbuf, 0, 8); /* ??? This should probably return much more information. For now just return the basic header indicating the CD-ROM profile. */ outbuf[7] = 8; // CD-ROM buflen = 8; break; case SERVICE_ACTION_IN_16: /* Service Action In subcommands. */ if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { DPRINTF("SAI READ CAPACITY(16)\n"); memset(outbuf, 0, req->cmd.xfer); bdrv_get_geometry(s->bs, &nb_sectors); if (!nb_sectors) goto not_ready; nb_sectors /= s->cluster_size; /* Returned value is the address of the last sector. */ nb_sectors--; /* Remember the new size for read/write sanity checking. */ s->max_lba = nb_sectors; outbuf[0] = (nb_sectors >> 56) & 0xff; outbuf[1] = (nb_sectors >> 48) & 0xff; outbuf[2] = (nb_sectors >> 40) & 0xff; outbuf[3] = (nb_sectors >> 32) & 0xff; outbuf[4] = (nb_sectors >> 24) & 0xff; outbuf[5] = (nb_sectors >> 16) & 0xff; outbuf[6] = (nb_sectors >> 8) & 0xff; outbuf[7] = nb_sectors & 0xff; outbuf[8] = 0; outbuf[9] = 0; outbuf[10] = s->cluster_size * 2; outbuf[11] = 0; outbuf[12] = 0; outbuf[13] = get_physical_block_exp(&s->qdev.conf); /* set TPE bit if the format supports discard */ if (s->qdev.conf.discard_granularity) { outbuf[14] = 0x80; } /* Protection, exponent and lowest lba field left blank. 
*/ buflen = req->cmd.xfer; break; } DPRINTF("Unsupported Service Action In\n"); goto illegal_request; case VERIFY_10: break; default: scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); return -1; } return buflen; not_ready: if (s->tray_open || !bdrv_is_inserted(s->bs)) { scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); } else { scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); } return -1; illegal_request: if (r->req.status == -1) { scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); } return -1; } /* Execute a scsi command. Returns the length of the data expected by the command. This will be Positive for data transfers from the device (eg. disk reads), negative for transfers to the device (eg. disk writes), and zero if the command does not transfer any data. */ static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int32_t len; uint8_t command; uint8_t *outbuf; int rc; command = buf[0]; outbuf = (uint8_t *)r->iov.iov_base; DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", req->lun, req->tag, buf[0]); #ifdef DEBUG_SCSI { int i; for (i = 1; i < r->req.cmd.len; i++) { printf(" 0x%02x", buf[i]); } printf("\n"); } #endif switch (command) { case TEST_UNIT_READY: case INQUIRY: case MODE_SENSE: case MODE_SENSE_10: case RESERVE: case RESERVE_10: case RELEASE: case RELEASE_10: case START_STOP: case ALLOW_MEDIUM_REMOVAL: case READ_CAPACITY_10: case READ_TOC: case GET_CONFIGURATION: case SERVICE_ACTION_IN_16: case VERIFY_10: rc = scsi_disk_emulate_command(r, outbuf); if (rc < 0) { return 0; } r->iov.iov_len = rc; break; case SYNCHRONIZE_CACHE: bdrv_acct_start(s->bs, &r->acct, 0, BDRV_ACCT_FLUSH); r->req.aiocb = bdrv_aio_flush(s->bs, scsi_flush_complete, r); if (r->req.aiocb == NULL) { scsi_flush_complete(r, -EIO); } return 0; case READ_6: case READ_10: case READ_12: case READ_16: len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF("Read (sector %" PRId64 
                ", count %d)\n", r->req.cmd.lba, len);
        if (r->req.cmd.lba > s->max_lba)
            goto illegal_lba;
        /* sector/sector_count are in 512-byte qemu blocks. */
        r->sector = r->req.cmd.lba * s->cluster_size;
        r->sector_count = len * s->cluster_size;
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        len = r->req.cmd.xfer / s->qdev.blocksize;
        DPRINTF("Write %s(sector %" PRId64 ", count %d)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        if (r->req.cmd.lba > s->max_lba)
            goto illegal_lba;
        r->sector = r->req.cmd.lba * s->cluster_size;
        r->sector_count = len * s->cluster_size;
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer);
        /* We don't support mode parameter changes.
           Allow the mode parameter header + block descriptors only. */
        if (r->req.cmd.xfer > 12) {
            goto fail;
        }
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer);
        /* We don't support mode parameter changes.
           Allow the mode parameter header + block descriptors only. */
        if (r->req.cmd.xfer > 16) {
            goto fail;
        }
        break;
    case SEEK_6:
    case SEEK_10:
        /* Seek only validates the LBA; there is no head to move. */
        DPRINTF("Seek(%d) (sector %" PRId64 ")\n", command == SEEK_6 ? 6 : 10,
                r->req.cmd.lba);
        if (r->req.cmd.lba > s->max_lba) {
            goto illegal_lba;
        }
        break;
    case WRITE_SAME_16:
        len = r->req.cmd.xfer / s->qdev.blocksize;
        DPRINTF("WRITE SAME(16) (sector %" PRId64 ", count %d)\n",
                r->req.cmd.lba, len);
        if (r->req.cmd.lba > s->max_lba) {
            goto illegal_lba;
        }
        /*
         * We only support WRITE SAME with the unmap bit set for now.
         */
        if (!(buf[1] & 0x8)) {
            goto fail;
        }
        /* Unmap bit set: implement WRITE SAME as a discard. */
        rc = bdrv_discard(s->bs, r->req.cmd.lba * s->cluster_size,
                          len * s->cluster_size);
        if (rc < 0) {
            /* XXX: better error code ?*/
            goto fail;
        }
        break;
    case REQUEST_SENSE:
        /* Handled by the generic SCSI layer; reaching here is a bug. */
        abort();
    default:
        DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]);
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    fail:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    /* Zero-length transfer: complete immediately with GOOD status. */
    if (r->sector_count == 0 && r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    len = r->sector_count * 512 + r->iov.iov_len;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Negative length means data flows host <- guest (a write). */
        return -len;
    } else {
        if (!r->sector_count)
            r->sector_count = -1;
        return len;
    }
}

/* Drop in-flight requests and recompute the highest valid LBA. */
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
    bdrv_get_geometry(s->bs, &nb_sectors);
    nb_sectors /= s->cluster_size;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->max_lba = nb_sectors;
}

static void scsi_destroy(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(NO_SENSE));
    blockdev_mark_auto_del(s->qdev.conf.bs);
}

/* Block-layer callbacks mirroring guest-visible tray state. */
static void scsi_cd_change_media_cb(void *opaque, bool load)
{
    ((SCSIDiskState *)opaque)->tray_open = !load;
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

static const BlockDevOps scsi_cd_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,
};

/* Common init for scsi-hd/scsi-cd; scsi_type is TYPE_DISK or TYPE_ROM. */
static int scsi_initfn(SCSIDevice *dev, uint8_t scsi_type)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    DriveInfo *dinfo;

    if (!s->qdev.conf.bs) {
        error_report("scsi-disk: drive property not set");
        return -1;
    }
    s->bs = s->qdev.conf.bs;
    /* Hard disks must have media at init time; CD-ROMs may start empty. */
    if (scsi_type == TYPE_DISK && !bdrv_is_inserted(s->bs)) {
        error_report("Device needs media, but drive is empty");
        return -1;
    }

    if (!s->serial) {
        /* try to fall back to value set with legacy -drive serial=... */
        dinfo = drive_get_by_blockdev(s->bs);
        if (*dinfo->serial) {
            s->serial = g_strdup(dinfo->serial);
        }
    }

    if (!s->version) {
        s->version = g_strdup(QEMU_VERSION);
    }

    if (bdrv_is_sg(s->bs)) {
        error_report("scsi-disk: unwanted /dev/sg*");
        return -1;
    }

    if (scsi_type == TYPE_ROM) {
        bdrv_set_dev_ops(s->bs, &scsi_cd_block_ops, s);
        s->qdev.blocksize = 2048;           /* CD-ROM sector size */
    } else if (scsi_type == TYPE_DISK) {
        s->qdev.blocksize = s->qdev.conf.logical_block_size;
    } else {
        error_report("scsi-disk: Unhandled SCSI type %02x", scsi_type);
        return -1;
    }
    /* Number of 512-byte qemu blocks per SCSI sector. */
    s->cluster_size = s->qdev.blocksize / 512;
    bdrv_set_buffer_alignment(s->bs, s->qdev.blocksize);

    s->qdev.type = scsi_type;
    qemu_add_vm_change_state_handler(scsi_dma_restart_cb, s);
    add_boot_device_path(s->qdev.conf.bootindex, &dev->qdev, ",0");
    return 0;
}

static int scsi_hd_initfn(SCSIDevice *dev)
{
    return scsi_initfn(dev, TYPE_DISK);
}

static int scsi_cd_initfn(SCSIDevice *dev)
{
    return scsi_initfn(dev, TYPE_ROM);
}

/* Legacy "scsi-disk" device: pick disk vs CD-ROM from the drive media type. */
static int scsi_disk_initfn(SCSIDevice *dev)
{
    DriveInfo *dinfo;
    uint8_t scsi_type;

    if (!dev->conf.bs) {
        scsi_type = TYPE_DISK;  /* will die in scsi_initfn() */
    } else {
        dinfo = drive_get_by_blockdev(dev->conf.bs);
        scsi_type = dinfo->media_cd ? TYPE_ROM : TYPE_DISK;
    }

    return scsi_initfn(dev, scsi_type);
}

static SCSIReqOps scsi_disk_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .cancel_io    = scsi_cancel_io,
    .get_buf      = scsi_get_buf,
};

/* Allocate a request plus its SCSI_DMA_BUF_SIZE bounce buffer. */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    SCSIDiskReq *r;

    req = scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun, hba_private);
    r = DO_UPCAST(SCSIDiskReq, req, req);
    r->iov.iov_base = qemu_blockalign(s->bs, SCSI_DMA_BUF_SIZE);
    return req;
}

#define DEFINE_SCSI_DISK_PROPERTIES()                       \
    DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf),      \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),      \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial)

static SCSIDeviceInfo scsi_disk_info[] = {
    {
        .qdev.name    = "scsi-hd",
        .qdev.fw_name = "disk",
        .qdev.desc    = "virtual SCSI disk",
        .qdev.size    = sizeof(SCSIDiskState),
        .qdev.reset   = scsi_disk_reset,
        .init         = scsi_hd_initfn,
        .destroy      = scsi_destroy,
        .alloc_req    = scsi_new_request,
        .qdev.props   = (Property[]) {
            DEFINE_SCSI_DISK_PROPERTIES(),
            DEFINE_PROP_BIT("removable", SCSIDiskState, removable, 0, false),
            DEFINE_PROP_END_OF_LIST(),
        }
    },{
        .qdev.name    = "scsi-cd",
        .qdev.fw_name = "disk",
        .qdev.desc    = "virtual SCSI CD-ROM",
        .qdev.size    = sizeof(SCSIDiskState),
        .qdev.reset   = scsi_disk_reset,
        .init         = scsi_cd_initfn,
        .destroy      = scsi_destroy,
        .alloc_req    = scsi_new_request,
        .qdev.props   = (Property[]) {
            DEFINE_SCSI_DISK_PROPERTIES(),
            DEFINE_PROP_END_OF_LIST(),
        },
    },{
        .qdev.name    = "scsi-disk", /* legacy -device scsi-disk */
        .qdev.fw_name = "disk",
        .qdev.desc    = "virtual SCSI disk or CD-ROM (legacy)",
        .qdev.size    = sizeof(SCSIDiskState),
        .qdev.reset   = scsi_disk_reset,
        .init         = scsi_disk_initfn,
        .destroy      = scsi_destroy,
        .alloc_req    = scsi_new_request,
        .qdev.props   = (Property[]) {
            DEFINE_SCSI_DISK_PROPERTIES(),
            DEFINE_PROP_BIT("removable", SCSIDiskState, removable, 0, false),
            DEFINE_PROP_END_OF_LIST(),
        }
    }
};

/* Register all three scsi-disk qdev variants at startup. */
static void scsi_disk_register_devices(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(scsi_disk_info); i++) {
        scsi_qdev_register(&scsi_disk_info[i]);
    }
}
device_init(scsi_disk_register_devices)
/* * SCSI Device emulation * * Copyright (c) 2006 CodeSourcery. * Based on code by Fabrice Bellard * * Written by Paul Brook * Modifications: * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case * when the allocation length of CDB is smaller * than 36. * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the * MODE SENSE response. * * This code is licensed under the LGPL. * * Note that this file only handles the SCSI architecture model and device * commands. Emulation of interface/link layer protocols is handled by * the host adapter emulator. */ //#define DEBUG_SCSI #ifdef DEBUG_SCSI #define DPRINTF(fmt, ...) \ do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0) #else #define DPRINTF(fmt, ...) do {} while(0) #endif #define BADF(fmt, ...) \ do { fprintf(stderr, "scsi-disk: " fmt , ## __VA_ARGS__); } while (0) #include "qemu-common.h" #include "qemu-error.h" #include "scsi.h" #include "scsi-defs.h" #include "sysemu.h" #include "blockdev.h" #include "block_int.h" #define SCSI_DMA_BUF_SIZE 131072 #define SCSI_MAX_INQUIRY_LEN 256 #define SCSI_REQ_STATUS_RETRY 0x01 #define SCSI_REQ_STATUS_RETRY_TYPE_MASK 0x06 #define SCSI_REQ_STATUS_RETRY_READ 0x00 #define SCSI_REQ_STATUS_RETRY_WRITE 0x02 #define SCSI_REQ_STATUS_RETRY_FLUSH 0x04 typedef struct SCSIDiskState SCSIDiskState; typedef struct SCSIDiskReq { SCSIRequest req; /* Both sector and sector_count are in terms of qemu 512 byte blocks. */ uint64_t sector; uint32_t sector_count; struct iovec iov; QEMUIOVector qiov; uint32_t status; BlockAcctCookie acct; } SCSIDiskReq; struct SCSIDiskState { SCSIDevice qdev; BlockDriverState *bs; /* The qemu block layer uses a fixed 512 byte sector size. This is the number of 512 byte blocks in a single scsi sector. 
     */
    int cluster_size;
    uint32_t removable;          /* qdev "removable" property */
    uint64_t max_lba;            /* highest valid LBA, for sanity checks */
    QEMUBH *bh;                  /* pending DMA-restart bottom half */
    char *version;
    char *serial;
    bool tray_open;
    bool tray_locked;
};

static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type);
static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
            r->req.tag, sense.key, sense.asc, sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

/* Cancel a pending data transfer.  */
static void scsi_cancel_io(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    DPRINTF("Cancel tag=0x%x\n", req->tag);
    if (r->req.aiocb) {
        bdrv_aio_cancel(r->req.aiocb);
    }
    r->req.aiocb = NULL;
}

/* Prepare the iovec for the next chunk; returns its size in 512-byte blocks. */
static uint32_t scsi_init_iovec(SCSIDiskReq *r)
{
    r->iov.iov_len = MIN(r->sector_count * 512, SCSI_DMA_BUF_SIZE);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
    return r->qiov.size / 512;
}

/* AIO completion callback for reads. */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int n;

    if (r->req.aiocb != NULL) {
        r->req.aiocb = NULL;
        bdrv_acct_done(s->bs, &r->acct);
    }

    if (ret) {
        /* Non-zero return means the request was retried or failed. */
        if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_READ)) {
            return;
        }
    }

    DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);
}

/* AIO completion callback for SYNCHRONIZE CACHE. */
static void scsi_flush_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (r->req.aiocb != NULL) {
        r->req.aiocb = NULL;
        bdrv_acct_done(s->bs, &r->acct);
    }

    if (ret < 0) {
        if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) {
return; } } scsi_req_complete(&r->req, GOOD); } /* Read more data from scsi device into buffer. */ static void scsi_read_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF("Read buf_len=%zd\n", r->iov.iov_len); r->sector_count = 0; scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF("Read sector_count=%d\n", r->sector_count); if (r->sector_count == 0) { /* This also clears the sense buffer for REQUEST SENSE. */ scsi_req_complete(&r->req, GOOD); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { DPRINTF("Data transfer direction invalid\n"); scsi_read_complete(r, -EINVAL); return; } if (s->tray_open) { scsi_read_complete(r, -ENOMEDIUM); } n = scsi_init_iovec(r); bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ); r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n, scsi_read_complete, r); if (r->req.aiocb == NULL) { scsi_read_complete(r, -EIO); } } static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type) { int is_read = (type == SCSI_REQ_STATUS_RETRY_READ); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); BlockErrorAction action = bdrv_get_on_error(s->bs, is_read); if (action == BLOCK_ERR_IGNORE) { bdrv_mon_event(s->bs, BDRV_ACTION_IGNORE, is_read); return 0; } if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC) || action == BLOCK_ERR_STOP_ANY) { type &= SCSI_REQ_STATUS_RETRY_TYPE_MASK; r->status |= SCSI_REQ_STATUS_RETRY | type; bdrv_mon_event(s->bs, BDRV_ACTION_STOP, is_read); vm_stop(VMSTOP_DISKFULL); } else { switch (error) { case ENOMEM: scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); break; case EINVAL: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); break; default: scsi_check_condition(r, SENSE_CODE(IO_ERROR)); break; } bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, 
is_read); } return 1; } static void scsi_write_complete(void * opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->req.aiocb != NULL) { r->req.aiocb = NULL; bdrv_acct_done(s->bs, &r->acct); } if (ret) { if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_WRITE)) { return; } } n = r->qiov.size / 512; r->sector += n; r->sector_count -= n; if (r->sector_count == 0) { scsi_req_complete(&r->req, GOOD); } else { scsi_init_iovec(r); DPRINTF("Write complete tag=0x%x more=%d\n", r->req.tag, r->qiov.size); scsi_req_data(&r->req, r->qiov.size); } } static void scsi_write_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { DPRINTF("Data transfer direction invalid\n"); scsi_write_complete(r, -EINVAL); return; } n = r->qiov.size / 512; if (n) { if (s->tray_open) { scsi_write_complete(r, -ENOMEDIUM); } bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE); r->req.aiocb = bdrv_aio_writev(s->bs, r->sector, &r->qiov, n, scsi_write_complete, r); if (r->req.aiocb == NULL) { scsi_write_complete(r, -ENOMEM); } } else { /* Called for the first time. Ask the driver to send us more data. 
         */
        scsi_write_complete(r, 0);
    }
}

/* Bottom half: re-issue requests that were parked while the VM was stopped. */
static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDiskState *s = opaque;
    SCSIRequest *req;
    SCSIDiskReq *r;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    QTAILQ_FOREACH(req, &s->qdev.requests, next) {
        r = DO_UPCAST(SCSIDiskReq, req, req);
        if (r->status & SCSI_REQ_STATUS_RETRY) {
            int status = r->status;
            int ret;

            r->status &=
                ~(SCSI_REQ_STATUS_RETRY | SCSI_REQ_STATUS_RETRY_TYPE_MASK);

            switch (status & SCSI_REQ_STATUS_RETRY_TYPE_MASK) {
            case SCSI_REQ_STATUS_RETRY_READ:
                scsi_read_data(&r->req);
                break;
            case SCSI_REQ_STATUS_RETRY_WRITE:
                scsi_write_data(&r->req);
                break;
            case SCSI_REQ_STATUS_RETRY_FLUSH:
                ret = scsi_disk_emulate_command(r, r->iov.iov_base);
                if (ret == 0) {
                    scsi_req_complete(&r->req, GOOD);
                }
            }
        }
    }
}

/* VM state change handler: schedule the retry bottom half on resume. */
static void scsi_dma_restart_cb(void *opaque, int running, int reason)
{
    SCSIDiskState *s = opaque;

    if (!running)
        return;

    if (!s->bh) {
        s->bh = qemu_bh_new(scsi_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

/* Build an INQUIRY response (standard or EVPD) in outbuf; returns its
 * length, or -1 on an unsupported/invalid request. */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        BADF("optional INQUIRY command support request not implemented\n");
        return -1;
    }

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = req->cmd.buf[2];
        if (req->cmd.xfer < 4) {
            BADF("Error: Inquiry (EVPD[%02X]) buffer size %zd is "
                 "less than 4\n", page_code, req->cmd.xfer);
            return -1;
        }

        if (s->qdev.type == TYPE_ROM) {
            outbuf[buflen++] = 5;
        } else {
            outbuf[buflen++] = 0;
        }
        outbuf[buflen++] = page_code ; // this page
        outbuf[buflen++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            DPRINTF("Inquiry EVPD[Supported pages] "
                    "buffer size %zd\n", req->cmd.xfer);
            pages
                  = buflen++;
            outbuf[buflen++] = 0x00; // list of supported pages (this page)
            if (s->serial)
                outbuf[buflen++] = 0x80; // unit serial number
            outbuf[buflen++] = 0x83; // device identification
            if (s->qdev.type == TYPE_DISK) {
                outbuf[buflen++] = 0xb0; // block limits
                outbuf[buflen++] = 0xb2; // thin provisioning
            }
            outbuf[pages] = buflen - pages - 1; // number of pages
            break;
        }
        case 0x80: /* Device serial number, optional */
        {
            int l;

            if (!s->serial) {
                DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
                return -1;
            }

            /* Serial is capped at 20 bytes and at the allocation length. */
            l = strlen(s->serial);
            if (l > req->cmd.xfer)
                l = req->cmd.xfer;
            if (l > 20)
                l = 20;

            DPRINTF("Inquiry EVPD[Serial number] "
                    "buffer size %zd\n", req->cmd.xfer);
            outbuf[buflen++] = l;
            memcpy(outbuf+buflen, s->serial, l);
            buflen += l;
            break;
        }
        case 0x83: /* Device identification page, mandatory */
        {
            int max_len = 255 - 8;
            int id_len = strlen(bdrv_get_device_name(s->bs));

            if (id_len > max_len)
                id_len = max_len;
            DPRINTF("Inquiry EVPD[Device identification] "
                    "buffer size %zd\n", req->cmd.xfer);

            outbuf[buflen++] = 4 + id_len;
            outbuf[buflen++] = 0x2; // ASCII
            outbuf[buflen++] = 0;   // not officially assigned
            outbuf[buflen++] = 0;   // reserved
            outbuf[buflen++] = id_len; // length of data following

            memcpy(outbuf+buflen, bdrv_get_device_name(s->bs), id_len);
            buflen += id_len;
            break;
        }
        case 0xb0: /* block limits */
        {
            unsigned int unmap_sectors =
                    s->qdev.conf.discard_granularity / s->qdev.blocksize;
            unsigned int min_io_size =
                    s->qdev.conf.min_io_size / s->qdev.blocksize;
            unsigned int opt_io_size =
                    s->qdev.conf.opt_io_size / s->qdev.blocksize;

            if (s->qdev.type == TYPE_ROM) {
                DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
                        page_code);
                return -1;
            }
            /* required VPD size with unmap support */
            outbuf[3] = buflen = 0x3c;

            memset(outbuf + 4, 0, buflen - 4);

            /* optimal transfer length granularity */
            outbuf[6] = (min_io_size >> 8) & 0xff;
            outbuf[7] = min_io_size & 0xff;

            /* optimal transfer length */
            outbuf[12] = (opt_io_size >> 24) & 0xff;
            outbuf[13] = (opt_io_size >> 16) & 0xff;
            outbuf[14] = (opt_io_size >> 8) & 0xff;
            outbuf[15] = opt_io_size & 0xff;

            /* optimal unmap granularity */
            outbuf[28] = (unmap_sectors >> 24) & 0xff;
            outbuf[29] = (unmap_sectors >> 16) & 0xff;
            outbuf[30] = (unmap_sectors >> 8) & 0xff;
            outbuf[31] = unmap_sectors & 0xff;
            break;
        }
        case 0xb2: /* thin provisioning */
        {
            outbuf[3] = buflen = 8;
            outbuf[4] = 0;
            outbuf[5] = 0x40; /* write same with unmap supported */
            outbuf[6] = 0;
            outbuf[7] = 0;
            break;
        }
        default:
            BADF("Error: unsupported Inquiry (EVPD[%02X]) "
                 "buffer size %zd\n", page_code, req->cmd.xfer);
            return -1;
        }
        /* done with EVPD */
        return buflen;
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        BADF("Error: Inquiry (STANDARD) page or code "
             "is non-zero [%02X]\n", req->cmd.buf[2]);
        return -1;
    }

    /* PAGE CODE == 0 */
    if (req->cmd.xfer < 5) {
        BADF("Error: Inquiry (STANDARD) buffer size %zd "
             "is less than 5\n", req->cmd.xfer);
        return -1;
    }

    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN)
        buflen = SCSI_MAX_INQUIRY_LEN;

    memset(outbuf, 0, buflen);

    outbuf[0] = s->qdev.type & 0x1f;
    /* Vendor/product IDs are fixed-width, space-padded per SPC. */
    if (s->qdev.type == TYPE_ROM) {
        outbuf[1] = 0x80;
        memcpy(&outbuf[16], "QEMU CD-ROM     ", 16);
    } else {
        outbuf[1] = s->removable ? 0x80 : 0;
        memcpy(&outbuf[16], "QEMU HARDDISK   ", 16);
    }
    memcpy(&outbuf[8], "QEMU    ", 8);
    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = 5;
    outbuf[3] = 2; /* Format 2 */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->tcq ?
                        0x02 : 0);
    return buflen;
}

/* Fill in one MODE SENSE page at *p_outbuf; advances the pointer and
 * returns the page length, or -1 if the page does not apply. */
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    BlockDriverState *bdrv = s->bs;
    int cylinders, heads, secs;
    uint8_t *p = *p_outbuf;

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already menset to zero by the caller of this function.
     */
    switch (page) {
    case 4: /* Rigid disk device geometry page. */
        if (s->qdev.type == TYPE_ROM) {
            return -1;
        }
        p[0] = 4;
        p[1] = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        bdrv_get_geometry_hint(bdrv, &cylinders, &heads, &secs);
        p[2] = (cylinders >> 16) & 0xff;
        p[3] = (cylinders >> 8) & 0xff;
        p[4] = cylinders & 0xff;
        p[5] = heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[6] = (cylinders >> 16) & 0xff;
        p[7] = (cylinders >> 8) & 0xff;
        p[8] = cylinders & 0xff;
        /* Reduced current start cylinder, disabled */
        p[9] = (cylinders >> 16) & 0xff;
        p[10] = (cylinders >> 8) & 0xff;
        p[11] = cylinders & 0xff;
        /* Device step rate [ns], 200ns */
        p[12] = 0;
        p[13] = 200;
        /* Landing zone cylinder */
        p[14] = 0xff;
        p[15] = 0xff;
        p[16] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[20] = (5400 >> 8) & 0xff;
        p[21] = 5400 & 0xff;
        break;

    case 5: /* Flexible disk device geometry page.
             */
        if (s->qdev.type == TYPE_ROM) {
            return -1;
        }
        p[0] = 5;
        p[1] = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[2] = 5000 >> 8;
        p[3] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        bdrv_get_geometry_hint(bdrv, &cylinders, &heads, &secs);
        p[4] = heads & 0xff;
        p[5] = secs & 0xff;
        p[6] = s->cluster_size * 2;
        p[8] = (cylinders >> 8) & 0xff;
        p[9] = cylinders & 0xff;
        /* Write precomp start cylinder, disabled */
        p[10] = (cylinders >> 8) & 0xff;
        p[11] = cylinders & 0xff;
        /* Reduced current start cylinder, disabled */
        p[12] = (cylinders >> 8) & 0xff;
        p[13] = cylinders & 0xff;
        /* Device step rate [100us], 100us */
        p[14] = 0;
        p[15] = 1;
        /* Device step pulse width [us], 1us */
        p[16] = 1;
        /* Device head settle delay [100us], 100us */
        p[17] = 0;
        p[18] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[19] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[20] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[28] = (5400 >> 8) & 0xff;
        p[29] = 5400 & 0xff;
        break;

    case 8: /* Caching page.  */
        p[0] = 8;
        p[1] = 0x12;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        if (bdrv_enable_write_cache(s->bs)) {
            p[2] = 4; /* WCE */
        }
        break;

    case 0x2a: /* CD Capabilities and Mechanical Status page. */
        if (s->qdev.type != TYPE_ROM) {
            return -1;
        }
        p[0] = 0x2a;
        p[1] = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[2] = 3; // CD-R & CD-RW read
        p[3] = 0; // Writing not supported
        p[4] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[5] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[6] = 0x2d | (s->tray_locked ?
                       2 : 0); /* Locking supported, jumper present, eject, tray */
        p[7] = 0; /* no volume & mute control, no changer */
        p[8] = (50 * 176) >> 8; // 50x read speed
        p[9] = (50 * 176) & 0xff;
        p[10] = 0 >> 8; // No volume
        p[11] = 0 & 0xff;
        p[12] = 2048 >> 8; // 2M buffer
        p[13] = 2048 & 0xff;
        p[14] = (16 * 176) >> 8; // 16x read speed current
        p[15] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; // 16x write speed
        p[19] = (16 * 176) & 0xff;
        p[20] = (16 * 176) >> 8; // 16x write speed current
        p[21] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    *p_outbuf += p[1] + 2;
    return p[1] + 2;
}

/* Build a MODE SENSE(6)/(10) response in outbuf; returns its length or -1. */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    int page, dbd, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = r->req.cmd.buf[1] & 0x8;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
    DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
        (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer,
        page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (bdrv_is_read_only(s->bs)) {
        dev_specific_param = 0x80; /* Readonly.  */
    } else {
        dev_specific_param = 0x00;
    }

    /* Mode parameter header: 4 bytes for (6), 8 bytes for (10). */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.
                          */
        p += 8;
    }

    bdrv_get_geometry(s->bs, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= s->cluster_size;
        if (nb_sectors > 0xffffff)
            nb_sectors = 0;
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->cluster_size * 2;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) { /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        /* Page 0x3f: return every page we implement. */
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    if (buflen > r->req.cmd.xfer)
        buflen = r->req.cmd.xfer;
    return buflen;
}

/* Build a READ TOC response; returns its length or -1 on a bad format. */
static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    bdrv_get_geometry(s->bs, &nb_sectors);
    DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format,
            msf >> 1);
    nb_sectors /= s->cluster_size;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    if (toclen > req->cmd.xfer)
        toclen = req->cmd.xfer;
    return toclen;
}

/* START STOP UNIT: handle tray load/eject for CD-ROMs. */
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */

    if (s->qdev.type == TYPE_ROM && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            /* Eject refused while the guest holds a PREVENT MEDIA REMOVAL. */
            scsi_check_condition(r, bdrv_is_inserted(s->bs) ?
                                 SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) :
                                 SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }
        bdrv_eject(s->bs, !start);
        s->tray_open = !start;
    }
    return 0;
}

/* Emulate a non-I/O SCSI command in the bounce buffer; returns the response
 * length, or -1 after setting a CHECK CONDITION on the request. */
static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    int buflen = 0;

    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        if (s->tray_open || !bdrv_is_inserted(s->bs))
            goto not_ready;
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0)
            goto illegal_request;
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0)
            goto illegal_request;
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0)
            goto illegal_request;
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1)
            goto illegal_request;
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3)
            goto illegal_request;
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1)
            goto illegal_request;
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3)
            goto illegal_request;
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return -1;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        bdrv_lock_medium(s->bs, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        bdrv_get_geometry(s->bs, &nb_sectors);
        if (!nb_sectors)
            goto not_ready;
        nb_sectors /= s->cluster_size;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB.
*/ if (nb_sectors > UINT32_MAX) nb_sectors = UINT32_MAX; outbuf[0] = (nb_sectors >> 24) & 0xff; outbuf[1] = (nb_sectors >> 16) & 0xff; outbuf[2] = (nb_sectors >> 8) & 0xff; outbuf[3] = nb_sectors & 0xff; outbuf[4] = 0; outbuf[5] = 0; outbuf[6] = s->cluster_size * 2; outbuf[7] = 0; buflen = 8; break; case GET_CONFIGURATION: memset(outbuf, 0, 8); /* ??? This should probably return much more information. For now just return the basic header indicating the CD-ROM profile. */ outbuf[7] = 8; // CD-ROM buflen = 8; break; case SERVICE_ACTION_IN_16: /* Service Action In subcommands. */ if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { DPRINTF("SAI READ CAPACITY(16)\n"); memset(outbuf, 0, req->cmd.xfer); bdrv_get_geometry(s->bs, &nb_sectors); if (!nb_sectors) goto not_ready; nb_sectors /= s->cluster_size; /* Returned value is the address of the last sector. */ nb_sectors--; /* Remember the new size for read/write sanity checking. */ s->max_lba = nb_sectors; outbuf[0] = (nb_sectors >> 56) & 0xff; outbuf[1] = (nb_sectors >> 48) & 0xff; outbuf[2] = (nb_sectors >> 40) & 0xff; outbuf[3] = (nb_sectors >> 32) & 0xff; outbuf[4] = (nb_sectors >> 24) & 0xff; outbuf[5] = (nb_sectors >> 16) & 0xff; outbuf[6] = (nb_sectors >> 8) & 0xff; outbuf[7] = nb_sectors & 0xff; outbuf[8] = 0; outbuf[9] = 0; outbuf[10] = s->cluster_size * 2; outbuf[11] = 0; outbuf[12] = 0; outbuf[13] = get_physical_block_exp(&s->qdev.conf); /* set TPE bit if the format supports discard */ if (s->qdev.conf.discard_granularity) { outbuf[14] = 0x80; } /* Protection, exponent and lowest lba field left blank. 
*/ buflen = req->cmd.xfer; break; } DPRINTF("Unsupported Service Action In\n"); goto illegal_request; case VERIFY_10: break; default: scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); return -1; } return buflen; not_ready: if (s->tray_open || !bdrv_is_inserted(s->bs)) { scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); } else { scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); } return -1; illegal_request: if (r->req.status == -1) { scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); } return -1; } /* Execute a scsi command. Returns the length of the data expected by the command. This will be Positive for data transfers from the device (eg. disk reads), negative for transfers to the device (eg. disk writes), and zero if the command does not transfer any data. */ static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int32_t len; uint8_t command; uint8_t *outbuf; int rc; command = buf[0]; outbuf = (uint8_t *)r->iov.iov_base; DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", req->lun, req->tag, buf[0]); #ifdef DEBUG_SCSI { int i; for (i = 1; i < r->req.cmd.len; i++) { printf(" 0x%02x", buf[i]); } printf("\n"); } #endif switch (command) { case TEST_UNIT_READY: case INQUIRY: case MODE_SENSE: case MODE_SENSE_10: case RESERVE: case RESERVE_10: case RELEASE: case RELEASE_10: case START_STOP: case ALLOW_MEDIUM_REMOVAL: case READ_CAPACITY_10: case READ_TOC: case GET_CONFIGURATION: case SERVICE_ACTION_IN_16: case VERIFY_10: rc = scsi_disk_emulate_command(r, outbuf); if (rc < 0) { return 0; } r->iov.iov_len = rc; break; case SYNCHRONIZE_CACHE: bdrv_acct_start(s->bs, &r->acct, 0, BDRV_ACCT_FLUSH); r->req.aiocb = bdrv_aio_flush(s->bs, scsi_flush_complete, r); if (r->req.aiocb == NULL) { scsi_flush_complete(r, -EIO); } return 0; case READ_6: case READ_10: case READ_12: case READ_16: len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF("Read (sector %" PRId64 
", count %d)\n", r->req.cmd.lba, len); if (r->req.cmd.lba > s->max_lba) goto illegal_lba; r->sector = r->req.cmd.lba * s->cluster_size; r->sector_count = len * s->cluster_size; break; case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: case WRITE_VERIFY_10: case WRITE_VERIFY_12: case WRITE_VERIFY_16: len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF("Write %s(sector %" PRId64 ", count %d)\n", (command & 0xe) == 0xe ? "And Verify " : "", r->req.cmd.lba, len); if (r->req.cmd.lba > s->max_lba) goto illegal_lba; r->sector = r->req.cmd.lba * s->cluster_size; r->sector_count = len * s->cluster_size; break; case MODE_SELECT: DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer); /* We don't support mode parameter changes. Allow the mode parameter header + block descriptors only. */ if (r->req.cmd.xfer > 12) { goto fail; } break; case MODE_SELECT_10: DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer); /* We don't support mode parameter changes. Allow the mode parameter header + block descriptors only. */ if (r->req.cmd.xfer > 16) { goto fail; } break; case SEEK_6: case SEEK_10: DPRINTF("Seek(%d) (sector %" PRId64 ")\n", command == SEEK_6 ? 6 : 10, r->req.cmd.lba); if (r->req.cmd.lba > s->max_lba) { goto illegal_lba; } break; case WRITE_SAME_16: len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF("WRITE SAME(16) (sector %" PRId64 ", count %d)\n", r->req.cmd.lba, len); if (r->req.cmd.lba > s->max_lba) { goto illegal_lba; } /* * We only support WRITE SAME with the unmap bit set for now. 
*/ if (!(buf[1] & 0x8)) { goto fail; } rc = bdrv_discard(s->bs, r->req.cmd.lba * s->cluster_size, len * s->cluster_size); if (rc < 0) { /* XXX: better error code ?*/ goto fail; } break; case REQUEST_SENSE: abort(); default: DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]); scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); return 0; fail: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); return 0; illegal_lba: scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); return 0; } if (r->sector_count == 0 && r->iov.iov_len == 0) { scsi_req_complete(&r->req, GOOD); } len = r->sector_count * 512 + r->iov.iov_len; if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { return -len; } else { if (!r->sector_count) r->sector_count = -1; return len; } } static void scsi_disk_reset(DeviceState *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); uint64_t nb_sectors; scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); bdrv_get_geometry(s->bs, &nb_sectors); nb_sectors /= s->cluster_size; if (nb_sectors) { nb_sectors--; } s->max_lba = nb_sectors; } static void scsi_destroy(SCSIDevice *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); scsi_device_purge_requests(&s->qdev, SENSE_CODE(NO_SENSE)); blockdev_mark_auto_del(s->qdev.conf.bs); } static void scsi_cd_change_media_cb(void *opaque, bool load) { ((SCSIDiskState *)opaque)->tray_open = !load; } static bool scsi_cd_is_tray_open(void *opaque) { return ((SCSIDiskState *)opaque)->tray_open; } static bool scsi_cd_is_medium_locked(void *opaque) { return ((SCSIDiskState *)opaque)->tray_locked; } static const BlockDevOps scsi_cd_block_ops = { .change_media_cb = scsi_cd_change_media_cb, .is_tray_open = scsi_cd_is_tray_open, .is_medium_locked = scsi_cd_is_medium_locked, }; static int scsi_initfn(SCSIDevice *dev, uint8_t scsi_type) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); DriveInfo *dinfo; if (!s->qdev.conf.bs) { error_report("scsi-disk: drive property not set"); return -1; } s->bs = s->qdev.conf.bs; 
if (scsi_type == TYPE_DISK && !bdrv_is_inserted(s->bs)) { error_report("Device needs media, but drive is empty"); return -1; } if (!s->serial) { /* try to fall back to value set with legacy -drive serial=... */ dinfo = drive_get_by_blockdev(s->bs); if (*dinfo->serial) { s->serial = g_strdup(dinfo->serial); } } if (!s->version) { s->version = g_strdup(QEMU_VERSION); } if (bdrv_is_sg(s->bs)) { error_report("scsi-disk: unwanted /dev/sg*"); return -1; } if (scsi_type == TYPE_ROM) { bdrv_set_dev_ops(s->bs, &scsi_cd_block_ops, s); s->qdev.blocksize = 2048; } else if (scsi_type == TYPE_DISK) { s->qdev.blocksize = s->qdev.conf.logical_block_size; } else { error_report("scsi-disk: Unhandled SCSI type %02x", scsi_type); return -1; } s->cluster_size = s->qdev.blocksize / 512; bdrv_set_buffer_alignment(s->bs, s->qdev.blocksize); s->qdev.type = scsi_type; qemu_add_vm_change_state_handler(scsi_dma_restart_cb, s); add_boot_device_path(s->qdev.conf.bootindex, &dev->qdev, ",0"); return 0; } static int scsi_hd_initfn(SCSIDevice *dev) { return scsi_initfn(dev, TYPE_DISK); } static int scsi_cd_initfn(SCSIDevice *dev) { return scsi_initfn(dev, TYPE_ROM); } static int scsi_disk_initfn(SCSIDevice *dev) { DriveInfo *dinfo; uint8_t scsi_type; if (!dev->conf.bs) { scsi_type = TYPE_DISK; /* will die in scsi_initfn() */ } else { dinfo = drive_get_by_blockdev(dev->conf.bs); scsi_type = dinfo->media_cd ? 
TYPE_ROM : TYPE_DISK; } return scsi_initfn(dev, scsi_type); } static SCSIReqOps scsi_disk_reqops = { .size = sizeof(SCSIDiskReq), .free_req = scsi_free_request, .send_command = scsi_send_command, .read_data = scsi_read_data, .write_data = scsi_write_data, .cancel_io = scsi_cancel_io, .get_buf = scsi_get_buf, }; static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, void *hba_private) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); SCSIRequest *req; SCSIDiskReq *r; req = scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun, hba_private); r = DO_UPCAST(SCSIDiskReq, req, req); r->iov.iov_base = qemu_blockalign(s->bs, SCSI_DMA_BUF_SIZE); return req; } #define DEFINE_SCSI_DISK_PROPERTIES() \ DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \ DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ DEFINE_PROP_STRING("serial", SCSIDiskState, serial) static SCSIDeviceInfo scsi_disk_info[] = { { .qdev.name = "scsi-hd", .qdev.fw_name = "disk", .qdev.desc = "virtual SCSI disk", .qdev.size = sizeof(SCSIDiskState), .qdev.reset = scsi_disk_reset, .init = scsi_hd_initfn, .destroy = scsi_destroy, .alloc_req = scsi_new_request, .qdev.props = (Property[]) { DEFINE_SCSI_DISK_PROPERTIES(), DEFINE_PROP_BIT("removable", SCSIDiskState, removable, 0, false), DEFINE_PROP_END_OF_LIST(), } },{ .qdev.name = "scsi-cd", .qdev.fw_name = "disk", .qdev.desc = "virtual SCSI CD-ROM", .qdev.size = sizeof(SCSIDiskState), .qdev.reset = scsi_disk_reset, .init = scsi_cd_initfn, .destroy = scsi_destroy, .alloc_req = scsi_new_request, .qdev.props = (Property[]) { DEFINE_SCSI_DISK_PROPERTIES(), DEFINE_PROP_END_OF_LIST(), }, },{ .qdev.name = "scsi-disk", /* legacy -device scsi-disk */ .qdev.fw_name = "disk", .qdev.desc = "virtual SCSI disk or CD-ROM (legacy)", .qdev.size = sizeof(SCSIDiskState), .qdev.reset = scsi_disk_reset, .init = scsi_disk_initfn, .destroy = scsi_destroy, .alloc_req = scsi_new_request, .qdev.props = (Property[]) { DEFINE_SCSI_DISK_PROPERTIES(), 
DEFINE_PROP_BIT("removable", SCSIDiskState, removable, 0, false), DEFINE_PROP_END_OF_LIST(), } } }; static void scsi_disk_register_devices(void) { int i; for (i = 0; i < ARRAY_SIZE(scsi_disk_info); i++) { scsi_qdev_register(&scsi_disk_info[i]); } } device_init(scsi_disk_register_devices)
static void scsi_read_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF("Read buf_len=%zd\n", r->iov.iov_len); r->sector_count = 0; scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF("Read sector_count=%d\n", r->sector_count); if (r->sector_count == 0) { /* This also clears the sense buffer for REQUEST SENSE. */ scsi_req_complete(&r->req, GOOD); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { DPRINTF("Data transfer direction invalid\n"); scsi_read_complete(r, -EINVAL); return; } n = r->sector_count; if (n > SCSI_DMA_BUF_SIZE / 512) n = SCSI_DMA_BUF_SIZE / 512; if (s->tray_open) { scsi_read_complete(r, -ENOMEDIUM); } r->iov.iov_len = n * 512; qemu_iovec_init_external(&r->qiov, &r->iov, 1); bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ); r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n, scsi_read_complete, r); if (r->req.aiocb == NULL) { scsi_read_complete(r, -EIO); } }
static void scsi_read_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF("Read buf_len=%zd\n", r->iov.iov_len); r->sector_count = 0; scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF("Read sector_count=%d\n", r->sector_count); if (r->sector_count == 0) { /* This also clears the sense buffer for REQUEST SENSE. */ scsi_req_complete(&r->req, GOOD); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { DPRINTF("Data transfer direction invalid\n"); scsi_read_complete(r, -EINVAL); return; } if (s->tray_open) { scsi_read_complete(r, -ENOMEDIUM); } n = scsi_init_iovec(r); bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ); r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n, scsi_read_complete, r); if (r->req.aiocb == NULL) { scsi_read_complete(r, -EIO); } }
{'added': [(111, 'static uint32_t scsi_init_iovec(SCSIDiskReq *r)'), (112, '{'), (113, ' r->iov.iov_len = MIN(r->sector_count * 512, SCSI_DMA_BUF_SIZE);'), (114, ' qemu_iovec_init_external(&r->qiov, &r->iov, 1);'), (115, ' return r->qiov.size / 512;'), (116, '}'), (117, ''), (135, ' DPRINTF("Data ready tag=0x%x len=%zd\\n", r->req.tag, r->qiov.size);'), (137, ' n = r->qiov.size / 512;'), (140, ' scsi_req_data(&r->req, r->qiov.size);'), (194, ' n = scsi_init_iovec(r);'), (256, ' n = r->qiov.size / 512;'), (262, ' scsi_init_iovec(r);'), (263, ' DPRINTF("Write complete tag=0x%x more=%d\\n", r->req.tag, r->qiov.size);'), (264, ' scsi_req_data(&r->req, r->qiov.size);'), (283, ' n = r->qiov.size / 512;'), (290, ' scsi_write_complete, r);'), (295, ' /* Called for the first time. Ask the driver to send us more data. */')], 'deleted': [(128, ' DPRINTF("Data ready tag=0x%x len=%zd\\n", r->req.tag, r->iov.iov_len);'), (130, ' n = r->iov.iov_len / 512;'), (133, ' scsi_req_data(&r->req, r->iov.iov_len);'), (184, ' n = r->sector_count;'), (185, ' if (n > SCSI_DMA_BUF_SIZE / 512)'), (186, ' n = SCSI_DMA_BUF_SIZE / 512;'), (187, ''), (191, ' r->iov.iov_len = n * 512;'), (192, ' qemu_iovec_init_external(&r->qiov, &r->iov, 1);'), (193, ''), (242, ' uint32_t len;'), (256, ' n = r->iov.iov_len / 512;'), (262, ' len = r->sector_count * 512;'), (263, ' if (len > SCSI_DMA_BUF_SIZE) {'), (264, ' len = SCSI_DMA_BUF_SIZE;'), (265, ' }'), (266, ' r->iov.iov_len = len;'), (267, ' DPRINTF("Write complete tag=0x%x more=%d\\n", r->req.tag, len);'), (268, ' scsi_req_data(&r->req, len);'), (287, ' n = r->iov.iov_len / 512;'), (292, ' qemu_iovec_init_external(&r->qiov, &r->iov, 1);'), (293, ''), (296, ' scsi_write_complete, r);'), (301, ' /* Invoke completion routine to fetch data from host. */')]}
18
24
1,077
7,198
37
286
7
https://github.com/bonzini/qemu
CVE-2011-3346
CWE-119
330
secure_enclave.c
C
trustedGetEncryptedSecretShareAES
/* Modifications Copyright (C) 2019-2020 SKALE Labs Copyright 2018 Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include "secure_enclave_t.h" #include "sgx_tcrypto.h" #include "sgx_tseal.h" #include <sgx_tgmp.h> #include <sgx_trts.h> #include <sgx_key.h> #include "Point.h" #include "DomainParameters.h" #include "Signature.h" #include "Curves.h" #include "DHDkg.h" #include "AESUtils.h" #include "EnclaveConstants.h" #include "EnclaveCommon.h" #include "SIGNED_ENCLAVE_VERSION" #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #define INIT_ERROR_STATE *errString = 0; *errStatus = UNKNOWN_ERROR; #define SET_SUCCESS *errStatus = 0; #define CHECK_STATE(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR((const char*) __FILE__); \ snprintf(errString, BUF_LEN, "State check failed. Check log."); \ *errStatus = -1; \ return;} #define CHECK_STATE_CLEAN(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR(__FILE__); LOG_ERROR(__LINE__); \ snprintf(errString, BUF_LEN, "State check failed. 
Check log."); \ *errStatus = -1; \ goto clean;} #define CHECK_STATUS(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ LOG_ERROR(__FUNCTION__); \ snprintf(errString, BUF_LEN, "failed with status %d : %s", status, __ERRMESSAGE__); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; #define CHECK_STATUS2(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ snprintf(errString, BUF_LEN, __ERRMESSAGE__, status); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; void *(*gmp_realloc_func)(void *, size_t, size_t); void *(*oc_realloc_func)(void *, size_t, size_t); void (*gmp_free_func)(void *, size_t); void (*oc_free_func)(void *, size_t); void *reallocate_function(void *, size_t, size_t); void free_function(void *, size_t); unsigned char *globalRandom = NULL; #define CALL_ONCE \ static volatile bool called = false;\ if (called) { \ LOG_ERROR(__FUNCTION__); \ LOG_ERROR("This function shouldnt be called twice. Aborting!"); \ abort(); \ } else {called = true;}; void trustedEnclaveInit(uint32_t _logLevel) { CALL_ONCE LOG_INFO(__FUNCTION__); globalLogLevel_ = _logLevel; oc_realloc_func = &reallocate_function; oc_free_func = &free_function; LOG_INFO("Setting memory functions"); mp_get_memory_functions(NULL, &gmp_realloc_func, &gmp_free_func); mp_set_memory_functions(NULL, oc_realloc_func, oc_free_func); LOG_INFO("Calling enclave init"); enclave_init(); LOG_INFO("Reading random"); globalRandom = calloc(32,1); int ret = sgx_read_rand(globalRandom, 32); if(ret != SGX_SUCCESS) { LOG_ERROR("sgx_read_rand failed. Aboring enclave."); abort(); } LOG_INFO("Successfully inited enclave. Signed enclave version:" SIGNED_ENCLAVE_VERSION ); #ifndef SGX_DEBUG LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! NEVER USE IN PRODUCTION!"); #endif #if SGX_DEBUG != 0 LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! 
NEVER USE IN PRODUCTION!"); #endif #if SGX_MODE == SIM LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE SIMULATION MODE! NEVER USE IN PRODUCTION!"); #endif } void free_function(void *ptr, size_t sz) { if (sgx_is_within_enclave(ptr, sz)) gmp_free_func(ptr, sz); else { sgx_status_t status; status = oc_free(ptr, sz); if (status != SGX_SUCCESS) abort(); } } void *reallocate_function(void *ptr, size_t osize, size_t nsize) { uint64_t nptr; sgx_status_t status; if (sgx_is_within_enclave(ptr, osize)) { return gmp_realloc_func(ptr, osize, nsize); } status = oc_realloc(&nptr, ptr, osize, nsize); if (status != SGX_SUCCESS) abort(); /* * If the entire range of allocated memory is not outside the enclave * then something truly terrible has happened. In theory, we could * free() and try again, but would you trust the OS at this point? */ if (!sgx_is_outside_enclave((void *) ptr, nsize)) abort(); return (void *) nptr; } void get_global_random(unsigned char *_randBuff, uint64_t _size) { char errString[BUF_LEN]; int status; int *errStatus = &status; INIT_ERROR_STATE CHECK_STATE(_size <= 32) CHECK_STATE(_randBuff); sgx_sha_state_handle_t shaStateHandle; CHECK_STATE(sgx_sha256_init(&shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_update(globalRandom, 32, shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_get_hash(shaStateHandle, (sgx_sha256_hash_t *)globalRandom) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_close(shaStateHandle) == SGX_SUCCESS); memcpy(_randBuff, globalRandom, _size); } void sealHexSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); CHECK_STATE(strnlen(sek_hex, 33) == 32) uint64_t plaintextLen = strlen(sek_hex) + 1; uint64_t sealedLen = sgx_calc_sealed_data_size(0, plaintextLen); sgx_attributes_t attribute_mask; attribute_mask.flags = 0xfffffffffffffff3; attribute_mask.xfrm = 0x0; sgx_misc_select_t misc 
= 0xF0000000; sgx_status_t status = sgx_seal_data_ex(SGX_KEYPOLICY_MRENCLAVE, attribute_mask, misc, 0, NULL, plaintextLen, (uint8_t *) sek_hex, sealedLen, (sgx_sealed_data_t *) encrypted_sek); CHECK_STATUS("seal SEK failed after SEK generation"); uint32_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(encrypt_text_length = plaintextLen); SAFE_CHAR_BUF(unsealedKey, BUF_LEN); uint32_t decLen = BUF_LEN; uint32_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(add_text_length == 0); CHECK_STATE(sgx_is_within_enclave(encrypted_sek,sizeof(sgx_sealed_data_t))); status = sgx_unseal_data((const sgx_sealed_data_t *)encrypted_sek, NULL, NULL, (uint8_t *) unsealedKey, &decLen ); CHECK_STATUS("seal/unseal SEK failed after SEK generation in unseal"); *enc_len = sealedLen; SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE); carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex); memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK(int *errStatus, char *errString, uint8_t *encrypted_sek) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); SAFE_CHAR_BUF(aes_key_hex, BUF_LEN); uint32_t dec_len = BUF_LEN; sgx_status_t status = sgx_unseal_data( (const sgx_sealed_data_t *) encrypted_sek, NULL, 0, (uint8_t *)aes_key_hex, &dec_len); if (status == 0x3001) { LOG_ERROR("Could not decrypt LevelDB storage! 
\n" "If you upgraded sgxwallet software or if you are restoring from backup, please run sgxwallet with -b flag and " "pass your backup key."); } CHECK_STATUS2("sgx unseal SEK failed with status %d"); uint64_t len; hex2carray(aes_key_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK_backup(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, const char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); uint64_t len; hex2carray(sek_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, (char *)sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t *enc_len, char *pub_key_x, char *pub_key_y) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); RANDOM_CHAR_BUF(rand_char, 32); mpz_t seed; mpz_init(seed); mpz_t skey; mpz_init(skey); point Pkey = point_init(); mpz_import(seed, 32, 1, sizeof(rand_char[0]), 0, 0, rand_char); mpz_mod(skey, seed, curve->p); signature_extract_public_key(Pkey, skey, curve); SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, Pkey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, Pkey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SAFE_CHAR_BUF(skey_str, ECDSA_SKEY_LEN);SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2); 
mpz_get_str(arr_skey_str, ECDSA_SKEY_BASE, skey); n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { skey_str[i] = '0'; } strncpy(skey_str + n_zeroes, arr_skey_str, 65 - n_zeroes); skey_str[ECDSA_SKEY_LEN - 1] = 0; snprintf(errString, BUF_LEN, "skey len is %d\n", (int) strlen(skey_str)); int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN); CHECK_STATUS("ecdsa private key encryption failed"); *enc_len = strlen(skey_str) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, ECDSA_SKEY_LEN); CHECK_STATUS2("ecdsa private key decr failed with status %d"); SET_SUCCESS clean: mpz_clear(seed); mpz_clear(skey); point_clear(Pkey); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + 
n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; } static uint64_t sigCounter = 0; void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, const char *hash, char *sigR, char *sigS, uint8_t *sig_v, int base) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(hash); CHECK_STATE(sigR); CHECK_STATE(sigS); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); mpz_t msgMpz; mpz_init(msgMpz); signature sign = signature_init(); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; if (mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid secret key"); LOG_ERROR(errString); goto clean; } if (mpz_set_str(msgMpz, hash, 16) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid message hash"); LOG_ERROR(errString); goto clean; } signature_sign(sign, msgMpz, privateKeyMpz, curve); sigCounter++; if (sigCounter % 1000 == 0) { point Pkey = point_init(); signature_extract_public_key(Pkey, privateKeyMpz, curve); if (!signature_verify(msgMpz, sign, Pkey, curve)) { *errStatus = -2; snprintf(errString, BUF_LEN, "signature is not verified! 
"); point_clear(Pkey); goto clean; } point_clear(Pkey); } SAFE_CHAR_BUF(arrM, BUF_LEN); mpz_get_str(arrM, 16, msgMpz); snprintf(errString, BUF_LEN, "message is %s ", arrM); SAFE_CHAR_BUF(arrR, BUF_LEN); mpz_get_str(arrR, base, sign->r); strncpy(sigR, arrR, 1024); SAFE_CHAR_BUF(arrS, BUF_LEN); mpz_get_str(arrS, base, sign->s); strncpy(sigS, arrS, 1024); *sig_v = sign->v; SET_SUCCESS clean: mpz_clear(privateKeyMpz); mpz_clear(msgMpz); signature_free(sign); LOG_DEBUG(__FUNCTION__ ); LOG_DEBUG("SGX call completed"); } void trustedDecryptKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(key); *errStatus = -9; int status = AES_decrypt_DH(encryptedPrivateKey, enc_len, key, 3072); if (status != 0) { *errStatus = status; snprintf(errString, BUF_LEN, "aes decrypt failed with status %d", status); LOG_ERROR(errString); goto clean; } *errStatus = -10; uint64_t keyLen = strnlen(key, MAX_KEY_LENGTH); if (keyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Key is not null terminated"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; } void trustedEncryptKeyAES(int *errStatus, char *errString, const char *key, uint8_t *encryptedPrivateKey, uint32_t *enc_len) { LOG_INFO(__FUNCTION__); *errString = 0; *errStatus = UNKNOWN_ERROR; CHECK_STATE(key); CHECK_STATE(encryptedPrivateKey); *errStatus = UNKNOWN_ERROR; int status = AES_encrypt_DH((char *)key, encryptedPrivateKey, BUF_LEN); CHECK_STATUS2("AES encrypt failed with status %d"); *enc_len = strlen(key) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SAFE_CHAR_BUF(decryptedKey, BUF_LEN); status = AES_decrypt_DH(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN); CHECK_STATUS2("trustedDecryptKey failed with status %d"); uint64_t decryptedKeyLen = strnlen(decryptedKey, MAX_KEY_LENGTH); if (decryptedKeyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Decrypted key is not null 
terminated"); LOG_ERROR(errString); goto clean; } *errStatus = -8; if (strncmp(key, decryptedKey, MAX_KEY_LENGTH) != 0) { snprintf(errString, BUF_LEN, "Decrypted key does not match original key"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedBlsSignMessageAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *_hashX, char *_hashY, char *signature) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(_hashX); CHECK_STATE(_hashY); CHECK_STATE(signature); SAFE_CHAR_BUF(key, BUF_LEN);SAFE_CHAR_BUF(sig, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, enc_len, key, BUF_LEN); CHECK_STATUS("AES decrypt failed") if (!enclave_sign(key, _hashX, _hashY, sig)) { strncpy(errString, "Enclave failed to create bls signature", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } strncpy(signature, sig, BUF_LEN); if (strnlen(signature, BUF_LEN) < 10) { strncpy(errString, "Signature too short", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } SET_SUCCESS LOG_DEBUG("SGX call completed"); clean: ; LOG_DEBUG("SGX call completed"); } void trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t *enc_len, size_t _t) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); SAFE_CHAR_BUF(dkg_secret, DKG_BUFER_LENGTH); int status = gen_dkg_poly(dkg_secret, _t); CHECK_STATUS("gen_dkg_poly failed") status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN); CHECK_STATUS("SGX AES encrypt DKG poly failed"); *enc_len = strlen(dkg_secret) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SAFE_CHAR_BUF(decr_dkg_secret, DKG_BUFER_LENGTH); status = AES_decrypt(encrypted_dkg_secret, *enc_len, decr_dkg_secret, DKG_BUFER_LENGTH); CHECK_STATUS("aes decrypt dkg poly failed"); if (strcmp(dkg_secret, decr_dkg_secret) != 0) { snprintf(errString, BUF_LEN, 
"encrypted poly is not equal to decrypted poly"); LOG_ERROR(errString); *errStatus = -333; goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len, uint8_t *decrypted_dkg_secret) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(decrypted_dkg_secret); int status = AES_decrypt(encrypted_dkg_secret, enc_len, (char *) decrypted_dkg_secret, 3072); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint32_t enc_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_poly); memset(getThreadLocalDecryptedDkgPoly(), 0, DKG_BUFER_LENGTH); int status = AES_decrypt(encrypted_poly, enc_len, (char *) getThreadLocalDecryptedDkgPoly(), DKG_BUFER_LENGTH); CHECK_STATUS2("sgx_unseal_data - encrypted_poly failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len, char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n, uint8_t ind) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE uint32_t enc_len; int status; CHECK_STATE(encrypted_skey); CHECK_STATE(result_str); CHECK_STATE(s_shareG2); CHECK_STATE(pub_keyB); LOG_DEBUG(__FUNCTION__); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN); trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y); CHECK_STATUS("trustedGenerateEcdsaKeyAES failed"); status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN); skey[ECDSA_SKEY_LEN - 1] = 0; CHECK_STATUS2("AES_decrypt failed (in 
trustedGetEncryptedSecretShareAES) with status %d"); *dec_len = enc_len; SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len, char *public_shares, unsigned _t, unsigned _n) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(public_shares); CHECK_STATE(_t <= _n && _n > 0) SAFE_CHAR_BUF(decrypted_dkg_secret, DKG_MAX_SEALED_LEN); int status = AES_decrypt(encrypted_dkg_secret, enc_len, decrypted_dkg_secret, DKG_MAX_SEALED_LEN); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d"); status = calc_public_shares(decrypted_dkg_secret, public_shares, _t) != 0; CHECK_STATUS("t does not match polynomial in db"); SET_SUCCESS clean: ; LOG_INFO("SGX call completed"); } void trustedDkgVerifyAES(int *errStatus, char *errString, const char *public_shares, const char *s_share, uint8_t *encryptedPrivateKey, uint64_t enc_len, unsigned _t, int _ind, int *result) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(public_shares); CHECK_STATE(s_share); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t s; mpz_init(s); int status = 
AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed (in trustedDkgVerifyAES) with status %d"); SAFE_CHAR_BUF(encr_sshare, ECDSA_SKEY_LEN); strncpy(encr_sshare, s_share, ECDSA_SKEY_LEN - 1); SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); SAFE_CHAR_BUF(decr_sshare, ECDSA_SKEY_LEN); status=xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed") status = mpz_set_str(s, decr_sshare, 16); CHECK_STATUS("invalid decr secret share"); *result = Verification(public_shares, s, _t, _ind); SET_SUCCESS clean: mpz_clear(s); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedCreateBlsKeyAES(int *errStatus, char *errString, const char *s_shares, uint8_t *encryptedPrivateKey, uint64_t key_len, uint8_t *encr_bls_key, uint32_t *enc_bls_key_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(s_shares); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(encr_bls_key); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t sum; mpz_init(sum); mpz_set_ui(sum, 0); mpz_t q; mpz_init(q); mpz_set_str(q, "21888242871839275222246405745257275088548364400416034343698204186575808495617", 10); mpz_t bls_key; mpz_init(bls_key); int status = AES_decrypt(encryptedPrivateKey, key_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[ECDSA_SKEY_LEN - 1] = 0; int num_shares = strlen(s_shares) / 192; for (int i = 0; i < num_shares; i++) { SAFE_CHAR_BUF(encr_sshare, 65); strncpy(encr_sshare, s_shares + 192 * i, 64); encr_sshare[64] = 0; SAFE_CHAR_BUF(s_share, 193); strncpy(s_share, s_shares + 192 * i, 192); s_share[192] = 0; SAFE_CHAR_BUF(common_key, 65); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); common_key[64] = 0; SAFE_CHAR_BUF(decr_sshare, 65); status = xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt 
failed"); decr_sshare[64] = 0; mpz_t decr_secret_share; mpz_init(decr_secret_share); if (mpz_set_str(decr_secret_share, decr_sshare, 16) == -1) { *errStatus = 111; snprintf(errString, BUF_LEN, "invalid decrypted secret share"); LOG_ERROR(errString); mpz_clear(decr_secret_share); goto clean; } mpz_addmul_ui(sum, decr_secret_share, 1); mpz_clear(decr_secret_share); } mpz_mod(bls_key, sum, q); SAFE_CHAR_BUF(key_share, BLS_KEY_LENGTH); SAFE_CHAR_BUF(arr_skey_str, BUF_LEN); mpz_get_str(arr_skey_str, 16, bls_key); int n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { key_share[i] = '0'; } strncpy(key_share + n_zeroes, arr_skey_str, 65 - n_zeroes); key_share[BLS_KEY_LENGTH - 1] = 0; status = AES_encrypt(key_share, encr_bls_key, BUF_LEN); CHECK_STATUS2("aes encrypt bls private key failed with status %d "); *enc_bls_key_len = strlen(key_share) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SET_SUCCESS clean: mpz_clear(bls_key); mpz_clear(sum); mpz_clear(q); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetBlsPubKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t key_len, char *bls_pub_key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(bls_pub_key); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey_hex, ECDSA_SKEY_LEN); int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, ECDSA_SKEY_LEN); CHECK_STATUS2("AES decrypt failed %d"); skey_hex[ECDSA_SKEY_LEN - 1] = 0; status = calc_bls_public_key(skey_hex, bls_pub_key); CHECK_STATUS("could not calculate bls public key"); SET_SUCCESS static uint64_t counter = 0; clean: if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
/* Modifications Copyright (C) 2019-2020 SKALE Labs Copyright 2018 Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include "secure_enclave_t.h" #include "sgx_tcrypto.h" #include "sgx_tseal.h" #include <sgx_tgmp.h> #include <sgx_trts.h> #include <sgx_key.h> #include "Point.h" #include "DomainParameters.h" #include "Signature.h" #include "Curves.h" #include "DHDkg.h" #include "AESUtils.h" #include "EnclaveConstants.h" #include "EnclaveCommon.h" #include "SIGNED_ENCLAVE_VERSION" #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #define INIT_ERROR_STATE *errString = 0; *errStatus = UNKNOWN_ERROR; #define SET_SUCCESS *errStatus = 0; #define CHECK_STATE(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR((const char*) __FILE__); \ snprintf(errString, BUF_LEN, "State check failed. Check log."); \ *errStatus = -1; \ return;} #define CHECK_STATE_CLEAN(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR(__FILE__); LOG_ERROR(__LINE__); \ snprintf(errString, BUF_LEN, "State check failed. 
Check log."); \ *errStatus = -1; \ goto clean;} #define CHECK_STATUS(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ LOG_ERROR(__FUNCTION__); \ snprintf(errString, BUF_LEN, "failed with status %d : %s", status, __ERRMESSAGE__); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; #define CHECK_STATUS2(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ snprintf(errString, BUF_LEN, __ERRMESSAGE__, status); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; void *(*gmp_realloc_func)(void *, size_t, size_t); void *(*oc_realloc_func)(void *, size_t, size_t); void (*gmp_free_func)(void *, size_t); void (*oc_free_func)(void *, size_t); void *reallocate_function(void *, size_t, size_t); void free_function(void *, size_t); unsigned char *globalRandom = NULL; #define CALL_ONCE \ static volatile bool called = false;\ if (called) { \ LOG_ERROR(__FUNCTION__); \ LOG_ERROR("This function shouldnt be called twice. Aborting!"); \ abort(); \ } else {called = true;}; void trustedEnclaveInit(uint64_t _logLevel) { CALL_ONCE LOG_INFO(__FUNCTION__); globalLogLevel_ = _logLevel; oc_realloc_func = &reallocate_function; oc_free_func = &free_function; LOG_INFO("Setting memory functions"); mp_get_memory_functions(NULL, &gmp_realloc_func, &gmp_free_func); mp_set_memory_functions(NULL, oc_realloc_func, oc_free_func); LOG_INFO("Calling enclave init"); enclave_init(); LOG_INFO("Reading random"); globalRandom = calloc(32,1); int ret = sgx_read_rand(globalRandom, 32); if(ret != SGX_SUCCESS) { LOG_ERROR("sgx_read_rand failed. Aboring enclave."); abort(); } LOG_INFO("Successfully inited enclave. Signed enclave version:" SIGNED_ENCLAVE_VERSION ); #ifndef SGX_DEBUG LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! NEVER USE IN PRODUCTION!"); #endif #if SGX_DEBUG != 0 LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! 
NEVER USE IN PRODUCTION!"); #endif #if SGX_MODE == SIM LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE SIMULATION MODE! NEVER USE IN PRODUCTION!"); #endif } void free_function(void *ptr, size_t sz) { if (sgx_is_within_enclave(ptr, sz)) gmp_free_func(ptr, sz); else { sgx_status_t status; status = oc_free(ptr, sz); if (status != SGX_SUCCESS) abort(); } } void *reallocate_function(void *ptr, size_t osize, size_t nsize) { uint64_t nptr; sgx_status_t status; if (sgx_is_within_enclave(ptr, osize)) { return gmp_realloc_func(ptr, osize, nsize); } status = oc_realloc(&nptr, ptr, osize, nsize); if (status != SGX_SUCCESS) abort(); /* * If the entire range of allocated memory is not outside the enclave * then something truly terrible has happened. In theory, we could * free() and try again, but would you trust the OS at this point? */ if (!sgx_is_outside_enclave((void *) ptr, nsize)) abort(); return (void *) nptr; } void get_global_random(unsigned char *_randBuff, uint64_t _size) { char errString[BUF_LEN]; int status; int *errStatus = &status; INIT_ERROR_STATE CHECK_STATE(_size <= 32) CHECK_STATE(_randBuff); sgx_sha_state_handle_t shaStateHandle; CHECK_STATE(sgx_sha256_init(&shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_update(globalRandom, 32, shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_get_hash(shaStateHandle, (sgx_sha256_hash_t *)globalRandom) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_close(shaStateHandle) == SGX_SUCCESS); memcpy(_randBuff, globalRandom, _size); } void sealHexSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); CHECK_STATE(strnlen(sek_hex, 33) == 32) uint64_t plaintextLen = strlen(sek_hex) + 1; uint64_t sealedLen = sgx_calc_sealed_data_size(0, plaintextLen); sgx_attributes_t attribute_mask; attribute_mask.flags = 0xfffffffffffffff3; attribute_mask.xfrm = 0x0; sgx_misc_select_t misc 
= 0xF0000000; sgx_status_t status = sgx_seal_data_ex(SGX_KEYPOLICY_MRENCLAVE, attribute_mask, misc, 0, NULL, plaintextLen, (uint8_t *) sek_hex, sealedLen, (sgx_sealed_data_t *) encrypted_sek); CHECK_STATUS("seal SEK failed after SEK generation"); uint64_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(encrypt_text_length = plaintextLen); SAFE_CHAR_BUF(unsealedKey, BUF_LEN); uint32_t decLen = BUF_LEN; uint64_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(add_text_length == 0); CHECK_STATE(sgx_is_within_enclave(encrypted_sek,sizeof(sgx_sealed_data_t))); status = sgx_unseal_data((const sgx_sealed_data_t *)encrypted_sek, NULL, NULL, (uint8_t *) unsealedKey, &decLen ); CHECK_STATUS("seal/unseal SEK failed after SEK generation in unseal"); *enc_len = sealedLen; SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE); carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex); memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK(int *errStatus, char *errString, uint8_t *encrypted_sek) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); SAFE_CHAR_BUF(aes_key_hex, BUF_LEN); uint32_t dec_len = BUF_LEN; sgx_status_t status = sgx_unseal_data( (const sgx_sealed_data_t *) encrypted_sek, NULL, 0, (uint8_t *)aes_key_hex, &dec_len); if (status == 0x3001) { LOG_ERROR("Could not decrypt LevelDB storage! 
\n" "If you upgraded sgxwallet software or if you are restoring from backup, please run sgxwallet with -b flag and " "pass your backup key."); } CHECK_STATUS2("sgx unseal SEK failed with status %d"); uint64_t len; hex2carray(aes_key_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK_backup(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, const char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); uint64_t len; hex2carray(sek_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, (char *)sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t *enc_len, char *pub_key_x, char *pub_key_y) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); RANDOM_CHAR_BUF(rand_char, 32); mpz_t seed; mpz_init(seed); mpz_t skey; mpz_init(skey); point Pkey = point_init(); mpz_import(seed, 32, 1, sizeof(rand_char[0]), 0, 0, rand_char); mpz_mod(skey, seed, curve->p); signature_extract_public_key(Pkey, skey, curve); SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, Pkey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, Pkey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SAFE_CHAR_BUF(skey_str, BUF_LEN); SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2); 
mpz_get_str(arr_skey_str, ECDSA_SKEY_BASE, skey); n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { skey_str[i] = '0'; } strncpy(skey_str + n_zeroes, arr_skey_str, 65 - n_zeroes); snprintf(errString, BUF_LEN, "skey len is %d\n", (int) strlen(skey_str)); int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN, ECDSA, NON_DECRYPTABLE, enc_len); CHECK_STATUS("ecdsa private key encryption failed"); status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, BUF_LEN); CHECK_STATUS2("ecdsa private key decr failed with status %d"); SET_SUCCESS clean: mpz_clear(seed); mpz_clear(skey); point_clear(Pkey); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, 
ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; } static uint64_t sigCounter = 0; void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, const char *hash, char *sigR, char *sigS, uint8_t *sig_v, int base) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(hash); CHECK_STATE(sigR); CHECK_STATE(sigS); SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); mpz_t msgMpz; mpz_init(msgMpz); signature sign = signature_init(); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; if (mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid secret key"); LOG_ERROR(errString); goto clean; } if (mpz_set_str(msgMpz, hash, 16) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid message hash"); LOG_ERROR(errString); goto clean; } signature_sign(sign, msgMpz, privateKeyMpz, curve); sigCounter++; if (sigCounter % 1000 == 0) { point Pkey = point_init(); signature_extract_public_key(Pkey, privateKeyMpz, curve); if (!signature_verify(msgMpz, sign, Pkey, curve)) { *errStatus = -2; snprintf(errString, BUF_LEN, "signature is not verified! 
"); point_clear(Pkey); goto clean; } point_clear(Pkey); } SAFE_CHAR_BUF(arrM, BUF_LEN); mpz_get_str(arrM, 16, msgMpz); snprintf(errString, BUF_LEN, "message is %s ", arrM); SAFE_CHAR_BUF(arrR, BUF_LEN); mpz_get_str(arrR, base, sign->r); strncpy(sigR, arrR, 1024); SAFE_CHAR_BUF(arrS, BUF_LEN); mpz_get_str(arrS, base, sign->s); strncpy(sigS, arrS, 1024); *sig_v = sign->v; SET_SUCCESS clean: mpz_clear(privateKeyMpz); mpz_clear(msgMpz); signature_free(sign); LOG_DEBUG(__FUNCTION__ ); LOG_DEBUG("SGX call completed"); } void trustedDecryptKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(key); *errStatus = -9; int status = AES_decrypt(encryptedPrivateKey, enc_len, key, 3072); if (status != 0) { *errStatus = status; snprintf(errString, BUF_LEN, "aes decrypt failed with status %d", status); LOG_ERROR(errString); goto clean; } *errStatus = -10; uint64_t keyLen = strnlen(key, MAX_KEY_LENGTH); if (keyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Key is not null terminated"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; } void trustedEncryptKeyAES(int *errStatus, char *errString, const char *key, uint8_t *encryptedPrivateKey, uint64_t *enc_len) { LOG_INFO(__FUNCTION__); *errString = 0; *errStatus = UNKNOWN_ERROR; CHECK_STATE(key); CHECK_STATE(encryptedPrivateKey); *errStatus = UNKNOWN_ERROR; int status = AES_encrypt((char *)key, encryptedPrivateKey, BUF_LEN, DKG, DECRYPTABLE, enc_len); CHECK_STATUS2("AES encrypt failed with status %d"); SAFE_CHAR_BUF(decryptedKey, BUF_LEN); status = AES_decrypt(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN); CHECK_STATUS2("trustedDecryptKey failed with status %d"); uint64_t decryptedKeyLen = strnlen(decryptedKey, MAX_KEY_LENGTH); if (decryptedKeyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Decrypted key is not null terminated"); LOG_ERROR(errString); goto clean; } 
*errStatus = -8; if (strncmp(key, decryptedKey, MAX_KEY_LENGTH) != 0) { snprintf(errString, BUF_LEN, "Decrypted key does not match original key"); LOG_ERROR(key); LOG_ERROR(decryptedKey); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedBlsSignMessageAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *_hashX, char *_hashY, char *signature) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(_hashX); CHECK_STATE(_hashY); CHECK_STATE(signature); SAFE_CHAR_BUF(key, BUF_LEN);SAFE_CHAR_BUF(sig, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, enc_len, key, BUF_LEN); CHECK_STATUS("AES decrypt failed") if (!enclave_sign(key, _hashX, _hashY, sig)) { strncpy(errString, "Enclave failed to create bls signature", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } strncpy(signature, sig, BUF_LEN); if (strnlen(signature, BUF_LEN) < 10) { strncpy(errString, "Signature too short", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } SET_SUCCESS LOG_DEBUG("SGX call completed"); clean: ; LOG_DEBUG("SGX call completed"); } void trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t *enc_len, size_t _t) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); SAFE_CHAR_BUF(dkg_secret, DKG_BUFER_LENGTH); int status = gen_dkg_poly(dkg_secret, _t); CHECK_STATUS("gen_dkg_poly failed") status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN, DKG, DECRYPTABLE, enc_len); CHECK_STATUS("SGX AES encrypt DKG poly failed"); SAFE_CHAR_BUF(decr_dkg_secret, DKG_BUFER_LENGTH); status = AES_decrypt(encrypted_dkg_secret, *enc_len, decr_dkg_secret, DKG_BUFER_LENGTH); CHECK_STATUS("aes decrypt dkg poly failed"); if (strcmp(dkg_secret, decr_dkg_secret) != 0) { snprintf(errString, BUF_LEN, "encrypted poly is not equal to decrypted poly"); 
LOG_ERROR(errString); *errStatus = -333; goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len, uint8_t *decrypted_dkg_secret) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(decrypted_dkg_secret); int status = AES_decrypt(encrypted_dkg_secret, enc_len, (char *) decrypted_dkg_secret, 3072); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint64_t enc_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_poly); memset(getThreadLocalDecryptedDkgPoly(), 0, DKG_BUFER_LENGTH); int status = AES_decrypt(encrypted_poly, enc_len, (char *) getThreadLocalDecryptedDkgPoly(), DKG_BUFER_LENGTH); CHECK_STATUS2("sgx_unseal_data - encrypted_poly failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint64_t *dec_len, char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n, uint8_t ind) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE uint64_t enc_len; int status; CHECK_STATE(encrypted_skey); CHECK_STATE(result_str); CHECK_STATE(s_shareG2); CHECK_STATE(pub_keyB); LOG_DEBUG(__FUNCTION__); SAFE_CHAR_BUF(skey, BUF_LEN); SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN); trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y); CHECK_STATUS("trustedGenerateEcdsaKeyAES failed"); status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN); skey[ECDSA_SKEY_LEN - 1] = 0; CHECK_STATUS2("AES_decrypt failed (in trustedGetEncryptedSecretShareAES) with status %d"); *dec_len = 
enc_len; SAFE_CHAR_BUF(common_key, BUF_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, BUF_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, BUF_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len, char *public_shares, unsigned _t, unsigned _n) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(public_shares); CHECK_STATE(_t <= _n && _n > 0) SAFE_CHAR_BUF(decrypted_dkg_secret, DKG_MAX_SEALED_LEN); int status = AES_decrypt(encrypted_dkg_secret, enc_len, decrypted_dkg_secret, DKG_MAX_SEALED_LEN); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d"); status = calc_public_shares(decrypted_dkg_secret, public_shares, _t) != 0; CHECK_STATUS("t does not match polynomial in db"); SET_SUCCESS clean: ; LOG_INFO("SGX call completed"); } void trustedDkgVerifyAES(int *errStatus, char *errString, const char *public_shares, const char *s_share, uint8_t *encryptedPrivateKey, uint64_t enc_len, unsigned _t, int _ind, int *result) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(public_shares); CHECK_STATE(s_share); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey,BUF_LEN); mpz_t s; mpz_init(s); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("AES_decrypt failed (in 
trustedDkgVerifyAES) with status %d"); SAFE_CHAR_BUF(encr_sshare, BUF_LEN); strncpy(encr_sshare, s_share, ECDSA_SKEY_LEN - 1); SAFE_CHAR_BUF(common_key, BUF_LEN); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); SAFE_CHAR_BUF(decr_sshare, BUF_LEN); status=xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed") status = mpz_set_str(s, decr_sshare, 16); CHECK_STATUS("invalid decr secret share"); *result = Verification(public_shares, s, _t, _ind); SET_SUCCESS clean: mpz_clear(s); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedCreateBlsKeyAES(int *errStatus, char *errString, const char *s_shares, uint8_t *encryptedPrivateKey, uint64_t key_len, uint8_t *encr_bls_key, uint64_t *enc_bls_key_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(s_shares); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(encr_bls_key); SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t sum; mpz_init(sum); mpz_set_ui(sum, 0); mpz_t q; mpz_init(q); mpz_set_str(q, "21888242871839275222246405745257275088548364400416034343698204186575808495617", 10); mpz_t bls_key; mpz_init(bls_key); int status = AES_decrypt(encryptedPrivateKey, key_len, skey, BUF_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[ECDSA_SKEY_LEN - 1] = 0; int num_shares = strlen(s_shares) / 192; for (int i = 0; i < num_shares; i++) { SAFE_CHAR_BUF(encr_sshare, 65); strncpy(encr_sshare, s_shares + 192 * i, 64); encr_sshare[64] = 0; SAFE_CHAR_BUF(s_share, 193); strncpy(s_share, s_shares + 192 * i, 192); s_share[192] = 0; SAFE_CHAR_BUF(common_key, 65); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); common_key[64] = 0; SAFE_CHAR_BUF(decr_sshare, 65); status = xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed"); decr_sshare[64] = 0; mpz_t decr_secret_share; mpz_init(decr_secret_share); if (mpz_set_str(decr_secret_share, decr_sshare, 16) == -1) 
{ *errStatus = 111; snprintf(errString, BUF_LEN, "invalid decrypted secret share"); LOG_ERROR(errString); mpz_clear(decr_secret_share); goto clean; } mpz_addmul_ui(sum, decr_secret_share, 1); mpz_clear(decr_secret_share); } mpz_mod(bls_key, sum, q); SAFE_CHAR_BUF(key_share, BLS_KEY_LENGTH); SAFE_CHAR_BUF(arr_skey_str, BUF_LEN); mpz_get_str(arr_skey_str, 16, bls_key); int n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { key_share[i] = '0'; } strncpy(key_share + n_zeroes, arr_skey_str, 65 - n_zeroes); key_share[BLS_KEY_LENGTH - 1] = 0; status = AES_encrypt(key_share, encr_bls_key, BUF_LEN, BLS, NON_DECRYPTABLE, enc_bls_key_len); CHECK_STATUS2("aes encrypt bls private key failed with status %d "); SET_SUCCESS clean: mpz_clear(bls_key); mpz_clear(sum); mpz_clear(q); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetBlsPubKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t key_len, char *bls_pub_key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(bls_pub_key); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey_hex, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, BUF_LEN); CHECK_STATUS2("AES decrypt failed %d"); skey_hex[ECDSA_SKEY_LEN - 1] = 0; status = calc_bls_public_key(skey_hex, bls_pub_key); CHECK_STATUS("could not calculate bls public key"); SET_SUCCESS static uint64_t counter = 0; clean: if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len, char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n, uint8_t ind) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE uint32_t enc_len; int status; CHECK_STATE(encrypted_skey); CHECK_STATE(result_str); CHECK_STATE(s_shareG2); CHECK_STATE(pub_keyB); LOG_DEBUG(__FUNCTION__); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN); trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y); CHECK_STATUS("trustedGenerateEcdsaKeyAES failed"); status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN); skey[ECDSA_SKEY_LEN - 1] = 0; CHECK_STATUS2("AES_decrypt failed (in trustedGetEncryptedSecretShareAES) with status %d"); *dec_len = enc_len; SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); }
void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint64_t *dec_len, char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n, uint8_t ind) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE uint64_t enc_len; int status; CHECK_STATE(encrypted_skey); CHECK_STATE(result_str); CHECK_STATE(s_shareG2); CHECK_STATE(pub_keyB); LOG_DEBUG(__FUNCTION__); SAFE_CHAR_BUF(skey, BUF_LEN); SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN); trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y); CHECK_STATUS("trustedGenerateEcdsaKeyAES failed"); status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN); skey[ECDSA_SKEY_LEN - 1] = 0; CHECK_STATUS2("AES_decrypt failed (in trustedGetEncryptedSecretShareAES) with status %d"); *dec_len = enc_len; SAFE_CHAR_BUF(common_key, BUF_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, BUF_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, BUF_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); }
{'added': [(125, 'void trustedEnclaveInit(uint64_t _logLevel) {'), (235, ' uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) {'), (258, ' uint64_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (266, ' uint64_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (282, ' uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) {'), (344, ' uint8_t *encrypted_sek, uint64_t *enc_len, const char *sek_hex) {'), (373, ' uint8_t *encryptedPrivateKey, uint64_t *enc_len, char *pub_key_x, char *pub_key_y) {'), (413, ' SAFE_CHAR_BUF(skey_str, BUF_LEN);'), (414, ' SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2);'), (423, ' int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN,'), (424, ' ECDSA, NON_DECRYPTABLE, enc_len);'), (427, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, BUF_LEN);'), (441, ' uint8_t *encryptedPrivateKey, uint64_t enc_len, char *pub_key_x, char *pub_key_y) {'), (445, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (457, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (517, 'void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len,'), (528, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (536, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (602, ' uint64_t enc_len, char *key) {'), (612, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, key, 3072);'), (638, ' uint8_t *encryptedPrivateKey, uint64_t *enc_len) {'), (649, ' int status = AES_encrypt((char *)key, encryptedPrivateKey, BUF_LEN,'), (650, ' DKG, DECRYPTABLE, enc_len);'), (656, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN);'), (672, ' LOG_ERROR(key);'), (673, ' LOG_ERROR(decryptedKey);'), (687, ' uint64_t enc_len, char *_hashX,'), (729, 'trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t *enc_len, 
size_t _t) {'), (741, ' status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN,'), (742, ' DKG, DECRYPTABLE, enc_len);'), (746, ''), (772, ' uint64_t enc_len,'), (794, 'void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint64_t enc_len) {'), (814, 'void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint64_t *dec_len,'), (821, ' uint64_t enc_len;'), (831, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (839, ' status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN);'), (847, ' SAFE_CHAR_BUF(common_key, BUF_LEN);'), (853, ' SAFE_CHAR_BUF(s_share, BUF_LEN);'), (862, ' SAFE_CHAR_BUF(cypher, BUF_LEN);'), (879, 'void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len,'), (917, ' SAFE_CHAR_BUF(skey,BUF_LEN);'), (922, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (926, ' SAFE_CHAR_BUF(encr_sshare, BUF_LEN);'), (930, ' SAFE_CHAR_BUF(common_key, BUF_LEN);'), (936, ' SAFE_CHAR_BUF(decr_sshare, BUF_LEN);'), (958, ' uint64_t *enc_bls_key_len) {'), (968, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (982, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey, BUF_LEN);'), (1042, ' status = AES_encrypt(key_share, encr_bls_key, BUF_LEN, BLS, NON_DECRYPTABLE, enc_bls_key_len);'), (1066, ' SAFE_CHAR_BUF(skey_hex, BUF_LEN);'), (1068, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, BUF_LEN);')], 'deleted': [(125, 'void trustedEnclaveInit(uint32_t _logLevel) {'), (235, ' uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) {'), (258, ' uint32_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (266, ' uint32_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (282, ' uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) {'), (344, ' uint8_t *encrypted_sek, uint32_t *enc_len, const char *sek_hex) {'), (373, 
' uint8_t *encryptedPrivateKey, uint32_t *enc_len, char *pub_key_x, char *pub_key_y) {'), (413, ' SAFE_CHAR_BUF(skey_str, ECDSA_SKEY_LEN);SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2);'), (420, ' skey_str[ECDSA_SKEY_LEN - 1] = 0;'), (423, ' int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN);'), (426, ' *enc_len = strlen(skey_str) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (427, ''), (428, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, ECDSA_SKEY_LEN);'), (442, ' uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) {'), (446, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (458, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (518, 'void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len,'), (529, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (537, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (603, ' uint32_t enc_len, char *key) {'), (613, ' int status = AES_decrypt_DH(encryptedPrivateKey, enc_len, key, 3072);'), (639, ' uint8_t *encryptedPrivateKey, uint32_t *enc_len) {'), (650, ' int status = AES_encrypt_DH((char *)key, encryptedPrivateKey, BUF_LEN);'), (654, ' *enc_len = strlen(key) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (655, ''), (658, ' status = AES_decrypt_DH(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN);'), (687, ' uint32_t enc_len, char *_hashX,'), (729, 'trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t *enc_len, size_t _t) {'), (741, ' status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN);'), (745, ' *enc_len = strlen(dkg_secret) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (771, ' uint32_t enc_len,'), (793, 'void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint32_t enc_len) {'), (813, 'void trustedGetEncryptedSecretShareAES(int 
*errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len,'), (820, ' uint32_t enc_len;'), (830, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (838, ' status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN);'), (846, ' SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN);'), (852, ' SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN);'), (861, ' SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN);'), (878, 'void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len,'), (916, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (921, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (925, ' SAFE_CHAR_BUF(encr_sshare, ECDSA_SKEY_LEN);'), (929, ' SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN);'), (935, ' SAFE_CHAR_BUF(decr_sshare, ECDSA_SKEY_LEN);'), (957, ' uint32_t *enc_bls_key_len) {'), (967, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (981, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey, ECDSA_SKEY_LEN);'), (1041, ' status = AES_encrypt(key_share, encr_bls_key, BUF_LEN);'), (1045, ' *enc_bls_key_len = strlen(key_share) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (1046, ''), (1067, ' SAFE_CHAR_BUF(skey_hex, ECDSA_SKEY_LEN);'), (1069, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, ECDSA_SKEY_LEN);')]}
52
53
678
4,334
40
304
1
https://github.com/skalenetwork/sgxwallet
CVE-2021-36218
CWE-787
1,064
print-802_11.c
C
parse_elements
/* * Copyright (c) 2001 * Fortress Technologies, Inc. All rights reserved. * Charlie Lenahan (clenahan@fortresstech.com) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: IEEE 802.11 printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "cpack.h" /* Lengths of 802.11 header components. */ #define IEEE802_11_FC_LEN 2 #define IEEE802_11_DUR_LEN 2 #define IEEE802_11_DA_LEN 6 #define IEEE802_11_SA_LEN 6 #define IEEE802_11_BSSID_LEN 6 #define IEEE802_11_RA_LEN 6 #define IEEE802_11_TA_LEN 6 #define IEEE802_11_ADDR1_LEN 6 #define IEEE802_11_SEQ_LEN 2 #define IEEE802_11_CTL_LEN 2 #define IEEE802_11_CARRIED_FC_LEN 2 #define IEEE802_11_HT_CONTROL_LEN 4 #define IEEE802_11_IV_LEN 3 #define IEEE802_11_KID_LEN 1 /* Frame check sequence length. */ #define IEEE802_11_FCS_LEN 4 /* Lengths of beacon components. 
*/ #define IEEE802_11_TSTAMP_LEN 8 #define IEEE802_11_BCNINT_LEN 2 #define IEEE802_11_CAPINFO_LEN 2 #define IEEE802_11_LISTENINT_LEN 2 #define IEEE802_11_AID_LEN 2 #define IEEE802_11_STATUS_LEN 2 #define IEEE802_11_REASON_LEN 2 /* Length of previous AP in reassocation frame */ #define IEEE802_11_AP_LEN 6 #define T_MGMT 0x0 /* management */ #define T_CTRL 0x1 /* control */ #define T_DATA 0x2 /* data */ #define T_RESV 0x3 /* reserved */ #define ST_ASSOC_REQUEST 0x0 #define ST_ASSOC_RESPONSE 0x1 #define ST_REASSOC_REQUEST 0x2 #define ST_REASSOC_RESPONSE 0x3 #define ST_PROBE_REQUEST 0x4 #define ST_PROBE_RESPONSE 0x5 /* RESERVED 0x6 */ /* RESERVED 0x7 */ #define ST_BEACON 0x8 #define ST_ATIM 0x9 #define ST_DISASSOC 0xA #define ST_AUTH 0xB #define ST_DEAUTH 0xC #define ST_ACTION 0xD /* RESERVED 0xE */ /* RESERVED 0xF */ static const struct tok st_str[] = { { ST_ASSOC_REQUEST, "Assoc Request" }, { ST_ASSOC_RESPONSE, "Assoc Response" }, { ST_REASSOC_REQUEST, "ReAssoc Request" }, { ST_REASSOC_RESPONSE, "ReAssoc Response" }, { ST_PROBE_REQUEST, "Probe Request" }, { ST_PROBE_RESPONSE, "Probe Response" }, { ST_BEACON, "Beacon" }, { ST_ATIM, "ATIM" }, { ST_DISASSOC, "Disassociation" }, { ST_AUTH, "Authentication" }, { ST_DEAUTH, "DeAuthentication" }, { ST_ACTION, "Action" }, { 0, NULL } }; #define CTRL_CONTROL_WRAPPER 0x7 #define CTRL_BAR 0x8 #define CTRL_BA 0x9 #define CTRL_PS_POLL 0xA #define CTRL_RTS 0xB #define CTRL_CTS 0xC #define CTRL_ACK 0xD #define CTRL_CF_END 0xE #define CTRL_END_ACK 0xF static const struct tok ctrl_str[] = { { CTRL_CONTROL_WRAPPER, "Control Wrapper" }, { CTRL_BAR, "BAR" }, { CTRL_BA, "BA" }, { CTRL_PS_POLL, "Power Save-Poll" }, { CTRL_RTS, "Request-To-Send" }, { CTRL_CTS, "Clear-To-Send" }, { CTRL_ACK, "Acknowledgment" }, { CTRL_CF_END, "CF-End" }, { CTRL_END_ACK, "CF-End+CF-Ack" }, { 0, NULL } }; #define DATA_DATA 0x0 #define DATA_DATA_CF_ACK 0x1 #define DATA_DATA_CF_POLL 0x2 #define DATA_DATA_CF_ACK_POLL 0x3 #define DATA_NODATA 0x4 #define 
DATA_NODATA_CF_ACK 0x5 #define DATA_NODATA_CF_POLL 0x6 #define DATA_NODATA_CF_ACK_POLL 0x7 #define DATA_QOS_DATA 0x8 #define DATA_QOS_DATA_CF_ACK 0x9 #define DATA_QOS_DATA_CF_POLL 0xA #define DATA_QOS_DATA_CF_ACK_POLL 0xB #define DATA_QOS_NODATA 0xC #define DATA_QOS_CF_POLL_NODATA 0xE #define DATA_QOS_CF_ACK_POLL_NODATA 0xF /* * The subtype field of a data frame is, in effect, composed of 4 flag * bits - CF-Ack, CF-Poll, Null (means the frame doesn't actually have * any data), and QoS. */ #define DATA_FRAME_IS_CF_ACK(x) ((x) & 0x01) #define DATA_FRAME_IS_CF_POLL(x) ((x) & 0x02) #define DATA_FRAME_IS_NULL(x) ((x) & 0x04) #define DATA_FRAME_IS_QOS(x) ((x) & 0x08) /* * Bits in the frame control field. */ #define FC_VERSION(fc) ((fc) & 0x3) #define FC_TYPE(fc) (((fc) >> 2) & 0x3) #define FC_SUBTYPE(fc) (((fc) >> 4) & 0xF) #define FC_TO_DS(fc) ((fc) & 0x0100) #define FC_FROM_DS(fc) ((fc) & 0x0200) #define FC_MORE_FLAG(fc) ((fc) & 0x0400) #define FC_RETRY(fc) ((fc) & 0x0800) #define FC_POWER_MGMT(fc) ((fc) & 0x1000) #define FC_MORE_DATA(fc) ((fc) & 0x2000) #define FC_PROTECTED(fc) ((fc) & 0x4000) #define FC_ORDER(fc) ((fc) & 0x8000) struct mgmt_header_t { uint16_t fc; uint16_t duration; uint8_t da[IEEE802_11_DA_LEN]; uint8_t sa[IEEE802_11_SA_LEN]; uint8_t bssid[IEEE802_11_BSSID_LEN]; uint16_t seq_ctrl; }; #define MGMT_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\ IEEE802_11_DA_LEN+IEEE802_11_SA_LEN+\ IEEE802_11_BSSID_LEN+IEEE802_11_SEQ_LEN) #define CAPABILITY_ESS(cap) ((cap) & 0x0001) #define CAPABILITY_IBSS(cap) ((cap) & 0x0002) #define CAPABILITY_CFP(cap) ((cap) & 0x0004) #define CAPABILITY_CFP_REQ(cap) ((cap) & 0x0008) #define CAPABILITY_PRIVACY(cap) ((cap) & 0x0010) struct ssid_t { uint8_t element_id; uint8_t length; u_char ssid[33]; /* 32 + 1 for null */ }; struct rates_t { uint8_t element_id; uint8_t length; uint8_t rate[16]; }; struct challenge_t { uint8_t element_id; uint8_t length; uint8_t text[254]; /* 1-253 + 1 for null */ }; struct fh_t { uint8_t element_id; 
uint8_t length; uint16_t dwell_time; uint8_t hop_set; uint8_t hop_pattern; uint8_t hop_index; }; struct ds_t { uint8_t element_id; uint8_t length; uint8_t channel; }; struct cf_t { uint8_t element_id; uint8_t length; uint8_t count; uint8_t period; uint16_t max_duration; uint16_t dur_remaing; }; struct tim_t { uint8_t element_id; uint8_t length; uint8_t count; uint8_t period; uint8_t bitmap_control; uint8_t bitmap[251]; }; #define E_SSID 0 #define E_RATES 1 #define E_FH 2 #define E_DS 3 #define E_CF 4 #define E_TIM 5 #define E_IBSS 6 /* reserved 7 */ /* reserved 8 */ /* reserved 9 */ /* reserved 10 */ /* reserved 11 */ /* reserved 12 */ /* reserved 13 */ /* reserved 14 */ /* reserved 15 */ /* reserved 16 */ #define E_CHALLENGE 16 /* reserved 17 */ /* reserved 18 */ /* reserved 19 */ /* reserved 16 */ /* reserved 16 */ struct mgmt_body_t { uint8_t timestamp[IEEE802_11_TSTAMP_LEN]; uint16_t beacon_interval; uint16_t listen_interval; uint16_t status_code; uint16_t aid; u_char ap[IEEE802_11_AP_LEN]; uint16_t reason_code; uint16_t auth_alg; uint16_t auth_trans_seq_num; int challenge_present; struct challenge_t challenge; uint16_t capability_info; int ssid_present; struct ssid_t ssid; int rates_present; struct rates_t rates; int ds_present; struct ds_t ds; int cf_present; struct cf_t cf; int fh_present; struct fh_t fh; int tim_present; struct tim_t tim; }; struct ctrl_control_wrapper_hdr_t { uint16_t fc; uint16_t duration; uint8_t addr1[IEEE802_11_ADDR1_LEN]; uint16_t carried_fc[IEEE802_11_CARRIED_FC_LEN]; uint16_t ht_control[IEEE802_11_HT_CONTROL_LEN]; }; #define CTRL_CONTROL_WRAPPER_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\ IEEE802_11_ADDR1_LEN+\ IEEE802_11_CARRIED_FC_LEN+\ IEEE802_11_HT_CONTROL_LEN) struct ctrl_rts_hdr_t { uint16_t fc; uint16_t duration; uint8_t ra[IEEE802_11_RA_LEN]; uint8_t ta[IEEE802_11_TA_LEN]; }; #define CTRL_RTS_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\ IEEE802_11_RA_LEN+IEEE802_11_TA_LEN) struct ctrl_cts_hdr_t { uint16_t fc; uint16_t 
duration; uint8_t ra[IEEE802_11_RA_LEN]; }; #define CTRL_CTS_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+IEEE802_11_RA_LEN) struct ctrl_ack_hdr_t { uint16_t fc; uint16_t duration; uint8_t ra[IEEE802_11_RA_LEN]; }; #define CTRL_ACK_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+IEEE802_11_RA_LEN) struct ctrl_ps_poll_hdr_t { uint16_t fc; uint16_t aid; uint8_t bssid[IEEE802_11_BSSID_LEN]; uint8_t ta[IEEE802_11_TA_LEN]; }; #define CTRL_PS_POLL_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_AID_LEN+\ IEEE802_11_BSSID_LEN+IEEE802_11_TA_LEN) struct ctrl_end_hdr_t { uint16_t fc; uint16_t duration; uint8_t ra[IEEE802_11_RA_LEN]; uint8_t bssid[IEEE802_11_BSSID_LEN]; }; #define CTRL_END_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\ IEEE802_11_RA_LEN+IEEE802_11_BSSID_LEN) struct ctrl_end_ack_hdr_t { uint16_t fc; uint16_t duration; uint8_t ra[IEEE802_11_RA_LEN]; uint8_t bssid[IEEE802_11_BSSID_LEN]; }; #define CTRL_END_ACK_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\ IEEE802_11_RA_LEN+IEEE802_11_BSSID_LEN) struct ctrl_ba_hdr_t { uint16_t fc; uint16_t duration; uint8_t ra[IEEE802_11_RA_LEN]; }; #define CTRL_BA_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+IEEE802_11_RA_LEN) struct ctrl_bar_hdr_t { uint16_t fc; uint16_t dur; uint8_t ra[IEEE802_11_RA_LEN]; uint8_t ta[IEEE802_11_TA_LEN]; uint16_t ctl; uint16_t seq; }; #define CTRL_BAR_HDRLEN (IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\ IEEE802_11_RA_LEN+IEEE802_11_TA_LEN+\ IEEE802_11_CTL_LEN+IEEE802_11_SEQ_LEN) struct meshcntl_t { uint8_t flags; uint8_t ttl; uint8_t seq[4]; uint8_t addr4[6]; uint8_t addr5[6]; uint8_t addr6[6]; }; #define IV_IV(iv) ((iv) & 0xFFFFFF) #define IV_PAD(iv) (((iv) >> 24) & 0x3F) #define IV_KEYID(iv) (((iv) >> 30) & 0x03) #define PRINT_SSID(p) \ if (p.ssid_present) { \ ND_PRINT((ndo, " (")); \ fn_print(ndo, p.ssid.ssid, NULL); \ ND_PRINT((ndo, ")")); \ } #define PRINT_RATE(_sep, _r, _suf) \ ND_PRINT((ndo, "%s%2.1f%s", _sep, (.5 * ((_r) & 0x7f)), _suf)) #define PRINT_RATES(p) \ if (p.rates_present) { \ int z; \ const char 
*sep = " ["; \ for (z = 0; z < p.rates.length ; z++) { \ PRINT_RATE(sep, p.rates.rate[z], \ (p.rates.rate[z] & 0x80 ? "*" : "")); \ sep = " "; \ } \ if (p.rates.length != 0) \ ND_PRINT((ndo, " Mbit]")); \ } #define PRINT_DS_CHANNEL(p) \ if (p.ds_present) \ ND_PRINT((ndo, " CH: %u", p.ds.channel)); \ ND_PRINT((ndo, "%s", \ CAPABILITY_PRIVACY(p.capability_info) ? ", PRIVACY" : "")); #define MAX_MCS_INDEX 76 /* * Indices are: * * the MCS index (0-76); * * 0 for 20 MHz, 1 for 40 MHz; * * 0 for a long guard interval, 1 for a short guard interval. */ static const float ieee80211_float_htrates[MAX_MCS_INDEX+1][2][2] = { /* MCS 0 */ { /* 20 Mhz */ { 6.5, /* SGI */ 7.2, }, /* 40 Mhz */ { 13.5, /* SGI */ 15.0, }, }, /* MCS 1 */ { /* 20 Mhz */ { 13.0, /* SGI */ 14.4, }, /* 40 Mhz */ { 27.0, /* SGI */ 30.0, }, }, /* MCS 2 */ { /* 20 Mhz */ { 19.5, /* SGI */ 21.7, }, /* 40 Mhz */ { 40.5, /* SGI */ 45.0, }, }, /* MCS 3 */ { /* 20 Mhz */ { 26.0, /* SGI */ 28.9, }, /* 40 Mhz */ { 54.0, /* SGI */ 60.0, }, }, /* MCS 4 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, /* SGI */ 90.0, }, }, /* MCS 5 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 6 */ { /* 20 Mhz */ { 58.5, /* SGI */ 65.0, }, /* 40 Mhz */ { 121.5, /* SGI */ 135.0, }, }, /* MCS 7 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 8 */ { /* 20 Mhz */ { 13.0, /* SGI */ 14.4, }, /* 40 Mhz */ { 27.0, /* SGI */ 30.0, }, }, /* MCS 9 */ { /* 20 Mhz */ { 26.0, /* SGI */ 28.9, }, /* 40 Mhz */ { 54.0, /* SGI */ 60.0, }, }, /* MCS 10 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, /* SGI */ 90.0, }, }, /* MCS 11 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 12 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 13 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* 
MCS 14 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 15 */ { /* 20 Mhz */ { 130.0, /* SGI */ 144.4, }, /* 40 Mhz */ { 270.0, /* SGI */ 300.0, }, }, /* MCS 16 */ { /* 20 Mhz */ { 19.5, /* SGI */ 21.7, }, /* 40 Mhz */ { 40.5, /* SGI */ 45.0, }, }, /* MCS 17 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, /* SGI */ 90.0, }, }, /* MCS 18 */ { /* 20 Mhz */ { 58.5, /* SGI */ 65.0, }, /* 40 Mhz */ { 121.5, /* SGI */ 135.0, }, }, /* MCS 19 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 20 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 21 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 22 */ { /* 20 Mhz */ { 175.5, /* SGI */ 195.0, }, /* 40 Mhz */ { 364.5, /* SGI */ 405.0, }, }, /* MCS 23 */ { /* 20 Mhz */ { 195.0, /* SGI */ 216.7, }, /* 40 Mhz */ { 405.0, /* SGI */ 450.0, }, }, /* MCS 24 */ { /* 20 Mhz */ { 26.0, /* SGI */ 28.9, }, /* 40 Mhz */ { 54.0, /* SGI */ 60.0, }, }, /* MCS 25 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 26 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 27 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 28 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 29 */ { /* 20 Mhz */ { 208.0, /* SGI */ 231.1, }, /* 40 Mhz */ { 432.0, /* SGI */ 480.0, }, }, /* MCS 30 */ { /* 20 Mhz */ { 234.0, /* SGI */ 260.0, }, /* 40 Mhz */ { 486.0, /* SGI */ 540.0, }, }, /* MCS 31 */ { /* 20 Mhz */ { 260.0, /* SGI */ 288.9, }, /* 40 Mhz */ { 540.0, /* SGI */ 600.0, }, }, /* MCS 32 */ { /* 20 Mhz */ { 0.0, /* SGI */ 0.0, }, /* not valid */ /* 40 Mhz */ { 6.0, /* SGI */ 6.7, }, }, /* MCS 33 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, 
/* SGI */ 90.0, }, }, /* MCS 34 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 35 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 36 */ { /* 20 Mhz */ { 58.5, /* SGI */ 65.0, }, /* 40 Mhz */ { 121.5, /* SGI */ 135.0, }, }, /* MCS 37 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 38 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 39 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 40 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 41 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 42 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 43 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 44 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 45 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 46 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 47 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 48 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 49 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 50 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 51 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 52 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 53 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 
Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 54 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 55 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 56 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 57 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 58 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 59 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 60 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 61 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 62 */ { /* 20 Mhz */ { 130.0, /* SGI */ 144.4, }, /* 40 Mhz */ { 270.0, /* SGI */ 300.0, }, }, /* MCS 63 */ { /* 20 Mhz */ { 130.0, /* SGI */ 144.4, }, /* 40 Mhz */ { 270.0, /* SGI */ 300.0, }, }, /* MCS 64 */ { /* 20 Mhz */ { 143.0, /* SGI */ 158.9, }, /* 40 Mhz */ { 297.0, /* SGI */ 330.0, }, }, /* MCS 65 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 66 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 67 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 68 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 69 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 70 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 71 */ { /* 20 Mhz */ { 175.5, /* SGI */ 195.0, }, /* 40 Mhz */ { 364.5, /* SGI */ 405.0, }, }, /* MCS 72 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 73 */ { /* 20 Mhz */ { 
175.5, /* SGI */ 195.0, }, /* 40 Mhz */ { 364.5, /* SGI */ 405.0, }, }, /* MCS 74 */ { /* 20 Mhz */ { 195.0, /* SGI */ 216.7, }, /* 40 Mhz */ { 405.0, /* SGI */ 450.0, }, }, /* MCS 75 */ { /* 20 Mhz */ { 195.0, /* SGI */ 216.7, }, /* 40 Mhz */ { 405.0, /* SGI */ 450.0, }, }, /* MCS 76 */ { /* 20 Mhz */ { 214.5, /* SGI */ 238.3, }, /* 40 Mhz */ { 445.5, /* SGI */ 495.0, }, }, }; static const char *auth_alg_text[]={"Open System","Shared Key","EAP"}; #define NUM_AUTH_ALGS (sizeof auth_alg_text / sizeof auth_alg_text[0]) static const char *status_text[] = { "Successful", /* 0 */ "Unspecified failure", /* 1 */ "Reserved", /* 2 */ "Reserved", /* 3 */ "Reserved", /* 4 */ "Reserved", /* 5 */ "Reserved", /* 6 */ "Reserved", /* 7 */ "Reserved", /* 8 */ "Reserved", /* 9 */ "Cannot Support all requested capabilities in the Capability " "Information field", /* 10 */ "Reassociation denied due to inability to confirm that association " "exists", /* 11 */ "Association denied due to reason outside the scope of the " "standard", /* 12 */ "Responding station does not support the specified authentication " "algorithm ", /* 13 */ "Received an Authentication frame with authentication transaction " "sequence number out of expected sequence", /* 14 */ "Authentication rejected because of challenge failure", /* 15 */ "Authentication rejected due to timeout waiting for next frame in " "sequence", /* 16 */ "Association denied because AP is unable to handle additional" "associated stations", /* 17 */ "Association denied due to requesting station not supporting all of " "the data rates in BSSBasicRateSet parameter", /* 18 */ "Association denied due to requesting station not supporting " "short preamble operation", /* 19 */ "Association denied due to requesting station not supporting " "PBCC encoding", /* 20 */ "Association denied due to requesting station not supporting " "channel agility", /* 21 */ "Association request rejected because Spectrum Management " "capability is required", /* 22 */ 
"Association request rejected because the information in the " "Power Capability element is unacceptable", /* 23 */ "Association request rejected because the information in the " "Supported Channels element is unacceptable", /* 24 */ "Association denied due to requesting station not supporting " "short slot operation", /* 25 */ "Association denied due to requesting station not supporting " "DSSS-OFDM operation", /* 26 */ "Association denied because the requested STA does not support HT " "features", /* 27 */ "Reserved", /* 28 */ "Association denied because the requested STA does not support " "the PCO transition time required by the AP", /* 29 */ "Reserved", /* 30 */ "Reserved", /* 31 */ "Unspecified, QoS-related failure", /* 32 */ "Association denied due to QAP having insufficient bandwidth " "to handle another QSTA", /* 33 */ "Association denied due to excessive frame loss rates and/or " "poor conditions on current operating channel", /* 34 */ "Association (with QBSS) denied due to requesting station not " "supporting the QoS facility", /* 35 */ "Association denied due to requesting station not supporting " "Block Ack", /* 36 */ "The request has been declined", /* 37 */ "The request has not been successful as one or more parameters " "have invalid values", /* 38 */ "The TS has not been created because the request cannot be honored. " "Try again with the suggested changes to the TSPEC", /* 39 */ "Invalid Information Element", /* 40 */ "Group Cipher is not valid", /* 41 */ "Pairwise Cipher is not valid", /* 42 */ "AKMP is not valid", /* 43 */ "Unsupported RSN IE version", /* 44 */ "Invalid RSN IE Capabilities", /* 45 */ "Cipher suite is rejected per security policy", /* 46 */ "The TS has not been created. 
However, the HC may be capable of " "creating a TS, in response to a request, after the time indicated " "in the TS Delay element", /* 47 */ "Direct Link is not allowed in the BSS by policy", /* 48 */ "Destination STA is not present within this QBSS.", /* 49 */ "The Destination STA is not a QSTA.", /* 50 */ }; #define NUM_STATUSES (sizeof status_text / sizeof status_text[0]) static const char *reason_text[] = { "Reserved", /* 0 */ "Unspecified reason", /* 1 */ "Previous authentication no longer valid", /* 2 */ "Deauthenticated because sending station is leaving (or has left) " "IBSS or ESS", /* 3 */ "Disassociated due to inactivity", /* 4 */ "Disassociated because AP is unable to handle all currently " " associated stations", /* 5 */ "Class 2 frame received from nonauthenticated station", /* 6 */ "Class 3 frame received from nonassociated station", /* 7 */ "Disassociated because sending station is leaving " "(or has left) BSS", /* 8 */ "Station requesting (re)association is not authenticated with " "responding station", /* 9 */ "Disassociated because the information in the Power Capability " "element is unacceptable", /* 10 */ "Disassociated because the information in the SupportedChannels " "element is unacceptable", /* 11 */ "Invalid Information Element", /* 12 */ "Reserved", /* 13 */ "Michael MIC failure", /* 14 */ "4-Way Handshake timeout", /* 15 */ "Group key update timeout", /* 16 */ "Information element in 4-Way Handshake different from (Re)Association" "Request/Probe Response/Beacon", /* 17 */ "Group Cipher is not valid", /* 18 */ "AKMP is not valid", /* 20 */ "Unsupported RSN IE version", /* 21 */ "Invalid RSN IE Capabilities", /* 22 */ "IEEE 802.1X Authentication failed", /* 23 */ "Cipher suite is rejected per security policy", /* 24 */ "Reserved", /* 25 */ "Reserved", /* 26 */ "Reserved", /* 27 */ "Reserved", /* 28 */ "Reserved", /* 29 */ "Reserved", /* 30 */ "TS deleted because QoS AP lacks sufficient bandwidth for this " "QoS STA due to a change in BSS 
service characteristics or " "operational mode (e.g. an HT BSS change from 40 MHz channel " "to 20 MHz channel)", /* 31 */ "Disassociated for unspecified, QoS-related reason", /* 32 */ "Disassociated because QoS AP lacks sufficient bandwidth for this " "QoS STA", /* 33 */ "Disassociated because of excessive number of frames that need to be " "acknowledged, but are not acknowledged for AP transmissions " "and/or poor channel conditions", /* 34 */ "Disassociated because STA is transmitting outside the limits " "of its TXOPs", /* 35 */ "Requested from peer STA as the STA is leaving the BSS " "(or resetting)", /* 36 */ "Requested from peer STA as it does not want to use the " "mechanism", /* 37 */ "Requested from peer STA as the STA received frames using the " "mechanism for which a set up is required", /* 38 */ "Requested from peer STA due to time out", /* 39 */ "Reserved", /* 40 */ "Reserved", /* 41 */ "Reserved", /* 42 */ "Reserved", /* 43 */ "Reserved", /* 44 */ "Peer STA does not support the requested cipher suite", /* 45 */ "Association denied due to requesting STA not supporting HT " "features", /* 46 */ }; #define NUM_REASONS (sizeof reason_text / sizeof reason_text[0]) static int wep_print(netdissect_options *ndo, const u_char *p) { uint32_t iv; if (!ND_TTEST2(*p, IEEE802_11_IV_LEN + IEEE802_11_KID_LEN)) return 0; iv = EXTRACT_LE_32BITS(p); ND_PRINT((ndo, " IV:%3x Pad %x KeyID %x", IV_IV(iv), IV_PAD(iv), IV_KEYID(iv))); return 1; } static int parse_elements(netdissect_options *ndo, struct mgmt_body_t *pbody, const u_char *p, int offset, u_int length) { u_int elementlen; struct ssid_t ssid; struct challenge_t challenge; struct rates_t rates; struct ds_t ds; struct cf_t cf; struct tim_t tim; /* * We haven't seen any elements yet. */ pbody->challenge_present = 0; pbody->ssid_present = 0; pbody->rates_present = 0; pbody->ds_present = 0; pbody->cf_present = 0; pbody->tim_present = 0; while (length != 0) { /* Make sure we at least have the element ID and length. 
*/ if (!ND_TTEST2(*(p + offset), 2)) return 0; if (length < 2) return 0; elementlen = *(p + offset + 1); /* Make sure we have the entire element. */ if (!ND_TTEST2(*(p + offset + 2), elementlen)) return 0; if (length < elementlen + 2) return 0; switch (*(p + offset)) { case E_SSID: memcpy(&ssid, p + offset, 2); offset += 2; length -= 2; if (ssid.length != 0) { if (ssid.length > sizeof(ssid.ssid) - 1) return 0; if (!ND_TTEST2(*(p + offset), ssid.length)) return 0; if (length < ssid.length) return 0; memcpy(&ssid.ssid, p + offset, ssid.length); offset += ssid.length; length -= ssid.length; } ssid.ssid[ssid.length] = '\0'; /* * Present and not truncated. * * If we haven't already seen an SSID IE, * copy this one, otherwise ignore this one, * so we later report the first one we saw. */ if (!pbody->ssid_present) { pbody->ssid = ssid; pbody->ssid_present = 1; } break; case E_CHALLENGE: memcpy(&challenge, p + offset, 2); offset += 2; length -= 2; if (challenge.length != 0) { if (challenge.length > sizeof(challenge.text) - 1) return 0; if (!ND_TTEST2(*(p + offset), challenge.length)) return 0; if (length < challenge.length) return 0; memcpy(&challenge.text, p + offset, challenge.length); offset += challenge.length; length -= challenge.length; } challenge.text[challenge.length] = '\0'; /* * Present and not truncated. * * If we haven't already seen a challenge IE, * copy this one, otherwise ignore this one, * so we later report the first one we saw. */ if (!pbody->challenge_present) { pbody->challenge = challenge; pbody->challenge_present = 1; } break; case E_RATES: memcpy(&rates, p + offset, 2); offset += 2; length -= 2; if (rates.length != 0) { if (rates.length > sizeof rates.rate) return 0; if (!ND_TTEST2(*(p + offset), rates.length)) return 0; if (length < rates.length) return 0; memcpy(&rates.rate, p + offset, rates.length); offset += rates.length; length -= rates.length; } /* * Present and not truncated. 
* * If we haven't already seen a rates IE, * copy this one if it's not zero-length, * otherwise ignore this one, so we later * report the first one we saw. * * We ignore zero-length rates IEs as some * devices seem to put a zero-length rates * IE, followed by an SSID IE, followed by * a non-zero-length rates IE into frames, * even though IEEE Std 802.11-2007 doesn't * seem to indicate that a zero-length rates * IE is valid. */ if (!pbody->rates_present && rates.length != 0) { pbody->rates = rates; pbody->rates_present = 1; } break; case E_DS: memcpy(&ds, p + offset, 2); offset += 2; length -= 2; if (ds.length != 1) { offset += ds.length; length -= ds.length; break; } ds.channel = *(p + offset); offset += 1; length -= 1; /* * Present and not truncated. * * If we haven't already seen a DS IE, * copy this one, otherwise ignore this one, * so we later report the first one we saw. */ if (!pbody->ds_present) { pbody->ds = ds; pbody->ds_present = 1; } break; case E_CF: memcpy(&cf, p + offset, 2); offset += 2; length -= 2; if (cf.length != 6) { offset += cf.length; length -= cf.length; break; } memcpy(&cf.count, p + offset, 6); offset += 6; length -= 6; /* * Present and not truncated. * * If we haven't already seen a CF IE, * copy this one, otherwise ignore this one, * so we later report the first one we saw. */ if (!pbody->cf_present) { pbody->cf = cf; pbody->cf_present = 1; } break; case E_TIM: memcpy(&tim, p + offset, 2); offset += 2; length -= 2; if (tim.length <= 3) { offset += tim.length; length -= tim.length; break; } if (tim.length - 3 > (int)sizeof tim.bitmap) return 0; memcpy(&tim.count, p + offset, 3); offset += 3; length -= 3; memcpy(tim.bitmap, p + offset + 3, tim.length - 3); offset += tim.length - 3; length -= tim.length - 3; /* * Present and not truncated. * * If we haven't already seen a TIM IE, * copy this one, otherwise ignore this one, * so we later report the first one we saw. 
*/ if (!pbody->tim_present) { pbody->tim = tim; pbody->tim_present = 1; } break; default: #if 0 ND_PRINT((ndo, "(1) unhandled element_id (%d) ", *(p + offset))); #endif offset += 2 + elementlen; length -= 2 + elementlen; break; } } /* No problems found. */ return 1; } /********************************************************************************* * Print Handle functions for the management frame types *********************************************************************************/ static int handle_beacon(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN + IEEE802_11_CAPINFO_LEN)) return 0; if (length < IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN + IEEE802_11_CAPINFO_LEN) return 0; memcpy(&pbody.timestamp, p, IEEE802_11_TSTAMP_LEN); offset += IEEE802_11_TSTAMP_LEN; length -= IEEE802_11_TSTAMP_LEN; pbody.beacon_interval = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_BCNINT_LEN; length -= IEEE802_11_BCNINT_LEN; pbody.capability_info = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_CAPINFO_LEN; length -= IEEE802_11_CAPINFO_LEN; ret = parse_elements(ndo, &pbody, p, offset, length); PRINT_SSID(pbody); PRINT_RATES(pbody); ND_PRINT((ndo, " %s", CAPABILITY_ESS(pbody.capability_info) ? 
"ESS" : "IBSS")); PRINT_DS_CHANNEL(pbody); return ret; } static int handle_assoc_request(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN)) return 0; if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN) return 0; pbody.capability_info = EXTRACT_LE_16BITS(p); offset += IEEE802_11_CAPINFO_LEN; length -= IEEE802_11_CAPINFO_LEN; pbody.listen_interval = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_LISTENINT_LEN; length -= IEEE802_11_LISTENINT_LEN; ret = parse_elements(ndo, &pbody, p, offset, length); PRINT_SSID(pbody); PRINT_RATES(pbody); return ret; } static int handle_assoc_response(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_STATUS_LEN + IEEE802_11_AID_LEN)) return 0; if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_STATUS_LEN + IEEE802_11_AID_LEN) return 0; pbody.capability_info = EXTRACT_LE_16BITS(p); offset += IEEE802_11_CAPINFO_LEN; length -= IEEE802_11_CAPINFO_LEN; pbody.status_code = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_STATUS_LEN; length -= IEEE802_11_STATUS_LEN; pbody.aid = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_AID_LEN; length -= IEEE802_11_AID_LEN; ret = parse_elements(ndo, &pbody, p, offset, length); ND_PRINT((ndo, " AID(%x) :%s: %s", ((uint16_t)(pbody.aid << 2 )) >> 2 , CAPABILITY_PRIVACY(pbody.capability_info) ? " PRIVACY " : "", (pbody.status_code < NUM_STATUSES ? 
status_text[pbody.status_code] : "n/a"))); return ret; } static int handle_reassoc_request(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN + IEEE802_11_AP_LEN)) return 0; if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN + IEEE802_11_AP_LEN) return 0; pbody.capability_info = EXTRACT_LE_16BITS(p); offset += IEEE802_11_CAPINFO_LEN; length -= IEEE802_11_CAPINFO_LEN; pbody.listen_interval = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_LISTENINT_LEN; length -= IEEE802_11_LISTENINT_LEN; memcpy(&pbody.ap, p+offset, IEEE802_11_AP_LEN); offset += IEEE802_11_AP_LEN; length -= IEEE802_11_AP_LEN; ret = parse_elements(ndo, &pbody, p, offset, length); PRINT_SSID(pbody); ND_PRINT((ndo, " AP : %s", etheraddr_string(ndo, pbody.ap ))); return ret; } static int handle_reassoc_response(netdissect_options *ndo, const u_char *p, u_int length) { /* Same as a Association Reponse */ return handle_assoc_response(ndo, p, length); } static int handle_probe_request(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); ret = parse_elements(ndo, &pbody, p, offset, length); PRINT_SSID(pbody); PRINT_RATES(pbody); return ret; } static int handle_probe_response(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN + IEEE802_11_CAPINFO_LEN)) return 0; if (length < IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN + IEEE802_11_CAPINFO_LEN) return 0; memcpy(&pbody.timestamp, p, IEEE802_11_TSTAMP_LEN); offset += IEEE802_11_TSTAMP_LEN; length -= IEEE802_11_TSTAMP_LEN; pbody.beacon_interval = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_BCNINT_LEN; length -= IEEE802_11_BCNINT_LEN; 
pbody.capability_info = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_CAPINFO_LEN; length -= IEEE802_11_CAPINFO_LEN; ret = parse_elements(ndo, &pbody, p, offset, length); PRINT_SSID(pbody); PRINT_RATES(pbody); PRINT_DS_CHANNEL(pbody); return ret; } static int handle_atim(void) { /* the frame body for ATIM is null. */ return 1; } static int handle_disassoc(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_REASON_LEN)) return 0; if (length < IEEE802_11_REASON_LEN) return 0; pbody.reason_code = EXTRACT_LE_16BITS(p); ND_PRINT((ndo, ": %s", (pbody.reason_code < NUM_REASONS) ? reason_text[pbody.reason_code] : "Reserved")); return 1; } static int handle_auth(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, 6)) return 0; if (length < 6) return 0; pbody.auth_alg = EXTRACT_LE_16BITS(p); offset += 2; length -= 2; pbody.auth_trans_seq_num = EXTRACT_LE_16BITS(p + offset); offset += 2; length -= 2; pbody.status_code = EXTRACT_LE_16BITS(p + offset); offset += 2; length -= 2; ret = parse_elements(ndo, &pbody, p, offset, length); if ((pbody.auth_alg == 1) && ((pbody.auth_trans_seq_num == 2) || (pbody.auth_trans_seq_num == 3))) { ND_PRINT((ndo, " (%s)-%x [Challenge Text] %s", (pbody.auth_alg < NUM_AUTH_ALGS) ? auth_alg_text[pbody.auth_alg] : "Reserved", pbody.auth_trans_seq_num, ((pbody.auth_trans_seq_num % 2) ? ((pbody.status_code < NUM_STATUSES) ? status_text[pbody.status_code] : "n/a") : ""))); return ret; } ND_PRINT((ndo, " (%s)-%x: %s", (pbody.auth_alg < NUM_AUTH_ALGS) ? auth_alg_text[pbody.auth_alg] : "Reserved", pbody.auth_trans_seq_num, (pbody.auth_trans_seq_num % 2) ? ((pbody.status_code < NUM_STATUSES) ? 
status_text[pbody.status_code] : "n/a") : "")); return ret; } static int handle_deauth(netdissect_options *ndo, const uint8_t *src, const u_char *p, u_int length) { struct mgmt_body_t pbody; const char *reason = NULL; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_REASON_LEN)) return 0; if (length < IEEE802_11_REASON_LEN) return 0; pbody.reason_code = EXTRACT_LE_16BITS(p); reason = (pbody.reason_code < NUM_REASONS) ? reason_text[pbody.reason_code] : "Reserved"; if (ndo->ndo_eflag) { ND_PRINT((ndo, ": %s", reason)); } else { ND_PRINT((ndo, " (%s): %s", etheraddr_string(ndo, src), reason)); } return 1; } #define PRINT_HT_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "TxChWidth")) : \ (v) == 1 ? ND_PRINT((ndo, "MIMOPwrSave")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_BA_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "ADDBA Request")) : \ (v) == 1 ? ND_PRINT((ndo, "ADDBA Response")) : \ (v) == 2 ? ND_PRINT((ndo, "DELBA")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESHLINK_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Request")) : \ (v) == 1 ? ND_PRINT((ndo, "Report")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESHPEERING_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Open")) : \ (v) == 1 ? ND_PRINT((ndo, "Confirm")) : \ (v) == 2 ? ND_PRINT((ndo, "Close")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESHPATH_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Request")) : \ (v) == 1 ? ND_PRINT((ndo, "Report")) : \ (v) == 2 ? ND_PRINT((ndo, "Error")) : \ (v) == 3 ? ND_PRINT((ndo, "RootAnnouncement")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESH_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "MeshLink")) : \ (v) == 1 ? ND_PRINT((ndo, "HWMP")) : \ (v) == 2 ? ND_PRINT((ndo, "Gate Announcement")) : \ (v) == 3 ? ND_PRINT((ndo, "Congestion Control")) : \ (v) == 4 ? ND_PRINT((ndo, "MCCA Setup Request")) : \ (v) == 5 ? ND_PRINT((ndo, "MCCA Setup Reply")) : \ (v) == 6 ? ND_PRINT((ndo, "MCCA Advertisement Request")) : \ (v) == 7 ? 
ND_PRINT((ndo, "MCCA Advertisement")) : \ (v) == 8 ? ND_PRINT((ndo, "MCCA Teardown")) : \ (v) == 9 ? ND_PRINT((ndo, "TBTT Adjustment Request")) : \ (v) == 10 ? ND_PRINT((ndo, "TBTT Adjustment Response")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MULTIHOP_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Proxy Update")) : \ (v) == 1 ? ND_PRINT((ndo, "Proxy Update Confirmation")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_SELFPROT_ACTION(v) (\ (v) == 1 ? ND_PRINT((ndo, "Peering Open")) : \ (v) == 2 ? ND_PRINT((ndo, "Peering Confirm")) : \ (v) == 3 ? ND_PRINT((ndo, "Peering Close")) : \ (v) == 4 ? ND_PRINT((ndo, "Group Key Inform")) : \ (v) == 5 ? ND_PRINT((ndo, "Group Key Acknowledge")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) static int handle_action(netdissect_options *ndo, const uint8_t *src, const u_char *p, u_int length) { if (!ND_TTEST2(*p, 2)) return 0; if (length < 2) return 0; if (ndo->ndo_eflag) { ND_PRINT((ndo, ": ")); } else { ND_PRINT((ndo, " (%s): ", etheraddr_string(ndo, src))); } switch (p[0]) { case 0: ND_PRINT((ndo, "Spectrum Management Act#%d", p[1])); break; case 1: ND_PRINT((ndo, "QoS Act#%d", p[1])); break; case 2: ND_PRINT((ndo, "DLS Act#%d", p[1])); break; case 3: ND_PRINT((ndo, "BA ")); PRINT_BA_ACTION(p[1]); break; case 7: ND_PRINT((ndo, "HT ")); PRINT_HT_ACTION(p[1]); break; case 13: ND_PRINT((ndo, "MeshAction ")); PRINT_MESH_ACTION(p[1]); break; case 14: ND_PRINT((ndo, "MultiohopAction ")); PRINT_MULTIHOP_ACTION(p[1]); break; case 15: ND_PRINT((ndo, "SelfprotectAction ")); PRINT_SELFPROT_ACTION(p[1]); break; case 127: ND_PRINT((ndo, "Vendor Act#%d", p[1])); break; default: ND_PRINT((ndo, "Reserved(%d) Act#%d", p[0], p[1])); break; } return 1; } /********************************************************************************* * Print Body funcs *********************************************************************************/ static int mgmt_body_print(netdissect_options *ndo, uint16_t fc, const uint8_t *src, const u_char *p, 
u_int length) { ND_PRINT((ndo, "%s", tok2str(st_str, "Unhandled Management subtype(%x)", FC_SUBTYPE(fc)))); /* There may be a problem w/ AP not having this bit set */ if (FC_PROTECTED(fc)) return wep_print(ndo, p); switch (FC_SUBTYPE(fc)) { case ST_ASSOC_REQUEST: return handle_assoc_request(ndo, p, length); case ST_ASSOC_RESPONSE: return handle_assoc_response(ndo, p, length); case ST_REASSOC_REQUEST: return handle_reassoc_request(ndo, p, length); case ST_REASSOC_RESPONSE: return handle_reassoc_response(ndo, p, length); case ST_PROBE_REQUEST: return handle_probe_request(ndo, p, length); case ST_PROBE_RESPONSE: return handle_probe_response(ndo, p, length); case ST_BEACON: return handle_beacon(ndo, p, length); case ST_ATIM: return handle_atim(); case ST_DISASSOC: return handle_disassoc(ndo, p, length); case ST_AUTH: return handle_auth(ndo, p, length); case ST_DEAUTH: return handle_deauth(ndo, src, p, length); case ST_ACTION: return handle_action(ndo, src, p, length); default: return 1; } } /********************************************************************************* * Handles printing all the control frame types *********************************************************************************/ static int ctrl_body_print(netdissect_options *ndo, uint16_t fc, const u_char *p) { ND_PRINT((ndo, "%s", tok2str(ctrl_str, "Unknown Ctrl Subtype", FC_SUBTYPE(fc)))); switch (FC_SUBTYPE(fc)) { case CTRL_CONTROL_WRAPPER: /* XXX - requires special handling */ break; case CTRL_BAR: if (!ND_TTEST2(*p, CTRL_BAR_HDRLEN)) return 0; if (!ndo->ndo_eflag) ND_PRINT((ndo, " RA:%s TA:%s CTL(%x) SEQ(%u) ", etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ra), etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ta), EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->ctl)), EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->seq)))); break; case CTRL_BA: if (!ND_TTEST2(*p, CTRL_BA_HDRLEN)) return 0; if (!ndo->ndo_eflag) ND_PRINT((ndo, " RA:%s ", etheraddr_string(ndo, 
((const struct ctrl_ba_hdr_t *)p)->ra))); break; case CTRL_PS_POLL: if (!ND_TTEST2(*p, CTRL_PS_POLL_HDRLEN)) return 0; ND_PRINT((ndo, " AID(%x)", EXTRACT_LE_16BITS(&(((const struct ctrl_ps_poll_hdr_t *)p)->aid)))); break; case CTRL_RTS: if (!ND_TTEST2(*p, CTRL_RTS_HDRLEN)) return 0; if (!ndo->ndo_eflag) ND_PRINT((ndo, " TA:%s ", etheraddr_string(ndo, ((const struct ctrl_rts_hdr_t *)p)->ta))); break; case CTRL_CTS: if (!ND_TTEST2(*p, CTRL_CTS_HDRLEN)) return 0; if (!ndo->ndo_eflag) ND_PRINT((ndo, " RA:%s ", etheraddr_string(ndo, ((const struct ctrl_cts_hdr_t *)p)->ra))); break; case CTRL_ACK: if (!ND_TTEST2(*p, CTRL_ACK_HDRLEN)) return 0; if (!ndo->ndo_eflag) ND_PRINT((ndo, " RA:%s ", etheraddr_string(ndo, ((const struct ctrl_ack_hdr_t *)p)->ra))); break; case CTRL_CF_END: if (!ND_TTEST2(*p, CTRL_END_HDRLEN)) return 0; if (!ndo->ndo_eflag) ND_PRINT((ndo, " RA:%s ", etheraddr_string(ndo, ((const struct ctrl_end_hdr_t *)p)->ra))); break; case CTRL_END_ACK: if (!ND_TTEST2(*p, CTRL_END_ACK_HDRLEN)) return 0; if (!ndo->ndo_eflag) ND_PRINT((ndo, " RA:%s ", etheraddr_string(ndo, ((const struct ctrl_end_ack_hdr_t *)p)->ra))); break; } return 1; } /* * Data Frame - Address field contents * * To Ds | From DS | Addr 1 | Addr 2 | Addr 3 | Addr 4 * 0 | 0 | DA | SA | BSSID | n/a * 0 | 1 | DA | BSSID | SA | n/a * 1 | 0 | BSSID | SA | DA | n/a * 1 | 1 | RA | TA | DA | SA */ /* * Function to get source and destination MAC addresses for a data frame. 
*/

/*
 * Set *srcp and *dstp to the source and destination MAC addresses of a
 * data frame, choosing among the four header address fields according to
 * the To DS/From DS bits in the frame control word (see the table above).
 */
static void
get_data_src_dst_mac(uint16_t fc, const u_char *p, const uint8_t **srcp, const uint8_t **dstp)
{
#define ADDR1  (p + 4)
#define ADDR2  (p + 10)
#define ADDR3  (p + 16)
#define ADDR4  (p + 24)
	if (!FC_TO_DS(fc)) {
		if (!FC_FROM_DS(fc)) {
			/* not To DS and not From DS */
			*srcp = ADDR2;
			*dstp = ADDR1;
		} else {
			/* not To DS and From DS */
			*srcp = ADDR3;
			*dstp = ADDR1;
		}
	} else {
		if (!FC_FROM_DS(fc)) {
			/* To DS and not From DS */
			*srcp = ADDR2;
			*dstp = ADDR3;
		} else {
			/* To DS and From DS */
			*srcp = ADDR4;
			*dstp = ADDR3;
		}
	}
#undef ADDR1
#undef ADDR2
#undef ADDR3
#undef ADDR4
}

/*
 * Set *srcp/*dstp (each optional; may be NULL) to the SA/DA fields of a
 * management frame header.
 */
static void
get_mgmt_src_dst_mac(const u_char *p, const uint8_t **srcp, const uint8_t **dstp)
{
	const struct mgmt_header_t *hp = (const struct mgmt_header_t *) p;

	if (srcp != NULL)
		*srcp = hp->sa;
	if (dstp != NULL)
		*dstp = hp->da;
}

/*
 * Print Header funcs
 */

/*
 * Print the CF/QoS flags and address fields of a data frame header;
 * which address is DA/SA/BSSID/RA/TA depends on the To DS/From DS bits.
 */
static void
data_header_print(netdissect_options *ndo, uint16_t fc, const u_char *p)
{
	u_int subtype = FC_SUBTYPE(fc);

	if (DATA_FRAME_IS_CF_ACK(subtype) || DATA_FRAME_IS_CF_POLL(subtype) ||
	    DATA_FRAME_IS_QOS(subtype)) {
		ND_PRINT((ndo, "CF "));
		if (DATA_FRAME_IS_CF_ACK(subtype)) {
			if (DATA_FRAME_IS_CF_POLL(subtype))
				ND_PRINT((ndo, "Ack/Poll"));
			else
				ND_PRINT((ndo, "Ack"));
		} else {
			if (DATA_FRAME_IS_CF_POLL(subtype))
				ND_PRINT((ndo, "Poll"));
		}
		if (DATA_FRAME_IS_QOS(subtype))
			ND_PRINT((ndo, "+QoS"));
		ND_PRINT((ndo, " "));
	}

#define ADDR1  (p + 4)
#define ADDR2  (p + 10)
#define ADDR3  (p + 16)
#define ADDR4  (p + 24)
	if (!FC_TO_DS(fc) && !FC_FROM_DS(fc)) {
		ND_PRINT((ndo, "DA:%s SA:%s BSSID:%s ",
		    etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2),
		    etheraddr_string(ndo, ADDR3)));
	} else if (!FC_TO_DS(fc) && FC_FROM_DS(fc)) {
		ND_PRINT((ndo, "DA:%s BSSID:%s SA:%s ",
		    etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2),
		    etheraddr_string(ndo, ADDR3)));
	} else if (FC_TO_DS(fc) && !FC_FROM_DS(fc)) {
		ND_PRINT((ndo, "BSSID:%s SA:%s DA:%s ",
		    etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2),
		    etheraddr_string(ndo, ADDR3)));
	}
else if (FC_TO_DS(fc) && FC_FROM_DS(fc)) { ND_PRINT((ndo, "RA:%s TA:%s DA:%s SA:%s ", etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2), etheraddr_string(ndo, ADDR3), etheraddr_string(ndo, ADDR4))); } #undef ADDR1 #undef ADDR2 #undef ADDR3 #undef ADDR4 } static void mgmt_header_print(netdissect_options *ndo, const u_char *p) { const struct mgmt_header_t *hp = (const struct mgmt_header_t *) p; ND_PRINT((ndo, "BSSID:%s DA:%s SA:%s ", etheraddr_string(ndo, (hp)->bssid), etheraddr_string(ndo, (hp)->da), etheraddr_string(ndo, (hp)->sa))); } static void ctrl_header_print(netdissect_options *ndo, uint16_t fc, const u_char *p) { switch (FC_SUBTYPE(fc)) { case CTRL_BAR: ND_PRINT((ndo, " RA:%s TA:%s CTL(%x) SEQ(%u) ", etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ra), etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ta), EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->ctl)), EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->seq)))); break; case CTRL_BA: ND_PRINT((ndo, "RA:%s ", etheraddr_string(ndo, ((const struct ctrl_ba_hdr_t *)p)->ra))); break; case CTRL_PS_POLL: ND_PRINT((ndo, "BSSID:%s TA:%s ", etheraddr_string(ndo, ((const struct ctrl_ps_poll_hdr_t *)p)->bssid), etheraddr_string(ndo, ((const struct ctrl_ps_poll_hdr_t *)p)->ta))); break; case CTRL_RTS: ND_PRINT((ndo, "RA:%s TA:%s ", etheraddr_string(ndo, ((const struct ctrl_rts_hdr_t *)p)->ra), etheraddr_string(ndo, ((const struct ctrl_rts_hdr_t *)p)->ta))); break; case CTRL_CTS: ND_PRINT((ndo, "RA:%s ", etheraddr_string(ndo, ((const struct ctrl_cts_hdr_t *)p)->ra))); break; case CTRL_ACK: ND_PRINT((ndo, "RA:%s ", etheraddr_string(ndo, ((const struct ctrl_ack_hdr_t *)p)->ra))); break; case CTRL_CF_END: ND_PRINT((ndo, "RA:%s BSSID:%s ", etheraddr_string(ndo, ((const struct ctrl_end_hdr_t *)p)->ra), etheraddr_string(ndo, ((const struct ctrl_end_hdr_t *)p)->bssid))); break; case CTRL_END_ACK: ND_PRINT((ndo, "RA:%s BSSID:%s ", etheraddr_string(ndo, ((const struct 
ctrl_end_ack_hdr_t *)p)->ra), etheraddr_string(ndo, ((const struct ctrl_end_ack_hdr_t *)p)->bssid))); break; default: /* We shouldn't get here - we should already have quit */ break; } } static int extract_header_length(netdissect_options *ndo, uint16_t fc) { int len; switch (FC_TYPE(fc)) { case T_MGMT: return MGMT_HDRLEN; case T_CTRL: switch (FC_SUBTYPE(fc)) { case CTRL_CONTROL_WRAPPER: return CTRL_CONTROL_WRAPPER_HDRLEN; case CTRL_BAR: return CTRL_BAR_HDRLEN; case CTRL_BA: return CTRL_BA_HDRLEN; case CTRL_PS_POLL: return CTRL_PS_POLL_HDRLEN; case CTRL_RTS: return CTRL_RTS_HDRLEN; case CTRL_CTS: return CTRL_CTS_HDRLEN; case CTRL_ACK: return CTRL_ACK_HDRLEN; case CTRL_CF_END: return CTRL_END_HDRLEN; case CTRL_END_ACK: return CTRL_END_ACK_HDRLEN; default: ND_PRINT((ndo, "unknown 802.11 ctrl frame subtype (%d)", FC_SUBTYPE(fc))); return 0; } case T_DATA: len = (FC_TO_DS(fc) && FC_FROM_DS(fc)) ? 30 : 24; if (DATA_FRAME_IS_QOS(FC_SUBTYPE(fc))) len += 2; return len; default: ND_PRINT((ndo, "unknown 802.11 frame type (%d)", FC_TYPE(fc))); return 0; } } static int extract_mesh_header_length(const u_char *p) { return (p[0] &~ 3) ? 0 : 6*(1 + (p[0] & 3)); } /* * Print the 802.11 MAC header. 
*/ static void ieee_802_11_hdr_print(netdissect_options *ndo, uint16_t fc, const u_char *p, u_int hdrlen, u_int meshdrlen) { if (ndo->ndo_vflag) { if (FC_MORE_DATA(fc)) ND_PRINT((ndo, "More Data ")); if (FC_MORE_FLAG(fc)) ND_PRINT((ndo, "More Fragments ")); if (FC_POWER_MGMT(fc)) ND_PRINT((ndo, "Pwr Mgmt ")); if (FC_RETRY(fc)) ND_PRINT((ndo, "Retry ")); if (FC_ORDER(fc)) ND_PRINT((ndo, "Strictly Ordered ")); if (FC_PROTECTED(fc)) ND_PRINT((ndo, "Protected ")); if (FC_TYPE(fc) != T_CTRL || FC_SUBTYPE(fc) != CTRL_PS_POLL) ND_PRINT((ndo, "%dus ", EXTRACT_LE_16BITS( &((const struct mgmt_header_t *)p)->duration))); } if (meshdrlen != 0) { const struct meshcntl_t *mc = (const struct meshcntl_t *)&p[hdrlen - meshdrlen]; int ae = mc->flags & 3; ND_PRINT((ndo, "MeshData (AE %d TTL %u seq %u", ae, mc->ttl, EXTRACT_LE_32BITS(mc->seq))); if (ae > 0) ND_PRINT((ndo, " A4:%s", etheraddr_string(ndo, mc->addr4))); if (ae > 1) ND_PRINT((ndo, " A5:%s", etheraddr_string(ndo, mc->addr5))); if (ae > 2) ND_PRINT((ndo, " A6:%s", etheraddr_string(ndo, mc->addr6))); ND_PRINT((ndo, ") ")); } switch (FC_TYPE(fc)) { case T_MGMT: mgmt_header_print(ndo, p); break; case T_CTRL: ctrl_header_print(ndo, fc, p); break; case T_DATA: data_header_print(ndo, fc, p); break; default: break; } } #ifndef roundup2 #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #endif static const char tstr[] = "[|802.11]"; static u_int ieee802_11_print(netdissect_options *ndo, const u_char *p, u_int length, u_int orig_caplen, int pad, u_int fcslen) { uint16_t fc; u_int caplen, hdrlen, meshdrlen; struct lladdr_info src, dst; int llc_hdrlen; caplen = orig_caplen; /* Remove FCS, if present */ if (length < fcslen) { ND_PRINT((ndo, "%s", tstr)); return caplen; } length -= fcslen; if (caplen > length) { /* Amount of FCS in actual packet data, if any */ fcslen = caplen - length; caplen -= fcslen; ndo->ndo_snapend -= fcslen; } if (caplen < IEEE802_11_FC_LEN) { ND_PRINT((ndo, "%s", tstr)); return 
orig_caplen; } fc = EXTRACT_LE_16BITS(p); hdrlen = extract_header_length(ndo, fc); if (hdrlen == 0) { /* Unknown frame type or control frame subtype; quit. */ return (0); } if (pad) hdrlen = roundup2(hdrlen, 4); if (ndo->ndo_Hflag && FC_TYPE(fc) == T_DATA && DATA_FRAME_IS_QOS(FC_SUBTYPE(fc))) { meshdrlen = extract_mesh_header_length(p+hdrlen); hdrlen += meshdrlen; } else meshdrlen = 0; if (caplen < hdrlen) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } if (ndo->ndo_eflag) ieee_802_11_hdr_print(ndo, fc, p, hdrlen, meshdrlen); /* * Go past the 802.11 header. */ length -= hdrlen; caplen -= hdrlen; p += hdrlen; src.addr_string = etheraddr_string; dst.addr_string = etheraddr_string; switch (FC_TYPE(fc)) { case T_MGMT: get_mgmt_src_dst_mac(p - hdrlen, &src.addr, &dst.addr); if (!mgmt_body_print(ndo, fc, src.addr, p, length)) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } break; case T_CTRL: if (!ctrl_body_print(ndo, fc, p - hdrlen)) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } break; case T_DATA: if (DATA_FRAME_IS_NULL(FC_SUBTYPE(fc))) return hdrlen; /* no-data frame */ /* There may be a problem w/ AP not having this bit set */ if (FC_PROTECTED(fc)) { ND_PRINT((ndo, "Data")); if (!wep_print(ndo, p)) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } } else { get_data_src_dst_mac(fc, p - hdrlen, &src.addr, &dst.addr); llc_hdrlen = llc_print(ndo, p, length, caplen, &src, &dst); if (llc_hdrlen < 0) { /* * Some kinds of LLC packet we cannot * handle intelligently */ if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); llc_hdrlen = -llc_hdrlen; } hdrlen += llc_hdrlen; } break; default: /* We shouldn't get here - we should already have quit */ break; } return hdrlen; } /* * This is the top level routine of the printer. 'p' points * to the 802.11 header of the packet, 'h->ts' is the timestamp, * 'h->len' is the length of the packet off the wire, and 'h->caplen' * is the number of bytes actually captured. 
*/ u_int ieee802_11_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { return ieee802_11_print(ndo, p, h->len, h->caplen, 0, 0); } /* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.5 2005/01/22 20:12:05 sam Exp $ */ /* NetBSD: ieee802_11_radio.h,v 1.2 2006/02/26 03:04:03 dyoung Exp */ /*- * Copyright (c) 2003, 2004 David Young. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of David Young may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ /* A generic radio capture format is desirable. It must be * rigidly defined (e.g., units for fields should be given), * and easily extensible. * * The following is an extensible radio capture format. 
It is * based on a bitmap indicating which fields are present. * * I am trying to describe precisely what the application programmer * should expect in the following, and for that reason I tell the * units and origin of each measurement (where it applies), or else I * use sufficiently weaselly language ("is a monotonically nondecreasing * function of...") that I cannot set false expectations for lawyerly * readers. */ /* * The radio capture header precedes the 802.11 header. * * Note well: all radiotap fields are little-endian. */ struct ieee80211_radiotap_header { uint8_t it_version; /* Version 0. Only increases * for drastic changes, * introduction of compatible * new fields does not count. */ uint8_t it_pad; uint16_t it_len; /* length of the whole * header in bytes, including * it_version, it_pad, * it_len, and data fields. */ uint32_t it_present; /* A bitmap telling which * fields are present. Set bit 31 * (0x80000000) to extend the * bitmap by another 32 bits. * Additional extensions are made * by setting bit 31. */ }; /* Name Data type Units * ---- --------- ----- * * IEEE80211_RADIOTAP_TSFT uint64_t microseconds * * Value in microseconds of the MAC's 64-bit 802.11 Time * Synchronization Function timer when the first bit of the * MPDU arrived at the MAC. For received frames, only. * * IEEE80211_RADIOTAP_CHANNEL 2 x uint16_t MHz, bitmap * * Tx/Rx frequency in MHz, followed by flags (see below). * Note that IEEE80211_RADIOTAP_XCHANNEL must be used to * represent an HT channel as there is not enough room in * the flags word. * * IEEE80211_RADIOTAP_FHSS uint16_t see below * * For frequency-hopping radios, the hop set (first byte) * and pattern (second byte). * * IEEE80211_RADIOTAP_RATE uint8_t 500kb/s or index * * Tx/Rx data rate. If bit 0x80 is set then it represents an * an MCS index and not an IEEE rate. * * IEEE80211_RADIOTAP_DBM_ANTSIGNAL int8_t decibels from * one milliwatt (dBm) * * RF signal power at the antenna, decibel difference from * one milliwatt. 
* * IEEE80211_RADIOTAP_DBM_ANTNOISE int8_t decibels from * one milliwatt (dBm) * * RF noise power at the antenna, decibel difference from one * milliwatt. * * IEEE80211_RADIOTAP_DB_ANTSIGNAL uint8_t decibel (dB) * * RF signal power at the antenna, decibel difference from an * arbitrary, fixed reference. * * IEEE80211_RADIOTAP_DB_ANTNOISE uint8_t decibel (dB) * * RF noise power at the antenna, decibel difference from an * arbitrary, fixed reference point. * * IEEE80211_RADIOTAP_LOCK_QUALITY uint16_t unitless * * Quality of Barker code lock. Unitless. Monotonically * nondecreasing with "better" lock strength. Called "Signal * Quality" in datasheets. (Is there a standard way to measure * this?) * * IEEE80211_RADIOTAP_TX_ATTENUATION uint16_t unitless * * Transmit power expressed as unitless distance from max * power set at factory calibration. 0 is max power. * Monotonically nondecreasing with lower power levels. * * IEEE80211_RADIOTAP_DB_TX_ATTENUATION uint16_t decibels (dB) * * Transmit power expressed as decibel distance from max power * set at factory calibration. 0 is max power. Monotonically * nondecreasing with lower power levels. * * IEEE80211_RADIOTAP_DBM_TX_POWER int8_t decibels from * one milliwatt (dBm) * * Transmit power expressed as dBm (decibels from a 1 milliwatt * reference). This is the absolute power level measured at * the antenna port. * * IEEE80211_RADIOTAP_FLAGS uint8_t bitmap * * Properties of transmitted and received frames. See flags * defined below. * * IEEE80211_RADIOTAP_ANTENNA uint8_t antenna index * * Unitless indication of the Rx/Tx antenna for this packet. * The first antenna is antenna 0. * * IEEE80211_RADIOTAP_RX_FLAGS uint16_t bitmap * * Properties of received frames. See flags defined below. 
* * IEEE80211_RADIOTAP_XCHANNEL uint32_t bitmap * uint16_t MHz * uint8_t channel number * uint8_t .5 dBm * * Extended channel specification: flags (see below) followed by * frequency in MHz, the corresponding IEEE channel number, and * finally the maximum regulatory transmit power cap in .5 dBm * units. This property supersedes IEEE80211_RADIOTAP_CHANNEL * and only one of the two should be present. * * IEEE80211_RADIOTAP_MCS uint8_t known * uint8_t flags * uint8_t mcs * * Bitset indicating which fields have known values, followed * by bitset of flag values, followed by the MCS rate index as * in IEEE 802.11n. * * * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless * * Contains the AMPDU information for the subframe. * * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 * * Contains VHT information about this frame. * * IEEE80211_RADIOTAP_VENDOR_NAMESPACE * uint8_t OUI[3] * uint8_t subspace * uint16_t length * * The Vendor Namespace Field contains three sub-fields. The first * sub-field is 3 bytes long. It contains the vendor's IEEE 802 * Organizationally Unique Identifier (OUI). The fourth byte is a * vendor-specific "namespace selector." 
* */ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_TSFT = 0, IEEE80211_RADIOTAP_FLAGS = 1, IEEE80211_RADIOTAP_RATE = 2, IEEE80211_RADIOTAP_CHANNEL = 3, IEEE80211_RADIOTAP_FHSS = 4, IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5, IEEE80211_RADIOTAP_DBM_ANTNOISE = 6, IEEE80211_RADIOTAP_LOCK_QUALITY = 7, IEEE80211_RADIOTAP_TX_ATTENUATION = 8, IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9, IEEE80211_RADIOTAP_DBM_TX_POWER = 10, IEEE80211_RADIOTAP_ANTENNA = 11, IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12, IEEE80211_RADIOTAP_DB_ANTNOISE = 13, IEEE80211_RADIOTAP_RX_FLAGS = 14, /* NB: gap for netbsd definitions */ IEEE80211_RADIOTAP_XCHANNEL = 18, IEEE80211_RADIOTAP_MCS = 19, IEEE80211_RADIOTAP_AMPDU_STATUS = 20, IEEE80211_RADIOTAP_VHT = 21, IEEE80211_RADIOTAP_NAMESPACE = 29, IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30, IEEE80211_RADIOTAP_EXT = 31 }; /* channel attributes */ #define IEEE80211_CHAN_TURBO 0x00010 /* Turbo channel */ #define IEEE80211_CHAN_CCK 0x00020 /* CCK channel */ #define IEEE80211_CHAN_OFDM 0x00040 /* OFDM channel */ #define IEEE80211_CHAN_2GHZ 0x00080 /* 2 GHz spectrum channel. 
*/ #define IEEE80211_CHAN_5GHZ 0x00100 /* 5 GHz spectrum channel */ #define IEEE80211_CHAN_PASSIVE 0x00200 /* Only passive scan allowed */ #define IEEE80211_CHAN_DYN 0x00400 /* Dynamic CCK-OFDM channel */ #define IEEE80211_CHAN_GFSK 0x00800 /* GFSK channel (FHSS PHY) */ #define IEEE80211_CHAN_GSM 0x01000 /* 900 MHz spectrum channel */ #define IEEE80211_CHAN_STURBO 0x02000 /* 11a static turbo channel only */ #define IEEE80211_CHAN_HALF 0x04000 /* Half rate channel */ #define IEEE80211_CHAN_QUARTER 0x08000 /* Quarter rate channel */ #define IEEE80211_CHAN_HT20 0x10000 /* HT 20 channel */ #define IEEE80211_CHAN_HT40U 0x20000 /* HT 40 channel w/ ext above */ #define IEEE80211_CHAN_HT40D 0x40000 /* HT 40 channel w/ ext below */ /* Useful combinations of channel characteristics, borrowed from Ethereal */ #define IEEE80211_CHAN_A \ (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM) #define IEEE80211_CHAN_B \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK) #define IEEE80211_CHAN_G \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN) #define IEEE80211_CHAN_TA \ (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO) #define IEEE80211_CHAN_TG \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN | IEEE80211_CHAN_TURBO) /* For IEEE80211_RADIOTAP_FLAGS */ #define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received * during CFP */ #define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received * with short * preamble */ #define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received * with WEP encryption */ #define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received * with fragmentation */ #define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */ #define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between * 802.11 header and payload * (to 32-bit boundary) */ #define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* does not pass FCS check */ /* For IEEE80211_RADIOTAP_RX_FLAGS */ #define IEEE80211_RADIOTAP_F_RX_BADFCS 0x0001 /* frame failed crc check */ #define IEEE80211_RADIOTAP_F_RX_PLCP_CRC 0x0002 /* frame failed 
PLCP CRC check */ /* For IEEE80211_RADIOTAP_MCS known */ #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_KNOWN 0x01 #define IEEE80211_RADIOTAP_MCS_MCS_INDEX_KNOWN 0x02 /* MCS index field */ #define IEEE80211_RADIOTAP_MCS_GUARD_INTERVAL_KNOWN 0x04 #define IEEE80211_RADIOTAP_MCS_HT_FORMAT_KNOWN 0x08 #define IEEE80211_RADIOTAP_MCS_FEC_TYPE_KNOWN 0x10 #define IEEE80211_RADIOTAP_MCS_STBC_KNOWN 0x20 #define IEEE80211_RADIOTAP_MCS_NESS_KNOWN 0x40 #define IEEE80211_RADIOTAP_MCS_NESS_BIT_1 0x80 /* For IEEE80211_RADIOTAP_MCS flags */ #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_MASK 0x03 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_20 0 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_40 1 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_20L 2 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_20U 3 #define IEEE80211_RADIOTAP_MCS_SHORT_GI 0x04 /* short guard interval */ #define IEEE80211_RADIOTAP_MCS_HT_GREENFIELD 0x08 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 #define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 #define IEEE80211_RADIOTAP_MCS_STBC_1 1 #define IEEE80211_RADIOTAP_MCS_STBC_2 2 #define IEEE80211_RADIOTAP_MCS_STBC_3 3 #define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 #define IEEE80211_RADIOTAP_MCS_NESS_BIT_0 0x80 /* For IEEE80211_RADIOTAP_AMPDU_STATUS */ #define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 #define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002 #define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004 #define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008 #define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010 #define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020 /* For IEEE80211_RADIOTAP_VHT known */ #define IEEE80211_RADIOTAP_VHT_STBC_KNOWN 0x0001 #define IEEE80211_RADIOTAP_VHT_TXOP_PS_NA_KNOWN 0x0002 #define IEEE80211_RADIOTAP_VHT_GUARD_INTERVAL_KNOWN 0x0004 #define IEEE80211_RADIOTAP_VHT_SGI_NSYM_DIS_KNOWN 0x0008 #define IEEE80211_RADIOTAP_VHT_LDPC_EXTRA_OFDM_SYM_KNOWN 0x0010 #define IEEE80211_RADIOTAP_VHT_BEAMFORMED_KNOWN 0x0020 #define IEEE80211_RADIOTAP_VHT_BANDWIDTH_KNOWN 0x0040 #define 
IEEE80211_RADIOTAP_VHT_GROUP_ID_KNOWN 0x0080 #define IEEE80211_RADIOTAP_VHT_PARTIAL_AID_KNOWN 0x0100 /* For IEEE80211_RADIOTAP_VHT flags */ #define IEEE80211_RADIOTAP_VHT_STBC 0x01 #define IEEE80211_RADIOTAP_VHT_TXOP_PS_NA 0x02 #define IEEE80211_RADIOTAP_VHT_SHORT_GI 0x04 #define IEEE80211_RADIOTAP_VHT_SGI_NSYM_M10_9 0x08 #define IEEE80211_RADIOTAP_VHT_LDPC_EXTRA_OFDM_SYM 0x10 #define IEEE80211_RADIOTAP_VHT_BEAMFORMED 0x20 #define IEEE80211_RADIOTAP_VHT_BANDWIDTH_MASK 0x1f #define IEEE80211_RADIOTAP_VHT_NSS_MASK 0x0f #define IEEE80211_RADIOTAP_VHT_MCS_MASK 0xf0 #define IEEE80211_RADIOTAP_VHT_MCS_SHIFT 4 #define IEEE80211_RADIOTAP_CODING_LDPC_USERn 0x01 #define IEEE80211_CHAN_FHSS \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_GFSK) #define IEEE80211_CHAN_A \ (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM) #define IEEE80211_CHAN_B \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK) #define IEEE80211_CHAN_PUREG \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM) #define IEEE80211_CHAN_G \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN) #define IS_CHAN_FHSS(flags) \ ((flags & IEEE80211_CHAN_FHSS) == IEEE80211_CHAN_FHSS) #define IS_CHAN_A(flags) \ ((flags & IEEE80211_CHAN_A) == IEEE80211_CHAN_A) #define IS_CHAN_B(flags) \ ((flags & IEEE80211_CHAN_B) == IEEE80211_CHAN_B) #define IS_CHAN_PUREG(flags) \ ((flags & IEEE80211_CHAN_PUREG) == IEEE80211_CHAN_PUREG) #define IS_CHAN_G(flags) \ ((flags & IEEE80211_CHAN_G) == IEEE80211_CHAN_G) #define IS_CHAN_ANYG(flags) \ (IS_CHAN_PUREG(flags) || IS_CHAN_G(flags)) static void print_chaninfo(netdissect_options *ndo, uint16_t freq, int flags, int presentflags) { ND_PRINT((ndo, "%u MHz", freq)); if (presentflags & (1 << IEEE80211_RADIOTAP_MCS)) { /* * We have the MCS field, so this is 11n, regardless * of what the channel flags say. 
*/ ND_PRINT((ndo, " 11n")); } else { if (IS_CHAN_FHSS(flags)) ND_PRINT((ndo, " FHSS")); if (IS_CHAN_A(flags)) { if (flags & IEEE80211_CHAN_HALF) ND_PRINT((ndo, " 11a/10Mhz")); else if (flags & IEEE80211_CHAN_QUARTER) ND_PRINT((ndo, " 11a/5Mhz")); else ND_PRINT((ndo, " 11a")); } if (IS_CHAN_ANYG(flags)) { if (flags & IEEE80211_CHAN_HALF) ND_PRINT((ndo, " 11g/10Mhz")); else if (flags & IEEE80211_CHAN_QUARTER) ND_PRINT((ndo, " 11g/5Mhz")); else ND_PRINT((ndo, " 11g")); } else if (IS_CHAN_B(flags)) ND_PRINT((ndo, " 11b")); if (flags & IEEE80211_CHAN_TURBO) ND_PRINT((ndo, " Turbo")); } /* * These apply to 11n. */ if (flags & IEEE80211_CHAN_HT20) ND_PRINT((ndo, " ht/20")); else if (flags & IEEE80211_CHAN_HT40D) ND_PRINT((ndo, " ht/40-")); else if (flags & IEEE80211_CHAN_HT40U) ND_PRINT((ndo, " ht/40+")); ND_PRINT((ndo, " ")); } static int print_radiotap_field(netdissect_options *ndo, struct cpack_state *s, uint32_t bit, uint8_t *flagsp, uint32_t presentflags) { u_int i; int rc; switch (bit) { case IEEE80211_RADIOTAP_TSFT: { uint64_t tsft; rc = cpack_uint64(s, &tsft); if (rc != 0) goto trunc; ND_PRINT((ndo, "%" PRIu64 "us tsft ", tsft)); break; } case IEEE80211_RADIOTAP_FLAGS: { uint8_t flagsval; rc = cpack_uint8(s, &flagsval); if (rc != 0) goto trunc; *flagsp = flagsval; if (flagsval & IEEE80211_RADIOTAP_F_CFP) ND_PRINT((ndo, "cfp ")); if (flagsval & IEEE80211_RADIOTAP_F_SHORTPRE) ND_PRINT((ndo, "short preamble ")); if (flagsval & IEEE80211_RADIOTAP_F_WEP) ND_PRINT((ndo, "wep ")); if (flagsval & IEEE80211_RADIOTAP_F_FRAG) ND_PRINT((ndo, "fragmented ")); if (flagsval & IEEE80211_RADIOTAP_F_BADFCS) ND_PRINT((ndo, "bad-fcs ")); break; } case IEEE80211_RADIOTAP_RATE: { uint8_t rate; rc = cpack_uint8(s, &rate); if (rc != 0) goto trunc; /* * XXX On FreeBSD rate & 0x80 means we have an MCS. On * Linux and AirPcap it does not. (What about * Mac OS X, NetBSD, OpenBSD, and DragonFly BSD?) 
* * This is an issue either for proprietary extensions * to 11a or 11g, which do exist, or for 11n * implementations that stuff a rate value into * this field, which also appear to exist. * * We currently handle that by assuming that * if the 0x80 bit is set *and* the remaining * bits have a value between 0 and 15 it's * an MCS value, otherwise it's a rate. If * there are cases where systems that use * "0x80 + MCS index" for MCS indices > 15, * or stuff a rate value here between 64 and * 71.5 Mb/s in here, we'll need a preference * setting. Such rates do exist, e.g. 11n * MCS 7 at 20 MHz with a long guard interval. */ if (rate >= 0x80 && rate <= 0x8f) { /* * XXX - we don't know the channel width * or guard interval length, so we can't * convert this to a data rate. * * If you want us to show a data rate, * use the MCS field, not the Rate field; * the MCS field includes not only the * MCS index, it also includes bandwidth * and guard interval information. * * XXX - can we get the channel width * from XChannel and the guard interval * information from Flags, at least on * FreeBSD? */ ND_PRINT((ndo, "MCS %u ", rate & 0x7f)); } else ND_PRINT((ndo, "%2.1f Mb/s ", .5 * rate)); break; } case IEEE80211_RADIOTAP_CHANNEL: { uint16_t frequency; uint16_t flags; rc = cpack_uint16(s, &frequency); if (rc != 0) goto trunc; rc = cpack_uint16(s, &flags); if (rc != 0) goto trunc; /* * If CHANNEL and XCHANNEL are both present, skip * CHANNEL. 
*/ if (presentflags & (1 << IEEE80211_RADIOTAP_XCHANNEL)) break; print_chaninfo(ndo, frequency, flags, presentflags); break; } case IEEE80211_RADIOTAP_FHSS: { uint8_t hopset; uint8_t hoppat; rc = cpack_uint8(s, &hopset); if (rc != 0) goto trunc; rc = cpack_uint8(s, &hoppat); if (rc != 0) goto trunc; ND_PRINT((ndo, "fhset %d fhpat %d ", hopset, hoppat)); break; } case IEEE80211_RADIOTAP_DBM_ANTSIGNAL: { int8_t dbm_antsignal; rc = cpack_int8(s, &dbm_antsignal); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddBm signal ", dbm_antsignal)); break; } case IEEE80211_RADIOTAP_DBM_ANTNOISE: { int8_t dbm_antnoise; rc = cpack_int8(s, &dbm_antnoise); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddBm noise ", dbm_antnoise)); break; } case IEEE80211_RADIOTAP_LOCK_QUALITY: { uint16_t lock_quality; rc = cpack_uint16(s, &lock_quality); if (rc != 0) goto trunc; ND_PRINT((ndo, "%u sq ", lock_quality)); break; } case IEEE80211_RADIOTAP_TX_ATTENUATION: { uint16_t tx_attenuation; rc = cpack_uint16(s, &tx_attenuation); if (rc != 0) goto trunc; ND_PRINT((ndo, "%d tx power ", -(int)tx_attenuation)); break; } case IEEE80211_RADIOTAP_DB_TX_ATTENUATION: { uint8_t db_tx_attenuation; rc = cpack_uint8(s, &db_tx_attenuation); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddB tx attenuation ", -(int)db_tx_attenuation)); break; } case IEEE80211_RADIOTAP_DBM_TX_POWER: { int8_t dbm_tx_power; rc = cpack_int8(s, &dbm_tx_power); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddBm tx power ", dbm_tx_power)); break; } case IEEE80211_RADIOTAP_ANTENNA: { uint8_t antenna; rc = cpack_uint8(s, &antenna); if (rc != 0) goto trunc; ND_PRINT((ndo, "antenna %u ", antenna)); break; } case IEEE80211_RADIOTAP_DB_ANTSIGNAL: { uint8_t db_antsignal; rc = cpack_uint8(s, &db_antsignal); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddB signal ", db_antsignal)); break; } case IEEE80211_RADIOTAP_DB_ANTNOISE: { uint8_t db_antnoise; rc = cpack_uint8(s, &db_antnoise); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddB noise ", db_antnoise)); break; } 
case IEEE80211_RADIOTAP_RX_FLAGS: { uint16_t rx_flags; rc = cpack_uint16(s, &rx_flags); if (rc != 0) goto trunc; /* Do nothing for now */ break; } case IEEE80211_RADIOTAP_XCHANNEL: { uint32_t flags; uint16_t frequency; uint8_t channel; uint8_t maxpower; rc = cpack_uint32(s, &flags); if (rc != 0) goto trunc; rc = cpack_uint16(s, &frequency); if (rc != 0) goto trunc; rc = cpack_uint8(s, &channel); if (rc != 0) goto trunc; rc = cpack_uint8(s, &maxpower); if (rc != 0) goto trunc; print_chaninfo(ndo, frequency, flags, presentflags); break; } case IEEE80211_RADIOTAP_MCS: { uint8_t known; uint8_t flags; uint8_t mcs_index; static const char *ht_bandwidth[4] = { "20 MHz", "40 MHz", "20 MHz (L)", "20 MHz (U)" }; float htrate; rc = cpack_uint8(s, &known); if (rc != 0) goto trunc; rc = cpack_uint8(s, &flags); if (rc != 0) goto trunc; rc = cpack_uint8(s, &mcs_index); if (rc != 0) goto trunc; if (known & IEEE80211_RADIOTAP_MCS_MCS_INDEX_KNOWN) { /* * We know the MCS index. */ if (mcs_index <= MAX_MCS_INDEX) { /* * And it's in-range. */ if (known & (IEEE80211_RADIOTAP_MCS_BANDWIDTH_KNOWN|IEEE80211_RADIOTAP_MCS_GUARD_INTERVAL_KNOWN)) { /* * And we know both the bandwidth and * the guard interval, so we can look * up the rate. */ htrate = ieee80211_float_htrates \ [mcs_index] \ [((flags & IEEE80211_RADIOTAP_MCS_BANDWIDTH_MASK) == IEEE80211_RADIOTAP_MCS_BANDWIDTH_40 ? 1 : 0)] \ [((flags & IEEE80211_RADIOTAP_MCS_SHORT_GI) ? 1 : 0)]; } else { /* * We don't know both the bandwidth * and the guard interval, so we can * only report the MCS index. */ htrate = 0.0; } } else { /* * The MCS value is out of range. */ htrate = 0.0; } if (htrate != 0.0) { /* * We have the rate. * Print it. */ ND_PRINT((ndo, "%.1f Mb/s MCS %u ", htrate, mcs_index)); } else { /* * We at least have the MCS index. * Print it. 
*/ ND_PRINT((ndo, "MCS %u ", mcs_index)); } } if (known & IEEE80211_RADIOTAP_MCS_BANDWIDTH_KNOWN) { ND_PRINT((ndo, "%s ", ht_bandwidth[flags & IEEE80211_RADIOTAP_MCS_BANDWIDTH_MASK])); } if (known & IEEE80211_RADIOTAP_MCS_GUARD_INTERVAL_KNOWN) { ND_PRINT((ndo, "%s GI ", (flags & IEEE80211_RADIOTAP_MCS_SHORT_GI) ? "short" : "long")); } if (known & IEEE80211_RADIOTAP_MCS_HT_FORMAT_KNOWN) { ND_PRINT((ndo, "%s ", (flags & IEEE80211_RADIOTAP_MCS_HT_GREENFIELD) ? "greenfield" : "mixed")); } if (known & IEEE80211_RADIOTAP_MCS_FEC_TYPE_KNOWN) { ND_PRINT((ndo, "%s FEC ", (flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC) ? "LDPC" : "BCC")); } if (known & IEEE80211_RADIOTAP_MCS_STBC_KNOWN) { ND_PRINT((ndo, "RX-STBC%u ", (flags & IEEE80211_RADIOTAP_MCS_STBC_MASK) >> IEEE80211_RADIOTAP_MCS_STBC_SHIFT)); } break; } case IEEE80211_RADIOTAP_AMPDU_STATUS: { uint32_t reference_num; uint16_t flags; uint8_t delim_crc; uint8_t reserved; rc = cpack_uint32(s, &reference_num); if (rc != 0) goto trunc; rc = cpack_uint16(s, &flags); if (rc != 0) goto trunc; rc = cpack_uint8(s, &delim_crc); if (rc != 0) goto trunc; rc = cpack_uint8(s, &reserved); if (rc != 0) goto trunc; /* Do nothing for now */ break; } case IEEE80211_RADIOTAP_VHT: { uint16_t known; uint8_t flags; uint8_t bandwidth; uint8_t mcs_nss[4]; uint8_t coding; uint8_t group_id; uint16_t partial_aid; static const char *vht_bandwidth[32] = { "20 MHz", "40 MHz", "20 MHz (L)", "20 MHz (U)", "80 MHz", "80 MHz (L)", "80 MHz (U)", "80 MHz (LL)", "80 MHz (LU)", "80 MHz (UL)", "80 MHz (UU)", "160 MHz", "160 MHz (L)", "160 MHz (U)", "160 MHz (LL)", "160 MHz (LU)", "160 MHz (UL)", "160 MHz (UU)", "160 MHz (LLL)", "160 MHz (LLU)", "160 MHz (LUL)", "160 MHz (UUU)", "160 MHz (ULL)", "160 MHz (ULU)", "160 MHz (UUL)", "160 MHz (UUU)", "unknown (26)", "unknown (27)", "unknown (28)", "unknown (29)", "unknown (30)", "unknown (31)" }; rc = cpack_uint16(s, &known); if (rc != 0) goto trunc; rc = cpack_uint8(s, &flags); if (rc != 0) goto trunc; rc = 
cpack_uint8(s, &bandwidth); if (rc != 0) goto trunc; for (i = 0; i < 4; i++) { rc = cpack_uint8(s, &mcs_nss[i]); if (rc != 0) goto trunc; } rc = cpack_uint8(s, &coding); if (rc != 0) goto trunc; rc = cpack_uint8(s, &group_id); if (rc != 0) goto trunc; rc = cpack_uint16(s, &partial_aid); if (rc != 0) goto trunc; for (i = 0; i < 4; i++) { u_int nss, mcs; nss = mcs_nss[i] & IEEE80211_RADIOTAP_VHT_NSS_MASK; mcs = (mcs_nss[i] & IEEE80211_RADIOTAP_VHT_MCS_MASK) >> IEEE80211_RADIOTAP_VHT_MCS_SHIFT; if (nss == 0) continue; ND_PRINT((ndo, "User %u MCS %u ", i, mcs)); ND_PRINT((ndo, "%s FEC ", (coding & (IEEE80211_RADIOTAP_CODING_LDPC_USERn << i)) ? "LDPC" : "BCC")); } if (known & IEEE80211_RADIOTAP_VHT_BANDWIDTH_KNOWN) { ND_PRINT((ndo, "%s ", vht_bandwidth[bandwidth & IEEE80211_RADIOTAP_VHT_BANDWIDTH_MASK])); } if (known & IEEE80211_RADIOTAP_VHT_GUARD_INTERVAL_KNOWN) { ND_PRINT((ndo, "%s GI ", (flags & IEEE80211_RADIOTAP_VHT_SHORT_GI) ? "short" : "long")); } break; } default: /* this bit indicates a field whose * size we do not know, so we cannot * proceed. Just print the bit number. */ ND_PRINT((ndo, "[bit %u] ", bit)); return -1; } return 0; trunc: ND_PRINT((ndo, "%s", tstr)); return rc; } static int print_in_radiotap_namespace(netdissect_options *ndo, struct cpack_state *s, uint8_t *flags, uint32_t presentflags, int bit0) { #define BITNO_32(x) (((x) >> 16) ? 16 + BITNO_16((x) >> 16) : BITNO_16((x))) #define BITNO_16(x) (((x) >> 8) ? 8 + BITNO_8((x) >> 8) : BITNO_8((x))) #define BITNO_8(x) (((x) >> 4) ? 4 + BITNO_4((x) >> 4) : BITNO_4((x))) #define BITNO_4(x) (((x) >> 2) ? 2 + BITNO_2((x) >> 2) : BITNO_2((x))) #define BITNO_2(x) (((x) & 2) ? 1 : 0) uint32_t present, next_present; int bitno; enum ieee80211_radiotap_type bit; int rc; for (present = presentflags; present; present = next_present) { /* * Clear the least significant bit that is set. 
*/ next_present = present & (present - 1); /* * Get the bit number, within this presence word, * of the remaining least significant bit that * is set. */ bitno = BITNO_32(present ^ next_present); /* * Stop if this is one of the "same meaning * in all presence flags" bits. */ if (bitno >= IEEE80211_RADIOTAP_NAMESPACE) break; /* * Get the radiotap bit number of that bit. */ bit = (enum ieee80211_radiotap_type)(bit0 + bitno); rc = print_radiotap_field(ndo, s, bit, flags, presentflags); if (rc != 0) return rc; } return 0; } static u_int ieee802_11_radio_print(netdissect_options *ndo, const u_char *p, u_int length, u_int caplen) { #define BIT(n) (1U << n) #define IS_EXTENDED(__p) \ (EXTRACT_LE_32BITS(__p) & BIT(IEEE80211_RADIOTAP_EXT)) != 0 struct cpack_state cpacker; const struct ieee80211_radiotap_header *hdr; uint32_t presentflags; const uint32_t *presentp, *last_presentp; int vendor_namespace; uint8_t vendor_oui[3]; uint8_t vendor_subnamespace; uint16_t skip_length; int bit0; u_int len; uint8_t flags; int pad; u_int fcslen; if (caplen < sizeof(*hdr)) { ND_PRINT((ndo, "%s", tstr)); return caplen; } hdr = (const struct ieee80211_radiotap_header *)p; len = EXTRACT_LE_16BITS(&hdr->it_len); /* * If we don't have the entire radiotap header, just give up. */ if (caplen < len) { ND_PRINT((ndo, "%s", tstr)); return caplen; } cpack_init(&cpacker, (const uint8_t *)hdr, len); /* align against header start */ cpack_advance(&cpacker, sizeof(*hdr)); /* includes the 1st bitmap */ for (last_presentp = &hdr->it_present; (const u_char*)(last_presentp + 1) <= p + len && IS_EXTENDED(last_presentp); last_presentp++) cpack_advance(&cpacker, sizeof(hdr->it_present)); /* more bitmaps */ /* are there more bitmap extensions than bytes in header? */ if ((const u_char*)(last_presentp + 1) > p + len) { ND_PRINT((ndo, "%s", tstr)); return caplen; } /* * Start out at the beginning of the default radiotap namespace. 
*/ bit0 = 0; vendor_namespace = 0; memset(vendor_oui, 0, 3); vendor_subnamespace = 0; skip_length = 0; /* Assume no flags */ flags = 0; /* Assume no Atheros padding between 802.11 header and body */ pad = 0; /* Assume no FCS at end of frame */ fcslen = 0; for (presentp = &hdr->it_present; presentp <= last_presentp; presentp++) { presentflags = EXTRACT_LE_32BITS(presentp); /* * If this is a vendor namespace, we don't handle it. */ if (vendor_namespace) { /* * Skip past the stuff we don't understand. * If we add support for any vendor namespaces, * it'd be added here; use vendor_oui and * vendor_subnamespace to interpret the fields. */ if (cpack_advance(&cpacker, skip_length) != 0) { /* * Ran out of space in the packet. */ break; } /* * We've skipped it all; nothing more to * skip. */ skip_length = 0; } else { if (print_in_radiotap_namespace(ndo, &cpacker, &flags, presentflags, bit0) != 0) { /* * Fatal error - can't process anything * more in the radiotap header. */ break; } } /* * Handle the namespace switch bits; we've already handled * the extension bit in all but the last word above. */ switch (presentflags & (BIT(IEEE80211_RADIOTAP_NAMESPACE)|BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE))) { case 0: /* * We're not changing namespaces. * advance to the next 32 bits in the current * namespace. */ bit0 += 32; break; case BIT(IEEE80211_RADIOTAP_NAMESPACE): /* * We're switching to the radiotap namespace. * Reset the presence-bitmap index to 0, and * reset the namespace to the default radiotap * namespace. */ bit0 = 0; vendor_namespace = 0; memset(vendor_oui, 0, 3); vendor_subnamespace = 0; skip_length = 0; break; case BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE): /* * We're switching to a vendor namespace. * Reset the presence-bitmap index to 0, * note that we're in a vendor namespace, * and fetch the fields of the Vendor Namespace * item. 
*/ bit0 = 0; vendor_namespace = 1; if ((cpack_align_and_reserve(&cpacker, 2)) == NULL) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_oui[0]) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_oui[1]) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_oui[2]) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_subnamespace) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint16(&cpacker, &skip_length) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } break; default: /* * Illegal combination. The behavior in this * case is undefined by the radiotap spec; we * just ignore both bits. */ break; } } if (flags & IEEE80211_RADIOTAP_F_DATAPAD) pad = 1; /* Atheros padding */ if (flags & IEEE80211_RADIOTAP_F_FCS) fcslen = 4; /* FCS at end of packet */ return len + ieee802_11_print(ndo, p + len, length - len, caplen - len, pad, fcslen); #undef BITNO_32 #undef BITNO_16 #undef BITNO_8 #undef BITNO_4 #undef BITNO_2 #undef BIT } static u_int ieee802_11_avs_radio_print(netdissect_options *ndo, const u_char *p, u_int length, u_int caplen) { uint32_t caphdr_len; if (caplen < 8) { ND_PRINT((ndo, "%s", tstr)); return caplen; } caphdr_len = EXTRACT_32BITS(p + 4); if (caphdr_len < 8) { /* * Yow! The capture header length is claimed not * to be large enough to include even the version * cookie or capture header length! */ ND_PRINT((ndo, "%s", tstr)); return caplen; } if (caplen < caphdr_len) { ND_PRINT((ndo, "%s", tstr)); return caplen; } return caphdr_len + ieee802_11_print(ndo, p + caphdr_len, length - caphdr_len, caplen - caphdr_len, 0, 0); } #define PRISM_HDR_LEN 144 #define WLANCAP_MAGIC_COOKIE_BASE 0x80211000 #define WLANCAP_MAGIC_COOKIE_V1 0x80211001 #define WLANCAP_MAGIC_COOKIE_V2 0x80211002 /* * For DLT_PRISM_HEADER; like DLT_IEEE802_11, but with an extra header, * containing information such as radio information, which we * currently ignore. 
* * If, however, the packet begins with WLANCAP_MAGIC_COOKIE_V1 or * WLANCAP_MAGIC_COOKIE_V2, it's really DLT_IEEE802_11_RADIO_AVS * (currently, on Linux, there's no ARPHRD_ type for * DLT_IEEE802_11_RADIO_AVS, as there is a ARPHRD_IEEE80211_PRISM * for DLT_PRISM_HEADER, so ARPHRD_IEEE80211_PRISM is used for * the AVS header, and the first 4 bytes of the header are used to * indicate whether it's a Prism header or an AVS header). */ u_int prism_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int length = h->len; uint32_t msgcode; if (caplen < 4) { ND_PRINT((ndo, "%s", tstr)); return caplen; } msgcode = EXTRACT_32BITS(p); if (msgcode == WLANCAP_MAGIC_COOKIE_V1 || msgcode == WLANCAP_MAGIC_COOKIE_V2) return ieee802_11_avs_radio_print(ndo, p, length, caplen); if (caplen < PRISM_HDR_LEN) { ND_PRINT((ndo, "%s", tstr)); return caplen; } return PRISM_HDR_LEN + ieee802_11_print(ndo, p + PRISM_HDR_LEN, length - PRISM_HDR_LEN, caplen - PRISM_HDR_LEN, 0, 0); } /* * For DLT_IEEE802_11_RADIO; like DLT_IEEE802_11, but with an extra * header, containing information such as radio information. */ u_int ieee802_11_radio_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { return ieee802_11_radio_print(ndo, p, h->len, h->caplen); } /* * For DLT_IEEE802_11_RADIO_AVS; like DLT_IEEE802_11, but with an * extra header, containing information such as radio information, * which we currently ignore. */ u_int ieee802_11_radio_avs_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { return ieee802_11_avs_radio_print(ndo, p, h->len, h->caplen); }
/* * Copyright (c) 2001 * Fortress Technologies, Inc. All rights reserved. * Charlie Lenahan (clenahan@fortresstech.com) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: IEEE 802.11 printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "cpack.h" /* Lengths of 802.11 header components. */ #define IEEE802_11_FC_LEN 2 #define IEEE802_11_DUR_LEN 2 #define IEEE802_11_DA_LEN 6 #define IEEE802_11_SA_LEN 6 #define IEEE802_11_BSSID_LEN 6 #define IEEE802_11_RA_LEN 6 #define IEEE802_11_TA_LEN 6 #define IEEE802_11_ADDR1_LEN 6 #define IEEE802_11_SEQ_LEN 2 #define IEEE802_11_CTL_LEN 2 #define IEEE802_11_CARRIED_FC_LEN 2 #define IEEE802_11_HT_CONTROL_LEN 4 #define IEEE802_11_IV_LEN 3 #define IEEE802_11_KID_LEN 1 /* Frame check sequence length. */ #define IEEE802_11_FCS_LEN 4 /* Lengths of beacon components. 
*/
/* Lengths (in bytes) of fixed fields in management frame bodies. */
#define IEEE802_11_TSTAMP_LEN		8
#define IEEE802_11_BCNINT_LEN		2
#define IEEE802_11_CAPINFO_LEN		2
#define IEEE802_11_LISTENINT_LEN	2
#define IEEE802_11_AID_LEN		2
#define IEEE802_11_STATUS_LEN		2
#define IEEE802_11_REASON_LEN		2

/* Length of previous AP in reassocation frame */
#define IEEE802_11_AP_LEN		6

/* Frame types (2-bit Type field of the Frame Control word). */
#define	T_MGMT 0x0	/* management */
#define	T_CTRL 0x1	/* control */
#define	T_DATA 0x2	/* data */
#define	T_RESV 0x3	/* reserved */

/* Management frame subtypes (4-bit Subtype field). */
#define	ST_ASSOC_REQUEST	0x0
#define	ST_ASSOC_RESPONSE	0x1
#define	ST_REASSOC_REQUEST	0x2
#define	ST_REASSOC_RESPONSE	0x3
#define	ST_PROBE_REQUEST	0x4
#define	ST_PROBE_RESPONSE	0x5
/* RESERVED			0x6 */
/* RESERVED			0x7 */
#define	ST_BEACON		0x8
#define	ST_ATIM			0x9
#define	ST_DISASSOC		0xA
#define	ST_AUTH			0xB
#define	ST_DEAUTH		0xC
#define	ST_ACTION		0xD
/* RESERVED			0xE */
/* RESERVED			0xF */

/* Printable names for the management subtypes above. */
static const struct tok st_str[] = {
	{ ST_ASSOC_REQUEST,    "Assoc Request" },
	{ ST_ASSOC_RESPONSE,   "Assoc Response" },
	{ ST_REASSOC_REQUEST,  "ReAssoc Request" },
	{ ST_REASSOC_RESPONSE, "ReAssoc Response" },
	{ ST_PROBE_REQUEST,    "Probe Request" },
	{ ST_PROBE_RESPONSE,   "Probe Response" },
	{ ST_BEACON,           "Beacon" },
	{ ST_ATIM,             "ATIM" },
	{ ST_DISASSOC,         "Disassociation" },
	{ ST_AUTH,             "Authentication" },
	{ ST_DEAUTH,           "DeAuthentication" },
	{ ST_ACTION,           "Action" },
	{ 0, NULL }
};

/* Control frame subtypes. */
#define CTRL_CONTROL_WRAPPER	0x7
#define	CTRL_BAR	0x8
#define	CTRL_BA		0x9
#define	CTRL_PS_POLL	0xA
#define	CTRL_RTS	0xB
#define	CTRL_CTS	0xC
#define	CTRL_ACK	0xD
#define	CTRL_CF_END	0xE
#define	CTRL_END_ACK	0xF

/* Printable names for the control subtypes above. */
static const struct tok ctrl_str[] = {
	{ CTRL_CONTROL_WRAPPER, "Control Wrapper" },
	{ CTRL_BAR,             "BAR" },
	{ CTRL_BA,              "BA" },
	{ CTRL_PS_POLL,         "Power Save-Poll" },
	{ CTRL_RTS,             "Request-To-Send" },
	{ CTRL_CTS,             "Clear-To-Send" },
	{ CTRL_ACK,             "Acknowledgment" },
	{ CTRL_CF_END,          "CF-End" },
	{ CTRL_END_ACK,         "CF-End+CF-Ack" },
	{ 0, NULL }
};

/* Data frame subtypes. */
#define	DATA_DATA			0x0
#define	DATA_DATA_CF_ACK		0x1
#define	DATA_DATA_CF_POLL		0x2
#define	DATA_DATA_CF_ACK_POLL		0x3
#define	DATA_NODATA			0x4
#define	DATA_NODATA_CF_ACK		0x5
#define	DATA_NODATA_CF_POLL		0x6
#define	DATA_NODATA_CF_ACK_POLL		0x7

#define DATA_QOS_DATA			0x8
#define DATA_QOS_DATA_CF_ACK		0x9
#define DATA_QOS_DATA_CF_POLL		0xA
#define DATA_QOS_DATA_CF_ACK_POLL	0xB
#define DATA_QOS_NODATA			0xC
#define DATA_QOS_CF_POLL_NODATA		0xE
#define DATA_QOS_CF_ACK_POLL_NODATA	0xF

/*
 * The subtype field of a data frame is, in effect, composed of 4 flag
 * bits - CF-Ack, CF-Poll, Null (means the frame doesn't actually have
 * any data), and QoS.
 */
#define DATA_FRAME_IS_CF_ACK(x)		((x) & 0x01)
#define DATA_FRAME_IS_CF_POLL(x)	((x) & 0x02)
#define DATA_FRAME_IS_NULL(x)		((x) & 0x04)
#define DATA_FRAME_IS_QOS(x)		((x) & 0x08)

/*
 * Bits in the frame control field.
 */
#define	FC_VERSION(fc)		((fc) & 0x3)
#define	FC_TYPE(fc)		(((fc) >> 2) & 0x3)
#define	FC_SUBTYPE(fc)		(((fc) >> 4) & 0xF)
#define	FC_TO_DS(fc)		((fc) & 0x0100)
#define	FC_FROM_DS(fc)		((fc) & 0x0200)
#define	FC_MORE_FLAG(fc)	((fc) & 0x0400)
#define	FC_RETRY(fc)		((fc) & 0x0800)
#define	FC_POWER_MGMT(fc)	((fc) & 0x1000)
#define	FC_MORE_DATA(fc)	((fc) & 0x2000)
#define	FC_PROTECTED(fc)	((fc) & 0x4000)
#define	FC_ORDER(fc)		((fc) & 0x8000)

/* On-the-wire layout of a management frame header. */
struct mgmt_header_t {
	uint16_t	fc;		/* frame control */
	uint16_t	duration;
	uint8_t		da[IEEE802_11_DA_LEN];		/* destination address */
	uint8_t		sa[IEEE802_11_SA_LEN];		/* source address */
	uint8_t		bssid[IEEE802_11_BSSID_LEN];
	uint16_t	seq_ctrl;	/* sequence control */
};

#define	MGMT_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\
			 IEEE802_11_DA_LEN+IEEE802_11_SA_LEN+\
			 IEEE802_11_BSSID_LEN+IEEE802_11_SEQ_LEN)

/* Bits of the capability information field. */
#define	CAPABILITY_ESS(cap)	((cap) & 0x0001)
#define	CAPABILITY_IBSS(cap)	((cap) & 0x0002)
#define	CAPABILITY_CFP(cap)	((cap) & 0x0004)
#define	CAPABILITY_CFP_REQ(cap)	((cap) & 0x0008)
#define	CAPABILITY_PRIVACY(cap)	((cap) & 0x0010)

/* Parsed SSID information element. */
struct ssid_t {
	uint8_t		element_id;
	uint8_t		length;
	u_char		ssid[33];  /* 32 + 1 for null */
};

/* Parsed (extended) supported-rates information element. */
struct rates_t {
	uint8_t		element_id;
	uint8_t		length;
	uint8_t		rate[16];
};

/* Parsed challenge-text information element (shared-key auth). */
struct challenge_t {
	uint8_t		element_id;
	uint8_t		length;
	uint8_t		text[254]; /* 1-253 + 1 for null */
};

/* Parsed FH parameter-set information element. */
struct fh_t {
	uint8_t		element_id;
	uint8_t		length;
	uint16_t	dwell_time;
	uint8_t		hop_set;
	uint8_t		hop_pattern;
	uint8_t		hop_index;
};

/* Parsed DS parameter-set information element (current channel). */
struct ds_t {
	uint8_t		element_id;
	uint8_t		length;
	uint8_t		channel;
};

/* Parsed CF parameter-set information element. */
struct cf_t {
	uint8_t		element_id;
	uint8_t		length;
	uint8_t		count;
	uint8_t		period;
	uint16_t	max_duration;
	uint16_t	dur_remaing;	/* NOTE(review): field name typo kept; it is part of the struct layout */
};

/* Parsed TIM (traffic indication map) information element. */
struct tim_t {
	uint8_t		element_id;
	uint8_t		length;
	uint8_t		count;
	uint8_t		period;
	uint8_t		bitmap_control;
	uint8_t		bitmap[251];
};

/* Information-element IDs. */
#define	E_SSID		0
#define	E_RATES		1
#define	E_FH		2
#define	E_DS		3
#define	E_CF		4
#define	E_TIM		5
#define	E_IBSS		6
/* reserved		7 */
/* reserved		8 */
/* reserved		9 */
/* reserved		10 */
/* reserved		11 */
/* reserved		12 */
/* reserved		13 */
/* reserved		14 */
/* reserved		15 */
/* reserved		16 */

#define	E_CHALLENGE	16
/* reserved		17 */
/* reserved		18 */
/* reserved		19 */
/* reserved		16 */
/* reserved		16 */

/*
 * Parsed management frame body: fixed fields plus a *_present flag and
 * storage for each information element that parse_elements() records.
 */
struct mgmt_body_t {
	uint8_t		timestamp[IEEE802_11_TSTAMP_LEN];
	uint16_t	beacon_interval;
	uint16_t	listen_interval;
	uint16_t	status_code;
	uint16_t	aid;
	u_char		ap[IEEE802_11_AP_LEN];
	uint16_t	reason_code;
	uint16_t	auth_alg;
	uint16_t	auth_trans_seq_num;
	int		challenge_present;
	struct challenge_t  challenge;
	uint16_t	capability_info;
	int		ssid_present;
	struct ssid_t	ssid;
	int		rates_present;
	struct rates_t	rates;
	int		ds_present;
	struct ds_t	ds;
	int		cf_present;
	struct cf_t	cf;
	int		fh_present;
	struct fh_t	fh;
	int		tim_present;
	struct tim_t	tim;
};

/* Control-frame headers, one struct per subtype. */
struct ctrl_control_wrapper_hdr_t {
	uint16_t	fc;
	uint16_t	duration;
	uint8_t		addr1[IEEE802_11_ADDR1_LEN];
	uint16_t	carried_fc[IEEE802_11_CARRIED_FC_LEN];
	uint16_t	ht_control[IEEE802_11_HT_CONTROL_LEN];
};

#define	CTRL_CONTROL_WRAPPER_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\
					 IEEE802_11_ADDR1_LEN+\
					 IEEE802_11_CARRIED_FC_LEN+\
					 IEEE802_11_HT_CONTROL_LEN)

struct ctrl_rts_hdr_t {
	uint16_t	fc;
	uint16_t	duration;
	uint8_t		ra[IEEE802_11_RA_LEN];
	uint8_t		ta[IEEE802_11_TA_LEN];
};

#define	CTRL_RTS_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\
			 IEEE802_11_RA_LEN+IEEE802_11_TA_LEN)

struct ctrl_cts_hdr_t {
	uint16_t	fc;
	uint16_t	duration;
	uint8_t		ra[IEEE802_11_RA_LEN];
};

#define	CTRL_CTS_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+IEEE802_11_RA_LEN)

struct ctrl_ack_hdr_t {
	uint16_t	fc;
	uint16_t	duration;
	uint8_t		ra[IEEE802_11_RA_LEN];
};

#define	CTRL_ACK_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+IEEE802_11_RA_LEN)

struct ctrl_ps_poll_hdr_t {
	uint16_t	fc;
	uint16_t	aid;
	uint8_t		bssid[IEEE802_11_BSSID_LEN];
	uint8_t		ta[IEEE802_11_TA_LEN];
};

#define	CTRL_PS_POLL_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_AID_LEN+\
				 IEEE802_11_BSSID_LEN+IEEE802_11_TA_LEN)

struct ctrl_end_hdr_t {
	uint16_t	fc;
	uint16_t	duration;
	uint8_t		ra[IEEE802_11_RA_LEN];
	uint8_t		bssid[IEEE802_11_BSSID_LEN];
};

#define	CTRL_END_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\
			 IEEE802_11_RA_LEN+IEEE802_11_BSSID_LEN)

struct ctrl_end_ack_hdr_t {
	uint16_t	fc;
	uint16_t	duration;
	uint8_t		ra[IEEE802_11_RA_LEN];
	uint8_t		bssid[IEEE802_11_BSSID_LEN];
};

#define	CTRL_END_ACK_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\
				 IEEE802_11_RA_LEN+IEEE802_11_BSSID_LEN)

struct ctrl_ba_hdr_t {
	uint16_t	fc;
	uint16_t	duration;
	uint8_t		ra[IEEE802_11_RA_LEN];
};

#define	CTRL_BA_HDRLEN	(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+IEEE802_11_RA_LEN)

struct ctrl_bar_hdr_t {
	uint16_t	fc;
	uint16_t	dur;
	uint8_t		ra[IEEE802_11_RA_LEN];
	uint8_t		ta[IEEE802_11_TA_LEN];
	uint16_t	ctl;
	uint16_t	seq;
};

#define	CTRL_BAR_HDRLEN		(IEEE802_11_FC_LEN+IEEE802_11_DUR_LEN+\
				 IEEE802_11_RA_LEN+IEEE802_11_TA_LEN+\
				 IEEE802_11_CTL_LEN+IEEE802_11_SEQ_LEN)

/* Mesh control field (carried in mesh data frames). */
struct meshcntl_t {
	uint8_t		flags;
	uint8_t		ttl;
	uint8_t		seq[4];
	uint8_t		addr4[6];
	uint8_t		addr5[6];
	uint8_t		addr6[6];
};

/* Split a little-endian 32-bit WEP header word into IV / pad / key ID. */
#define	IV_IV(iv)	((iv) & 0xFFFFFF)
#define	IV_PAD(iv)	(((iv) >> 24) & 0x3F)
#define	IV_KEYID(iv)	(((iv) >> 30) & 0x03)

/* Print the parsed SSID element, if one was seen. */
#define PRINT_SSID(p) \
	if (p.ssid_present) { \
		ND_PRINT((ndo, " (")); \
		fn_print(ndo, p.ssid.ssid, NULL); \
		ND_PRINT((ndo, ")")); \
	}

/* Rates are in units of 0.5 Mbit/s; bit 0x80 marks a basic rate. */
#define PRINT_RATE(_sep, _r, _suf) \
	ND_PRINT((ndo, "%s%2.1f%s", _sep, (.5 * ((_r) & 0x7f)), _suf))
#define PRINT_RATES(p) \
	if (p.rates_present) { \
		int z; \
		const char
*sep = " ["; \ for (z = 0; z < p.rates.length ; z++) { \ PRINT_RATE(sep, p.rates.rate[z], \ (p.rates.rate[z] & 0x80 ? "*" : "")); \ sep = " "; \ } \ if (p.rates.length != 0) \ ND_PRINT((ndo, " Mbit]")); \ } #define PRINT_DS_CHANNEL(p) \ if (p.ds_present) \ ND_PRINT((ndo, " CH: %u", p.ds.channel)); \ ND_PRINT((ndo, "%s", \ CAPABILITY_PRIVACY(p.capability_info) ? ", PRIVACY" : "")); #define MAX_MCS_INDEX 76 /* * Indices are: * * the MCS index (0-76); * * 0 for 20 MHz, 1 for 40 MHz; * * 0 for a long guard interval, 1 for a short guard interval. */ static const float ieee80211_float_htrates[MAX_MCS_INDEX+1][2][2] = { /* MCS 0 */ { /* 20 Mhz */ { 6.5, /* SGI */ 7.2, }, /* 40 Mhz */ { 13.5, /* SGI */ 15.0, }, }, /* MCS 1 */ { /* 20 Mhz */ { 13.0, /* SGI */ 14.4, }, /* 40 Mhz */ { 27.0, /* SGI */ 30.0, }, }, /* MCS 2 */ { /* 20 Mhz */ { 19.5, /* SGI */ 21.7, }, /* 40 Mhz */ { 40.5, /* SGI */ 45.0, }, }, /* MCS 3 */ { /* 20 Mhz */ { 26.0, /* SGI */ 28.9, }, /* 40 Mhz */ { 54.0, /* SGI */ 60.0, }, }, /* MCS 4 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, /* SGI */ 90.0, }, }, /* MCS 5 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 6 */ { /* 20 Mhz */ { 58.5, /* SGI */ 65.0, }, /* 40 Mhz */ { 121.5, /* SGI */ 135.0, }, }, /* MCS 7 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 8 */ { /* 20 Mhz */ { 13.0, /* SGI */ 14.4, }, /* 40 Mhz */ { 27.0, /* SGI */ 30.0, }, }, /* MCS 9 */ { /* 20 Mhz */ { 26.0, /* SGI */ 28.9, }, /* 40 Mhz */ { 54.0, /* SGI */ 60.0, }, }, /* MCS 10 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, /* SGI */ 90.0, }, }, /* MCS 11 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 12 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 13 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* 
MCS 14 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 15 */ { /* 20 Mhz */ { 130.0, /* SGI */ 144.4, }, /* 40 Mhz */ { 270.0, /* SGI */ 300.0, }, }, /* MCS 16 */ { /* 20 Mhz */ { 19.5, /* SGI */ 21.7, }, /* 40 Mhz */ { 40.5, /* SGI */ 45.0, }, }, /* MCS 17 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, /* SGI */ 90.0, }, }, /* MCS 18 */ { /* 20 Mhz */ { 58.5, /* SGI */ 65.0, }, /* 40 Mhz */ { 121.5, /* SGI */ 135.0, }, }, /* MCS 19 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 20 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 21 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 22 */ { /* 20 Mhz */ { 175.5, /* SGI */ 195.0, }, /* 40 Mhz */ { 364.5, /* SGI */ 405.0, }, }, /* MCS 23 */ { /* 20 Mhz */ { 195.0, /* SGI */ 216.7, }, /* 40 Mhz */ { 405.0, /* SGI */ 450.0, }, }, /* MCS 24 */ { /* 20 Mhz */ { 26.0, /* SGI */ 28.9, }, /* 40 Mhz */ { 54.0, /* SGI */ 60.0, }, }, /* MCS 25 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 26 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 27 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 28 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 29 */ { /* 20 Mhz */ { 208.0, /* SGI */ 231.1, }, /* 40 Mhz */ { 432.0, /* SGI */ 480.0, }, }, /* MCS 30 */ { /* 20 Mhz */ { 234.0, /* SGI */ 260.0, }, /* 40 Mhz */ { 486.0, /* SGI */ 540.0, }, }, /* MCS 31 */ { /* 20 Mhz */ { 260.0, /* SGI */ 288.9, }, /* 40 Mhz */ { 540.0, /* SGI */ 600.0, }, }, /* MCS 32 */ { /* 20 Mhz */ { 0.0, /* SGI */ 0.0, }, /* not valid */ /* 40 Mhz */ { 6.0, /* SGI */ 6.7, }, }, /* MCS 33 */ { /* 20 Mhz */ { 39.0, /* SGI */ 43.3, }, /* 40 Mhz */ { 81.0, 
/* SGI */ 90.0, }, }, /* MCS 34 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 35 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 36 */ { /* 20 Mhz */ { 58.5, /* SGI */ 65.0, }, /* 40 Mhz */ { 121.5, /* SGI */ 135.0, }, }, /* MCS 37 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 38 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 39 */ { /* 20 Mhz */ { 52.0, /* SGI */ 57.8, }, /* 40 Mhz */ { 108.0, /* SGI */ 120.0, }, }, /* MCS 40 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 41 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 42 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 43 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 44 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 45 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 46 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 47 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 48 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 49 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 50 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 51 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 52 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 53 */ { /* 20 Mhz */ { 65.0, /* SGI */ 72.2, }, /* 40 
Mhz */ { 135.0, /* SGI */ 150.0, }, }, /* MCS 54 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 55 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 56 */ { /* 20 Mhz */ { 78.0, /* SGI */ 86.7, }, /* 40 Mhz */ { 162.0, /* SGI */ 180.0, }, }, /* MCS 57 */ { /* 20 Mhz */ { 91.0, /* SGI */ 101.1, }, /* 40 Mhz */ { 189.0, /* SGI */ 210.0, }, }, /* MCS 58 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 59 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 60 */ { /* 20 Mhz */ { 104.0, /* SGI */ 115.6, }, /* 40 Mhz */ { 216.0, /* SGI */ 240.0, }, }, /* MCS 61 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 62 */ { /* 20 Mhz */ { 130.0, /* SGI */ 144.4, }, /* 40 Mhz */ { 270.0, /* SGI */ 300.0, }, }, /* MCS 63 */ { /* 20 Mhz */ { 130.0, /* SGI */ 144.4, }, /* 40 Mhz */ { 270.0, /* SGI */ 300.0, }, }, /* MCS 64 */ { /* 20 Mhz */ { 143.0, /* SGI */ 158.9, }, /* 40 Mhz */ { 297.0, /* SGI */ 330.0, }, }, /* MCS 65 */ { /* 20 Mhz */ { 97.5, /* SGI */ 108.3, }, /* 40 Mhz */ { 202.5, /* SGI */ 225.0, }, }, /* MCS 66 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 67 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 68 */ { /* 20 Mhz */ { 117.0, /* SGI */ 130.0, }, /* 40 Mhz */ { 243.0, /* SGI */ 270.0, }, }, /* MCS 69 */ { /* 20 Mhz */ { 136.5, /* SGI */ 151.7, }, /* 40 Mhz */ { 283.5, /* SGI */ 315.0, }, }, /* MCS 70 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 71 */ { /* 20 Mhz */ { 175.5, /* SGI */ 195.0, }, /* 40 Mhz */ { 364.5, /* SGI */ 405.0, }, }, /* MCS 72 */ { /* 20 Mhz */ { 156.0, /* SGI */ 173.3, }, /* 40 Mhz */ { 324.0, /* SGI */ 360.0, }, }, /* MCS 73 */ { /* 20 Mhz */ { 
175.5, /* SGI */ 195.0, }, /* 40 Mhz */ { 364.5, /* SGI */ 405.0, }, }, /* MCS 74 */ { /* 20 Mhz */ { 195.0, /* SGI */ 216.7, }, /* 40 Mhz */ { 405.0, /* SGI */ 450.0, }, }, /* MCS 75 */ { /* 20 Mhz */ { 195.0, /* SGI */ 216.7, }, /* 40 Mhz */ { 405.0, /* SGI */ 450.0, }, }, /* MCS 76 */ { /* 20 Mhz */ { 214.5, /* SGI */ 238.3, }, /* 40 Mhz */ { 445.5, /* SGI */ 495.0, }, }, }; static const char *auth_alg_text[]={"Open System","Shared Key","EAP"}; #define NUM_AUTH_ALGS (sizeof auth_alg_text / sizeof auth_alg_text[0]) static const char *status_text[] = { "Successful", /* 0 */ "Unspecified failure", /* 1 */ "Reserved", /* 2 */ "Reserved", /* 3 */ "Reserved", /* 4 */ "Reserved", /* 5 */ "Reserved", /* 6 */ "Reserved", /* 7 */ "Reserved", /* 8 */ "Reserved", /* 9 */ "Cannot Support all requested capabilities in the Capability " "Information field", /* 10 */ "Reassociation denied due to inability to confirm that association " "exists", /* 11 */ "Association denied due to reason outside the scope of the " "standard", /* 12 */ "Responding station does not support the specified authentication " "algorithm ", /* 13 */ "Received an Authentication frame with authentication transaction " "sequence number out of expected sequence", /* 14 */ "Authentication rejected because of challenge failure", /* 15 */ "Authentication rejected due to timeout waiting for next frame in " "sequence", /* 16 */ "Association denied because AP is unable to handle additional" "associated stations", /* 17 */ "Association denied due to requesting station not supporting all of " "the data rates in BSSBasicRateSet parameter", /* 18 */ "Association denied due to requesting station not supporting " "short preamble operation", /* 19 */ "Association denied due to requesting station not supporting " "PBCC encoding", /* 20 */ "Association denied due to requesting station not supporting " "channel agility", /* 21 */ "Association request rejected because Spectrum Management " "capability is required", /* 22 */ 
"Association request rejected because the information in the " "Power Capability element is unacceptable", /* 23 */ "Association request rejected because the information in the " "Supported Channels element is unacceptable", /* 24 */ "Association denied due to requesting station not supporting " "short slot operation", /* 25 */ "Association denied due to requesting station not supporting " "DSSS-OFDM operation", /* 26 */ "Association denied because the requested STA does not support HT " "features", /* 27 */ "Reserved", /* 28 */ "Association denied because the requested STA does not support " "the PCO transition time required by the AP", /* 29 */ "Reserved", /* 30 */ "Reserved", /* 31 */ "Unspecified, QoS-related failure", /* 32 */ "Association denied due to QAP having insufficient bandwidth " "to handle another QSTA", /* 33 */ "Association denied due to excessive frame loss rates and/or " "poor conditions on current operating channel", /* 34 */ "Association (with QBSS) denied due to requesting station not " "supporting the QoS facility", /* 35 */ "Association denied due to requesting station not supporting " "Block Ack", /* 36 */ "The request has been declined", /* 37 */ "The request has not been successful as one or more parameters " "have invalid values", /* 38 */ "The TS has not been created because the request cannot be honored. " "Try again with the suggested changes to the TSPEC", /* 39 */ "Invalid Information Element", /* 40 */ "Group Cipher is not valid", /* 41 */ "Pairwise Cipher is not valid", /* 42 */ "AKMP is not valid", /* 43 */ "Unsupported RSN IE version", /* 44 */ "Invalid RSN IE Capabilities", /* 45 */ "Cipher suite is rejected per security policy", /* 46 */ "The TS has not been created. 
However, the HC may be capable of " "creating a TS, in response to a request, after the time indicated " "in the TS Delay element", /* 47 */ "Direct Link is not allowed in the BSS by policy", /* 48 */ "Destination STA is not present within this QBSS.", /* 49 */ "The Destination STA is not a QSTA.", /* 50 */ }; #define NUM_STATUSES (sizeof status_text / sizeof status_text[0]) static const char *reason_text[] = { "Reserved", /* 0 */ "Unspecified reason", /* 1 */ "Previous authentication no longer valid", /* 2 */ "Deauthenticated because sending station is leaving (or has left) " "IBSS or ESS", /* 3 */ "Disassociated due to inactivity", /* 4 */ "Disassociated because AP is unable to handle all currently " " associated stations", /* 5 */ "Class 2 frame received from nonauthenticated station", /* 6 */ "Class 3 frame received from nonassociated station", /* 7 */ "Disassociated because sending station is leaving " "(or has left) BSS", /* 8 */ "Station requesting (re)association is not authenticated with " "responding station", /* 9 */ "Disassociated because the information in the Power Capability " "element is unacceptable", /* 10 */ "Disassociated because the information in the SupportedChannels " "element is unacceptable", /* 11 */ "Invalid Information Element", /* 12 */ "Reserved", /* 13 */ "Michael MIC failure", /* 14 */ "4-Way Handshake timeout", /* 15 */ "Group key update timeout", /* 16 */ "Information element in 4-Way Handshake different from (Re)Association" "Request/Probe Response/Beacon", /* 17 */ "Group Cipher is not valid", /* 18 */ "AKMP is not valid", /* 20 */ "Unsupported RSN IE version", /* 21 */ "Invalid RSN IE Capabilities", /* 22 */ "IEEE 802.1X Authentication failed", /* 23 */ "Cipher suite is rejected per security policy", /* 24 */ "Reserved", /* 25 */ "Reserved", /* 26 */ "Reserved", /* 27 */ "Reserved", /* 28 */ "Reserved", /* 29 */ "Reserved", /* 30 */ "TS deleted because QoS AP lacks sufficient bandwidth for this " "QoS STA due to a change in BSS 
service characteristics or "
	    "operational mode (e.g. an HT BSS change from 40 MHz channel "
	    "to 20 MHz channel)",				/* 31 */
	"Disassociated for unspecified, QoS-related reason",	/* 32 */
	"Disassociated because QoS AP lacks sufficient bandwidth for this "
	    "QoS STA",						/* 33 */
	"Disassociated because of excessive number of frames that need to be "
	    "acknowledged, but are not acknowledged for AP transmissions "
	    "and/or poor channel conditions",			/* 34 */
	"Disassociated because STA is transmitting outside the limits "
	    "of its TXOPs",					/* 35 */
	"Requested from peer STA as the STA is leaving the BSS "
	    "(or resetting)",					/* 36 */
	"Requested from peer STA as it does not want to use the "
	    "mechanism",					/* 37 */
	"Requested from peer STA as the STA received frames using the "
	    "mechanism for which a set up is required",		/* 38 */
	"Requested from peer STA due to time out",		/* 39 */
	"Reserved",						/* 40 */
	"Reserved",						/* 41 */
	"Reserved",						/* 42 */
	"Reserved",						/* 43 */
	"Reserved",						/* 44 */
	"Peer STA does not support the requested cipher suite",	/* 45 */
	"Association denied due to requesting STA not supporting HT "
	    "features",						/* 46 */
};
#define NUM_REASONS	(sizeof reason_text / sizeof reason_text[0])

/*
 * Print the WEP IV, pad and key-ID fields that follow the 802.11 header
 * of a protected frame.  Returns 0 if the 4 bytes are not captured,
 * 1 otherwise.
 */
static int
wep_print(netdissect_options *ndo,
	  const u_char *p)
{
	uint32_t iv;

	if (!ND_TTEST2(*p, IEEE802_11_IV_LEN + IEEE802_11_KID_LEN))
		return 0;
	iv = EXTRACT_LE_32BITS(p);

	ND_PRINT((ndo, " IV:%3x Pad %x KeyID %x", IV_IV(iv), IV_PAD(iv),
	    IV_KEYID(iv)));

	return 1;
}

/*
 * Walk the tagged information elements of a management frame body,
 * starting at p + offset with 'length' bytes remaining, and record the
 * FIRST occurrence of each element we know about (SSID, challenge,
 * rates, DS, CF, TIM) into *pbody, setting the matching *_present flag.
 * Unknown elements are skipped.
 *
 * Returns 0 if the body is truncated (either not captured or shorter
 * than an element claims) or an element's length is out of range for
 * its fixed-size storage; returns 1 if all elements parsed cleanly.
 */
static int
parse_elements(netdissect_options *ndo,
	       struct mgmt_body_t *pbody, const u_char *p, int offset,
	       u_int length)
{
	u_int elementlen;
	struct ssid_t ssid;
	struct challenge_t challenge;
	struct rates_t rates;
	struct ds_t ds;
	struct cf_t cf;
	struct tim_t tim;

	/*
	 * We haven't seen any elements yet.
	 */
	pbody->challenge_present = 0;
	pbody->ssid_present = 0;
	pbody->rates_present = 0;
	pbody->ds_present = 0;
	pbody->cf_present = 0;
	pbody->tim_present = 0;

	while (length != 0) {
		/* Make sure we at least have the element ID and length. */
		if (!ND_TTEST2(*(p + offset), 2))
			return 0;
		if (length < 2)
			return 0;
		elementlen = *(p + offset + 1);

		/* Make sure we have the entire element. */
		if (!ND_TTEST2(*(p + offset + 2), elementlen))
			return 0;
		if (length < elementlen + 2)
			return 0;

		switch (*(p + offset)) {
		case E_SSID:
			memcpy(&ssid, p + offset, 2);
			offset += 2;
			length -= 2;
			if (ssid.length != 0) {
				/* Reject lengths that would overflow ssid.ssid
				 * (one byte is reserved for the terminator). */
				if (ssid.length > sizeof(ssid.ssid) - 1)
					return 0;
				memcpy(&ssid.ssid, p + offset, ssid.length);
				offset += ssid.length;
				length -= ssid.length;
			}
			ssid.ssid[ssid.length] = '\0';
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen an SSID IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->ssid_present) {
				pbody->ssid = ssid;
				pbody->ssid_present = 1;
			}
			break;
		case E_CHALLENGE:
			memcpy(&challenge, p + offset, 2);
			offset += 2;
			length -= 2;
			if (challenge.length != 0) {
				/* Reject lengths that would overflow
				 * challenge.text (one byte reserved for the
				 * terminator). */
				if (challenge.length >
				    sizeof(challenge.text) - 1)
					return 0;
				memcpy(&challenge.text, p + offset,
				    challenge.length);
				offset += challenge.length;
				length -= challenge.length;
			}
			challenge.text[challenge.length] = '\0';
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a challenge IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->challenge_present) {
				pbody->challenge = challenge;
				pbody->challenge_present = 1;
			}
			break;
		case E_RATES:
			memcpy(&rates, p + offset, 2);
			offset += 2;
			length -= 2;
			if (rates.length != 0) {
				/* Reject lengths that would overflow
				 * rates.rate. */
				if (rates.length > sizeof rates.rate)
					return 0;
				memcpy(&rates.rate, p + offset, rates.length);
				offset += rates.length;
				length -= rates.length;
			}
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a rates IE,
			 * copy this one if it's not zero-length,
			 * otherwise ignore this one, so we later
			 * report the first one we saw.
			 *
			 * We ignore zero-length rates IEs as some
			 * devices seem to put a zero-length rates
			 * IE, followed by an SSID IE, followed by
			 * a non-zero-length rates IE into frames,
			 * even though IEEE Std 802.11-2007 doesn't
			 * seem to indicate that a zero-length rates
			 * IE is valid.
			 */
			if (!pbody->rates_present && rates.length != 0) {
				pbody->rates = rates;
				pbody->rates_present = 1;
			}
			break;
		case E_DS:
			memcpy(&ds, p + offset, 2);
			offset += 2;
			length -= 2;
			/* A DS IE carries exactly one channel byte;
			 * anything else is skipped, not rejected. */
			if (ds.length != 1) {
				offset += ds.length;
				length -= ds.length;
				break;
			}
			ds.channel = *(p + offset);
			offset += 1;
			length -= 1;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a DS IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->ds_present) {
				pbody->ds = ds;
				pbody->ds_present = 1;
			}
			break;
		case E_CF:
			memcpy(&cf, p + offset, 2);
			offset += 2;
			length -= 2;
			/* A CF IE carries exactly 6 bytes of payload;
			 * anything else is skipped, not rejected. */
			if (cf.length != 6) {
				offset += cf.length;
				length -= cf.length;
				break;
			}
			memcpy(&cf.count, p + offset, 6);
			offset += 6;
			length -= 6;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a CF IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->cf_present) {
				pbody->cf = cf;
				pbody->cf_present = 1;
			}
			break;
		case E_TIM:
			memcpy(&tim, p + offset, 2);
			offset += 2;
			length -= 2;
			/* TIM needs at least count, period and bitmap
			 * control (3 bytes) before the bitmap proper. */
			if (tim.length <= 3) {
				offset += tim.length;
				length -= tim.length;
				break;
			}
			/* Reject bitmaps too large for tim.bitmap. */
			if (tim.length - 3 > (int)sizeof tim.bitmap)
				return 0;
			memcpy(&tim.count, p + offset, 3);
			offset += 3;
			length -= 3;

			memcpy(tim.bitmap, p + offset, tim.length - 3);
			offset += tim.length - 3;
			length -= tim.length - 3;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a TIM IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->tim_present) {
				pbody->tim = tim;
				pbody->tim_present = 1;
			}
			break;
		default:
#if 0
			ND_PRINT((ndo, "(1) unhandled element_id (%d) ",
			    *(p + offset)));
#endif
			/* Unknown element: skip ID, length and payload. */
			offset += 2 + elementlen;
			length -= 2 + elementlen;
			break;
		}
	}

	/* No problems found. */
	return 1;
}

/*********************************************************************************
 * Print Handle functions for the management frame types
 *********************************************************************************/

/*
 * Beacon: 8-byte timestamp, 2-byte beacon interval, 2-byte capability
 * info, then tagged elements.  Returns 0 on truncation.
 */
static int
handle_beacon(netdissect_options *ndo,
	      const u_char *p, u_int length)
{
	struct mgmt_body_t pbody;
	int offset = 0;
	int ret;

	memset(&pbody, 0, sizeof(pbody));

	if (!ND_TTEST2(*p, IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN +
	    IEEE802_11_CAPINFO_LEN))
		return 0;
	if (length < IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN +
	    IEEE802_11_CAPINFO_LEN)
		return 0;
	memcpy(&pbody.timestamp, p, IEEE802_11_TSTAMP_LEN);
	offset += IEEE802_11_TSTAMP_LEN;
	length -= IEEE802_11_TSTAMP_LEN;
	pbody.beacon_interval = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_BCNINT_LEN;
	length -= IEEE802_11_BCNINT_LEN;
	pbody.capability_info = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_CAPINFO_LEN;
	length -= IEEE802_11_CAPINFO_LEN;

	ret = parse_elements(ndo, &pbody, p, offset, length);

	PRINT_SSID(pbody);
	PRINT_RATES(pbody);
	ND_PRINT((ndo, " %s",
	    CAPABILITY_ESS(pbody.capability_info) ? "ESS" : "IBSS"));
	PRINT_DS_CHANNEL(pbody);

	return ret;
}

/*
 * Association request: 2-byte capability info, 2-byte listen interval,
 * then tagged elements.  Returns 0 on truncation.
 */
static int
handle_assoc_request(netdissect_options *ndo,
		     const u_char *p, u_int length)
{
	struct mgmt_body_t pbody;
	int offset = 0;
	int ret;

	memset(&pbody, 0, sizeof(pbody));

	if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN))
		return 0;
	if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN)
		return 0;
	pbody.capability_info = EXTRACT_LE_16BITS(p);
	offset += IEEE802_11_CAPINFO_LEN;
	length -= IEEE802_11_CAPINFO_LEN;
	pbody.listen_interval = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_LISTENINT_LEN;
	length -= IEEE802_11_LISTENINT_LEN;

	ret = parse_elements(ndo, &pbody, p, offset, length);

	PRINT_SSID(pbody);
	PRINT_RATES(pbody);
	return ret;
}

/*
 * Association response: 2-byte capability info, 2-byte status code,
 * 2-byte association ID, then tagged elements.  Returns 0 on truncation.
 */
static int
handle_assoc_response(netdissect_options *ndo,
		      const u_char *p, u_int length)
{
	struct mgmt_body_t pbody;
	int offset = 0;
	int ret;

	memset(&pbody, 0, sizeof(pbody));

	if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_STATUS_LEN +
	    IEEE802_11_AID_LEN))
		return 0;
	if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_STATUS_LEN +
	    IEEE802_11_AID_LEN)
		return 0;
	pbody.capability_info = EXTRACT_LE_16BITS(p);
	offset += IEEE802_11_CAPINFO_LEN;
	length -= IEEE802_11_CAPINFO_LEN;
	pbody.status_code = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_STATUS_LEN;
	length -= IEEE802_11_STATUS_LEN;
	pbody.aid = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_AID_LEN;
	length -= IEEE802_11_AID_LEN;

	ret = parse_elements(ndo, &pbody, p, offset, length);

	/* The shift pair masks off the two high (reserved) bits of the AID. */
	ND_PRINT((ndo, " AID(%x) :%s: %s", ((uint16_t)(pbody.aid << 2 )) >> 2 ,
	    CAPABILITY_PRIVACY(pbody.capability_info) ? " PRIVACY " : "",
	    (pbody.status_code < NUM_STATUSES
		? status_text[pbody.status_code]
		: "n/a")));

	return ret;
}

/*
 * Reassociation request: like an association request but with the
 * 6-byte address of the current AP between the fixed fields and the
 * tagged elements.  Returns 0 on truncation.
 */
static int
handle_reassoc_request(netdissect_options *ndo,
		       const u_char *p, u_int length)
{
	struct mgmt_body_t pbody;
	int offset = 0;
	int ret;

	memset(&pbody, 0, sizeof(pbody));

	if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN +
	    IEEE802_11_AP_LEN))
		return 0;
	if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN +
	    IEEE802_11_AP_LEN)
		return 0;
	pbody.capability_info = EXTRACT_LE_16BITS(p);
	offset += IEEE802_11_CAPINFO_LEN;
	length -= IEEE802_11_CAPINFO_LEN;
	pbody.listen_interval = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_LISTENINT_LEN;
	length -= IEEE802_11_LISTENINT_LEN;
	memcpy(&pbody.ap, p+offset, IEEE802_11_AP_LEN);
	offset += IEEE802_11_AP_LEN;
	length -= IEEE802_11_AP_LEN;

	ret = parse_elements(ndo, &pbody, p, offset, length);

	PRINT_SSID(pbody);
	ND_PRINT((ndo, " AP : %s", etheraddr_string(ndo, pbody.ap )));

	return ret;
}

/* Reassociation response has the same body layout as an association
 * response, so reuse that handler. */
static int
handle_reassoc_response(netdissect_options *ndo,
			const u_char *p, u_int length)
{
	/* Same as a Association Response */
	return handle_assoc_response(ndo, p, length);
}

/* Probe request: no fixed fields, only tagged elements. */
static int
handle_probe_request(netdissect_options *ndo,
		     const u_char *p, u_int length)
{
	struct mgmt_body_t  pbody;
	int offset = 0;
	int ret;

	memset(&pbody, 0, sizeof(pbody));

	ret = parse_elements(ndo, &pbody, p, offset, length);

	PRINT_SSID(pbody);
	PRINT_RATES(pbody);

	return ret;
}

/* Probe response: same fixed fields as a beacon. Returns 0 on truncation. */
static int
handle_probe_response(netdissect_options *ndo,
		      const u_char *p, u_int length)
{
	struct mgmt_body_t  pbody;
	int offset = 0;
	int ret;

	memset(&pbody, 0, sizeof(pbody));

	if (!ND_TTEST2(*p, IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN +
	    IEEE802_11_CAPINFO_LEN))
		return 0;
	if (length < IEEE802_11_TSTAMP_LEN + IEEE802_11_BCNINT_LEN +
	    IEEE802_11_CAPINFO_LEN)
		return 0;
	memcpy(&pbody.timestamp, p, IEEE802_11_TSTAMP_LEN);
	offset += IEEE802_11_TSTAMP_LEN;
	length -= IEEE802_11_TSTAMP_LEN;
	pbody.beacon_interval = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_BCNINT_LEN;
	length -= IEEE802_11_BCNINT_LEN;
	pbody.capability_info = EXTRACT_LE_16BITS(p+offset);
	offset += IEEE802_11_CAPINFO_LEN;
	length -= IEEE802_11_CAPINFO_LEN;

	ret = parse_elements(ndo, &pbody, p, offset, length);

	PRINT_SSID(pbody);
	PRINT_RATES(pbody);
	PRINT_DS_CHANNEL(pbody);

	return ret;
}

static int
handle_atim(void)
{
	/* the frame body for ATIM is null. */
	return 1;
}

/* Disassociation: 2-byte reason code only. Returns 0 on truncation. */
static int
handle_disassoc(netdissect_options *ndo,
		const u_char *p, u_int length)
{
	struct mgmt_body_t  pbody;

	memset(&pbody, 0, sizeof(pbody));

	if (!ND_TTEST2(*p, IEEE802_11_REASON_LEN))
		return 0;
	if (length < IEEE802_11_REASON_LEN)
		return 0;
	pbody.reason_code = EXTRACT_LE_16BITS(p);

	ND_PRINT((ndo, ": %s",
	    (pbody.reason_code < NUM_REASONS)
		? reason_text[pbody.reason_code]
		: "Reserved"));

	return 1;
}

/*
 * Authentication: 2-byte algorithm, 2-byte transaction sequence number,
 * 2-byte status code, then tagged elements (which may include the
 * shared-key challenge text).  Returns 0 on truncation.
 */
static int
handle_auth(netdissect_options *ndo,
	    const u_char *p, u_int length)
{
	struct mgmt_body_t  pbody;
	int offset = 0;
	int ret;

	memset(&pbody, 0, sizeof(pbody));

	if (!ND_TTEST2(*p, 6))
		return 0;
	if (length < 6)
		return 0;
	pbody.auth_alg = EXTRACT_LE_16BITS(p);
	offset += 2;
	length -= 2;
	pbody.auth_trans_seq_num = EXTRACT_LE_16BITS(p + offset);
	offset += 2;
	length -= 2;
	pbody.status_code = EXTRACT_LE_16BITS(p + offset);
	offset += 2;
	length -= 2;

	ret = parse_elements(ndo, &pbody, p, offset, length);

	/* Shared-key (alg 1) frames 2 and 3 carry the challenge text. */
	if ((pbody.auth_alg == 1) &&
	    ((pbody.auth_trans_seq_num == 2) ||
	     (pbody.auth_trans_seq_num == 3))) {
		ND_PRINT((ndo, " (%s)-%x [Challenge Text] %s",
		    (pbody.auth_alg < NUM_AUTH_ALGS)
			? auth_alg_text[pbody.auth_alg]
			: "Reserved",
		    pbody.auth_trans_seq_num,
		    ((pbody.auth_trans_seq_num % 2)
		        ? ((pbody.status_code < NUM_STATUSES)
			       ? status_text[pbody.status_code]
			       : "n/a") : "")));
		return ret;
	}
	ND_PRINT((ndo, " (%s)-%x: %s",
	    (pbody.auth_alg < NUM_AUTH_ALGS)
		? auth_alg_text[pbody.auth_alg]
		: "Reserved",
	    pbody.auth_trans_seq_num,
	    (pbody.auth_trans_seq_num % 2)
	        ? ((pbody.status_code < NUM_STATUSES)
		       ?
status_text[pbody.status_code] : "n/a") : "")); return ret; } static int handle_deauth(netdissect_options *ndo, const uint8_t *src, const u_char *p, u_int length) { struct mgmt_body_t pbody; const char *reason = NULL; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_REASON_LEN)) return 0; if (length < IEEE802_11_REASON_LEN) return 0; pbody.reason_code = EXTRACT_LE_16BITS(p); reason = (pbody.reason_code < NUM_REASONS) ? reason_text[pbody.reason_code] : "Reserved"; if (ndo->ndo_eflag) { ND_PRINT((ndo, ": %s", reason)); } else { ND_PRINT((ndo, " (%s): %s", etheraddr_string(ndo, src), reason)); } return 1; } #define PRINT_HT_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "TxChWidth")) : \ (v) == 1 ? ND_PRINT((ndo, "MIMOPwrSave")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_BA_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "ADDBA Request")) : \ (v) == 1 ? ND_PRINT((ndo, "ADDBA Response")) : \ (v) == 2 ? ND_PRINT((ndo, "DELBA")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESHLINK_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Request")) : \ (v) == 1 ? ND_PRINT((ndo, "Report")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESHPEERING_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Open")) : \ (v) == 1 ? ND_PRINT((ndo, "Confirm")) : \ (v) == 2 ? ND_PRINT((ndo, "Close")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESHPATH_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Request")) : \ (v) == 1 ? ND_PRINT((ndo, "Report")) : \ (v) == 2 ? ND_PRINT((ndo, "Error")) : \ (v) == 3 ? ND_PRINT((ndo, "RootAnnouncement")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MESH_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "MeshLink")) : \ (v) == 1 ? ND_PRINT((ndo, "HWMP")) : \ (v) == 2 ? ND_PRINT((ndo, "Gate Announcement")) : \ (v) == 3 ? ND_PRINT((ndo, "Congestion Control")) : \ (v) == 4 ? ND_PRINT((ndo, "MCCA Setup Request")) : \ (v) == 5 ? ND_PRINT((ndo, "MCCA Setup Reply")) : \ (v) == 6 ? ND_PRINT((ndo, "MCCA Advertisement Request")) : \ (v) == 7 ? 
ND_PRINT((ndo, "MCCA Advertisement")) : \ (v) == 8 ? ND_PRINT((ndo, "MCCA Teardown")) : \ (v) == 9 ? ND_PRINT((ndo, "TBTT Adjustment Request")) : \ (v) == 10 ? ND_PRINT((ndo, "TBTT Adjustment Response")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_MULTIHOP_ACTION(v) (\ (v) == 0 ? ND_PRINT((ndo, "Proxy Update")) : \ (v) == 1 ? ND_PRINT((ndo, "Proxy Update Confirmation")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) #define PRINT_SELFPROT_ACTION(v) (\ (v) == 1 ? ND_PRINT((ndo, "Peering Open")) : \ (v) == 2 ? ND_PRINT((ndo, "Peering Confirm")) : \ (v) == 3 ? ND_PRINT((ndo, "Peering Close")) : \ (v) == 4 ? ND_PRINT((ndo, "Group Key Inform")) : \ (v) == 5 ? ND_PRINT((ndo, "Group Key Acknowledge")) : \ ND_PRINT((ndo, "Act#%d", (v))) \ ) static int handle_action(netdissect_options *ndo, const uint8_t *src, const u_char *p, u_int length) { if (!ND_TTEST2(*p, 2)) return 0; if (length < 2) return 0; if (ndo->ndo_eflag) { ND_PRINT((ndo, ": ")); } else { ND_PRINT((ndo, " (%s): ", etheraddr_string(ndo, src))); } switch (p[0]) { case 0: ND_PRINT((ndo, "Spectrum Management Act#%d", p[1])); break; case 1: ND_PRINT((ndo, "QoS Act#%d", p[1])); break; case 2: ND_PRINT((ndo, "DLS Act#%d", p[1])); break; case 3: ND_PRINT((ndo, "BA ")); PRINT_BA_ACTION(p[1]); break; case 7: ND_PRINT((ndo, "HT ")); PRINT_HT_ACTION(p[1]); break; case 13: ND_PRINT((ndo, "MeshAction ")); PRINT_MESH_ACTION(p[1]); break; case 14: ND_PRINT((ndo, "MultiohopAction ")); PRINT_MULTIHOP_ACTION(p[1]); break; case 15: ND_PRINT((ndo, "SelfprotectAction ")); PRINT_SELFPROT_ACTION(p[1]); break; case 127: ND_PRINT((ndo, "Vendor Act#%d", p[1])); break; default: ND_PRINT((ndo, "Reserved(%d) Act#%d", p[0], p[1])); break; } return 1; } /********************************************************************************* * Print Body funcs *********************************************************************************/ static int mgmt_body_print(netdissect_options *ndo, uint16_t fc, const uint8_t *src, const u_char *p, 
                u_int length)
{
	/* Print the management subtype name, then dispatch to the handler. */
	ND_PRINT((ndo, "%s", tok2str(st_str, "Unhandled Management subtype(%x)",
	    FC_SUBTYPE(fc))));

	/* There may be a problem w/ AP not having this bit set */
	if (FC_PROTECTED(fc))
		return wep_print(ndo, p);
	switch (FC_SUBTYPE(fc)) {
	case ST_ASSOC_REQUEST:
		return handle_assoc_request(ndo, p, length);
	case ST_ASSOC_RESPONSE:
		return handle_assoc_response(ndo, p, length);
	case ST_REASSOC_REQUEST:
		return handle_reassoc_request(ndo, p, length);
	case ST_REASSOC_RESPONSE:
		return handle_reassoc_response(ndo, p, length);
	case ST_PROBE_REQUEST:
		return handle_probe_request(ndo, p, length);
	case ST_PROBE_RESPONSE:
		return handle_probe_response(ndo, p, length);
	case ST_BEACON:
		return handle_beacon(ndo, p, length);
	case ST_ATIM:
		return handle_atim();
	case ST_DISASSOC:
		return handle_disassoc(ndo, p, length);
	case ST_AUTH:
		return handle_auth(ndo, p, length);
	case ST_DEAUTH:
		return handle_deauth(ndo, src, p, length);
	case ST_ACTION:
		return handle_action(ndo, src, p, length);
	default:
		/* Unknown subtype: nothing more to print, but not an error. */
		return 1;
	}
}

/*********************************************************************************
 * Handles printing all the control frame types
 *********************************************************************************/

static int
ctrl_body_print(netdissect_options *ndo,
                uint16_t fc, const u_char *p)
{
	ND_PRINT((ndo, "%s", tok2str(ctrl_str, "Unknown Ctrl Subtype",
	    FC_SUBTYPE(fc))));
	switch (FC_SUBTYPE(fc)) {
	case CTRL_CONTROL_WRAPPER:
		/* XXX - requires special handling */
		break;
	case CTRL_BAR:
		if (!ND_TTEST2(*p, CTRL_BAR_HDRLEN))
			return 0;
		if (!ndo->ndo_eflag)
			ND_PRINT((ndo, " RA:%s TA:%s CTL(%x) SEQ(%u) ",
			    etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ra),
			    etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ta),
			    EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->ctl)),
			    EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->seq))));
		break;
	case CTRL_BA:
		if (!ND_TTEST2(*p, CTRL_BA_HDRLEN))
			return 0;
		if (!ndo->ndo_eflag)
			ND_PRINT((ndo, " RA:%s ",
			    etheraddr_string(ndo, ((const struct ctrl_ba_hdr_t *)p)->ra)));
		break;
	case CTRL_PS_POLL:
		if (!ND_TTEST2(*p, CTRL_PS_POLL_HDRLEN))
			return 0;
		ND_PRINT((ndo, " AID(%x)",
		    EXTRACT_LE_16BITS(&(((const struct ctrl_ps_poll_hdr_t *)p)->aid))));
		break;
	case CTRL_RTS:
		if (!ND_TTEST2(*p, CTRL_RTS_HDRLEN))
			return 0;
		if (!ndo->ndo_eflag)
			ND_PRINT((ndo, " TA:%s ",
			    etheraddr_string(ndo, ((const struct ctrl_rts_hdr_t *)p)->ta)));
		break;
	case CTRL_CTS:
		if (!ND_TTEST2(*p, CTRL_CTS_HDRLEN))
			return 0;
		if (!ndo->ndo_eflag)
			ND_PRINT((ndo, " RA:%s ",
			    etheraddr_string(ndo, ((const struct ctrl_cts_hdr_t *)p)->ra)));
		break;
	case CTRL_ACK:
		if (!ND_TTEST2(*p, CTRL_ACK_HDRLEN))
			return 0;
		if (!ndo->ndo_eflag)
			ND_PRINT((ndo, " RA:%s ",
			    etheraddr_string(ndo, ((const struct ctrl_ack_hdr_t *)p)->ra)));
		break;
	case CTRL_CF_END:
		if (!ND_TTEST2(*p, CTRL_END_HDRLEN))
			return 0;
		if (!ndo->ndo_eflag)
			ND_PRINT((ndo, " RA:%s ",
			    etheraddr_string(ndo, ((const struct ctrl_end_hdr_t *)p)->ra)));
		break;
	case CTRL_END_ACK:
		if (!ND_TTEST2(*p, CTRL_END_ACK_HDRLEN))
			return 0;
		if (!ndo->ndo_eflag)
			ND_PRINT((ndo, " RA:%s ",
			    etheraddr_string(ndo, ((const struct ctrl_end_ack_hdr_t *)p)->ra)));
		break;
	}
	return 1;
}

/*
 * Data Frame - Address field contents
 *
 * To Ds  | From DS | Addr 1 | Addr 2 | Addr 3 | Addr 4
 *    0   |  0      |  DA    | SA     | BSSID  | n/a
 *    0   |  1      |  DA    | BSSID  | SA     | n/a
 *    1   |  0      |  BSSID | SA     | DA     | n/a
 *    1   |  1      |  RA    | TA     | DA     | SA
 */

/*
 * Function to get source and destination MAC addresses for a data frame.
 */
static void
get_data_src_dst_mac(uint16_t fc, const u_char *p, const uint8_t **srcp,
                     const uint8_t **dstp)
{
#define ADDR1  (p + 4)
#define ADDR2  (p + 10)
#define ADDR3  (p + 16)
#define ADDR4  (p + 24)

	if (!FC_TO_DS(fc)) {
		if (!FC_FROM_DS(fc)) {
			/* not To DS and not From DS */
			*srcp = ADDR2;
			*dstp = ADDR1;
		} else {
			/* not To DS and From DS */
			*srcp = ADDR3;
			*dstp = ADDR1;
		}
	} else {
		if (!FC_FROM_DS(fc)) {
			/* To DS and not From DS */
			*srcp = ADDR2;
			*dstp = ADDR3;
		} else {
			/* To DS and From DS */
			*srcp = ADDR4;
			*dstp = ADDR3;
		}
	}

#undef ADDR1
#undef ADDR2
#undef ADDR3
#undef ADDR4
}

/*
 * Get source and destination MAC addresses for a management frame;
 * either output pointer may be NULL if the caller doesn't need it.
 */
static void
get_mgmt_src_dst_mac(const u_char *p, const uint8_t **srcp, const uint8_t **dstp)
{
	const struct mgmt_header_t *hp = (const struct mgmt_header_t *) p;

	if (srcp != NULL)
		*srcp = hp->sa;
	if (dstp != NULL)
		*dstp = hp->da;
}

/*
 * Print Header funcs
 */

/*
 * Print the CF/QoS flags and the address fields of a data frame header.
 * The caller has already verified that the full header is captured.
 */
static void
data_header_print(netdissect_options *ndo, uint16_t fc, const u_char *p)
{
	u_int subtype = FC_SUBTYPE(fc);

	if (DATA_FRAME_IS_CF_ACK(subtype) || DATA_FRAME_IS_CF_POLL(subtype) ||
	    DATA_FRAME_IS_QOS(subtype)) {
		ND_PRINT((ndo, "CF "));
		if (DATA_FRAME_IS_CF_ACK(subtype)) {
			if (DATA_FRAME_IS_CF_POLL(subtype))
				ND_PRINT((ndo, "Ack/Poll"));
			else
				ND_PRINT((ndo, "Ack"));
		} else {
			if (DATA_FRAME_IS_CF_POLL(subtype))
				ND_PRINT((ndo, "Poll"));
		}
		if (DATA_FRAME_IS_QOS(subtype))
			ND_PRINT((ndo, "+QoS"));
		ND_PRINT((ndo, " "));
	}

#define ADDR1  (p + 4)
#define ADDR2  (p + 10)
#define ADDR3  (p + 16)
#define ADDR4  (p + 24)

	/* Address meanings follow the To-DS/From-DS table above. */
	if (!FC_TO_DS(fc) && !FC_FROM_DS(fc)) {
		ND_PRINT((ndo, "DA:%s SA:%s BSSID:%s ",
		    etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2),
		    etheraddr_string(ndo, ADDR3)));
	} else if (!FC_TO_DS(fc) && FC_FROM_DS(fc)) {
		ND_PRINT((ndo, "DA:%s BSSID:%s SA:%s ",
		    etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2),
		    etheraddr_string(ndo, ADDR3)));
	} else if (FC_TO_DS(fc) && !FC_FROM_DS(fc)) {
		ND_PRINT((ndo, "BSSID:%s SA:%s DA:%s ",
		    etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2),
		    etheraddr_string(ndo, ADDR3)));
	} else if (FC_TO_DS(fc) && FC_FROM_DS(fc)) {
		ND_PRINT((ndo, "RA:%s TA:%s DA:%s SA:%s ",
		    etheraddr_string(ndo, ADDR1), etheraddr_string(ndo, ADDR2),
		    etheraddr_string(ndo, ADDR3), etheraddr_string(ndo, ADDR4)));
	}

#undef ADDR1
#undef ADDR2
#undef ADDR3
#undef ADDR4
}

/* Print the BSSID/DA/SA addresses of a management frame header. */
static void
mgmt_header_print(netdissect_options *ndo, const u_char *p)
{
	const struct mgmt_header_t *hp = (const struct mgmt_header_t *) p;

	ND_PRINT((ndo, "BSSID:%s DA:%s SA:%s ",
	    etheraddr_string(ndo, (hp)->bssid), etheraddr_string(ndo, (hp)->da),
	    etheraddr_string(ndo, (hp)->sa)));
}

/* Print the address fields of a control frame header, per subtype. */
static void
ctrl_header_print(netdissect_options *ndo, uint16_t fc, const u_char *p)
{
	switch (FC_SUBTYPE(fc)) {
	case CTRL_BAR:
		ND_PRINT((ndo, " RA:%s TA:%s CTL(%x) SEQ(%u) ",
		    etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ra),
		    etheraddr_string(ndo, ((const struct ctrl_bar_hdr_t *)p)->ta),
		    EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->ctl)),
		    EXTRACT_LE_16BITS(&(((const struct ctrl_bar_hdr_t *)p)->seq))));
		break;
	case CTRL_BA:
		ND_PRINT((ndo, "RA:%s ",
		    etheraddr_string(ndo, ((const struct ctrl_ba_hdr_t *)p)->ra)));
		break;
	case CTRL_PS_POLL:
		ND_PRINT((ndo, "BSSID:%s TA:%s ",
		    etheraddr_string(ndo, ((const struct ctrl_ps_poll_hdr_t *)p)->bssid),
		    etheraddr_string(ndo, ((const struct ctrl_ps_poll_hdr_t *)p)->ta)));
		break;
	case CTRL_RTS:
		ND_PRINT((ndo, "RA:%s TA:%s ",
		    etheraddr_string(ndo, ((const struct ctrl_rts_hdr_t *)p)->ra),
		    etheraddr_string(ndo, ((const struct ctrl_rts_hdr_t *)p)->ta)));
		break;
	case CTRL_CTS:
		ND_PRINT((ndo, "RA:%s ",
		    etheraddr_string(ndo, ((const struct ctrl_cts_hdr_t *)p)->ra)));
		break;
	case CTRL_ACK:
		ND_PRINT((ndo, "RA:%s ",
		    etheraddr_string(ndo, ((const struct ctrl_ack_hdr_t *)p)->ra)));
		break;
	case CTRL_CF_END:
		ND_PRINT((ndo, "RA:%s BSSID:%s ",
		    etheraddr_string(ndo, ((const struct ctrl_end_hdr_t *)p)->ra),
		    etheraddr_string(ndo, ((const struct ctrl_end_hdr_t *)p)->bssid)));
		break;
	case CTRL_END_ACK:
		ND_PRINT((ndo, "RA:%s BSSID:%s ",
		    etheraddr_string(ndo, ((const struct ctrl_end_ack_hdr_t *)p)->ra),
		    etheraddr_string(ndo, ((const struct ctrl_end_ack_hdr_t *)p)->bssid)));
		break;
	default:
		/* We shouldn't get here - we should already have quit */
		break;
	}
}

/*
 * Return the fixed MAC-header length for a frame with the given frame
 * control field, or 0 for unknown frame types / control subtypes.
 */
static int
extract_header_length(netdissect_options *ndo,
                      uint16_t fc)
{
	int len;

	switch (FC_TYPE(fc)) {
	case T_MGMT:
		return MGMT_HDRLEN;
	case T_CTRL:
		switch (FC_SUBTYPE(fc)) {
		case CTRL_CONTROL_WRAPPER:
			return CTRL_CONTROL_WRAPPER_HDRLEN;
		case CTRL_BAR:
			return CTRL_BAR_HDRLEN;
		case CTRL_BA:
			return CTRL_BA_HDRLEN;
		case CTRL_PS_POLL:
			return CTRL_PS_POLL_HDRLEN;
		case CTRL_RTS:
			return CTRL_RTS_HDRLEN;
		case CTRL_CTS:
			return CTRL_CTS_HDRLEN;
		case CTRL_ACK:
			return CTRL_ACK_HDRLEN;
		case CTRL_CF_END:
			return CTRL_END_HDRLEN;
		case CTRL_END_ACK:
			return CTRL_END_ACK_HDRLEN;
		default:
			ND_PRINT((ndo, "unknown 802.11 ctrl frame subtype (%d)", FC_SUBTYPE(fc)));
			return 0;
		}
	case T_DATA:
		/* 30 octets when both To-DS and From-DS (4 addresses). */
		len = (FC_TO_DS(fc) && FC_FROM_DS(fc)) ? 30 : 24;
		if (DATA_FRAME_IS_QOS(FC_SUBTYPE(fc)))
			len += 2;	/* QoS Control field */
		return len;
	default:
		ND_PRINT((ndo, "unknown 802.11 frame type (%d)", FC_TYPE(fc)));
		return 0;
	}
}

/*
 * Length of the mesh header from its first octet (the Mesh Flags field):
 * 6 octets plus 6 per address-extension address; 0 if reserved bits set.
 */
static int
extract_mesh_header_length(const u_char *p)
{
	return (p[0] &~ 3) ? 0 : 6*(1 + (p[0] & 3));
}

/*
 * Print the 802.11 MAC header.
*/ static void ieee_802_11_hdr_print(netdissect_options *ndo, uint16_t fc, const u_char *p, u_int hdrlen, u_int meshdrlen) { if (ndo->ndo_vflag) { if (FC_MORE_DATA(fc)) ND_PRINT((ndo, "More Data ")); if (FC_MORE_FLAG(fc)) ND_PRINT((ndo, "More Fragments ")); if (FC_POWER_MGMT(fc)) ND_PRINT((ndo, "Pwr Mgmt ")); if (FC_RETRY(fc)) ND_PRINT((ndo, "Retry ")); if (FC_ORDER(fc)) ND_PRINT((ndo, "Strictly Ordered ")); if (FC_PROTECTED(fc)) ND_PRINT((ndo, "Protected ")); if (FC_TYPE(fc) != T_CTRL || FC_SUBTYPE(fc) != CTRL_PS_POLL) ND_PRINT((ndo, "%dus ", EXTRACT_LE_16BITS( &((const struct mgmt_header_t *)p)->duration))); } if (meshdrlen != 0) { const struct meshcntl_t *mc = (const struct meshcntl_t *)&p[hdrlen - meshdrlen]; int ae = mc->flags & 3; ND_PRINT((ndo, "MeshData (AE %d TTL %u seq %u", ae, mc->ttl, EXTRACT_LE_32BITS(mc->seq))); if (ae > 0) ND_PRINT((ndo, " A4:%s", etheraddr_string(ndo, mc->addr4))); if (ae > 1) ND_PRINT((ndo, " A5:%s", etheraddr_string(ndo, mc->addr5))); if (ae > 2) ND_PRINT((ndo, " A6:%s", etheraddr_string(ndo, mc->addr6))); ND_PRINT((ndo, ") ")); } switch (FC_TYPE(fc)) { case T_MGMT: mgmt_header_print(ndo, p); break; case T_CTRL: ctrl_header_print(ndo, fc, p); break; case T_DATA: data_header_print(ndo, fc, p); break; default: break; } } #ifndef roundup2 #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #endif static const char tstr[] = "[|802.11]"; static u_int ieee802_11_print(netdissect_options *ndo, const u_char *p, u_int length, u_int orig_caplen, int pad, u_int fcslen) { uint16_t fc; u_int caplen, hdrlen, meshdrlen; struct lladdr_info src, dst; int llc_hdrlen; caplen = orig_caplen; /* Remove FCS, if present */ if (length < fcslen) { ND_PRINT((ndo, "%s", tstr)); return caplen; } length -= fcslen; if (caplen > length) { /* Amount of FCS in actual packet data, if any */ fcslen = caplen - length; caplen -= fcslen; ndo->ndo_snapend -= fcslen; } if (caplen < IEEE802_11_FC_LEN) { ND_PRINT((ndo, "%s", tstr)); return 
orig_caplen; } fc = EXTRACT_LE_16BITS(p); hdrlen = extract_header_length(ndo, fc); if (hdrlen == 0) { /* Unknown frame type or control frame subtype; quit. */ return (0); } if (pad) hdrlen = roundup2(hdrlen, 4); if (ndo->ndo_Hflag && FC_TYPE(fc) == T_DATA && DATA_FRAME_IS_QOS(FC_SUBTYPE(fc))) { meshdrlen = extract_mesh_header_length(p+hdrlen); hdrlen += meshdrlen; } else meshdrlen = 0; if (caplen < hdrlen) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } if (ndo->ndo_eflag) ieee_802_11_hdr_print(ndo, fc, p, hdrlen, meshdrlen); /* * Go past the 802.11 header. */ length -= hdrlen; caplen -= hdrlen; p += hdrlen; src.addr_string = etheraddr_string; dst.addr_string = etheraddr_string; switch (FC_TYPE(fc)) { case T_MGMT: get_mgmt_src_dst_mac(p - hdrlen, &src.addr, &dst.addr); if (!mgmt_body_print(ndo, fc, src.addr, p, length)) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } break; case T_CTRL: if (!ctrl_body_print(ndo, fc, p - hdrlen)) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } break; case T_DATA: if (DATA_FRAME_IS_NULL(FC_SUBTYPE(fc))) return hdrlen; /* no-data frame */ /* There may be a problem w/ AP not having this bit set */ if (FC_PROTECTED(fc)) { ND_PRINT((ndo, "Data")); if (!wep_print(ndo, p)) { ND_PRINT((ndo, "%s", tstr)); return hdrlen; } } else { get_data_src_dst_mac(fc, p - hdrlen, &src.addr, &dst.addr); llc_hdrlen = llc_print(ndo, p, length, caplen, &src, &dst); if (llc_hdrlen < 0) { /* * Some kinds of LLC packet we cannot * handle intelligently */ if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); llc_hdrlen = -llc_hdrlen; } hdrlen += llc_hdrlen; } break; default: /* We shouldn't get here - we should already have quit */ break; } return hdrlen; } /* * This is the top level routine of the printer. 'p' points * to the 802.11 header of the packet, 'h->ts' is the timestamp, * 'h->len' is the length of the packet off the wire, and 'h->caplen' * is the number of bytes actually captured. 
*/ u_int ieee802_11_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { return ieee802_11_print(ndo, p, h->len, h->caplen, 0, 0); } /* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.5 2005/01/22 20:12:05 sam Exp $ */ /* NetBSD: ieee802_11_radio.h,v 1.2 2006/02/26 03:04:03 dyoung Exp */ /*- * Copyright (c) 2003, 2004 David Young. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of David Young may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ /* A generic radio capture format is desirable. It must be * rigidly defined (e.g., units for fields should be given), * and easily extensible. * * The following is an extensible radio capture format. 
It is * based on a bitmap indicating which fields are present. * * I am trying to describe precisely what the application programmer * should expect in the following, and for that reason I tell the * units and origin of each measurement (where it applies), or else I * use sufficiently weaselly language ("is a monotonically nondecreasing * function of...") that I cannot set false expectations for lawyerly * readers. */ /* * The radio capture header precedes the 802.11 header. * * Note well: all radiotap fields are little-endian. */ struct ieee80211_radiotap_header { uint8_t it_version; /* Version 0. Only increases * for drastic changes, * introduction of compatible * new fields does not count. */ uint8_t it_pad; uint16_t it_len; /* length of the whole * header in bytes, including * it_version, it_pad, * it_len, and data fields. */ uint32_t it_present; /* A bitmap telling which * fields are present. Set bit 31 * (0x80000000) to extend the * bitmap by another 32 bits. * Additional extensions are made * by setting bit 31. */ }; /* Name Data type Units * ---- --------- ----- * * IEEE80211_RADIOTAP_TSFT uint64_t microseconds * * Value in microseconds of the MAC's 64-bit 802.11 Time * Synchronization Function timer when the first bit of the * MPDU arrived at the MAC. For received frames, only. * * IEEE80211_RADIOTAP_CHANNEL 2 x uint16_t MHz, bitmap * * Tx/Rx frequency in MHz, followed by flags (see below). * Note that IEEE80211_RADIOTAP_XCHANNEL must be used to * represent an HT channel as there is not enough room in * the flags word. * * IEEE80211_RADIOTAP_FHSS uint16_t see below * * For frequency-hopping radios, the hop set (first byte) * and pattern (second byte). * * IEEE80211_RADIOTAP_RATE uint8_t 500kb/s or index * * Tx/Rx data rate. If bit 0x80 is set then it represents an * an MCS index and not an IEEE rate. * * IEEE80211_RADIOTAP_DBM_ANTSIGNAL int8_t decibels from * one milliwatt (dBm) * * RF signal power at the antenna, decibel difference from * one milliwatt. 
* * IEEE80211_RADIOTAP_DBM_ANTNOISE int8_t decibels from * one milliwatt (dBm) * * RF noise power at the antenna, decibel difference from one * milliwatt. * * IEEE80211_RADIOTAP_DB_ANTSIGNAL uint8_t decibel (dB) * * RF signal power at the antenna, decibel difference from an * arbitrary, fixed reference. * * IEEE80211_RADIOTAP_DB_ANTNOISE uint8_t decibel (dB) * * RF noise power at the antenna, decibel difference from an * arbitrary, fixed reference point. * * IEEE80211_RADIOTAP_LOCK_QUALITY uint16_t unitless * * Quality of Barker code lock. Unitless. Monotonically * nondecreasing with "better" lock strength. Called "Signal * Quality" in datasheets. (Is there a standard way to measure * this?) * * IEEE80211_RADIOTAP_TX_ATTENUATION uint16_t unitless * * Transmit power expressed as unitless distance from max * power set at factory calibration. 0 is max power. * Monotonically nondecreasing with lower power levels. * * IEEE80211_RADIOTAP_DB_TX_ATTENUATION uint16_t decibels (dB) * * Transmit power expressed as decibel distance from max power * set at factory calibration. 0 is max power. Monotonically * nondecreasing with lower power levels. * * IEEE80211_RADIOTAP_DBM_TX_POWER int8_t decibels from * one milliwatt (dBm) * * Transmit power expressed as dBm (decibels from a 1 milliwatt * reference). This is the absolute power level measured at * the antenna port. * * IEEE80211_RADIOTAP_FLAGS uint8_t bitmap * * Properties of transmitted and received frames. See flags * defined below. * * IEEE80211_RADIOTAP_ANTENNA uint8_t antenna index * * Unitless indication of the Rx/Tx antenna for this packet. * The first antenna is antenna 0. * * IEEE80211_RADIOTAP_RX_FLAGS uint16_t bitmap * * Properties of received frames. See flags defined below. 
* * IEEE80211_RADIOTAP_XCHANNEL uint32_t bitmap * uint16_t MHz * uint8_t channel number * uint8_t .5 dBm * * Extended channel specification: flags (see below) followed by * frequency in MHz, the corresponding IEEE channel number, and * finally the maximum regulatory transmit power cap in .5 dBm * units. This property supersedes IEEE80211_RADIOTAP_CHANNEL * and only one of the two should be present. * * IEEE80211_RADIOTAP_MCS uint8_t known * uint8_t flags * uint8_t mcs * * Bitset indicating which fields have known values, followed * by bitset of flag values, followed by the MCS rate index as * in IEEE 802.11n. * * * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless * * Contains the AMPDU information for the subframe. * * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 * * Contains VHT information about this frame. * * IEEE80211_RADIOTAP_VENDOR_NAMESPACE * uint8_t OUI[3] * uint8_t subspace * uint16_t length * * The Vendor Namespace Field contains three sub-fields. The first * sub-field is 3 bytes long. It contains the vendor's IEEE 802 * Organizationally Unique Identifier (OUI). The fourth byte is a * vendor-specific "namespace selector." 
* */ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_TSFT = 0, IEEE80211_RADIOTAP_FLAGS = 1, IEEE80211_RADIOTAP_RATE = 2, IEEE80211_RADIOTAP_CHANNEL = 3, IEEE80211_RADIOTAP_FHSS = 4, IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5, IEEE80211_RADIOTAP_DBM_ANTNOISE = 6, IEEE80211_RADIOTAP_LOCK_QUALITY = 7, IEEE80211_RADIOTAP_TX_ATTENUATION = 8, IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9, IEEE80211_RADIOTAP_DBM_TX_POWER = 10, IEEE80211_RADIOTAP_ANTENNA = 11, IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12, IEEE80211_RADIOTAP_DB_ANTNOISE = 13, IEEE80211_RADIOTAP_RX_FLAGS = 14, /* NB: gap for netbsd definitions */ IEEE80211_RADIOTAP_XCHANNEL = 18, IEEE80211_RADIOTAP_MCS = 19, IEEE80211_RADIOTAP_AMPDU_STATUS = 20, IEEE80211_RADIOTAP_VHT = 21, IEEE80211_RADIOTAP_NAMESPACE = 29, IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30, IEEE80211_RADIOTAP_EXT = 31 }; /* channel attributes */ #define IEEE80211_CHAN_TURBO 0x00010 /* Turbo channel */ #define IEEE80211_CHAN_CCK 0x00020 /* CCK channel */ #define IEEE80211_CHAN_OFDM 0x00040 /* OFDM channel */ #define IEEE80211_CHAN_2GHZ 0x00080 /* 2 GHz spectrum channel. 
*/ #define IEEE80211_CHAN_5GHZ 0x00100 /* 5 GHz spectrum channel */ #define IEEE80211_CHAN_PASSIVE 0x00200 /* Only passive scan allowed */ #define IEEE80211_CHAN_DYN 0x00400 /* Dynamic CCK-OFDM channel */ #define IEEE80211_CHAN_GFSK 0x00800 /* GFSK channel (FHSS PHY) */ #define IEEE80211_CHAN_GSM 0x01000 /* 900 MHz spectrum channel */ #define IEEE80211_CHAN_STURBO 0x02000 /* 11a static turbo channel only */ #define IEEE80211_CHAN_HALF 0x04000 /* Half rate channel */ #define IEEE80211_CHAN_QUARTER 0x08000 /* Quarter rate channel */ #define IEEE80211_CHAN_HT20 0x10000 /* HT 20 channel */ #define IEEE80211_CHAN_HT40U 0x20000 /* HT 40 channel w/ ext above */ #define IEEE80211_CHAN_HT40D 0x40000 /* HT 40 channel w/ ext below */ /* Useful combinations of channel characteristics, borrowed from Ethereal */ #define IEEE80211_CHAN_A \ (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM) #define IEEE80211_CHAN_B \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK) #define IEEE80211_CHAN_G \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN) #define IEEE80211_CHAN_TA \ (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO) #define IEEE80211_CHAN_TG \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN | IEEE80211_CHAN_TURBO) /* For IEEE80211_RADIOTAP_FLAGS */ #define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received * during CFP */ #define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received * with short * preamble */ #define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received * with WEP encryption */ #define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received * with fragmentation */ #define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */ #define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between * 802.11 header and payload * (to 32-bit boundary) */ #define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* does not pass FCS check */ /* For IEEE80211_RADIOTAP_RX_FLAGS */ #define IEEE80211_RADIOTAP_F_RX_BADFCS 0x0001 /* frame failed crc check */ #define IEEE80211_RADIOTAP_F_RX_PLCP_CRC 0x0002 /* frame failed 
PLCP CRC check */ /* For IEEE80211_RADIOTAP_MCS known */ #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_KNOWN 0x01 #define IEEE80211_RADIOTAP_MCS_MCS_INDEX_KNOWN 0x02 /* MCS index field */ #define IEEE80211_RADIOTAP_MCS_GUARD_INTERVAL_KNOWN 0x04 #define IEEE80211_RADIOTAP_MCS_HT_FORMAT_KNOWN 0x08 #define IEEE80211_RADIOTAP_MCS_FEC_TYPE_KNOWN 0x10 #define IEEE80211_RADIOTAP_MCS_STBC_KNOWN 0x20 #define IEEE80211_RADIOTAP_MCS_NESS_KNOWN 0x40 #define IEEE80211_RADIOTAP_MCS_NESS_BIT_1 0x80 /* For IEEE80211_RADIOTAP_MCS flags */ #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_MASK 0x03 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_20 0 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_40 1 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_20L 2 #define IEEE80211_RADIOTAP_MCS_BANDWIDTH_20U 3 #define IEEE80211_RADIOTAP_MCS_SHORT_GI 0x04 /* short guard interval */ #define IEEE80211_RADIOTAP_MCS_HT_GREENFIELD 0x08 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 #define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 #define IEEE80211_RADIOTAP_MCS_STBC_1 1 #define IEEE80211_RADIOTAP_MCS_STBC_2 2 #define IEEE80211_RADIOTAP_MCS_STBC_3 3 #define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 #define IEEE80211_RADIOTAP_MCS_NESS_BIT_0 0x80 /* For IEEE80211_RADIOTAP_AMPDU_STATUS */ #define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 #define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002 #define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004 #define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008 #define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010 #define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020 /* For IEEE80211_RADIOTAP_VHT known */ #define IEEE80211_RADIOTAP_VHT_STBC_KNOWN 0x0001 #define IEEE80211_RADIOTAP_VHT_TXOP_PS_NA_KNOWN 0x0002 #define IEEE80211_RADIOTAP_VHT_GUARD_INTERVAL_KNOWN 0x0004 #define IEEE80211_RADIOTAP_VHT_SGI_NSYM_DIS_KNOWN 0x0008 #define IEEE80211_RADIOTAP_VHT_LDPC_EXTRA_OFDM_SYM_KNOWN 0x0010 #define IEEE80211_RADIOTAP_VHT_BEAMFORMED_KNOWN 0x0020 #define IEEE80211_RADIOTAP_VHT_BANDWIDTH_KNOWN 0x0040 #define 
IEEE80211_RADIOTAP_VHT_GROUP_ID_KNOWN 0x0080 #define IEEE80211_RADIOTAP_VHT_PARTIAL_AID_KNOWN 0x0100 /* For IEEE80211_RADIOTAP_VHT flags */ #define IEEE80211_RADIOTAP_VHT_STBC 0x01 #define IEEE80211_RADIOTAP_VHT_TXOP_PS_NA 0x02 #define IEEE80211_RADIOTAP_VHT_SHORT_GI 0x04 #define IEEE80211_RADIOTAP_VHT_SGI_NSYM_M10_9 0x08 #define IEEE80211_RADIOTAP_VHT_LDPC_EXTRA_OFDM_SYM 0x10 #define IEEE80211_RADIOTAP_VHT_BEAMFORMED 0x20 #define IEEE80211_RADIOTAP_VHT_BANDWIDTH_MASK 0x1f #define IEEE80211_RADIOTAP_VHT_NSS_MASK 0x0f #define IEEE80211_RADIOTAP_VHT_MCS_MASK 0xf0 #define IEEE80211_RADIOTAP_VHT_MCS_SHIFT 4 #define IEEE80211_RADIOTAP_CODING_LDPC_USERn 0x01 #define IEEE80211_CHAN_FHSS \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_GFSK) #define IEEE80211_CHAN_A \ (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM) #define IEEE80211_CHAN_B \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK) #define IEEE80211_CHAN_PUREG \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM) #define IEEE80211_CHAN_G \ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN) #define IS_CHAN_FHSS(flags) \ ((flags & IEEE80211_CHAN_FHSS) == IEEE80211_CHAN_FHSS) #define IS_CHAN_A(flags) \ ((flags & IEEE80211_CHAN_A) == IEEE80211_CHAN_A) #define IS_CHAN_B(flags) \ ((flags & IEEE80211_CHAN_B) == IEEE80211_CHAN_B) #define IS_CHAN_PUREG(flags) \ ((flags & IEEE80211_CHAN_PUREG) == IEEE80211_CHAN_PUREG) #define IS_CHAN_G(flags) \ ((flags & IEEE80211_CHAN_G) == IEEE80211_CHAN_G) #define IS_CHAN_ANYG(flags) \ (IS_CHAN_PUREG(flags) || IS_CHAN_G(flags)) static void print_chaninfo(netdissect_options *ndo, uint16_t freq, int flags, int presentflags) { ND_PRINT((ndo, "%u MHz", freq)); if (presentflags & (1 << IEEE80211_RADIOTAP_MCS)) { /* * We have the MCS field, so this is 11n, regardless * of what the channel flags say. 
*/ ND_PRINT((ndo, " 11n")); } else { if (IS_CHAN_FHSS(flags)) ND_PRINT((ndo, " FHSS")); if (IS_CHAN_A(flags)) { if (flags & IEEE80211_CHAN_HALF) ND_PRINT((ndo, " 11a/10Mhz")); else if (flags & IEEE80211_CHAN_QUARTER) ND_PRINT((ndo, " 11a/5Mhz")); else ND_PRINT((ndo, " 11a")); } if (IS_CHAN_ANYG(flags)) { if (flags & IEEE80211_CHAN_HALF) ND_PRINT((ndo, " 11g/10Mhz")); else if (flags & IEEE80211_CHAN_QUARTER) ND_PRINT((ndo, " 11g/5Mhz")); else ND_PRINT((ndo, " 11g")); } else if (IS_CHAN_B(flags)) ND_PRINT((ndo, " 11b")); if (flags & IEEE80211_CHAN_TURBO) ND_PRINT((ndo, " Turbo")); } /* * These apply to 11n. */ if (flags & IEEE80211_CHAN_HT20) ND_PRINT((ndo, " ht/20")); else if (flags & IEEE80211_CHAN_HT40D) ND_PRINT((ndo, " ht/40-")); else if (flags & IEEE80211_CHAN_HT40U) ND_PRINT((ndo, " ht/40+")); ND_PRINT((ndo, " ")); } static int print_radiotap_field(netdissect_options *ndo, struct cpack_state *s, uint32_t bit, uint8_t *flagsp, uint32_t presentflags) { u_int i; int rc; switch (bit) { case IEEE80211_RADIOTAP_TSFT: { uint64_t tsft; rc = cpack_uint64(s, &tsft); if (rc != 0) goto trunc; ND_PRINT((ndo, "%" PRIu64 "us tsft ", tsft)); break; } case IEEE80211_RADIOTAP_FLAGS: { uint8_t flagsval; rc = cpack_uint8(s, &flagsval); if (rc != 0) goto trunc; *flagsp = flagsval; if (flagsval & IEEE80211_RADIOTAP_F_CFP) ND_PRINT((ndo, "cfp ")); if (flagsval & IEEE80211_RADIOTAP_F_SHORTPRE) ND_PRINT((ndo, "short preamble ")); if (flagsval & IEEE80211_RADIOTAP_F_WEP) ND_PRINT((ndo, "wep ")); if (flagsval & IEEE80211_RADIOTAP_F_FRAG) ND_PRINT((ndo, "fragmented ")); if (flagsval & IEEE80211_RADIOTAP_F_BADFCS) ND_PRINT((ndo, "bad-fcs ")); break; } case IEEE80211_RADIOTAP_RATE: { uint8_t rate; rc = cpack_uint8(s, &rate); if (rc != 0) goto trunc; /* * XXX On FreeBSD rate & 0x80 means we have an MCS. On * Linux and AirPcap it does not. (What about * Mac OS X, NetBSD, OpenBSD, and DragonFly BSD?) 
* * This is an issue either for proprietary extensions * to 11a or 11g, which do exist, or for 11n * implementations that stuff a rate value into * this field, which also appear to exist. * * We currently handle that by assuming that * if the 0x80 bit is set *and* the remaining * bits have a value between 0 and 15 it's * an MCS value, otherwise it's a rate. If * there are cases where systems that use * "0x80 + MCS index" for MCS indices > 15, * or stuff a rate value here between 64 and * 71.5 Mb/s in here, we'll need a preference * setting. Such rates do exist, e.g. 11n * MCS 7 at 20 MHz with a long guard interval. */ if (rate >= 0x80 && rate <= 0x8f) { /* * XXX - we don't know the channel width * or guard interval length, so we can't * convert this to a data rate. * * If you want us to show a data rate, * use the MCS field, not the Rate field; * the MCS field includes not only the * MCS index, it also includes bandwidth * and guard interval information. * * XXX - can we get the channel width * from XChannel and the guard interval * information from Flags, at least on * FreeBSD? */ ND_PRINT((ndo, "MCS %u ", rate & 0x7f)); } else ND_PRINT((ndo, "%2.1f Mb/s ", .5 * rate)); break; } case IEEE80211_RADIOTAP_CHANNEL: { uint16_t frequency; uint16_t flags; rc = cpack_uint16(s, &frequency); if (rc != 0) goto trunc; rc = cpack_uint16(s, &flags); if (rc != 0) goto trunc; /* * If CHANNEL and XCHANNEL are both present, skip * CHANNEL. 
*/ if (presentflags & (1 << IEEE80211_RADIOTAP_XCHANNEL)) break; print_chaninfo(ndo, frequency, flags, presentflags); break; } case IEEE80211_RADIOTAP_FHSS: { uint8_t hopset; uint8_t hoppat; rc = cpack_uint8(s, &hopset); if (rc != 0) goto trunc; rc = cpack_uint8(s, &hoppat); if (rc != 0) goto trunc; ND_PRINT((ndo, "fhset %d fhpat %d ", hopset, hoppat)); break; } case IEEE80211_RADIOTAP_DBM_ANTSIGNAL: { int8_t dbm_antsignal; rc = cpack_int8(s, &dbm_antsignal); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddBm signal ", dbm_antsignal)); break; } case IEEE80211_RADIOTAP_DBM_ANTNOISE: { int8_t dbm_antnoise; rc = cpack_int8(s, &dbm_antnoise); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddBm noise ", dbm_antnoise)); break; } case IEEE80211_RADIOTAP_LOCK_QUALITY: { uint16_t lock_quality; rc = cpack_uint16(s, &lock_quality); if (rc != 0) goto trunc; ND_PRINT((ndo, "%u sq ", lock_quality)); break; } case IEEE80211_RADIOTAP_TX_ATTENUATION: { uint16_t tx_attenuation; rc = cpack_uint16(s, &tx_attenuation); if (rc != 0) goto trunc; ND_PRINT((ndo, "%d tx power ", -(int)tx_attenuation)); break; } case IEEE80211_RADIOTAP_DB_TX_ATTENUATION: { uint8_t db_tx_attenuation; rc = cpack_uint8(s, &db_tx_attenuation); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddB tx attenuation ", -(int)db_tx_attenuation)); break; } case IEEE80211_RADIOTAP_DBM_TX_POWER: { int8_t dbm_tx_power; rc = cpack_int8(s, &dbm_tx_power); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddBm tx power ", dbm_tx_power)); break; } case IEEE80211_RADIOTAP_ANTENNA: { uint8_t antenna; rc = cpack_uint8(s, &antenna); if (rc != 0) goto trunc; ND_PRINT((ndo, "antenna %u ", antenna)); break; } case IEEE80211_RADIOTAP_DB_ANTSIGNAL: { uint8_t db_antsignal; rc = cpack_uint8(s, &db_antsignal); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddB signal ", db_antsignal)); break; } case IEEE80211_RADIOTAP_DB_ANTNOISE: { uint8_t db_antnoise; rc = cpack_uint8(s, &db_antnoise); if (rc != 0) goto trunc; ND_PRINT((ndo, "%ddB noise ", db_antnoise)); break; } 
case IEEE80211_RADIOTAP_RX_FLAGS: { uint16_t rx_flags; rc = cpack_uint16(s, &rx_flags); if (rc != 0) goto trunc; /* Do nothing for now */ break; } case IEEE80211_RADIOTAP_XCHANNEL: { uint32_t flags; uint16_t frequency; uint8_t channel; uint8_t maxpower; rc = cpack_uint32(s, &flags); if (rc != 0) goto trunc; rc = cpack_uint16(s, &frequency); if (rc != 0) goto trunc; rc = cpack_uint8(s, &channel); if (rc != 0) goto trunc; rc = cpack_uint8(s, &maxpower); if (rc != 0) goto trunc; print_chaninfo(ndo, frequency, flags, presentflags); break; } case IEEE80211_RADIOTAP_MCS: { uint8_t known; uint8_t flags; uint8_t mcs_index; static const char *ht_bandwidth[4] = { "20 MHz", "40 MHz", "20 MHz (L)", "20 MHz (U)" }; float htrate; rc = cpack_uint8(s, &known); if (rc != 0) goto trunc; rc = cpack_uint8(s, &flags); if (rc != 0) goto trunc; rc = cpack_uint8(s, &mcs_index); if (rc != 0) goto trunc; if (known & IEEE80211_RADIOTAP_MCS_MCS_INDEX_KNOWN) { /* * We know the MCS index. */ if (mcs_index <= MAX_MCS_INDEX) { /* * And it's in-range. */ if (known & (IEEE80211_RADIOTAP_MCS_BANDWIDTH_KNOWN|IEEE80211_RADIOTAP_MCS_GUARD_INTERVAL_KNOWN)) { /* * And we know both the bandwidth and * the guard interval, so we can look * up the rate. */ htrate = ieee80211_float_htrates \ [mcs_index] \ [((flags & IEEE80211_RADIOTAP_MCS_BANDWIDTH_MASK) == IEEE80211_RADIOTAP_MCS_BANDWIDTH_40 ? 1 : 0)] \ [((flags & IEEE80211_RADIOTAP_MCS_SHORT_GI) ? 1 : 0)]; } else { /* * We don't know both the bandwidth * and the guard interval, so we can * only report the MCS index. */ htrate = 0.0; } } else { /* * The MCS value is out of range. */ htrate = 0.0; } if (htrate != 0.0) { /* * We have the rate. * Print it. */ ND_PRINT((ndo, "%.1f Mb/s MCS %u ", htrate, mcs_index)); } else { /* * We at least have the MCS index. * Print it. 
*/ ND_PRINT((ndo, "MCS %u ", mcs_index)); } } if (known & IEEE80211_RADIOTAP_MCS_BANDWIDTH_KNOWN) { ND_PRINT((ndo, "%s ", ht_bandwidth[flags & IEEE80211_RADIOTAP_MCS_BANDWIDTH_MASK])); } if (known & IEEE80211_RADIOTAP_MCS_GUARD_INTERVAL_KNOWN) { ND_PRINT((ndo, "%s GI ", (flags & IEEE80211_RADIOTAP_MCS_SHORT_GI) ? "short" : "long")); } if (known & IEEE80211_RADIOTAP_MCS_HT_FORMAT_KNOWN) { ND_PRINT((ndo, "%s ", (flags & IEEE80211_RADIOTAP_MCS_HT_GREENFIELD) ? "greenfield" : "mixed")); } if (known & IEEE80211_RADIOTAP_MCS_FEC_TYPE_KNOWN) { ND_PRINT((ndo, "%s FEC ", (flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC) ? "LDPC" : "BCC")); } if (known & IEEE80211_RADIOTAP_MCS_STBC_KNOWN) { ND_PRINT((ndo, "RX-STBC%u ", (flags & IEEE80211_RADIOTAP_MCS_STBC_MASK) >> IEEE80211_RADIOTAP_MCS_STBC_SHIFT)); } break; } case IEEE80211_RADIOTAP_AMPDU_STATUS: { uint32_t reference_num; uint16_t flags; uint8_t delim_crc; uint8_t reserved; rc = cpack_uint32(s, &reference_num); if (rc != 0) goto trunc; rc = cpack_uint16(s, &flags); if (rc != 0) goto trunc; rc = cpack_uint8(s, &delim_crc); if (rc != 0) goto trunc; rc = cpack_uint8(s, &reserved); if (rc != 0) goto trunc; /* Do nothing for now */ break; } case IEEE80211_RADIOTAP_VHT: { uint16_t known; uint8_t flags; uint8_t bandwidth; uint8_t mcs_nss[4]; uint8_t coding; uint8_t group_id; uint16_t partial_aid; static const char *vht_bandwidth[32] = { "20 MHz", "40 MHz", "20 MHz (L)", "20 MHz (U)", "80 MHz", "80 MHz (L)", "80 MHz (U)", "80 MHz (LL)", "80 MHz (LU)", "80 MHz (UL)", "80 MHz (UU)", "160 MHz", "160 MHz (L)", "160 MHz (U)", "160 MHz (LL)", "160 MHz (LU)", "160 MHz (UL)", "160 MHz (UU)", "160 MHz (LLL)", "160 MHz (LLU)", "160 MHz (LUL)", "160 MHz (UUU)", "160 MHz (ULL)", "160 MHz (ULU)", "160 MHz (UUL)", "160 MHz (UUU)", "unknown (26)", "unknown (27)", "unknown (28)", "unknown (29)", "unknown (30)", "unknown (31)" }; rc = cpack_uint16(s, &known); if (rc != 0) goto trunc; rc = cpack_uint8(s, &flags); if (rc != 0) goto trunc; rc = 
cpack_uint8(s, &bandwidth); if (rc != 0) goto trunc; for (i = 0; i < 4; i++) { rc = cpack_uint8(s, &mcs_nss[i]); if (rc != 0) goto trunc; } rc = cpack_uint8(s, &coding); if (rc != 0) goto trunc; rc = cpack_uint8(s, &group_id); if (rc != 0) goto trunc; rc = cpack_uint16(s, &partial_aid); if (rc != 0) goto trunc; for (i = 0; i < 4; i++) { u_int nss, mcs; nss = mcs_nss[i] & IEEE80211_RADIOTAP_VHT_NSS_MASK; mcs = (mcs_nss[i] & IEEE80211_RADIOTAP_VHT_MCS_MASK) >> IEEE80211_RADIOTAP_VHT_MCS_SHIFT; if (nss == 0) continue; ND_PRINT((ndo, "User %u MCS %u ", i, mcs)); ND_PRINT((ndo, "%s FEC ", (coding & (IEEE80211_RADIOTAP_CODING_LDPC_USERn << i)) ? "LDPC" : "BCC")); } if (known & IEEE80211_RADIOTAP_VHT_BANDWIDTH_KNOWN) { ND_PRINT((ndo, "%s ", vht_bandwidth[bandwidth & IEEE80211_RADIOTAP_VHT_BANDWIDTH_MASK])); } if (known & IEEE80211_RADIOTAP_VHT_GUARD_INTERVAL_KNOWN) { ND_PRINT((ndo, "%s GI ", (flags & IEEE80211_RADIOTAP_VHT_SHORT_GI) ? "short" : "long")); } break; } default: /* this bit indicates a field whose * size we do not know, so we cannot * proceed. Just print the bit number. */ ND_PRINT((ndo, "[bit %u] ", bit)); return -1; } return 0; trunc: ND_PRINT((ndo, "%s", tstr)); return rc; } static int print_in_radiotap_namespace(netdissect_options *ndo, struct cpack_state *s, uint8_t *flags, uint32_t presentflags, int bit0) { #define BITNO_32(x) (((x) >> 16) ? 16 + BITNO_16((x) >> 16) : BITNO_16((x))) #define BITNO_16(x) (((x) >> 8) ? 8 + BITNO_8((x) >> 8) : BITNO_8((x))) #define BITNO_8(x) (((x) >> 4) ? 4 + BITNO_4((x) >> 4) : BITNO_4((x))) #define BITNO_4(x) (((x) >> 2) ? 2 + BITNO_2((x) >> 2) : BITNO_2((x))) #define BITNO_2(x) (((x) & 2) ? 1 : 0) uint32_t present, next_present; int bitno; enum ieee80211_radiotap_type bit; int rc; for (present = presentflags; present; present = next_present) { /* * Clear the least significant bit that is set. 
*/ next_present = present & (present - 1); /* * Get the bit number, within this presence word, * of the remaining least significant bit that * is set. */ bitno = BITNO_32(present ^ next_present); /* * Stop if this is one of the "same meaning * in all presence flags" bits. */ if (bitno >= IEEE80211_RADIOTAP_NAMESPACE) break; /* * Get the radiotap bit number of that bit. */ bit = (enum ieee80211_radiotap_type)(bit0 + bitno); rc = print_radiotap_field(ndo, s, bit, flags, presentflags); if (rc != 0) return rc; } return 0; } static u_int ieee802_11_radio_print(netdissect_options *ndo, const u_char *p, u_int length, u_int caplen) { #define BIT(n) (1U << n) #define IS_EXTENDED(__p) \ (EXTRACT_LE_32BITS(__p) & BIT(IEEE80211_RADIOTAP_EXT)) != 0 struct cpack_state cpacker; const struct ieee80211_radiotap_header *hdr; uint32_t presentflags; const uint32_t *presentp, *last_presentp; int vendor_namespace; uint8_t vendor_oui[3]; uint8_t vendor_subnamespace; uint16_t skip_length; int bit0; u_int len; uint8_t flags; int pad; u_int fcslen; if (caplen < sizeof(*hdr)) { ND_PRINT((ndo, "%s", tstr)); return caplen; } hdr = (const struct ieee80211_radiotap_header *)p; len = EXTRACT_LE_16BITS(&hdr->it_len); /* * If we don't have the entire radiotap header, just give up. */ if (caplen < len) { ND_PRINT((ndo, "%s", tstr)); return caplen; } cpack_init(&cpacker, (const uint8_t *)hdr, len); /* align against header start */ cpack_advance(&cpacker, sizeof(*hdr)); /* includes the 1st bitmap */ for (last_presentp = &hdr->it_present; (const u_char*)(last_presentp + 1) <= p + len && IS_EXTENDED(last_presentp); last_presentp++) cpack_advance(&cpacker, sizeof(hdr->it_present)); /* more bitmaps */ /* are there more bitmap extensions than bytes in header? */ if ((const u_char*)(last_presentp + 1) > p + len) { ND_PRINT((ndo, "%s", tstr)); return caplen; } /* * Start out at the beginning of the default radiotap namespace. 
*/ bit0 = 0; vendor_namespace = 0; memset(vendor_oui, 0, 3); vendor_subnamespace = 0; skip_length = 0; /* Assume no flags */ flags = 0; /* Assume no Atheros padding between 802.11 header and body */ pad = 0; /* Assume no FCS at end of frame */ fcslen = 0; for (presentp = &hdr->it_present; presentp <= last_presentp; presentp++) { presentflags = EXTRACT_LE_32BITS(presentp); /* * If this is a vendor namespace, we don't handle it. */ if (vendor_namespace) { /* * Skip past the stuff we don't understand. * If we add support for any vendor namespaces, * it'd be added here; use vendor_oui and * vendor_subnamespace to interpret the fields. */ if (cpack_advance(&cpacker, skip_length) != 0) { /* * Ran out of space in the packet. */ break; } /* * We've skipped it all; nothing more to * skip. */ skip_length = 0; } else { if (print_in_radiotap_namespace(ndo, &cpacker, &flags, presentflags, bit0) != 0) { /* * Fatal error - can't process anything * more in the radiotap header. */ break; } } /* * Handle the namespace switch bits; we've already handled * the extension bit in all but the last word above. */ switch (presentflags & (BIT(IEEE80211_RADIOTAP_NAMESPACE)|BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE))) { case 0: /* * We're not changing namespaces. * advance to the next 32 bits in the current * namespace. */ bit0 += 32; break; case BIT(IEEE80211_RADIOTAP_NAMESPACE): /* * We're switching to the radiotap namespace. * Reset the presence-bitmap index to 0, and * reset the namespace to the default radiotap * namespace. */ bit0 = 0; vendor_namespace = 0; memset(vendor_oui, 0, 3); vendor_subnamespace = 0; skip_length = 0; break; case BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE): /* * We're switching to a vendor namespace. * Reset the presence-bitmap index to 0, * note that we're in a vendor namespace, * and fetch the fields of the Vendor Namespace * item. 
*/ bit0 = 0; vendor_namespace = 1; if ((cpack_align_and_reserve(&cpacker, 2)) == NULL) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_oui[0]) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_oui[1]) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_oui[2]) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint8(&cpacker, &vendor_subnamespace) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } if (cpack_uint16(&cpacker, &skip_length) != 0) { ND_PRINT((ndo, "%s", tstr)); break; } break; default: /* * Illegal combination. The behavior in this * case is undefined by the radiotap spec; we * just ignore both bits. */ break; } } if (flags & IEEE80211_RADIOTAP_F_DATAPAD) pad = 1; /* Atheros padding */ if (flags & IEEE80211_RADIOTAP_F_FCS) fcslen = 4; /* FCS at end of packet */ return len + ieee802_11_print(ndo, p + len, length - len, caplen - len, pad, fcslen); #undef BITNO_32 #undef BITNO_16 #undef BITNO_8 #undef BITNO_4 #undef BITNO_2 #undef BIT } static u_int ieee802_11_avs_radio_print(netdissect_options *ndo, const u_char *p, u_int length, u_int caplen) { uint32_t caphdr_len; if (caplen < 8) { ND_PRINT((ndo, "%s", tstr)); return caplen; } caphdr_len = EXTRACT_32BITS(p + 4); if (caphdr_len < 8) { /* * Yow! The capture header length is claimed not * to be large enough to include even the version * cookie or capture header length! */ ND_PRINT((ndo, "%s", tstr)); return caplen; } if (caplen < caphdr_len) { ND_PRINT((ndo, "%s", tstr)); return caplen; } return caphdr_len + ieee802_11_print(ndo, p + caphdr_len, length - caphdr_len, caplen - caphdr_len, 0, 0); } #define PRISM_HDR_LEN 144 #define WLANCAP_MAGIC_COOKIE_BASE 0x80211000 #define WLANCAP_MAGIC_COOKIE_V1 0x80211001 #define WLANCAP_MAGIC_COOKIE_V2 0x80211002 /* * For DLT_PRISM_HEADER; like DLT_IEEE802_11, but with an extra header, * containing information such as radio information, which we * currently ignore. 
* * If, however, the packet begins with WLANCAP_MAGIC_COOKIE_V1 or * WLANCAP_MAGIC_COOKIE_V2, it's really DLT_IEEE802_11_RADIO_AVS * (currently, on Linux, there's no ARPHRD_ type for * DLT_IEEE802_11_RADIO_AVS, as there is a ARPHRD_IEEE80211_PRISM * for DLT_PRISM_HEADER, so ARPHRD_IEEE80211_PRISM is used for * the AVS header, and the first 4 bytes of the header are used to * indicate whether it's a Prism header or an AVS header). */ u_int prism_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int length = h->len; uint32_t msgcode; if (caplen < 4) { ND_PRINT((ndo, "%s", tstr)); return caplen; } msgcode = EXTRACT_32BITS(p); if (msgcode == WLANCAP_MAGIC_COOKIE_V1 || msgcode == WLANCAP_MAGIC_COOKIE_V2) return ieee802_11_avs_radio_print(ndo, p, length, caplen); if (caplen < PRISM_HDR_LEN) { ND_PRINT((ndo, "%s", tstr)); return caplen; } return PRISM_HDR_LEN + ieee802_11_print(ndo, p + PRISM_HDR_LEN, length - PRISM_HDR_LEN, caplen - PRISM_HDR_LEN, 0, 0); } /* * For DLT_IEEE802_11_RADIO; like DLT_IEEE802_11, but with an extra * header, containing information such as radio information. */ u_int ieee802_11_radio_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { return ieee802_11_radio_print(ndo, p, h->len, h->caplen); } /* * For DLT_IEEE802_11_RADIO_AVS; like DLT_IEEE802_11, but with an * extra header, containing information such as radio information, * which we currently ignore. */ u_int ieee802_11_radio_avs_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { return ieee802_11_avs_radio_print(ndo, p, h->len, h->caplen); }
parse_elements(netdissect_options *ndo,
               struct mgmt_body_t *pbody, const u_char *p, int offset,
               u_int length)
{
	u_int elementlen;
	struct ssid_t ssid;
	struct challenge_t challenge;
	struct rates_t rates;
	struct ds_t ds;
	struct cf_t cf;
	struct tim_t tim;

	/*
	 * Parse the information elements of an 802.11 management frame
	 * body, recording the first well-formed occurrence of each known
	 * element type in *pbody.
	 *
	 * Returns 1 on success, 0 if the elements are truncated or
	 * malformed (caller treats the body as unparseable).
	 *
	 * We haven't seen any elements yet.
	 */
	pbody->challenge_present = 0;
	pbody->ssid_present = 0;
	pbody->rates_present = 0;
	pbody->ds_present = 0;
	pbody->cf_present = 0;
	pbody->tim_present = 0;

	while (length != 0) {
		/* Make sure we at least have the element ID and length. */
		if (!ND_TTEST2(*(p + offset), 2))
			return 0;
		if (length < 2)
			return 0;
		elementlen = *(p + offset + 1);

		/* Make sure we have the entire element. */
		if (!ND_TTEST2(*(p + offset + 2), elementlen))
			return 0;
		if (length < elementlen + 2)
			return 0;

		switch (*(p + offset)) {
		case E_SSID:
			memcpy(&ssid, p + offset, 2);
			offset += 2;
			length -= 2;
			if (ssid.length != 0) {
				if (ssid.length > sizeof(ssid.ssid) - 1)
					return 0;
				if (!ND_TTEST2(*(p + offset), ssid.length))
					return 0;
				if (length < ssid.length)
					return 0;
				memcpy(&ssid.ssid, p + offset, ssid.length);
				offset += ssid.length;
				length -= ssid.length;
			}
			ssid.ssid[ssid.length] = '\0';
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen an SSID IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->ssid_present) {
				pbody->ssid = ssid;
				pbody->ssid_present = 1;
			}
			break;
		case E_CHALLENGE:
			memcpy(&challenge, p + offset, 2);
			offset += 2;
			length -= 2;
			if (challenge.length != 0) {
				if (challenge.length >
				    sizeof(challenge.text) - 1)
					return 0;
				if (!ND_TTEST2(*(p + offset), challenge.length))
					return 0;
				if (length < challenge.length)
					return 0;
				memcpy(&challenge.text, p + offset,
				    challenge.length);
				offset += challenge.length;
				length -= challenge.length;
			}
			challenge.text[challenge.length] = '\0';
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a challenge IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->challenge_present) {
				pbody->challenge = challenge;
				pbody->challenge_present = 1;
			}
			break;
		case E_RATES:
			memcpy(&rates, p + offset, 2);
			offset += 2;
			length -= 2;
			if (rates.length != 0) {
				if (rates.length > sizeof rates.rate)
					return 0;
				if (!ND_TTEST2(*(p + offset), rates.length))
					return 0;
				if (length < rates.length)
					return 0;
				memcpy(&rates.rate, p + offset, rates.length);
				offset += rates.length;
				length -= rates.length;
			}
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a rates IE,
			 * copy this one if it's not zero-length,
			 * otherwise ignore this one, so we later
			 * report the first one we saw.
			 *
			 * We ignore zero-length rates IEs as some
			 * devices seem to put a zero-length rates
			 * IE, followed by an SSID IE, followed by
			 * a non-zero-length rates IE into frames,
			 * even though IEEE Std 802.11-2007 doesn't
			 * seem to indicate that a zero-length rates
			 * IE is valid.
			 */
			if (!pbody->rates_present && rates.length != 0) {
				pbody->rates = rates;
				pbody->rates_present = 1;
			}
			break;
		case E_DS:
			memcpy(&ds, p + offset, 2);
			offset += 2;
			length -= 2;
			if (ds.length != 1) {
				/* Malformed DS IE; skip its payload. */
				offset += ds.length;
				length -= ds.length;
				break;
			}
			ds.channel = *(p + offset);
			offset += 1;
			length -= 1;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a DS IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->ds_present) {
				pbody->ds = ds;
				pbody->ds_present = 1;
			}
			break;
		case E_CF:
			memcpy(&cf, p + offset, 2);
			offset += 2;
			length -= 2;
			if (cf.length != 6) {
				/* Malformed CF IE; skip its payload. */
				offset += cf.length;
				length -= cf.length;
				break;
			}
			memcpy(&cf.count, p + offset, 6);
			offset += 6;
			length -= 6;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a CF IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->cf_present) {
				pbody->cf = cf;
				pbody->cf_present = 1;
			}
			break;
		case E_TIM:
			memcpy(&tim, p + offset, 2);
			offset += 2;
			length -= 2;
			if (tim.length <= 3) {
				/* No bitmap payload; skip. */
				offset += tim.length;
				length -= tim.length;
				break;
			}
			if (tim.length - 3 > (int)sizeof tim.bitmap)
				return 0;
			memcpy(&tim.count, p + offset, 3);
			offset += 3;
			length -= 3;
			/*
			 * offset has already been advanced past the
			 * 3-byte count/period/bitmap-control fields
			 * just copied above, so the partial virtual
			 * bitmap starts at p + offset.  The previous
			 * code read from p + offset + 3, 3 bytes past
			 * the validated element — an out-of-bounds
			 * read (CVE-2017-13008, CWE-125).
			 */
			memcpy(tim.bitmap, p + offset, tim.length - 3);
			offset += tim.length - 3;
			length -= tim.length - 3;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a TIM IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->tim_present) {
				pbody->tim = tim;
				pbody->tim_present = 1;
			}
			break;
		default:
#if 0
			ND_PRINT((ndo, "(1) unhandled element_id (%d)  ",
			    *(p + offset)));
#endif
			/* Unknown element; skip header plus payload. */
			offset += 2 + elementlen;
			length -= 2 + elementlen;
			break;
		}
	}

	/* No problems found. */
	return 1;
}
parse_elements(netdissect_options *ndo,
               struct mgmt_body_t *pbody, const u_char *p, int offset,
               u_int length)
{
	u_int elementlen;
	struct ssid_t ssid;
	struct challenge_t challenge;
	struct rates_t rates;
	struct ds_t ds;
	struct cf_t cf;
	struct tim_t tim;

	/*
	 * Parse the information elements of an 802.11 management frame
	 * body, recording the first well-formed occurrence of each known
	 * element type in *pbody.  Returns 1 on success, 0 if truncated
	 * or malformed.
	 *
	 * We haven't seen any elements yet.
	 */
	pbody->challenge_present = 0;
	pbody->ssid_present = 0;
	pbody->rates_present = 0;
	pbody->ds_present = 0;
	pbody->cf_present = 0;
	pbody->tim_present = 0;

	while (length != 0) {
		/* Make sure we at least have the element ID and length. */
		if (!ND_TTEST2(*(p + offset), 2))
			return 0;
		if (length < 2)
			return 0;
		elementlen = *(p + offset + 1);

		/*
		 * Make sure we have the entire element.  Because this
		 * validates elementlen bytes after the 2-byte header,
		 * the per-element copies below need no further bounds
		 * checks of their own.
		 */
		if (!ND_TTEST2(*(p + offset + 2), elementlen))
			return 0;
		if (length < elementlen + 2)
			return 0;

		switch (*(p + offset)) {
		case E_SSID:
			memcpy(&ssid, p + offset, 2);
			offset += 2;
			length -= 2;
			if (ssid.length != 0) {
				/* Reject SSIDs too long for our buffer. */
				if (ssid.length > sizeof(ssid.ssid) - 1)
					return 0;
				memcpy(&ssid.ssid, p + offset, ssid.length);
				offset += ssid.length;
				length -= ssid.length;
			}
			ssid.ssid[ssid.length] = '\0';
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen an SSID IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->ssid_present) {
				pbody->ssid = ssid;
				pbody->ssid_present = 1;
			}
			break;
		case E_CHALLENGE:
			memcpy(&challenge, p + offset, 2);
			offset += 2;
			length -= 2;
			if (challenge.length != 0) {
				/* Reject challenges too long for our buffer. */
				if (challenge.length >
				    sizeof(challenge.text) - 1)
					return 0;
				memcpy(&challenge.text, p + offset,
				    challenge.length);
				offset += challenge.length;
				length -= challenge.length;
			}
			challenge.text[challenge.length] = '\0';
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a challenge IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->challenge_present) {
				pbody->challenge = challenge;
				pbody->challenge_present = 1;
			}
			break;
		case E_RATES:
			memcpy(&rates, p + offset, 2);
			offset += 2;
			length -= 2;
			if (rates.length != 0) {
				/* Reject rate sets too long for our buffer. */
				if (rates.length > sizeof rates.rate)
					return 0;
				memcpy(&rates.rate, p + offset, rates.length);
				offset += rates.length;
				length -= rates.length;
			}
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a rates IE,
			 * copy this one if it's not zero-length,
			 * otherwise ignore this one, so we later
			 * report the first one we saw.
			 *
			 * We ignore zero-length rates IEs as some
			 * devices seem to put a zero-length rates
			 * IE, followed by an SSID IE, followed by
			 * a non-zero-length rates IE into frames,
			 * even though IEEE Std 802.11-2007 doesn't
			 * seem to indicate that a zero-length rates
			 * IE is valid.
			 */
			if (!pbody->rates_present && rates.length != 0) {
				pbody->rates = rates;
				pbody->rates_present = 1;
			}
			break;
		case E_DS:
			memcpy(&ds, p + offset, 2);
			offset += 2;
			length -= 2;
			if (ds.length != 1) {
				/* Malformed DS IE; skip its payload. */
				offset += ds.length;
				length -= ds.length;
				break;
			}
			ds.channel = *(p + offset);
			offset += 1;
			length -= 1;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a DS IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->ds_present) {
				pbody->ds = ds;
				pbody->ds_present = 1;
			}
			break;
		case E_CF:
			memcpy(&cf, p + offset, 2);
			offset += 2;
			length -= 2;
			if (cf.length != 6) {
				/* Malformed CF IE; skip its payload. */
				offset += cf.length;
				length -= cf.length;
				break;
			}
			memcpy(&cf.count, p + offset, 6);
			offset += 6;
			length -= 6;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a CF IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->cf_present) {
				pbody->cf = cf;
				pbody->cf_present = 1;
			}
			break;
		case E_TIM:
			memcpy(&tim, p + offset, 2);
			offset += 2;
			length -= 2;
			if (tim.length <= 3) {
				/* No bitmap payload; skip. */
				offset += tim.length;
				length -= tim.length;
				break;
			}
			if (tim.length - 3 > (int)sizeof tim.bitmap)
				return 0;
			memcpy(&tim.count, p + offset, 3);
			offset += 3;
			length -= 3;
			/*
			 * offset was advanced past the 3-byte
			 * count/period/bitmap-control fields above,
			 * so the partial virtual bitmap begins at
			 * p + offset (this is the CVE-2017-13008
			 * fix; the old code read p + offset + 3).
			 */
			memcpy(tim.bitmap, p + offset, tim.length - 3);
			offset += tim.length - 3;
			length -= tim.length - 3;
			/*
			 * Present and not truncated.
			 *
			 * If we haven't already seen a TIM IE,
			 * copy this one, otherwise ignore this one,
			 * so we later report the first one we saw.
			 */
			if (!pbody->tim_present) {
				pbody->tim = tim;
				pbody->tim_present = 1;
			}
			break;
		default:
#if 0
			ND_PRINT((ndo, "(1) unhandled element_id (%d)  ",
			    *(p + offset)));
#endif
			/* Unknown element; skip header plus payload. */
			offset += 2 + elementlen;
			length -= 2 + elementlen;
			break;
		}
	}

	/* No problems found. */
	return 1;
}
{'added': [(1180, '\t\t\tmemcpy(tim.bitmap, p + offset, tim.length - 3);')], 'deleted': [(1042, '\t\t\t\tif (!ND_TTEST2(*(p + offset), ssid.length))'), (1043, '\t\t\t\t\treturn 0;'), (1044, '\t\t\t\tif (length < ssid.length)'), (1045, '\t\t\t\t\treturn 0;'), (1071, '\t\t\t\tif (!ND_TTEST2(*(p + offset), challenge.length))'), (1072, '\t\t\t\t\treturn 0;'), (1073, '\t\t\t\tif (length < challenge.length)'), (1074, '\t\t\t\t\treturn 0;'), (1100, '\t\t\t\tif (!ND_TTEST2(*(p + offset), rates.length))'), (1101, '\t\t\t\t\treturn 0;'), (1102, '\t\t\t\tif (length < rates.length)'), (1103, '\t\t\t\t\treturn 0;'), (1192, '\t\t\tmemcpy(tim.bitmap, p + offset + 3, tim.length - 3);')]}
1
13
2,024
11,722
158
960
36
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13008
CWE-125
3,209
timer.c
C
snd_timer_user_open
/* * Timers abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/module.h> #include <linux/string.h> #include <sound/core.h> #include <sound/timer.h> #include <sound/control.h> #include <sound/info.h> #include <sound/minors.h> #include <sound/initval.h> #include <linux/kmod.h> #if IS_ENABLED(CONFIG_SND_HRTIMER) #define DEFAULT_TIMER_LIMIT 4 #elif IS_ENABLED(CONFIG_SND_RTCTIMER) #define DEFAULT_TIMER_LIMIT 2 #else #define DEFAULT_TIMER_LIMIT 1 #endif static int timer_limit = DEFAULT_TIMER_LIMIT; static int timer_tstamp_monotonic = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA timer interface"); MODULE_LICENSE("GPL"); module_param(timer_limit, int, 0444); MODULE_PARM_DESC(timer_limit, "Maximum global timers in system."); module_param(timer_tstamp_monotonic, int, 0444); MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default)."); MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER); MODULE_ALIAS("devname:snd/timer"); struct snd_timer_user { struct snd_timer_instance *timeri; int tread; /* 
enhanced read with timestamps and events */ unsigned long ticks; unsigned long overrun; int qhead; int qtail; int qused; int queue_size; struct snd_timer_read *queue; struct snd_timer_tread *tqueue; spinlock_t qlock; unsigned long last_resolution; unsigned int filter; struct timespec tstamp; /* trigger tstamp */ wait_queue_head_t qchange_sleep; struct fasync_struct *fasync; struct mutex tread_sem; }; /* list of timers */ static LIST_HEAD(snd_timer_list); /* list of slave instances */ static LIST_HEAD(snd_timer_slave_list); /* lock for slave active lists */ static DEFINE_SPINLOCK(slave_active_lock); static DEFINE_MUTEX(register_mutex); static int snd_timer_free(struct snd_timer *timer); static int snd_timer_dev_free(struct snd_device *device); static int snd_timer_dev_register(struct snd_device *device); static int snd_timer_dev_disconnect(struct snd_device *device); static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left); /* * create a timer instance with the given owner string. * when timer is not NULL, increments the module counter */ static struct snd_timer_instance *snd_timer_instance_new(char *owner, struct snd_timer *timer) { struct snd_timer_instance *timeri; timeri = kzalloc(sizeof(*timeri), GFP_KERNEL); if (timeri == NULL) return NULL; timeri->owner = kstrdup(owner, GFP_KERNEL); if (! 
timeri->owner) { kfree(timeri); return NULL; } INIT_LIST_HEAD(&timeri->open_list); INIT_LIST_HEAD(&timeri->active_list); INIT_LIST_HEAD(&timeri->ack_list); INIT_LIST_HEAD(&timeri->slave_list_head); INIT_LIST_HEAD(&timeri->slave_active_head); timeri->timer = timer; if (timer && !try_module_get(timer->module)) { kfree(timeri->owner); kfree(timeri); return NULL; } return timeri; } /* * find a timer instance from the given timer id */ static struct snd_timer *snd_timer_find(struct snd_timer_id *tid) { struct snd_timer *timer = NULL; list_for_each_entry(timer, &snd_timer_list, device_list) { if (timer->tmr_class != tid->dev_class) continue; if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD || timer->tmr_class == SNDRV_TIMER_CLASS_PCM) && (timer->card == NULL || timer->card->number != tid->card)) continue; if (timer->tmr_device != tid->device) continue; if (timer->tmr_subdevice != tid->subdevice) continue; return timer; } return NULL; } #ifdef CONFIG_MODULES static void snd_timer_request(struct snd_timer_id *tid) { switch (tid->dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: if (tid->device < timer_limit) request_module("snd-timer-%i", tid->device); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (tid->card < snd_ecards_limit) request_module("snd-card-%i", tid->card); break; default: break; } } #endif /* * look for a master instance matching with the slave id of the given slave. * when found, relink the open_link of the slave. * * call this with register_mutex down. */ static void snd_timer_check_slave(struct snd_timer_instance *slave) { struct snd_timer *timer; struct snd_timer_instance *master; /* FIXME: it's really dumb to look up all entries.. 
*/ list_for_each_entry(timer, &snd_timer_list, device_list) { list_for_each_entry(master, &timer->open_list_head, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; spin_unlock_irq(&slave_active_lock); return; } } } } /* * look for slave instances matching with the slave id of the given master. * when found, relink the open_link of slaves. * * call this with register_mutex down. */ static void snd_timer_check_master(struct snd_timer_instance *master) { struct snd_timer_instance *slave, *tmp; /* check all pending slaves */ list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; if (slave->flags & SNDRV_TIMER_IFLG_RUNNING) list_add_tail(&slave->active_list, &master->slave_active_head); spin_unlock_irq(&slave_active_lock); } } } /* * open a timer instance * when opening a master, the slave id must be here given. 
*/
int snd_timer_open(struct snd_timer_instance **ti,
		   char *owner, struct snd_timer_id *tid,
		   unsigned int slave_id)
{
	struct snd_timer *timer;
	struct snd_timer_instance *timeri = NULL;

	if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
		/* open a slave instance: it is only queued on
		 * snd_timer_slave_list until a matching master appears */
		if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
		    tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
			pr_debug("ALSA: timer: invalid slave class %i\n",
				 tid->dev_sclass);
			return -EINVAL;
		}
		mutex_lock(&register_mutex);
		timeri = snd_timer_instance_new(owner, NULL);
		if (!timeri) {
			mutex_unlock(&register_mutex);
			return -ENOMEM;
		}
		timeri->slave_class = tid->dev_sclass;
		timeri->slave_id = tid->device;
		timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
		list_add_tail(&timeri->open_list, &snd_timer_slave_list);
		/* try to bind to an already-open master right away */
		snd_timer_check_slave(timeri);
		mutex_unlock(&register_mutex);
		*ti = timeri;
		return 0;
	}

	/* open a master instance */
	mutex_lock(&register_mutex);
	timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
	if (!timer) {
		/* the mutex must be dropped around request_module();
		 * re-lookup afterwards since the list may have changed */
		mutex_unlock(&register_mutex);
		snd_timer_request(tid);
		mutex_lock(&register_mutex);
		timer = snd_timer_find(tid);
	}
#endif
	if (!timer) {
		mutex_unlock(&register_mutex);
		return -ENODEV;
	}
	if (!list_empty(&timer->open_list_head)) {
		/* an exclusive opener blocks any further opens */
		timeri = list_entry(timer->open_list_head.next,
				    struct snd_timer_instance, open_list);
		if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
			mutex_unlock(&register_mutex);
			return -EBUSY;
		}
	}
	timeri = snd_timer_instance_new(owner, timer);
	if (!timeri) {
		mutex_unlock(&register_mutex);
		return -ENOMEM;
	}
	timeri->slave_class = tid->dev_sclass;
	timeri->slave_id = slave_id;
	/* hw.open is called only for the first opener of this timer */
	if (list_empty(&timer->open_list_head) && timer->hw.open)
		timer->hw.open(timer);
	list_add_tail(&timeri->open_list, &timer->open_list_head);
	/* adopt any pending slaves that were waiting for this master */
	snd_timer_check_master(timeri);
	mutex_unlock(&register_mutex);
	*ti = timeri;
	return 0;
}

static int _snd_timer_stop(struct snd_timer_instance *timeri,
			   int keep_flag, int event);

/*
 * close a timer instance
 */
int snd_timer_close(struct snd_timer_instance *timeri)
{
	struct snd_timer *timer =
NULL;
	struct snd_timer_instance *slave, *tmp;

	if (snd_BUG_ON(!timeri))
		return -ENXIO;

	/* force to stop the timer */
	snd_timer_stop(timeri);

	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
		/* wait, until the active callback is finished;
		 * busy-waits in 10us steps on IFLG_CALLBACK, dropping the
		 * lock each iteration so the callback path can clear it */
		spin_lock_irq(&slave_active_lock);
		while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
			spin_unlock_irq(&slave_active_lock);
			udelay(10);
			spin_lock_irq(&slave_active_lock);
		}
		spin_unlock_irq(&slave_active_lock);
		mutex_lock(&register_mutex);
		list_del(&timeri->open_list);
		mutex_unlock(&register_mutex);
	} else {
		timer = timeri->timer;
		if (snd_BUG_ON(!timer))
			goto out;
		/* wait, until the active callback is finished */
		spin_lock_irq(&timer->lock);
		while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
			spin_unlock_irq(&timer->lock);
			udelay(10);
			spin_lock_irq(&timer->lock);
		}
		spin_unlock_irq(&timer->lock);
		mutex_lock(&register_mutex);
		list_del(&timeri->open_list);
		/* hw.close is called when the last opener goes away */
		if (timer && list_empty(&timer->open_list_head) &&
		    timer->hw.close)
			timer->hw.close(timer);
		/* remove slave links: orphaned slaves go back onto the
		 * global pending list with master/timer cleared */
		list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
					 open_list) {
			spin_lock_irq(&slave_active_lock);
			_snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
			list_move_tail(&slave->open_list, &snd_timer_slave_list);
			slave->master = NULL;
			slave->timer = NULL;
			spin_unlock_irq(&slave_active_lock);
		}
		mutex_unlock(&register_mutex);
	}
 out:
	/* free the instance; module refcount taken at open is dropped
	 * only for master instances (timer is NULL for slaves) */
	if (timeri->private_free)
		timeri->private_free(timeri);
	kfree(timeri->owner);
	kfree(timeri);
	if (timer)
		module_put(timer->module);
	return 0;
}

/*
 * Return the current resolution of the given instance's timer in ns,
 * preferring the dynamic hw.c_resolution() callback when present;
 * 0 if the instance or its timer is gone.
 */
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
	struct snd_timer * timer;

	if (timeri == NULL)
		return 0;
	if ((timer = timeri->timer) != NULL) {
		if (timer->hw.c_resolution)
			return timer->hw.c_resolution(timer);
		return timer->hw.resolution;
	}
	return 0;
}

/*
 * Deliver a start/stop/continue/pause event to the instance's ccallback
 * and, for a master, to all of its active slaves (event + 100 encodes
 * the "slave" variant of the event for listeners).
 */
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
	struct snd_timer *timer;
	unsigned long flags;
	unsigned long resolution = 0;
	struct snd_timer_instance *ts;
	struct timespec tstamp;

	if
(timer_tstamp_monotonic)
		ktime_get_ts(&tstamp);
	else
		getnstimeofday(&tstamp);
	if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
		       event > SNDRV_TIMER_EVENT_PAUSE))
		return;
	/* resolution is only meaningful for (re)start events */
	if (event == SNDRV_TIMER_EVENT_START ||
	    event == SNDRV_TIMER_EVENT_CONTINUE)
		resolution = snd_timer_resolution(ti);
	if (ti->ccallback)
		ti->ccallback(ti, event, &tstamp, resolution);
	if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
		return;
	timer = ti->timer;
	if (timer == NULL)
		return;
	if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
		return;
	/* fan the event out to this master's active slaves
	 * (event + 100 == the MSTART..MPAUSE "slave" event range) */
	spin_lock_irqsave(&timer->lock, flags);
	list_for_each_entry(ts, &ti->slave_active_head, active_list)
		if (ts->ccallback)
			ts->ccallback(ti, event + 100, &tstamp, resolution);
	spin_unlock_irqrestore(&timer->lock, flags);
}

/*
 * Arm one instance on an already-open timer.
 * Caller holds timer->lock.  Returns 1 when the start is deferred to the
 * next reschedule (hardware already running), 0 when started now.
 */
static int snd_timer_start1(struct snd_timer *timer,
			    struct snd_timer_instance *timeri,
			    unsigned long sticks)
{
	list_move_tail(&timeri->active_list, &timer->active_list_head);
	if (timer->running) {
		if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
			goto __start_now;
		/* hardware busy: flag for pickup by snd_timer_reschedule() */
		timer->flags |= SNDRV_TIMER_FLG_RESCHED;
		timeri->flags |= SNDRV_TIMER_IFLG_START;
		return 1;	/* delayed start */
	} else {
		timer->sticks = sticks;
		timer->hw.start(timer);
	      __start_now:
		timer->running++;
		timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
		return 0;
	}
}

/*
 * Mark a slave instance running; it only ticks once it is linked onto a
 * master's slave_active_head (done here if already bound).
 */
static int snd_timer_start_slave(struct snd_timer_instance *timeri)
{
	unsigned long flags;

	spin_lock_irqsave(&slave_active_lock, flags);
	timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
	if (timeri->master)
		list_add_tail(&timeri->active_list,
			      &timeri->master->slave_active_head);
	spin_unlock_irqrestore(&slave_active_lock, flags);
	return 1; /* delayed start */
}

/*
 * start the timer instance
 */
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
	struct snd_timer *timer;
	int result = -EINVAL;
	unsigned long flags;

	if (timeri == NULL || ticks < 1)
		return -EINVAL;
	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
		result = snd_timer_start_slave(timeri);
		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
		return result;
	}
	timer = timeri->timer;
	if (timer == NULL)
		return -EINVAL;
	spin_lock_irqsave(&timer->lock, flags);
	/* ticks = requested period; cticks counts down to expiry */
	timeri->ticks = timeri->cticks = ticks;
	timeri->pticks = 0;
	result = snd_timer_start1(timer, timeri, ticks);
	spin_unlock_irqrestore(&timer->lock, flags);
	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
	return result;
}

/*
 * Common stop path for master and slave instances.
 * keep_flag != 0 leaves RUNNING/START flags intact (used when a slave is
 * being detached from a dying master rather than truly stopped); the
 * hardware is stopped only when the last running instance goes away.
 */
static int _snd_timer_stop(struct snd_timer_instance * timeri,
			   int keep_flag, int event)
{
	struct snd_timer *timer;
	unsigned long flags;

	if (snd_BUG_ON(!timeri))
		return -ENXIO;

	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
		if (!keep_flag) {
			spin_lock_irqsave(&slave_active_lock, flags);
			timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
			spin_unlock_irqrestore(&slave_active_lock, flags);
		}
		goto __end;
	}
	timer = timeri->timer;
	if (!timer)
		return -EINVAL;
	spin_lock_irqsave(&timer->lock, flags);
	list_del_init(&timeri->ack_list);
	list_del_init(&timeri->active_list);
	/* NOTE(review): timer->running is decremented even when this
	 * instance was not RUNNING if the && short-circuits differently —
	 * here the decrement only happens when the flag was set, and the
	 * hardware is stopped when the count reaches zero */
	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
	    !(--timer->running)) {
		timer->hw.stop(timer);
		if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
			/* other instances were waiting for a reschedule:
			 * recompute sticks and restart if needed */
			timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
			snd_timer_reschedule(timer, 0);
			if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
				timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
				timer->hw.start(timer);
			}
		}
	}
	if (!keep_flag)
		timeri->flags &=
			~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
	spin_unlock_irqrestore(&timer->lock, flags);
      __end:
	if (event != SNDRV_TIMER_EVENT_RESOLUTION)
		snd_timer_notify1(timeri, event);
	return 0;
}

/*
 * stop the timer instance.
 *
 * do not call this from the timer callback!
 */
int snd_timer_stop(struct snd_timer_instance *timeri)
{
	struct snd_timer *timer;
	unsigned long flags;
	int err;

	err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
	if (err < 0)
		return err;
	timer = timeri->timer;
	if (!timer)
		return -EINVAL;
	/* reload cticks so a later snd_timer_continue restarts a full period */
	spin_lock_irqsave(&timer->lock, flags);
	timeri->cticks = timeri->ticks;
	timeri->pticks = 0;
	spin_unlock_irqrestore(&timer->lock, flags);
	return 0;
}

/*
 * start again.. the tick is kept.
*/ int snd_timer_continue(struct snd_timer_instance *timeri) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL) return result; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) return snd_timer_start_slave(timeri); timer = timeri->timer; if (! timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); if (!timeri->cticks) timeri->cticks = 1; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, timer->sticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); return result; } /* * pause.. remember the ticks left */ int snd_timer_pause(struct snd_timer_instance * timeri) { return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE); } /* * reschedule the timer * * start pending instances and check the scheduling ticks. * when the scheduling ticks is changed set CHANGE flag to reprogram the timer. */ static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti; unsigned long ticks = ~0UL; list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->flags & SNDRV_TIMER_IFLG_START) { ti->flags &= ~SNDRV_TIMER_IFLG_START; ti->flags |= SNDRV_TIMER_IFLG_RUNNING; timer->running++; } if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) { if (ticks > ti->cticks) ticks = ti->cticks; } } if (ticks == ~0UL) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; return; } if (ticks > timer->hw.ticks) ticks = timer->hw.ticks; if (ticks_left != ticks) timer->flags |= SNDRV_TIMER_FLG_CHANGE; timer->sticks = ticks; } /* * timer tasklet * */ static void snd_timer_tasklet(unsigned long arg) { struct snd_timer *timer = (struct snd_timer *) arg; struct snd_timer_instance *ti; struct list_head *p; unsigned long resolution, ticks; unsigned long flags; spin_lock_irqsave(&timer->lock, flags); /* now process all callbacks */ while (!list_empty(&timer->sack_list_head)) { p = timer->sack_list_head.next; /* get first item */ ti = list_entry(p, struct 
snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; resolution = ti->resolution; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } spin_unlock_irqrestore(&timer->lock, flags); } /* * timer interrupt * * ticks_left is usually equal to timer->sticks. * */ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti, *ts, *tmp; unsigned long resolution, ticks; struct list_head *p, *ack_list_head; unsigned long flags; int use_tasklet = 0; if (timer == NULL) return; spin_lock_irqsave(&timer->lock, flags); /* remember the current resolution */ if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; /* loop for all active instances * Here we cannot use list_for_each_entry because the active_list of a * processed instance is relinked to done_list_head before the callback * is called. 
*/
	list_for_each_entry_safe(ti, tmp, &timer->active_list_head, active_list) {
		if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
			continue;
		/* accumulate elapsed ticks and count down to expiry */
		ti->pticks += ticks_left;
		ti->resolution = resolution;
		if (ti->cticks < ticks_left)
			ti->cticks = 0;
		else
			ti->cticks -= ticks_left;
		if (ti->cticks) /* not expired */
			continue;
		if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
			/* auto-repeat: rearm with the original period */
			ti->cticks = ti->ticks;
		} else {
			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
			if (--timer->running)
				list_del_init(&ti->active_list);
		}
		/* fast instances are acked here in irq context; the rest
		 * are queued for the tasklet (sack_list_head) */
		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
			ack_list_head = &timer->ack_list_head;
		else
			ack_list_head = &timer->sack_list_head;
		if (list_empty(&ti->ack_list))
			list_add_tail(&ti->ack_list, ack_list_head);
		/* propagate the tick to this instance's slaves */
		list_for_each_entry(ts, &ti->slave_active_head, active_list) {
			ts->pticks = ti->pticks;
			ts->resolution = resolution;
			if (list_empty(&ts->ack_list))
				list_add_tail(&ts->ack_list, ack_list_head);
		}
	}
	if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
		snd_timer_reschedule(timer, timer->sticks);
	if (timer->running) {
		if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
			timer->hw.stop(timer);
			timer->flags |= SNDRV_TIMER_FLG_CHANGE;
		}
		if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
		    (timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
			/* restart timer */
			timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
			timer->hw.start(timer);
		}
	} else {
		timer->hw.stop(timer);
	}

	/* now process all fast callbacks; IFLG_CALLBACK marks the instance
	 * busy while timer->lock is dropped around the user callback, so
	 * snd_timer_close() can wait for it to finish */
	while (!list_empty(&timer->ack_list_head)) {
		p = timer->ack_list_head.next;		/* get first item */
		ti = list_entry(p, struct snd_timer_instance, ack_list);

		/* remove from ack_list and make empty */
		list_del_init(p);

		ticks = ti->pticks;
		ti->pticks = 0;

		ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
		spin_unlock(&timer->lock);
		if (ti->callback)
			ti->callback(ti, resolution, ticks);
		spin_lock(&timer->lock);
		ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
	}

	/* do we have any slow callbacks?
*/ use_tasklet = !list_empty(&timer->sack_list_head); spin_unlock_irqrestore(&timer->lock, flags); if (use_tasklet) tasklet_schedule(&timer->task_queue); } /* */ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, struct snd_timer **rtimer) { struct snd_timer *timer; int err; static struct snd_device_ops ops = { .dev_free = snd_timer_dev_free, .dev_register = snd_timer_dev_register, .dev_disconnect = snd_timer_dev_disconnect, }; if (snd_BUG_ON(!tid)) return -EINVAL; if (rtimer) *rtimer = NULL; timer = kzalloc(sizeof(*timer), GFP_KERNEL); if (!timer) return -ENOMEM; timer->tmr_class = tid->dev_class; timer->card = card; timer->tmr_device = tid->device; timer->tmr_subdevice = tid->subdevice; if (id) strlcpy(timer->id, id, sizeof(timer->id)); INIT_LIST_HEAD(&timer->device_list); INIT_LIST_HEAD(&timer->open_list_head); INIT_LIST_HEAD(&timer->active_list_head); INIT_LIST_HEAD(&timer->ack_list_head); INIT_LIST_HEAD(&timer->sack_list_head); spin_lock_init(&timer->lock); tasklet_init(&timer->task_queue, snd_timer_tasklet, (unsigned long)timer); if (card != NULL) { timer->module = card->module; err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops); if (err < 0) { snd_timer_free(timer); return err; } } if (rtimer) *rtimer = timer; return 0; } static int snd_timer_free(struct snd_timer *timer) { if (!timer) return 0; mutex_lock(&register_mutex); if (! 
list_empty(&timer->open_list_head)) { struct list_head *p, *n; struct snd_timer_instance *ti; pr_warn("ALSA: timer %p is busy?\n", timer); list_for_each_safe(p, n, &timer->open_list_head) { list_del_init(p); ti = list_entry(p, struct snd_timer_instance, open_list); ti->timer = NULL; } } list_del(&timer->device_list); mutex_unlock(&register_mutex); if (timer->private_free) timer->private_free(timer); kfree(timer); return 0; } static int snd_timer_dev_free(struct snd_device *device) { struct snd_timer *timer = device->device_data; return snd_timer_free(timer); } static int snd_timer_dev_register(struct snd_device *dev) { struct snd_timer *timer = dev->device_data; struct snd_timer *timer1; if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop)) return -ENXIO; if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) && !timer->hw.resolution && timer->hw.c_resolution == NULL) return -EINVAL; mutex_lock(&register_mutex); list_for_each_entry(timer1, &snd_timer_list, device_list) { if (timer1->tmr_class > timer->tmr_class) break; if (timer1->tmr_class < timer->tmr_class) continue; if (timer1->card && timer->card) { if (timer1->card->number > timer->card->number) break; if (timer1->card->number < timer->card->number) continue; } if (timer1->tmr_device > timer->tmr_device) break; if (timer1->tmr_device < timer->tmr_device) continue; if (timer1->tmr_subdevice > timer->tmr_subdevice) break; if (timer1->tmr_subdevice < timer->tmr_subdevice) continue; /* conflicts.. 
*/ mutex_unlock(&register_mutex); return -EBUSY; } list_add_tail(&timer->device_list, &timer1->device_list); mutex_unlock(&register_mutex); return 0; } static int snd_timer_dev_disconnect(struct snd_device *device) { struct snd_timer *timer = device->device_data; mutex_lock(&register_mutex); list_del_init(&timer->device_list); mutex_unlock(&register_mutex); return 0; } void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp) { unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ti, *ts; if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) return; if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART || event > SNDRV_TIMER_EVENT_MRESUME)) return; spin_lock_irqsave(&timer->lock, flags); if (event == SNDRV_TIMER_EVENT_MSTART || event == SNDRV_TIMER_EVENT_MCONTINUE || event == SNDRV_TIMER_EVENT_MRESUME) { if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; } list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->ccallback) ti->ccallback(ti, event, tstamp, resolution); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ts, event, tstamp, resolution); } spin_unlock_irqrestore(&timer->lock, flags); } /* * exported functions for global timers */ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer) { struct snd_timer_id tid; tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = -1; tid.device = device; tid.subdevice = 0; return snd_timer_new(NULL, id, &tid, rtimer); } int snd_timer_global_free(struct snd_timer *timer) { return snd_timer_free(timer); } int snd_timer_global_register(struct snd_timer *timer) { struct snd_device dev; memset(&dev, 0, sizeof(dev)); dev.device_data = timer; return snd_timer_dev_register(&dev); } /* * System timer */ struct snd_timer_system_private { struct timer_list tlist; unsigned long last_expires; unsigned long 
last_jiffies; unsigned long correction; }; static void snd_timer_s_function(unsigned long data) { struct snd_timer *timer = (struct snd_timer *)data; struct snd_timer_system_private *priv = timer->private_data; unsigned long jiff = jiffies; if (time_after(jiff, priv->last_expires)) priv->correction += (long)jiff - (long)priv->last_expires; snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies); } static int snd_timer_s_start(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long njiff; priv = (struct snd_timer_system_private *) timer->private_data; njiff = (priv->last_jiffies = jiffies); if (priv->correction > timer->sticks - 1) { priv->correction -= timer->sticks - 1; njiff++; } else { njiff += timer->sticks - priv->correction; priv->correction = 0; } priv->last_expires = priv->tlist.expires = njiff; add_timer(&priv->tlist); return 0; } static int snd_timer_s_stop(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long jiff; priv = (struct snd_timer_system_private *) timer->private_data; del_timer(&priv->tlist); jiff = jiffies; if (time_before(jiff, priv->last_expires)) timer->sticks = priv->last_expires - jiff; else timer->sticks = 1; priv->correction = 0; return 0; } static struct snd_timer_hardware snd_timer_system = { .flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .resolution = 1000000000L / HZ, .ticks = 10000000L, .start = snd_timer_s_start, .stop = snd_timer_s_stop }; static void snd_timer_free_system(struct snd_timer *timer) { kfree(timer->private_data); } static int snd_timer_register_system(void) { struct snd_timer *timer; struct snd_timer_system_private *priv; int err; err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer); if (err < 0) return err; strcpy(timer->name, "system timer"); timer->hw = snd_timer_system; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) { snd_timer_free(timer); return -ENOMEM; } setup_timer(&priv->tlist, snd_timer_s_function, 
(unsigned long) timer); timer->private_data = priv; timer->private_free = snd_timer_free_system; return snd_timer_global_register(timer); } #ifdef CONFIG_SND_PROC_FS /* * Info interface */ static void snd_timer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_timer *timer; struct snd_timer_instance *ti; mutex_lock(&register_mutex); list_for_each_entry(timer, &snd_timer_list, device_list) { switch (timer->tmr_class) { case SNDRV_TIMER_CLASS_GLOBAL: snd_iprintf(buffer, "G%i: ", timer->tmr_device); break; case SNDRV_TIMER_CLASS_CARD: snd_iprintf(buffer, "C%i-%i: ", timer->card->number, timer->tmr_device); break; case SNDRV_TIMER_CLASS_PCM: snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number, timer->tmr_device, timer->tmr_subdevice); break; default: snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class, timer->card ? timer->card->number : -1, timer->tmr_device, timer->tmr_subdevice); } snd_iprintf(buffer, "%s :", timer->name); if (timer->hw.resolution) snd_iprintf(buffer, " %lu.%03luus (%lu ticks)", timer->hw.resolution / 1000, timer->hw.resolution % 1000, timer->hw.ticks); if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) snd_iprintf(buffer, " SLAVE"); snd_iprintf(buffer, "\n"); list_for_each_entry(ti, &timer->open_list_head, open_list) snd_iprintf(buffer, " Client %s : %s\n", ti->owner ? ti->owner : "unknown", ti->flags & (SNDRV_TIMER_IFLG_START | SNDRV_TIMER_IFLG_RUNNING) ? 
"running" : "stopped"); } mutex_unlock(&register_mutex); } static struct snd_info_entry *snd_timer_proc_entry; static void __init snd_timer_proc_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL); if (entry != NULL) { entry->c.text.read = snd_timer_proc_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } snd_timer_proc_entry = entry; } static void __exit snd_timer_proc_done(void) { snd_info_free_entry(snd_timer_proc_entry); } #else /* !CONFIG_SND_PROC_FS */ #define snd_timer_proc_init() #define snd_timer_proc_done() #endif /* * USER SPACE interface */ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_read *r; int prev; spin_lock(&tu->qlock); if (tu->qused > 0) { prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1; r = &tu->queue[prev]; if (r->resolution == resolution) { r->ticks += ticks; goto __wake; } } if (tu->qused >= tu->queue_size) { tu->overrun++; } else { r = &tu->queue[tu->qtail++]; tu->qtail %= tu->queue_size; r->resolution = resolution; r->ticks = ticks; tu->qused++; } __wake: spin_unlock(&tu->qlock); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu, struct snd_timer_tread *tread) { if (tu->qused >= tu->queue_size) { tu->overrun++; } else { memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread)); tu->qtail %= tu->queue_size; tu->qused++; } } static void snd_timer_user_ccallback(struct snd_timer_instance *timeri, int event, struct timespec *tstamp, unsigned long resolution) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread r1; unsigned long flags; if (event >= SNDRV_TIMER_EVENT_START && event <= SNDRV_TIMER_EVENT_PAUSE) tu->tstamp = *tstamp; if ((tu->filter & (1 << event)) == 0 || !tu->tread) return; 
r1.event = event; r1.tstamp = *tstamp; r1.val = resolution; spin_lock_irqsave(&tu->qlock, flags); snd_timer_user_append_to_tqueue(tu, &r1); spin_unlock_irqrestore(&tu->qlock, flags); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread *r, r1; struct timespec tstamp; int prev, append = 0; memset(&tstamp, 0, sizeof(tstamp)); spin_lock(&tu->qlock); if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) | (1 << SNDRV_TIMER_EVENT_TICK))) == 0) { spin_unlock(&tu->qlock); return; } if (tu->last_resolution != resolution || ticks > 0) { if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) && tu->last_resolution != resolution) { r1.event = SNDRV_TIMER_EVENT_RESOLUTION; r1.tstamp = tstamp; r1.val = resolution; snd_timer_user_append_to_tqueue(tu, &r1); tu->last_resolution = resolution; append++; } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0) goto __wake; if (ticks == 0) goto __wake; if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->tqueue[prev]; if (r->event == SNDRV_TIMER_EVENT_TICK) { r->tstamp = tstamp; r->val += ticks; append++; goto __wake; } } r1.event = SNDRV_TIMER_EVENT_TICK; r1.tstamp = tstamp; r1.val = ticks; snd_timer_user_append_to_tqueue(tu, &r1); append++; __wake: spin_unlock(&tu->qlock); if (append == 0) return; kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static int snd_timer_user_open(struct inode *inode, struct file *file) { struct snd_timer_user *tu; int err; err = nonseekable_open(inode, file); if (err < 0) return err; tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (tu == NULL) return -ENOMEM; spin_lock_init(&tu->qlock); init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->tread_sem); tu->ticks = 1; tu->queue_size = 128; tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) { kfree(tu); return -ENOMEM; } file->private_data = tu; return 0; } static int snd_timer_user_release(struct inode *inode, struct file *file) { struct snd_timer_user *tu; if (file->private_data) { tu = file->private_data; file->private_data = NULL; if (tu->timeri) snd_timer_close(tu->timeri); kfree(tu->queue); kfree(tu->tqueue); kfree(tu); } return 0; } static void snd_timer_user_zero_id(struct snd_timer_id *id) { id->dev_class = SNDRV_TIMER_CLASS_NONE; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = -1; id->device = -1; id->subdevice = -1; } static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer) { id->dev_class = timer->tmr_class; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = timer->card ? 
timer->card->number : -1; id->device = timer->tmr_device; id->subdevice = timer->tmr_subdevice; } static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) { struct snd_timer_id id; struct snd_timer *timer; struct list_head *p; if (copy_from_user(&id, _tid, sizeof(id))) return -EFAULT; mutex_lock(&register_mutex); if (id.dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) snd_timer_user_zero_id(&id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); snd_timer_user_copy_id(&id, timer); } } else { switch (id.dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: id.device = id.device < 0 ? 0 : id.device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device >= id.device) { snd_timer_user_copy_id(&id, timer); break; } } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (id.card < 0) { id.card = 0; } else { if (id.card < 0) { id.card = 0; } else { if (id.device < 0) { id.device = 0; } else { if (id.subdevice < 0) { id.subdevice = 0; } else { id.subdevice++; } } } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > id.dev_class) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_class < id.dev_class) continue; if (timer->card->number > id.card) { snd_timer_user_copy_id(&id, timer); break; } if (timer->card->number < id.card) continue; if (timer->tmr_device > id.device) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device < id.device) continue; if (timer->tmr_subdevice > id.subdevice) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_subdevice < id.subdevice) continue; snd_timer_user_copy_id(&id, timer); break; } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; default: 
snd_timer_user_zero_id(&id); } } mutex_unlock(&register_mutex); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; } static int snd_timer_user_ginfo(struct file *file, struct snd_timer_ginfo __user *_ginfo) { struct snd_timer_ginfo *ginfo; struct snd_timer_id tid; struct snd_timer *t; struct list_head *p; int err = 0; ginfo = memdup_user(_ginfo, sizeof(*ginfo)); if (IS_ERR(ginfo)) return PTR_ERR(ginfo); tid = ginfo->tid; memset(ginfo, 0, sizeof(*ginfo)); ginfo->tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { ginfo->card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(ginfo->id, t->id, sizeof(ginfo->id)); strlcpy(ginfo->name, t->name, sizeof(ginfo->name)); ginfo->resolution = t->hw.resolution; if (t->hw.resolution_min > 0) { ginfo->resolution_min = t->hw.resolution_min; ginfo->resolution_max = t->hw.resolution_max; } list_for_each(p, &t->open_list_head) { ginfo->clients++; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo))) err = -EFAULT; kfree(ginfo); return err; } static int snd_timer_user_gparams(struct file *file, struct snd_timer_gparams __user *_gparams) { struct snd_timer_gparams gparams; struct snd_timer *t; int err; if (copy_from_user(&gparams, _gparams, sizeof(gparams))) return -EFAULT; mutex_lock(&register_mutex); t = snd_timer_find(&gparams.tid); if (!t) { err = -ENODEV; goto _error; } if (!list_empty(&t->open_list_head)) { err = -EBUSY; goto _error; } if (!t->hw.set_period) { err = -ENOSYS; goto _error; } err = t->hw.set_period(t, gparams.period_num, gparams.period_den); _error: mutex_unlock(&register_mutex); return err; } static int snd_timer_user_gstatus(struct file *file, struct snd_timer_gstatus __user *_gstatus) { struct snd_timer_gstatus gstatus; struct snd_timer_id tid; struct snd_timer *t; int err = 0; if (copy_from_user(&gstatus, _gstatus, 
sizeof(gstatus))) return -EFAULT; tid = gstatus.tid; memset(&gstatus, 0, sizeof(gstatus)); gstatus.tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { if (t->hw.c_resolution) gstatus.resolution = t->hw.c_resolution(t); else gstatus.resolution = t->hw.resolution; if (t->hw.precise_resolution) { t->hw.precise_resolution(t, &gstatus.resolution_num, &gstatus.resolution_den); } else { gstatus.resolution_num = gstatus.resolution; gstatus.resolution_den = 1000000000uL; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) err = -EFAULT; return err; } static int snd_timer_user_tselect(struct file *file, struct snd_timer_select __user *_tselect) { struct snd_timer_user *tu; struct snd_timer_select tselect; char str[32]; int err = 0; tu = file->private_data; mutex_lock(&tu->tread_sem); if (tu->timeri) { snd_timer_close(tu->timeri); tu->timeri = NULL; } if (copy_from_user(&tselect, _tselect, sizeof(tselect))) { err = -EFAULT; goto __err; } sprintf(str, "application %i", current->pid); if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE) tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION; err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid); if (err < 0) goto __err; kfree(tu->queue); tu->queue = NULL; kfree(tu->tqueue); tu->tqueue = NULL; if (tu->tread) { tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread), GFP_KERNEL); if (tu->tqueue == NULL) err = -ENOMEM; } else { tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) err = -ENOMEM; } if (err < 0) { snd_timer_close(tu->timeri); tu->timeri = NULL; } else { tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST; tu->timeri->callback = tu->tread ? 
snd_timer_user_tinterrupt : snd_timer_user_interrupt; tu->timeri->ccallback = snd_timer_user_ccallback; tu->timeri->callback_data = (void *)tu; } __err: mutex_unlock(&tu->tread_sem); return err; } static int snd_timer_user_info(struct file *file, struct snd_timer_info __user *_info) { struct snd_timer_user *tu; struct snd_timer_info *info; struct snd_timer *t; int err = 0; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; info->card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) info->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(info->id, t->id, sizeof(info->id)); strlcpy(info->name, t->name, sizeof(info->name)); info->resolution = t->hw.resolution; if (copy_to_user(_info, info, sizeof(*_info))) err = -EFAULT; kfree(info); return err; } static int snd_timer_user_params(struct file *file, struct snd_timer_params __user *_params) { struct snd_timer_user *tu; struct snd_timer_params params; struct snd_timer *t; struct snd_timer_read *tr; struct snd_timer_tread *ttr; int err; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; if (copy_from_user(&params, _params, sizeof(params))) return -EFAULT; if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { err = -EINVAL; goto _end; } if (params.queue_size > 0 && (params.queue_size < 32 || params.queue_size > 1024)) { err = -EINVAL; goto _end; } if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)| (1<<SNDRV_TIMER_EVENT_TICK)| (1<<SNDRV_TIMER_EVENT_START)| (1<<SNDRV_TIMER_EVENT_STOP)| (1<<SNDRV_TIMER_EVENT_CONTINUE)| (1<<SNDRV_TIMER_EVENT_PAUSE)| (1<<SNDRV_TIMER_EVENT_SUSPEND)| (1<<SNDRV_TIMER_EVENT_RESUME)| (1<<SNDRV_TIMER_EVENT_MSTART)| (1<<SNDRV_TIMER_EVENT_MSTOP)| (1<<SNDRV_TIMER_EVENT_MCONTINUE)| (1<<SNDRV_TIMER_EVENT_MPAUSE)| (1<<SNDRV_TIMER_EVENT_MSUSPEND)| (1<<SNDRV_TIMER_EVENT_MRESUME))) { err = -EINVAL; 
goto _end; } snd_timer_stop(tu->timeri); spin_lock_irq(&t->lock); tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO| SNDRV_TIMER_IFLG_EXCLUSIVE| SNDRV_TIMER_IFLG_EARLY_EVENT); if (params.flags & SNDRV_TIMER_PSFLG_AUTO) tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO; if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE) tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE; if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT) tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT; spin_unlock_irq(&t->lock); if (params.queue_size > 0 && (unsigned int)tu->queue_size != params.queue_size) { if (tu->tread) { ttr = kmalloc(params.queue_size * sizeof(*ttr), GFP_KERNEL); if (ttr) { kfree(tu->tqueue); tu->queue_size = params.queue_size; tu->tqueue = ttr; } } else { tr = kmalloc(params.queue_size * sizeof(*tr), GFP_KERNEL); if (tr) { kfree(tu->queue); tu->queue_size = params.queue_size; tu->queue = tr; } } } tu->qhead = tu->qtail = tu->qused = 0; if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) { if (tu->tread) { struct snd_timer_tread tread; tread.event = SNDRV_TIMER_EVENT_EARLY; tread.tstamp.tv_sec = 0; tread.tstamp.tv_nsec = 0; tread.val = 0; snd_timer_user_append_to_tqueue(tu, &tread); } else { struct snd_timer_read *r = &tu->queue[0]; r->resolution = 0; r->ticks = 0; tu->qused++; tu->qtail++; } } tu->filter = params.filter; tu->ticks = params.ticks; err = 0; _end: if (copy_to_user(_params, &params, sizeof(params))) return -EFAULT; return err; } static int snd_timer_user_status(struct file *file, struct snd_timer_status __user *_status) { struct snd_timer_user *tu; struct snd_timer_status status; tu = file->private_data; if (!tu->timeri) return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } 
static int snd_timer_user_start(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; snd_timer_stop(tu->timeri); tu->timeri->lost = 0; tu->last_resolution = 0; return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0; } static int snd_timer_user_stop(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_continue(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; tu->timeri->lost = 0; return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_pause(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0; } enum { SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20), SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21), SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22), SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23), }; static long snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu; void __user *argp = (void __user *)arg; int __user *p = argp; tu = file->private_data; switch (cmd) { case SNDRV_TIMER_IOCTL_PVERSION: return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0; case SNDRV_TIMER_IOCTL_NEXT_DEVICE: return snd_timer_user_next_device(argp); case SNDRV_TIMER_IOCTL_TREAD: { int xarg; mutex_lock(&tu->tread_sem); if (tu->timeri) { /* too late */ mutex_unlock(&tu->tread_sem); return -EBUSY; } if (get_user(xarg, p)) { mutex_unlock(&tu->tread_sem); return -EFAULT; } tu->tread = xarg ? 
1 : 0; mutex_unlock(&tu->tread_sem); return 0; } case SNDRV_TIMER_IOCTL_GINFO: return snd_timer_user_ginfo(file, argp); case SNDRV_TIMER_IOCTL_GPARAMS: return snd_timer_user_gparams(file, argp); case SNDRV_TIMER_IOCTL_GSTATUS: return snd_timer_user_gstatus(file, argp); case SNDRV_TIMER_IOCTL_SELECT: return snd_timer_user_tselect(file, argp); case SNDRV_TIMER_IOCTL_INFO: return snd_timer_user_info(file, argp); case SNDRV_TIMER_IOCTL_PARAMS: return snd_timer_user_params(file, argp); case SNDRV_TIMER_IOCTL_STATUS: return snd_timer_user_status(file, argp); case SNDRV_TIMER_IOCTL_START: case SNDRV_TIMER_IOCTL_START_OLD: return snd_timer_user_start(file); case SNDRV_TIMER_IOCTL_STOP: case SNDRV_TIMER_IOCTL_STOP_OLD: return snd_timer_user_stop(file); case SNDRV_TIMER_IOCTL_CONTINUE: case SNDRV_TIMER_IOCTL_CONTINUE_OLD: return snd_timer_user_continue(file); case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: return snd_timer_user_pause(file); } return -ENOTTY; } static int snd_timer_user_fasync(int fd, struct file * file, int on) { struct snd_timer_user *tu; tu = file->private_data; return fasync_helper(fd, file, on, &tu->fasync); } static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct snd_timer_user *tu; long result = 0, unit; int err = 0; tu = file->private_data; unit = tu->tread ? 
sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; break; } set_current_state(TASK_INTERRUPTIBLE); init_waitqueue_entry(&wait, current); add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); schedule(); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); if (signal_pending(current)) { err = -ERESTARTSYS; break; } } spin_unlock_irq(&tu->qlock); if (err < 0) goto _error; if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], sizeof(struct snd_timer_tread))) { err = -EFAULT; goto _error; } } else { if (copy_to_user(buffer, &tu->queue[tu->qhead++], sizeof(struct snd_timer_read))) { err = -EFAULT; goto _error; } } tu->qhead %= tu->queue_size; result += unit; buffer += unit; spin_lock_irq(&tu->qlock); tu->qused--; } spin_unlock_irq(&tu->qlock); _error: return result > 0 ? 
result : err; } static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_timer_user *tu; tu = file->private_data; poll_wait(file, &tu->qchange_sleep, wait); mask = 0; if (tu->qused) mask |= POLLIN | POLLRDNORM; return mask; } #ifdef CONFIG_COMPAT #include "timer_compat.c" #else #define snd_timer_user_ioctl_compat NULL #endif static const struct file_operations snd_timer_f_ops = { .owner = THIS_MODULE, .read = snd_timer_user_read, .open = snd_timer_user_open, .release = snd_timer_user_release, .llseek = no_llseek, .poll = snd_timer_user_poll, .unlocked_ioctl = snd_timer_user_ioctl, .compat_ioctl = snd_timer_user_ioctl_compat, .fasync = snd_timer_user_fasync, }; /* unregister the system timer */ static void snd_timer_free_all(void) { struct snd_timer *timer, *n; list_for_each_entry_safe(timer, n, &snd_timer_list, device_list) snd_timer_free(timer); } static struct device timer_dev; /* * ENTRY functions */ static int __init alsa_timer_init(void) { int err; snd_device_initialize(&timer_dev, NULL); dev_set_name(&timer_dev, "timer"); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1, "system timer"); #endif err = snd_timer_register_system(); if (err < 0) { pr_err("ALSA: unable to register system timer (%i)\n", err); put_device(&timer_dev); return err; } err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0, &snd_timer_f_ops, NULL, &timer_dev); if (err < 0) { pr_err("ALSA: unable to register timer device (%i)\n", err); snd_timer_free_all(); put_device(&timer_dev); return err; } snd_timer_proc_init(); return 0; } static void __exit alsa_timer_exit(void) { snd_unregister_device(&timer_dev); snd_timer_free_all(); put_device(&timer_dev); snd_timer_proc_done(); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1); #endif } module_init(alsa_timer_init) module_exit(alsa_timer_exit) EXPORT_SYMBOL(snd_timer_open); 
EXPORT_SYMBOL(snd_timer_close); EXPORT_SYMBOL(snd_timer_resolution); EXPORT_SYMBOL(snd_timer_start); EXPORT_SYMBOL(snd_timer_stop); EXPORT_SYMBOL(snd_timer_continue); EXPORT_SYMBOL(snd_timer_pause); EXPORT_SYMBOL(snd_timer_new); EXPORT_SYMBOL(snd_timer_notify); EXPORT_SYMBOL(snd_timer_global_new); EXPORT_SYMBOL(snd_timer_global_free); EXPORT_SYMBOL(snd_timer_global_register); EXPORT_SYMBOL(snd_timer_interrupt);
/* * Timers abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/module.h> #include <linux/string.h> #include <sound/core.h> #include <sound/timer.h> #include <sound/control.h> #include <sound/info.h> #include <sound/minors.h> #include <sound/initval.h> #include <linux/kmod.h> #if IS_ENABLED(CONFIG_SND_HRTIMER) #define DEFAULT_TIMER_LIMIT 4 #elif IS_ENABLED(CONFIG_SND_RTCTIMER) #define DEFAULT_TIMER_LIMIT 2 #else #define DEFAULT_TIMER_LIMIT 1 #endif static int timer_limit = DEFAULT_TIMER_LIMIT; static int timer_tstamp_monotonic = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA timer interface"); MODULE_LICENSE("GPL"); module_param(timer_limit, int, 0444); MODULE_PARM_DESC(timer_limit, "Maximum global timers in system."); module_param(timer_tstamp_monotonic, int, 0444); MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default)."); MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER); MODULE_ALIAS("devname:snd/timer"); struct snd_timer_user { struct snd_timer_instance *timeri; int tread; /* 
enhanced read with timestamps and events */ unsigned long ticks; unsigned long overrun; int qhead; int qtail; int qused; int queue_size; struct snd_timer_read *queue; struct snd_timer_tread *tqueue; spinlock_t qlock; unsigned long last_resolution; unsigned int filter; struct timespec tstamp; /* trigger tstamp */ wait_queue_head_t qchange_sleep; struct fasync_struct *fasync; struct mutex ioctl_lock; }; /* list of timers */ static LIST_HEAD(snd_timer_list); /* list of slave instances */ static LIST_HEAD(snd_timer_slave_list); /* lock for slave active lists */ static DEFINE_SPINLOCK(slave_active_lock); static DEFINE_MUTEX(register_mutex); static int snd_timer_free(struct snd_timer *timer); static int snd_timer_dev_free(struct snd_device *device); static int snd_timer_dev_register(struct snd_device *device); static int snd_timer_dev_disconnect(struct snd_device *device); static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left); /* * create a timer instance with the given owner string. * when timer is not NULL, increments the module counter */ static struct snd_timer_instance *snd_timer_instance_new(char *owner, struct snd_timer *timer) { struct snd_timer_instance *timeri; timeri = kzalloc(sizeof(*timeri), GFP_KERNEL); if (timeri == NULL) return NULL; timeri->owner = kstrdup(owner, GFP_KERNEL); if (! 
timeri->owner) { kfree(timeri); return NULL; } INIT_LIST_HEAD(&timeri->open_list); INIT_LIST_HEAD(&timeri->active_list); INIT_LIST_HEAD(&timeri->ack_list); INIT_LIST_HEAD(&timeri->slave_list_head); INIT_LIST_HEAD(&timeri->slave_active_head); timeri->timer = timer; if (timer && !try_module_get(timer->module)) { kfree(timeri->owner); kfree(timeri); return NULL; } return timeri; } /* * find a timer instance from the given timer id */ static struct snd_timer *snd_timer_find(struct snd_timer_id *tid) { struct snd_timer *timer = NULL; list_for_each_entry(timer, &snd_timer_list, device_list) { if (timer->tmr_class != tid->dev_class) continue; if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD || timer->tmr_class == SNDRV_TIMER_CLASS_PCM) && (timer->card == NULL || timer->card->number != tid->card)) continue; if (timer->tmr_device != tid->device) continue; if (timer->tmr_subdevice != tid->subdevice) continue; return timer; } return NULL; } #ifdef CONFIG_MODULES static void snd_timer_request(struct snd_timer_id *tid) { switch (tid->dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: if (tid->device < timer_limit) request_module("snd-timer-%i", tid->device); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (tid->card < snd_ecards_limit) request_module("snd-card-%i", tid->card); break; default: break; } } #endif /* * look for a master instance matching with the slave id of the given slave. * when found, relink the open_link of the slave. * * call this with register_mutex down. */ static void snd_timer_check_slave(struct snd_timer_instance *slave) { struct snd_timer *timer; struct snd_timer_instance *master; /* FIXME: it's really dumb to look up all entries.. 
*/ list_for_each_entry(timer, &snd_timer_list, device_list) { list_for_each_entry(master, &timer->open_list_head, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; spin_unlock_irq(&slave_active_lock); return; } } } } /* * look for slave instances matching with the slave id of the given master. * when found, relink the open_link of slaves. * * call this with register_mutex down. */ static void snd_timer_check_master(struct snd_timer_instance *master) { struct snd_timer_instance *slave, *tmp; /* check all pending slaves */ list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; if (slave->flags & SNDRV_TIMER_IFLG_RUNNING) list_add_tail(&slave->active_list, &master->slave_active_head); spin_unlock_irq(&slave_active_lock); } } } /* * open a timer instance * when opening a master, the slave id must be here given. 
*/ int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id) { struct snd_timer *timer; struct snd_timer_instance *timeri = NULL; if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { /* open a slave instance */ if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE || tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) { pr_debug("ALSA: timer: invalid slave class %i\n", tid->dev_sclass); return -EINVAL; } mutex_lock(&register_mutex); timeri = snd_timer_instance_new(owner, NULL); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = tid->device; timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; list_add_tail(&timeri->open_list, &snd_timer_slave_list); snd_timer_check_slave(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } /* open a master instance */ mutex_lock(&register_mutex); timer = snd_timer_find(tid); #ifdef CONFIG_MODULES if (!timer) { mutex_unlock(&register_mutex); snd_timer_request(tid); mutex_lock(&register_mutex); timer = snd_timer_find(tid); } #endif if (!timer) { mutex_unlock(&register_mutex); return -ENODEV; } if (!list_empty(&timer->open_list_head)) { timeri = list_entry(timer->open_list_head.next, struct snd_timer_instance, open_list); if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { mutex_unlock(&register_mutex); return -EBUSY; } } timeri = snd_timer_instance_new(owner, timer); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = slave_id; if (list_empty(&timer->open_list_head) && timer->hw.open) timer->hw.open(timer); list_add_tail(&timeri->open_list, &timer->open_list_head); snd_timer_check_master(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } static int _snd_timer_stop(struct snd_timer_instance *timeri, int keep_flag, int event); /* * close a timer instance */ int snd_timer_close(struct snd_timer_instance *timeri) { struct snd_timer *timer = 
NULL; struct snd_timer_instance *slave, *tmp; if (snd_BUG_ON(!timeri)) return -ENXIO; /* force to stop the timer */ snd_timer_stop(timeri); if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { /* wait, until the active callback is finished */ spin_lock_irq(&slave_active_lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&slave_active_lock); udelay(10); spin_lock_irq(&slave_active_lock); } spin_unlock_irq(&slave_active_lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); mutex_unlock(&register_mutex); } else { timer = timeri->timer; if (snd_BUG_ON(!timer)) goto out; /* wait, until the active callback is finished */ spin_lock_irq(&timer->lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&timer->lock); udelay(10); spin_lock_irq(&timer->lock); } spin_unlock_irq(&timer->lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); if (timer && list_empty(&timer->open_list_head) && timer->hw.close) timer->hw.close(timer); /* remove slave links */ list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, open_list) { spin_lock_irq(&slave_active_lock); _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION); list_move_tail(&slave->open_list, &snd_timer_slave_list); slave->master = NULL; slave->timer = NULL; spin_unlock_irq(&slave_active_lock); } mutex_unlock(&register_mutex); } out: if (timeri->private_free) timeri->private_free(timeri); kfree(timeri->owner); kfree(timeri); if (timer) module_put(timer->module); return 0; } unsigned long snd_timer_resolution(struct snd_timer_instance *timeri) { struct snd_timer * timer; if (timeri == NULL) return 0; if ((timer = timeri->timer) != NULL) { if (timer->hw.c_resolution) return timer->hw.c_resolution(timer); return timer->hw.resolution; } return 0; } static void snd_timer_notify1(struct snd_timer_instance *ti, int event) { struct snd_timer *timer; unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ts; struct timespec tstamp; if 
(timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START || event > SNDRV_TIMER_EVENT_PAUSE)) return; if (event == SNDRV_TIMER_EVENT_START || event == SNDRV_TIMER_EVENT_CONTINUE) resolution = snd_timer_resolution(ti); if (ti->ccallback) ti->ccallback(ti, event, &tstamp, resolution); if (ti->flags & SNDRV_TIMER_IFLG_SLAVE) return; timer = ti->timer; if (timer == NULL) return; if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) return; spin_lock_irqsave(&timer->lock, flags); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ti, event + 100, &tstamp, resolution); spin_unlock_irqrestore(&timer->lock, flags); } static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri, unsigned long sticks) { list_move_tail(&timeri->active_list, &timer->active_list_head); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) goto __start_now; timer->flags |= SNDRV_TIMER_FLG_RESCHED; timeri->flags |= SNDRV_TIMER_IFLG_START; return 1; /* delayed start */ } else { timer->sticks = sticks; timer->hw.start(timer); __start_now: timer->running++; timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; return 0; } } static int snd_timer_start_slave(struct snd_timer_instance *timeri) { unsigned long flags; spin_lock_irqsave(&slave_active_lock, flags); timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; if (timeri->master) list_add_tail(&timeri->active_list, &timeri->master->slave_active_head); spin_unlock_irqrestore(&slave_active_lock, flags); return 1; /* delayed start */ } /* * start the timer instance */ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL || ticks < 1) return -EINVAL; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { result = snd_timer_start_slave(timeri); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } timer = timeri->timer; if 
(timer == NULL) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->ticks = timeri->cticks = ticks; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, ticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } static int _snd_timer_stop(struct snd_timer_instance * timeri, int keep_flag, int event) { struct snd_timer *timer; unsigned long flags; if (snd_BUG_ON(!timeri)) return -ENXIO; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { if (!keep_flag) { spin_lock_irqsave(&slave_active_lock, flags); timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; spin_unlock_irqrestore(&slave_active_lock, flags); } goto __end; } timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && !(--timer->running)) { timer->hw.stop(timer); if (timer->flags & SNDRV_TIMER_FLG_RESCHED) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; snd_timer_reschedule(timer, 0); if (timer->flags & SNDRV_TIMER_FLG_CHANGE) { timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } } if (!keep_flag) timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); spin_unlock_irqrestore(&timer->lock, flags); __end: if (event != SNDRV_TIMER_EVENT_RESOLUTION) snd_timer_notify1(timeri, event); return 0; } /* * stop the timer instance. * * do not call this from the timer callback! */ int snd_timer_stop(struct snd_timer_instance *timeri) { struct snd_timer *timer; unsigned long flags; int err; err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP); if (err < 0) return err; timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->cticks = timeri->ticks; timeri->pticks = 0; spin_unlock_irqrestore(&timer->lock, flags); return 0; } /* * start again.. the tick is kept. 
*/ int snd_timer_continue(struct snd_timer_instance *timeri) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL) return result; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) return snd_timer_start_slave(timeri); timer = timeri->timer; if (! timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); if (!timeri->cticks) timeri->cticks = 1; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, timer->sticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); return result; } /* * pause.. remember the ticks left */ int snd_timer_pause(struct snd_timer_instance * timeri) { return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE); } /* * reschedule the timer * * start pending instances and check the scheduling ticks. * when the scheduling ticks is changed set CHANGE flag to reprogram the timer. */ static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti; unsigned long ticks = ~0UL; list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->flags & SNDRV_TIMER_IFLG_START) { ti->flags &= ~SNDRV_TIMER_IFLG_START; ti->flags |= SNDRV_TIMER_IFLG_RUNNING; timer->running++; } if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) { if (ticks > ti->cticks) ticks = ti->cticks; } } if (ticks == ~0UL) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; return; } if (ticks > timer->hw.ticks) ticks = timer->hw.ticks; if (ticks_left != ticks) timer->flags |= SNDRV_TIMER_FLG_CHANGE; timer->sticks = ticks; } /* * timer tasklet * */ static void snd_timer_tasklet(unsigned long arg) { struct snd_timer *timer = (struct snd_timer *) arg; struct snd_timer_instance *ti; struct list_head *p; unsigned long resolution, ticks; unsigned long flags; spin_lock_irqsave(&timer->lock, flags); /* now process all callbacks */ while (!list_empty(&timer->sack_list_head)) { p = timer->sack_list_head.next; /* get first item */ ti = list_entry(p, struct 
snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; resolution = ti->resolution; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } spin_unlock_irqrestore(&timer->lock, flags); } /* * timer interrupt * * ticks_left is usually equal to timer->sticks. * */ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti, *ts, *tmp; unsigned long resolution, ticks; struct list_head *p, *ack_list_head; unsigned long flags; int use_tasklet = 0; if (timer == NULL) return; spin_lock_irqsave(&timer->lock, flags); /* remember the current resolution */ if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; /* loop for all active instances * Here we cannot use list_for_each_entry because the active_list of a * processed instance is relinked to done_list_head before the callback * is called. 
*/ list_for_each_entry_safe(ti, tmp, &timer->active_list_head, active_list) { if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING)) continue; ti->pticks += ticks_left; ti->resolution = resolution; if (ti->cticks < ticks_left) ti->cticks = 0; else ti->cticks -= ticks_left; if (ti->cticks) /* not expired */ continue; if (ti->flags & SNDRV_TIMER_IFLG_AUTO) { ti->cticks = ti->ticks; } else { ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING; if (--timer->running) list_del_init(&ti->active_list); } if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) || (ti->flags & SNDRV_TIMER_IFLG_FAST)) ack_list_head = &timer->ack_list_head; else ack_list_head = &timer->sack_list_head; if (list_empty(&ti->ack_list)) list_add_tail(&ti->ack_list, ack_list_head); list_for_each_entry(ts, &ti->slave_active_head, active_list) { ts->pticks = ti->pticks; ts->resolution = resolution; if (list_empty(&ts->ack_list)) list_add_tail(&ts->ack_list, ack_list_head); } } if (timer->flags & SNDRV_TIMER_FLG_RESCHED) snd_timer_reschedule(timer, timer->sticks); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_STOP) { timer->hw.stop(timer); timer->flags |= SNDRV_TIMER_FLG_CHANGE; } if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) || (timer->flags & SNDRV_TIMER_FLG_CHANGE)) { /* restart timer */ timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } else { timer->hw.stop(timer); } /* now process all fast callbacks */ while (!list_empty(&timer->ack_list_head)) { p = timer->ack_list_head.next; /* get first item */ ti = list_entry(p, struct snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } /* do we have any slow callbacks? 
*/ use_tasklet = !list_empty(&timer->sack_list_head); spin_unlock_irqrestore(&timer->lock, flags); if (use_tasklet) tasklet_schedule(&timer->task_queue); } /* */ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, struct snd_timer **rtimer) { struct snd_timer *timer; int err; static struct snd_device_ops ops = { .dev_free = snd_timer_dev_free, .dev_register = snd_timer_dev_register, .dev_disconnect = snd_timer_dev_disconnect, }; if (snd_BUG_ON(!tid)) return -EINVAL; if (rtimer) *rtimer = NULL; timer = kzalloc(sizeof(*timer), GFP_KERNEL); if (!timer) return -ENOMEM; timer->tmr_class = tid->dev_class; timer->card = card; timer->tmr_device = tid->device; timer->tmr_subdevice = tid->subdevice; if (id) strlcpy(timer->id, id, sizeof(timer->id)); INIT_LIST_HEAD(&timer->device_list); INIT_LIST_HEAD(&timer->open_list_head); INIT_LIST_HEAD(&timer->active_list_head); INIT_LIST_HEAD(&timer->ack_list_head); INIT_LIST_HEAD(&timer->sack_list_head); spin_lock_init(&timer->lock); tasklet_init(&timer->task_queue, snd_timer_tasklet, (unsigned long)timer); if (card != NULL) { timer->module = card->module; err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops); if (err < 0) { snd_timer_free(timer); return err; } } if (rtimer) *rtimer = timer; return 0; } static int snd_timer_free(struct snd_timer *timer) { if (!timer) return 0; mutex_lock(&register_mutex); if (! 
list_empty(&timer->open_list_head)) { struct list_head *p, *n; struct snd_timer_instance *ti; pr_warn("ALSA: timer %p is busy?\n", timer); list_for_each_safe(p, n, &timer->open_list_head) { list_del_init(p); ti = list_entry(p, struct snd_timer_instance, open_list); ti->timer = NULL; } } list_del(&timer->device_list); mutex_unlock(&register_mutex); if (timer->private_free) timer->private_free(timer); kfree(timer); return 0; } static int snd_timer_dev_free(struct snd_device *device) { struct snd_timer *timer = device->device_data; return snd_timer_free(timer); } static int snd_timer_dev_register(struct snd_device *dev) { struct snd_timer *timer = dev->device_data; struct snd_timer *timer1; if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop)) return -ENXIO; if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) && !timer->hw.resolution && timer->hw.c_resolution == NULL) return -EINVAL; mutex_lock(&register_mutex); list_for_each_entry(timer1, &snd_timer_list, device_list) { if (timer1->tmr_class > timer->tmr_class) break; if (timer1->tmr_class < timer->tmr_class) continue; if (timer1->card && timer->card) { if (timer1->card->number > timer->card->number) break; if (timer1->card->number < timer->card->number) continue; } if (timer1->tmr_device > timer->tmr_device) break; if (timer1->tmr_device < timer->tmr_device) continue; if (timer1->tmr_subdevice > timer->tmr_subdevice) break; if (timer1->tmr_subdevice < timer->tmr_subdevice) continue; /* conflicts.. 
*/ mutex_unlock(&register_mutex); return -EBUSY; } list_add_tail(&timer->device_list, &timer1->device_list); mutex_unlock(&register_mutex); return 0; } static int snd_timer_dev_disconnect(struct snd_device *device) { struct snd_timer *timer = device->device_data; mutex_lock(&register_mutex); list_del_init(&timer->device_list); mutex_unlock(&register_mutex); return 0; } void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp) { unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ti, *ts; if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) return; if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART || event > SNDRV_TIMER_EVENT_MRESUME)) return; spin_lock_irqsave(&timer->lock, flags); if (event == SNDRV_TIMER_EVENT_MSTART || event == SNDRV_TIMER_EVENT_MCONTINUE || event == SNDRV_TIMER_EVENT_MRESUME) { if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; } list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->ccallback) ti->ccallback(ti, event, tstamp, resolution); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ts, event, tstamp, resolution); } spin_unlock_irqrestore(&timer->lock, flags); } /* * exported functions for global timers */ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer) { struct snd_timer_id tid; tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = -1; tid.device = device; tid.subdevice = 0; return snd_timer_new(NULL, id, &tid, rtimer); } int snd_timer_global_free(struct snd_timer *timer) { return snd_timer_free(timer); } int snd_timer_global_register(struct snd_timer *timer) { struct snd_device dev; memset(&dev, 0, sizeof(dev)); dev.device_data = timer; return snd_timer_dev_register(&dev); } /* * System timer */ struct snd_timer_system_private { struct timer_list tlist; unsigned long last_expires; unsigned long 
last_jiffies; unsigned long correction; }; static void snd_timer_s_function(unsigned long data) { struct snd_timer *timer = (struct snd_timer *)data; struct snd_timer_system_private *priv = timer->private_data; unsigned long jiff = jiffies; if (time_after(jiff, priv->last_expires)) priv->correction += (long)jiff - (long)priv->last_expires; snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies); } static int snd_timer_s_start(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long njiff; priv = (struct snd_timer_system_private *) timer->private_data; njiff = (priv->last_jiffies = jiffies); if (priv->correction > timer->sticks - 1) { priv->correction -= timer->sticks - 1; njiff++; } else { njiff += timer->sticks - priv->correction; priv->correction = 0; } priv->last_expires = priv->tlist.expires = njiff; add_timer(&priv->tlist); return 0; } static int snd_timer_s_stop(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long jiff; priv = (struct snd_timer_system_private *) timer->private_data; del_timer(&priv->tlist); jiff = jiffies; if (time_before(jiff, priv->last_expires)) timer->sticks = priv->last_expires - jiff; else timer->sticks = 1; priv->correction = 0; return 0; } static struct snd_timer_hardware snd_timer_system = { .flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .resolution = 1000000000L / HZ, .ticks = 10000000L, .start = snd_timer_s_start, .stop = snd_timer_s_stop }; static void snd_timer_free_system(struct snd_timer *timer) { kfree(timer->private_data); } static int snd_timer_register_system(void) { struct snd_timer *timer; struct snd_timer_system_private *priv; int err; err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer); if (err < 0) return err; strcpy(timer->name, "system timer"); timer->hw = snd_timer_system; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) { snd_timer_free(timer); return -ENOMEM; } setup_timer(&priv->tlist, snd_timer_s_function, 
(unsigned long) timer); timer->private_data = priv; timer->private_free = snd_timer_free_system; return snd_timer_global_register(timer); } #ifdef CONFIG_SND_PROC_FS /* * Info interface */ static void snd_timer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_timer *timer; struct snd_timer_instance *ti; mutex_lock(&register_mutex); list_for_each_entry(timer, &snd_timer_list, device_list) { switch (timer->tmr_class) { case SNDRV_TIMER_CLASS_GLOBAL: snd_iprintf(buffer, "G%i: ", timer->tmr_device); break; case SNDRV_TIMER_CLASS_CARD: snd_iprintf(buffer, "C%i-%i: ", timer->card->number, timer->tmr_device); break; case SNDRV_TIMER_CLASS_PCM: snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number, timer->tmr_device, timer->tmr_subdevice); break; default: snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class, timer->card ? timer->card->number : -1, timer->tmr_device, timer->tmr_subdevice); } snd_iprintf(buffer, "%s :", timer->name); if (timer->hw.resolution) snd_iprintf(buffer, " %lu.%03luus (%lu ticks)", timer->hw.resolution / 1000, timer->hw.resolution % 1000, timer->hw.ticks); if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) snd_iprintf(buffer, " SLAVE"); snd_iprintf(buffer, "\n"); list_for_each_entry(ti, &timer->open_list_head, open_list) snd_iprintf(buffer, " Client %s : %s\n", ti->owner ? ti->owner : "unknown", ti->flags & (SNDRV_TIMER_IFLG_START | SNDRV_TIMER_IFLG_RUNNING) ? 
"running" : "stopped"); } mutex_unlock(&register_mutex); } static struct snd_info_entry *snd_timer_proc_entry; static void __init snd_timer_proc_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL); if (entry != NULL) { entry->c.text.read = snd_timer_proc_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } snd_timer_proc_entry = entry; } static void __exit snd_timer_proc_done(void) { snd_info_free_entry(snd_timer_proc_entry); } #else /* !CONFIG_SND_PROC_FS */ #define snd_timer_proc_init() #define snd_timer_proc_done() #endif /* * USER SPACE interface */ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_read *r; int prev; spin_lock(&tu->qlock); if (tu->qused > 0) { prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1; r = &tu->queue[prev]; if (r->resolution == resolution) { r->ticks += ticks; goto __wake; } } if (tu->qused >= tu->queue_size) { tu->overrun++; } else { r = &tu->queue[tu->qtail++]; tu->qtail %= tu->queue_size; r->resolution = resolution; r->ticks = ticks; tu->qused++; } __wake: spin_unlock(&tu->qlock); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu, struct snd_timer_tread *tread) { if (tu->qused >= tu->queue_size) { tu->overrun++; } else { memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread)); tu->qtail %= tu->queue_size; tu->qused++; } } static void snd_timer_user_ccallback(struct snd_timer_instance *timeri, int event, struct timespec *tstamp, unsigned long resolution) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread r1; unsigned long flags; if (event >= SNDRV_TIMER_EVENT_START && event <= SNDRV_TIMER_EVENT_PAUSE) tu->tstamp = *tstamp; if ((tu->filter & (1 << event)) == 0 || !tu->tread) return; 
r1.event = event; r1.tstamp = *tstamp; r1.val = resolution; spin_lock_irqsave(&tu->qlock, flags); snd_timer_user_append_to_tqueue(tu, &r1); spin_unlock_irqrestore(&tu->qlock, flags); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread *r, r1; struct timespec tstamp; int prev, append = 0; memset(&tstamp, 0, sizeof(tstamp)); spin_lock(&tu->qlock); if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) | (1 << SNDRV_TIMER_EVENT_TICK))) == 0) { spin_unlock(&tu->qlock); return; } if (tu->last_resolution != resolution || ticks > 0) { if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) && tu->last_resolution != resolution) { r1.event = SNDRV_TIMER_EVENT_RESOLUTION; r1.tstamp = tstamp; r1.val = resolution; snd_timer_user_append_to_tqueue(tu, &r1); tu->last_resolution = resolution; append++; } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0) goto __wake; if (ticks == 0) goto __wake; if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->tqueue[prev]; if (r->event == SNDRV_TIMER_EVENT_TICK) { r->tstamp = tstamp; r->val += ticks; append++; goto __wake; } } r1.event = SNDRV_TIMER_EVENT_TICK; r1.tstamp = tstamp; r1.val = ticks; snd_timer_user_append_to_tqueue(tu, &r1); append++; __wake: spin_unlock(&tu->qlock); if (append == 0) return; kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static int snd_timer_user_open(struct inode *inode, struct file *file) { struct snd_timer_user *tu; int err; err = nonseekable_open(inode, file); if (err < 0) return err; tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (tu == NULL) return -ENOMEM; spin_lock_init(&tu->qlock); init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->ioctl_lock); tu->ticks = 1; tu->queue_size = 128; tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) { kfree(tu); return -ENOMEM; } file->private_data = tu; return 0; } static int snd_timer_user_release(struct inode *inode, struct file *file) { struct snd_timer_user *tu; if (file->private_data) { tu = file->private_data; file->private_data = NULL; mutex_lock(&tu->ioctl_lock); if (tu->timeri) snd_timer_close(tu->timeri); mutex_unlock(&tu->ioctl_lock); kfree(tu->queue); kfree(tu->tqueue); kfree(tu); } return 0; } static void snd_timer_user_zero_id(struct snd_timer_id *id) { id->dev_class = SNDRV_TIMER_CLASS_NONE; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = -1; id->device = -1; id->subdevice = -1; } static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer) { id->dev_class = timer->tmr_class; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = timer->card ? 
timer->card->number : -1; id->device = timer->tmr_device; id->subdevice = timer->tmr_subdevice; } static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) { struct snd_timer_id id; struct snd_timer *timer; struct list_head *p; if (copy_from_user(&id, _tid, sizeof(id))) return -EFAULT; mutex_lock(&register_mutex); if (id.dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) snd_timer_user_zero_id(&id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); snd_timer_user_copy_id(&id, timer); } } else { switch (id.dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: id.device = id.device < 0 ? 0 : id.device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device >= id.device) { snd_timer_user_copy_id(&id, timer); break; } } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (id.card < 0) { id.card = 0; } else { if (id.card < 0) { id.card = 0; } else { if (id.device < 0) { id.device = 0; } else { if (id.subdevice < 0) { id.subdevice = 0; } else { id.subdevice++; } } } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > id.dev_class) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_class < id.dev_class) continue; if (timer->card->number > id.card) { snd_timer_user_copy_id(&id, timer); break; } if (timer->card->number < id.card) continue; if (timer->tmr_device > id.device) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device < id.device) continue; if (timer->tmr_subdevice > id.subdevice) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_subdevice < id.subdevice) continue; snd_timer_user_copy_id(&id, timer); break; } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; default: 
snd_timer_user_zero_id(&id); } } mutex_unlock(&register_mutex); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; } static int snd_timer_user_ginfo(struct file *file, struct snd_timer_ginfo __user *_ginfo) { struct snd_timer_ginfo *ginfo; struct snd_timer_id tid; struct snd_timer *t; struct list_head *p; int err = 0; ginfo = memdup_user(_ginfo, sizeof(*ginfo)); if (IS_ERR(ginfo)) return PTR_ERR(ginfo); tid = ginfo->tid; memset(ginfo, 0, sizeof(*ginfo)); ginfo->tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { ginfo->card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(ginfo->id, t->id, sizeof(ginfo->id)); strlcpy(ginfo->name, t->name, sizeof(ginfo->name)); ginfo->resolution = t->hw.resolution; if (t->hw.resolution_min > 0) { ginfo->resolution_min = t->hw.resolution_min; ginfo->resolution_max = t->hw.resolution_max; } list_for_each(p, &t->open_list_head) { ginfo->clients++; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo))) err = -EFAULT; kfree(ginfo); return err; } static int snd_timer_user_gparams(struct file *file, struct snd_timer_gparams __user *_gparams) { struct snd_timer_gparams gparams; struct snd_timer *t; int err; if (copy_from_user(&gparams, _gparams, sizeof(gparams))) return -EFAULT; mutex_lock(&register_mutex); t = snd_timer_find(&gparams.tid); if (!t) { err = -ENODEV; goto _error; } if (!list_empty(&t->open_list_head)) { err = -EBUSY; goto _error; } if (!t->hw.set_period) { err = -ENOSYS; goto _error; } err = t->hw.set_period(t, gparams.period_num, gparams.period_den); _error: mutex_unlock(&register_mutex); return err; } static int snd_timer_user_gstatus(struct file *file, struct snd_timer_gstatus __user *_gstatus) { struct snd_timer_gstatus gstatus; struct snd_timer_id tid; struct snd_timer *t; int err = 0; if (copy_from_user(&gstatus, _gstatus, 
sizeof(gstatus))) return -EFAULT; tid = gstatus.tid; memset(&gstatus, 0, sizeof(gstatus)); gstatus.tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { if (t->hw.c_resolution) gstatus.resolution = t->hw.c_resolution(t); else gstatus.resolution = t->hw.resolution; if (t->hw.precise_resolution) { t->hw.precise_resolution(t, &gstatus.resolution_num, &gstatus.resolution_den); } else { gstatus.resolution_num = gstatus.resolution; gstatus.resolution_den = 1000000000uL; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) err = -EFAULT; return err; } static int snd_timer_user_tselect(struct file *file, struct snd_timer_select __user *_tselect) { struct snd_timer_user *tu; struct snd_timer_select tselect; char str[32]; int err = 0; tu = file->private_data; if (tu->timeri) { snd_timer_close(tu->timeri); tu->timeri = NULL; } if (copy_from_user(&tselect, _tselect, sizeof(tselect))) { err = -EFAULT; goto __err; } sprintf(str, "application %i", current->pid); if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE) tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION; err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid); if (err < 0) goto __err; kfree(tu->queue); tu->queue = NULL; kfree(tu->tqueue); tu->tqueue = NULL; if (tu->tread) { tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread), GFP_KERNEL); if (tu->tqueue == NULL) err = -ENOMEM; } else { tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) err = -ENOMEM; } if (err < 0) { snd_timer_close(tu->timeri); tu->timeri = NULL; } else { tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST; tu->timeri->callback = tu->tread ? 
snd_timer_user_tinterrupt : snd_timer_user_interrupt; tu->timeri->ccallback = snd_timer_user_ccallback; tu->timeri->callback_data = (void *)tu; } __err: return err; } static int snd_timer_user_info(struct file *file, struct snd_timer_info __user *_info) { struct snd_timer_user *tu; struct snd_timer_info *info; struct snd_timer *t; int err = 0; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; info->card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) info->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(info->id, t->id, sizeof(info->id)); strlcpy(info->name, t->name, sizeof(info->name)); info->resolution = t->hw.resolution; if (copy_to_user(_info, info, sizeof(*_info))) err = -EFAULT; kfree(info); return err; } static int snd_timer_user_params(struct file *file, struct snd_timer_params __user *_params) { struct snd_timer_user *tu; struct snd_timer_params params; struct snd_timer *t; struct snd_timer_read *tr; struct snd_timer_tread *ttr; int err; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; if (copy_from_user(&params, _params, sizeof(params))) return -EFAULT; if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { err = -EINVAL; goto _end; } if (params.queue_size > 0 && (params.queue_size < 32 || params.queue_size > 1024)) { err = -EINVAL; goto _end; } if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)| (1<<SNDRV_TIMER_EVENT_TICK)| (1<<SNDRV_TIMER_EVENT_START)| (1<<SNDRV_TIMER_EVENT_STOP)| (1<<SNDRV_TIMER_EVENT_CONTINUE)| (1<<SNDRV_TIMER_EVENT_PAUSE)| (1<<SNDRV_TIMER_EVENT_SUSPEND)| (1<<SNDRV_TIMER_EVENT_RESUME)| (1<<SNDRV_TIMER_EVENT_MSTART)| (1<<SNDRV_TIMER_EVENT_MSTOP)| (1<<SNDRV_TIMER_EVENT_MCONTINUE)| (1<<SNDRV_TIMER_EVENT_MPAUSE)| (1<<SNDRV_TIMER_EVENT_MSUSPEND)| (1<<SNDRV_TIMER_EVENT_MRESUME))) { err = -EINVAL; goto _end; } 
snd_timer_stop(tu->timeri); spin_lock_irq(&t->lock); tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO| SNDRV_TIMER_IFLG_EXCLUSIVE| SNDRV_TIMER_IFLG_EARLY_EVENT); if (params.flags & SNDRV_TIMER_PSFLG_AUTO) tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO; if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE) tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE; if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT) tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT; spin_unlock_irq(&t->lock); if (params.queue_size > 0 && (unsigned int)tu->queue_size != params.queue_size) { if (tu->tread) { ttr = kmalloc(params.queue_size * sizeof(*ttr), GFP_KERNEL); if (ttr) { kfree(tu->tqueue); tu->queue_size = params.queue_size; tu->tqueue = ttr; } } else { tr = kmalloc(params.queue_size * sizeof(*tr), GFP_KERNEL); if (tr) { kfree(tu->queue); tu->queue_size = params.queue_size; tu->queue = tr; } } } tu->qhead = tu->qtail = tu->qused = 0; if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) { if (tu->tread) { struct snd_timer_tread tread; tread.event = SNDRV_TIMER_EVENT_EARLY; tread.tstamp.tv_sec = 0; tread.tstamp.tv_nsec = 0; tread.val = 0; snd_timer_user_append_to_tqueue(tu, &tread); } else { struct snd_timer_read *r = &tu->queue[0]; r->resolution = 0; r->ticks = 0; tu->qused++; tu->qtail++; } } tu->filter = params.filter; tu->ticks = params.ticks; err = 0; _end: if (copy_to_user(_params, &params, sizeof(params))) return -EFAULT; return err; } static int snd_timer_user_status(struct file *file, struct snd_timer_status __user *_status) { struct snd_timer_user *tu; struct snd_timer_status status; tu = file->private_data; if (!tu->timeri) return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } static int 
snd_timer_user_start(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; snd_timer_stop(tu->timeri); tu->timeri->lost = 0; tu->last_resolution = 0; return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0; } static int snd_timer_user_stop(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_continue(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; tu->timeri->lost = 0; return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_pause(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0; } enum { SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20), SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21), SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22), SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23), }; static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu; void __user *argp = (void __user *)arg; int __user *p = argp; tu = file->private_data; switch (cmd) { case SNDRV_TIMER_IOCTL_PVERSION: return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0; case SNDRV_TIMER_IOCTL_NEXT_DEVICE: return snd_timer_user_next_device(argp); case SNDRV_TIMER_IOCTL_TREAD: { int xarg; if (tu->timeri) /* too late */ return -EBUSY; if (get_user(xarg, p)) return -EFAULT; tu->tread = xarg ? 
1 : 0; return 0; } case SNDRV_TIMER_IOCTL_GINFO: return snd_timer_user_ginfo(file, argp); case SNDRV_TIMER_IOCTL_GPARAMS: return snd_timer_user_gparams(file, argp); case SNDRV_TIMER_IOCTL_GSTATUS: return snd_timer_user_gstatus(file, argp); case SNDRV_TIMER_IOCTL_SELECT: return snd_timer_user_tselect(file, argp); case SNDRV_TIMER_IOCTL_INFO: return snd_timer_user_info(file, argp); case SNDRV_TIMER_IOCTL_PARAMS: return snd_timer_user_params(file, argp); case SNDRV_TIMER_IOCTL_STATUS: return snd_timer_user_status(file, argp); case SNDRV_TIMER_IOCTL_START: case SNDRV_TIMER_IOCTL_START_OLD: return snd_timer_user_start(file); case SNDRV_TIMER_IOCTL_STOP: case SNDRV_TIMER_IOCTL_STOP_OLD: return snd_timer_user_stop(file); case SNDRV_TIMER_IOCTL_CONTINUE: case SNDRV_TIMER_IOCTL_CONTINUE_OLD: return snd_timer_user_continue(file); case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: return snd_timer_user_pause(file); } return -ENOTTY; } static long snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu = file->private_data; long ret; mutex_lock(&tu->ioctl_lock); ret = __snd_timer_user_ioctl(file, cmd, arg); mutex_unlock(&tu->ioctl_lock); return ret; } static int snd_timer_user_fasync(int fd, struct file * file, int on) { struct snd_timer_user *tu; tu = file->private_data; return fasync_helper(fd, file, on, &tu->fasync); } static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct snd_timer_user *tu; long result = 0, unit; int err = 0; tu = file->private_data; unit = tu->tread ? 
sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; break; } set_current_state(TASK_INTERRUPTIBLE); init_waitqueue_entry(&wait, current); add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); schedule(); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); if (signal_pending(current)) { err = -ERESTARTSYS; break; } } spin_unlock_irq(&tu->qlock); if (err < 0) goto _error; if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], sizeof(struct snd_timer_tread))) { err = -EFAULT; goto _error; } } else { if (copy_to_user(buffer, &tu->queue[tu->qhead++], sizeof(struct snd_timer_read))) { err = -EFAULT; goto _error; } } tu->qhead %= tu->queue_size; result += unit; buffer += unit; spin_lock_irq(&tu->qlock); tu->qused--; } spin_unlock_irq(&tu->qlock); _error: return result > 0 ? 
result : err; } static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_timer_user *tu; tu = file->private_data; poll_wait(file, &tu->qchange_sleep, wait); mask = 0; if (tu->qused) mask |= POLLIN | POLLRDNORM; return mask; } #ifdef CONFIG_COMPAT #include "timer_compat.c" #else #define snd_timer_user_ioctl_compat NULL #endif static const struct file_operations snd_timer_f_ops = { .owner = THIS_MODULE, .read = snd_timer_user_read, .open = snd_timer_user_open, .release = snd_timer_user_release, .llseek = no_llseek, .poll = snd_timer_user_poll, .unlocked_ioctl = snd_timer_user_ioctl, .compat_ioctl = snd_timer_user_ioctl_compat, .fasync = snd_timer_user_fasync, }; /* unregister the system timer */ static void snd_timer_free_all(void) { struct snd_timer *timer, *n; list_for_each_entry_safe(timer, n, &snd_timer_list, device_list) snd_timer_free(timer); } static struct device timer_dev; /* * ENTRY functions */ static int __init alsa_timer_init(void) { int err; snd_device_initialize(&timer_dev, NULL); dev_set_name(&timer_dev, "timer"); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1, "system timer"); #endif err = snd_timer_register_system(); if (err < 0) { pr_err("ALSA: unable to register system timer (%i)\n", err); put_device(&timer_dev); return err; } err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0, &snd_timer_f_ops, NULL, &timer_dev); if (err < 0) { pr_err("ALSA: unable to register timer device (%i)\n", err); snd_timer_free_all(); put_device(&timer_dev); return err; } snd_timer_proc_init(); return 0; } static void __exit alsa_timer_exit(void) { snd_unregister_device(&timer_dev); snd_timer_free_all(); put_device(&timer_dev); snd_timer_proc_done(); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1); #endif } module_init(alsa_timer_init) module_exit(alsa_timer_exit) EXPORT_SYMBOL(snd_timer_open); 
EXPORT_SYMBOL(snd_timer_close); EXPORT_SYMBOL(snd_timer_resolution); EXPORT_SYMBOL(snd_timer_start); EXPORT_SYMBOL(snd_timer_stop); EXPORT_SYMBOL(snd_timer_continue); EXPORT_SYMBOL(snd_timer_pause); EXPORT_SYMBOL(snd_timer_new); EXPORT_SYMBOL(snd_timer_notify); EXPORT_SYMBOL(snd_timer_global_new); EXPORT_SYMBOL(snd_timer_global_free); EXPORT_SYMBOL(snd_timer_global_register); EXPORT_SYMBOL(snd_timer_interrupt);
static int snd_timer_user_open(struct inode *inode, struct file *file) { struct snd_timer_user *tu; int err; err = nonseekable_open(inode, file); if (err < 0) return err; tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (tu == NULL) return -ENOMEM; spin_lock_init(&tu->qlock); init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->tread_sem); tu->ticks = 1; tu->queue_size = 128; tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) { kfree(tu); return -ENOMEM; } file->private_data = tu; return 0; }
static int snd_timer_user_open(struct inode *inode, struct file *file) { struct snd_timer_user *tu; int err; err = nonseekable_open(inode, file); if (err < 0) return err; tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (tu == NULL) return -ENOMEM; spin_lock_init(&tu->qlock); init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->ioctl_lock); tu->ticks = 1; tu->queue_size = 128; tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) { kfree(tu); return -ENOMEM; } file->private_data = tu; return 0; }
{'added': [(76, '\tstruct mutex ioctl_lock;'), (1256, '\tmutex_init(&tu->ioctl_lock);'), (1276, '\t\tmutex_lock(&tu->ioctl_lock);'), (1279, '\t\tmutex_unlock(&tu->ioctl_lock);'), (1772, 'static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,'), (1789, '\t\tif (tu->timeri)\t/* too late */'), (1791, '\t\tif (get_user(xarg, p))'), (1826, 'static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,'), (1827, '\t\t\t\t unsigned long arg)'), (1828, '{'), (1829, '\tstruct snd_timer_user *tu = file->private_data;'), (1830, '\tlong ret;'), (1831, ''), (1832, '\tmutex_lock(&tu->ioctl_lock);'), (1833, '\tret = __snd_timer_user_ioctl(file, cmd, arg);'), (1834, '\tmutex_unlock(&tu->ioctl_lock);'), (1835, '\treturn ret;'), (1836, '}'), (1837, '')], 'deleted': [(76, '\tstruct mutex tread_sem;'), (1256, '\tmutex_init(&tu->tread_sem);'), (1515, '\tmutex_lock(&tu->tread_sem);'), (1559, ' \tmutex_unlock(&tu->tread_sem);'), (1772, 'static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,'), (1789, '\t\tmutex_lock(&tu->tread_sem);'), (1790, '\t\tif (tu->timeri)\t{\t/* too late */'), (1791, '\t\t\tmutex_unlock(&tu->tread_sem);'), (1793, '\t\t}'), (1794, '\t\tif (get_user(xarg, p)) {'), (1795, '\t\t\tmutex_unlock(&tu->tread_sem);'), (1797, '\t\t}'), (1799, '\t\tmutex_unlock(&tu->tread_sem);')]}
19
13
1,691
10,207
24
146
4
https://github.com/torvalds/linux
CVE-2016-2546
CWE-362
1,901
enc624j600_driver.c
C
enc624j600ReadPhyReg
/** * @file enc624j600_driver.c * @brief ENC624J600/ENC424J600 Ethernet controller * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "core/net.h" #include "drivers/eth/enc624j600_driver.h" #include "debug.h" /** * @brief ENC624J600 driver **/ const NicDriver enc624j600Driver = { NIC_TYPE_ETHERNET, ETH_MTU, enc624j600Init, enc624j600Tick, enc624j600EnableIrq, enc624j600DisableIrq, enc624j600EventHandler, enc624j600SendPacket, enc624j600UpdateMacAddrFilter, NULL, NULL, NULL, TRUE, TRUE, TRUE, FALSE }; /** * @brief ENC624J600 controller initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t enc624j600Init(NetInterface *interface) { uint16_t temp; Enc624j600Context *context; //Debug message TRACE_INFO("Initializing ENC624J600 Ethernet controller...\r\n"); //Initialize SPI interface->spiDriver->init(); //Initialize external interrupt line interface->extIntDriver->init(); //Point to the driver context context = (Enc624j600Context *) 
interface->nicContext; //Initialize driver specific variables context->nextPacket = ENC624J600_RX_BUFFER_START; //Allocate RX buffer context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); //Failed to allocate memory? if(context->rxBuffer == NULL) { return ERROR_OUT_OF_MEMORY; } //Issue a system reset enc624j600SoftReset(interface); //Disable CLKOUT output enc624j600WriteReg(interface, ENC624J600_REG_ECON2, ECON2_ETHEN | ECON2_STRCH); //Optionally set the station MAC address if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR)) { //Use the factory preprogrammed station address temp = enc624j600ReadReg(interface, ENC624J600_REG_MAADR1); interface->macAddr.w[0] = letoh16(temp); temp = enc624j600ReadReg(interface, ENC624J600_REG_MAADR2); interface->macAddr.w[1] = letoh16(temp); temp = enc624j600ReadReg(interface, ENC624J600_REG_MAADR3); interface->macAddr.w[2] = letoh16(temp); //Generate the 64-bit interface identifier macAddrToEui64(&interface->macAddr, &interface->eui64); } else { //Override the factory preprogrammed address temp = htole16(interface->macAddr.w[0]); enc624j600WriteReg(interface, ENC624J600_REG_MAADR1, temp); temp = htole16(interface->macAddr.w[1]); enc624j600WriteReg(interface, ENC624J600_REG_MAADR2, temp); temp = htole16(interface->macAddr.w[2]); enc624j600WriteReg(interface, ENC624J600_REG_MAADR3, temp); } //Set receive buffer location enc624j600WriteReg(interface, ENC624J600_REG_ERXST, ENC624J600_RX_BUFFER_START); //Program the tail pointer ERXTAIL to the last even address of the buffer enc624j600WriteReg(interface, ENC624J600_REG_ERXTAIL, ENC624J600_RX_BUFFER_STOP); //Configure the receive filters enc624j600WriteReg(interface, ENC624J600_REG_ERXFCON, ERXFCON_HTEN | ERXFCON_CRCEN | ERXFCON_RUNTEN | ERXFCON_UCEN | ERXFCON_BCEN); //Initialize the hash table enc624j600WriteReg(interface, ENC624J600_REG_EHT1, 0x0000); enc624j600WriteReg(interface, ENC624J600_REG_EHT2, 0x0000); enc624j600WriteReg(interface, ENC624J600_REG_EHT3, 0x0000); 
enc624j600WriteReg(interface, ENC624J600_REG_EHT4, 0x0000); //All short frames will be zero-padded to 60 bytes and a valid CRC is then appended enc624j600WriteReg(interface, ENC624J600_REG_MACON2, MACON2_DEFER | MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1); //Program the MAMXFL register with the maximum frame length to be accepted enc624j600WriteReg(interface, ENC624J600_REG_MAMXFL, ETH_MAX_FRAME_SIZE); //PHY initialization enc624j600WritePhyReg(interface, ENC624J600_PHY_REG_PHANA, PHANA_ADPAUS0 | PHANA_AD100FD | PHANA_AD100 | PHANA_AD10FD | PHANA_AD10 | PHANA_ADIEEE0); //Clear interrupt flags enc624j600WriteReg(interface, ENC624J600_REG_EIR, 0x0000); //Configure interrupts as desired enc624j600WriteReg(interface, ENC624J600_REG_EIE, EIE_INTIE | EIE_LINKIE | EIE_PKTIE | EIE_TXIE | EIE_TXABTIE); //Set RXEN to enable reception enc624j600SetBit(interface, ENC624J600_REG_ECON1, ECON1_RXEN); //Dump registers for debugging purpose enc624j600DumpReg(interface); enc624j600DumpPhyReg(interface); //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Force the TCP/IP stack to poll the link state at startup interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief ENC624J600 timer handler * @param[in] interface Underlying network interface **/ void enc624j600Tick(NetInterface *interface) { } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void enc624j600EnableIrq(NetInterface *interface) { //Enable interrupts interface->extIntDriver->enableIrq(); } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void enc624j600DisableIrq(NetInterface *interface) { //Disable interrupts interface->extIntDriver->disableIrq(); } /** * @brief ENC624J600 interrupt service routine * @param[in] interface Underlying network interface * @return TRUE if a higher priority task must be woken. 
Else FALSE is returned
 **/

bool_t enc624j600IrqHandler(NetInterface *interface)
{
   bool_t flag;
   uint16_t status;

   //This flag will be set if a higher priority task must be woken
   flag = FALSE;

   //Clear the INTIE bit, immediately after an interrupt event
   //(runs in interrupt context: only *FromIsr primitives are used here)
   enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_INTIE);

   //Read interrupt status register
   status = enc624j600ReadReg(interface, ENC624J600_REG_EIR);

   //Link status change?
   if((status & EIR_LINKIF) != 0)
   {
      //Disable LINKIE interrupt (re-enabled by the event handler)
      enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_LINKIE);

      //Set event flag
      interface->nicEvent = TRUE;
      //Notify the TCP/IP stack of the event
      flag |= osSetEventFromIsr(&netEvent);
   }

   //Packet received?
   if((status & EIR_PKTIF) != 0)
   {
      //Disable PKTIE interrupt (re-enabled by the event handler)
      enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_PKTIE);

      //Set event flag
      interface->nicEvent = TRUE;
      //Notify the TCP/IP stack of the event
      flag |= osSetEventFromIsr(&netEvent);
   }

   //Packet transmission complete?
   if((status & (EIR_TXIF | EIR_TXABTIF)) != 0)
   {
      //Clear interrupt flags
      enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_TXIF | EIR_TXABTIF);

      //Notify the TCP/IP stack that the transmitter is ready to send
      flag |= osSetEventFromIsr(&interface->nicTxEvent);
   }

   //Once the interrupt has been serviced, the INTIE bit
   //is set again to re-enable interrupts
   enc624j600SetBit(interface, ENC624J600_REG_EIE, EIE_INTIE);

   //A higher priority task must be woken?
   return flag;
}


/**
 * @brief ENC624J600 event handler
 *
 * Deferred (task-context) processing for the events flagged by the ISR:
 * link changes and received packets
 *
 * @param[in] interface Underlying network interface
 **/

void enc624j600EventHandler(NetInterface *interface)
{
   error_t error;
   uint16_t status;
   uint16_t value;

   //Read interrupt status register
   status = enc624j600ReadReg(interface, ENC624J600_REG_EIR);

   //Check whether the link state has changed
   if((status & EIR_LINKIF) != 0)
   {
      //Clear interrupt flag
      enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_LINKIF);
      //Read Ethernet status register
      value = enc624j600ReadReg(interface, ENC624J600_REG_ESTAT);

      //Check link state
      if((value & ESTAT_PHYLNK) != 0)
      {
         //Read PHY status register 3
         value = enc624j600ReadPhyReg(interface, ENC624J600_PHY_REG_PHSTAT3);

         //Get current speed
         if((value & PHSTAT3_SPDDPX1) != 0)
         {
            interface->linkSpeed = NIC_LINK_SPEED_100MBPS;
         }
         else
         {
            interface->linkSpeed = NIC_LINK_SPEED_10MBPS;
         }

         //Determine the new duplex mode
         if((value & PHSTAT3_SPDDPX2) != 0)
         {
            interface->duplexMode = NIC_FULL_DUPLEX_MODE;
         }
         else
         {
            interface->duplexMode = NIC_HALF_DUPLEX_MODE;
         }

         //Link is up
         interface->linkState = TRUE;

         //Update MAC configuration parameters for proper operation
         enc624j600UpdateMacConfig(interface);
      }
      else
      {
         //Link is down
         interface->linkState = FALSE;
      }

      //Process link state change event
      nicNotifyLinkChange(interface);
   }

   //Check whether a packet has been received?
   if((status & EIR_PKTIF) != 0)
   {
      //Clear interrupt flag
      enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_PKTIF);

      //Process all pending packets
      do
      {
         //Read incoming packet
         error = enc624j600ReceivePacket(interface);

         //No more data in the receive buffer?
      } while(error != ERROR_BUFFER_EMPTY);
   }

   //Re-enable LINKIE and PKTIE interrupts
   enc624j600SetBit(interface, ENC624J600_REG_EIE, EIE_LINKIE | EIE_PKTIE);
}


/**
 * @brief Send a packet
 * @param[in] interface Underlying network interface
 * @param[in] buffer Multi-part buffer containing the data to send
 * @param[in] offset Offset to the first data byte
 * @param[in] ancillary Additional options passed to the stack along with
 *   the packet
 * @return Error code
 **/

error_t enc624j600SendPacket(NetInterface *interface,
   const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary)
{
   size_t length;

   //Retrieve the length of the packet
   length = netBufferGetLength(buffer) - offset;

   //Check the frame length (1536 is the largest frame the TX buffer accepts)
   if(length > 1536)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
      //Report an error
      return ERROR_INVALID_LENGTH;
   }

   //Make sure the link is up before transmitting the frame
   if(!interface->linkState)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
      //Drop current packet
      return NO_ERROR;
   }

   //Ensure that the transmitter is ready to send
   if(enc624j600ReadReg(interface, ENC624J600_REG_ECON1) & ECON1_TXRTS)
   {
      return ERROR_FAILURE;
   }

   //Point to the SRAM buffer
   enc624j600WriteReg(interface, ENC624J600_REG_EGPWRPT,
      ENC624J600_TX_BUFFER_START);

   //Copy the packet to the SRAM buffer
   enc624j600WriteBuffer(interface, ENC624J600_CMD_WGPDATA, buffer, offset);

   //Program ETXST to the start address of the packet
   enc624j600WriteReg(interface, ENC624J600_REG_ETXST,
      ENC624J600_TX_BUFFER_START);

   //Program ETXLEN with the length of data copied to the memory
   enc624j600WriteReg(interface, ENC624J600_REG_ETXLEN, length);

   //Clear TXIF and TXABTIF interrupt flags
   enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_TXIF | EIR_TXABTIF);

   //Set the TXRTS bit to initiate transmission
   enc624j600SetBit(interface, ENC624J600_REG_ECON1, ECON1_TXRTS);

   //Successful processing
   return NO_ERROR;
}


/**
 * @brief Receive a packet
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc624j600ReceivePacket(NetInterface *interface)
{
   error_t error;
   uint16_t n;
   uint32_t status;
   Enc624j600Context *context;

   //Point to the driver context
   context = (Enc624j600Context *) interface->nicContext;

   //Verify that a packet is waiting by ensuring that PKTCNT is non-zero
   if(enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) & ESTAT_PKTCNT)
   {
      //Point to the next packet
      enc624j600WriteReg(interface, ENC624J600_REG_ERXRDPT,
         context->nextPacket);

      //Read the first two bytes, which are the address of the next packet
      enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,
         (uint8_t *) &context->nextPacket, sizeof(uint16_t));

      //Convert the value to host byte order
      context->nextPacket = letoh16(context->nextPacket);

      //Get the length of the received frame in bytes
      enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,
         (uint8_t *) &n, sizeof(uint16_t));

      //Convert the value to host byte order
      n = letoh16(n);

      //Read the receive status vector (RSV)
      enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,
         (uint8_t *) &status, sizeof(uint32_t));

      //Convert the value to host byte order
      status = letoh32(status);

      //Make sure no error occurred
      if((status & RSV_RECEIVED_OK) != 0)
      {
         //Limit the number of data to read so the copy cannot overflow
         //the ETH_MAX_FRAME_SIZE byte rxBuffer
         n = MIN(n, ETH_MAX_FRAME_SIZE);

         //Read the Ethernet frame
         enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,
            context->rxBuffer, n);

         //Valid packet received
         error = NO_ERROR;
      }
      else
      {
         //The received packet contains an error
         error = ERROR_INVALID_PACKET;
      }

      //Update the ERXTAIL pointer value to the point where the packet
      //has been processed, taking care to wrap back at the end of the
      //received memory buffer
      if(context->nextPacket == ENC624J600_RX_BUFFER_START)
      {
         enc624j600WriteReg(interface, ENC624J600_REG_ERXTAIL,
            ENC624J600_RX_BUFFER_STOP);
      }
      else
      {
         //NOTE(review): assumes nextPacket is always even so that
         //nextPacket - 2 is a valid even tail address - confirm against
         //the data sheet's RX buffer alignment guarantees
         enc624j600WriteReg(interface, ENC624J600_REG_ERXTAIL,
            context->nextPacket - 2);
      }

      //Set PKTDEC to decrement the PKTCNT bits
      enc624j600SetBit(interface, ENC624J600_REG_ECON1, ECON1_PKTDEC);
   }
   else
   {
      //No more data in the receive buffer
      error = ERROR_BUFFER_EMPTY;
   }

   //Check whether a valid packet has been received
   if(!error)
   {
      NetRxAncillary ancillary;

      //Additional options can be passed to the stack along with the packet
      ancillary = NET_DEFAULT_RX_ANCILLARY;

      //Pass the packet to the upper layer
      nicProcessPacket(interface, context->rxBuffer, n, &ancillary);
   }

   //Return status code
   return error;
}


/**
 * @brief Configure MAC address filtering
 *
 * Rebuilds the 64-bit hash table (EHT1..EHT4) from the interface's
 * MAC address filter entries
 *
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc624j600UpdateMacAddrFilter(NetInterface *interface)
{
   uint_t i;
   uint_t k;
   uint32_t crc;
   uint16_t hashTable[4];
   MacFilterEntry *entry;

   //Debug message
   TRACE_DEBUG("Updating MAC filter...\r\n");

   //Clear hash table
   osMemset(hashTable, 0, sizeof(hashTable));

   //The MAC address filter contains the list of MAC addresses to accept
   //when receiving an Ethernet frame
   for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++)
   {
      //Point to the current entry
      entry = &interface->macAddrFilter[i];

      //Valid entry?
      if(entry->refCount > 0)
      {
         //Compute CRC over the current MAC address
         crc = enc624j600CalcCrc(&entry->addr, sizeof(MacAddr));
         //Calculate the corresponding index in the table
         //(bits 28:23 of the CRC select one of 64 hash bits)
         k = (crc >> 23) & 0x3F;
         //Update hash table contents
         hashTable[k / 16] |= (1 << (k % 16));
      }
   }

   //Write the hash table to the ENC624J600 controller
   enc624j600WriteReg(interface, ENC624J600_REG_EHT1, hashTable[0]);
   enc624j600WriteReg(interface, ENC624J600_REG_EHT2, hashTable[1]);
   enc624j600WriteReg(interface, ENC624J600_REG_EHT3, hashTable[2]);
   enc624j600WriteReg(interface, ENC624J600_REG_EHT4, hashTable[3]);

   //Debug message
   TRACE_DEBUG("  EHT1 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_REG_EHT1));
   TRACE_DEBUG("  EHT2 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_REG_EHT2));
   TRACE_DEBUG("  EHT3 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_REG_EHT3));
   TRACE_DEBUG("  EHT4 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_REG_EHT4));

   //Successful processing
   return NO_ERROR;
}


/**
 * @brief Adjust MAC configuration parameters for proper operation
 * @param[in] interface Underlying network interface
 **/

void enc624j600UpdateMacConfig(NetInterface *interface)
{
   uint16_t duplexMode;

   //Determine the new duplex mode by reading the PHYDPX bit
   duplexMode = enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) &
      ESTAT_PHYDPX;

   //Full-duplex mode?
   if(duplexMode)
   {
      //Configure the FULDPX bit to match the current duplex mode
      enc624j600WriteReg(interface, ENC624J600_REG_MACON2, MACON2_DEFER |
         MACON2_PADCFG2 | MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1 |
         MACON2_FULDPX);

      //Configure the Back-to-Back Inter-Packet Gap register
      enc624j600WriteReg(interface, ENC624J600_REG_MABBIPG, 0x15);
   }
   //Half-duplex mode?
   else
   {
      //Configure the FULDPX bit to match the current duplex mode
      enc624j600WriteReg(interface, ENC624J600_REG_MACON2, MACON2_DEFER |
         MACON2_PADCFG2 | MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1);

      //Configure the Back-to-Back Inter-Packet Gap register
      enc624j600WriteReg(interface, ENC624J600_REG_MABBIPG, 0x12);
   }
}


/**
 * @brief Reset ENC624J600 controller
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc624j600SoftReset(NetInterface *interface)
{
   //Wait for the SPI interface to be ready
   do
   {
      //Write 0x1234 to EUDAST
      enc624j600WriteReg(interface, ENC624J600_REG_EUDAST, 0x1234);
      //Read back register and check contents
   } while(enc624j600ReadReg(interface, ENC624J600_REG_EUDAST) != 0x1234);

   //Poll CLKRDY and wait for it to become set
   while((enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) &
      ESTAT_CLKRDY) == 0)
   {
   }

   //Issue a system reset command by setting ETHRST
   enc624j600SetBit(interface, ENC624J600_REG_ECON2, ECON2_ETHRST);
   //Wait at least 25us for the reset to take place
   sleep(1);

   //Read EUDAST to confirm that the system reset took place.
   //EUDAST should have reverted back to its reset default
   if(enc624j600ReadReg(interface, ENC624J600_REG_EUDAST) != 0x0000)
   {
      return ERROR_FAILURE;
   }

   //Wait at least 256us for the PHY registers and PHY
   //status bits to become available
   sleep(1);

   //The controller is now ready to accept further commands
   return NO_ERROR;
}


/**
 * @brief Write ENC624J600 register
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 * @param[in] data Register value
 **/

void enc624j600WriteReg(NetInterface *interface, uint8_t address,
   uint16_t data)
{
   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(ENC624J600_CMD_WCRU);
   //Write register address
   interface->spiDriver->transfer(address);

   //Write register value (little-endian: LSB first)
   interface->spiDriver->transfer(LSB(data));
   interface->spiDriver->transfer(MSB(data));

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();
}


/**
 * @brief Read ENC624J600 register
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 * @return Register value
 **/

uint16_t enc624j600ReadReg(NetInterface *interface, uint8_t address)
{
   uint16_t data;

   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(ENC624J600_CMD_RCRU);
   //Write register address
   interface->spiDriver->transfer(address);

   //Read the lower 8 bits of data
   data = interface->spiDriver->transfer(0x00);
   //Read the upper 8 bits of data
   data |= interface->spiDriver->transfer(0x00) << 8;

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();

   //Return register contents
   return data;
}


/**
 * @brief Write PHY register
 * @param[in] interface Underlying network interface
 * @param[in] address PHY register address
 * @param[in] data Register value
 **/

void enc624j600WritePhyReg(NetInterface *interface, uint8_t address,
   uint16_t data)
{
   //Write the address of the PHY register to write to
   enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR,
      MIREGADR_R8 | address);

   //Write the 16 bits of data into the MIWR register
   enc624j600WriteReg(interface, ENC624J600_REG_MIWR, data);

   //Wait until the PHY register has been written
   //(busy-polls the MII management interface; no timeout)
   while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) &
      MISTAT_BUSY) != 0)
   {
   }
}


/**
 * @brief Read PHY register
 * @param[in] interface Underlying network interface
 * @param[in] address PHY register address
 * @return Register value
 **/

uint16_t enc624j600ReadPhyReg(NetInterface *interface, uint8_t address)
{
   //Write the address of the PHY register to read from
   enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR,
      MIREGADR_R8 | address);

   //Start read operation
   enc624j600WriteReg(interface, ENC624J600_REG_MICMD, MICMD_MIIRD);

   //Wait at least 25.6us before polling the BUSY bit
   usleep(100);

   //Wait for the read operation to complete
   while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) &
      MISTAT_BUSY) != 0)
   {
   }

   //Clear command register
   enc624j600WriteReg(interface, ENC624J600_REG_MICMD, 0x00);

   //Return register contents
   return enc624j600ReadReg(interface, ENC624J600_REG_MIRD);
}


/**
 * @brief Write SRAM buffer
 * @param[in] interface Underlying network interface
 * @param[in] opcode SRAM buffer operation
 * @param[in] buffer Multi-part buffer containing the data to be written
 * @param[in] offset Offset to the first data byte
 **/

void enc624j600WriteBuffer(NetInterface *interface,
   uint8_t opcode, const NetBuffer *buffer, size_t offset)
{
   uint_t i;
   size_t j;
   size_t n;
   uint8_t *p;

   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(opcode);

   //Loop through data chunks
   for(i = 0; i < buffer->chunkCount; i++)
   {
      //Is there any data to copy from the current chunk?
      if(offset < buffer->chunk[i].length)
      {
         //Point to the first byte to be read
         p = (uint8_t *) buffer->chunk[i].address + offset;

         //Compute the number of bytes to copy at a time
         n = buffer->chunk[i].length - offset;

         //Copy data to SRAM buffer
         for(j = 0; j < n; j++)
         {
            interface->spiDriver->transfer(p[j]);
         }

         //Process the next block from the start
         offset = 0;
      }
      else
      {
         //Skip the current chunk
         offset -= buffer->chunk[i].length;
      }
   }

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();
}


/**
 * @brief Read SRAM buffer
 * @param[in] interface Underlying network interface
 * @param[in] opcode SRAM buffer operation
 * @param[in] data Buffer where to store the incoming data
 * @param[in] length Number of data to read
 **/

void enc624j600ReadBuffer(NetInterface *interface,
   uint8_t opcode, uint8_t *data, size_t length)
{
   size_t i;

   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(opcode);

   //Copy data from SRAM buffer
   for(i = 0; i < length; i++)
   {
      data[i] = interface->spiDriver->transfer(0x00);
   }

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();
}


/**
 * @brief Set bit field
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 * @param[in] mask Bits to set in the target register
 **/

void enc624j600SetBit(NetInterface *interface, uint8_t address,
   uint16_t mask)
{
   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(ENC624J600_CMD_BFSU);
   //Write register address
   interface->spiDriver->transfer(address);

   //Write bit mask
   interface->spiDriver->transfer(LSB(mask));
   interface->spiDriver->transfer(MSB(mask));

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();
}


/**
 * @brief Clear bit field
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 * @param[in] mask Bits to clear in the target register
 **/

void enc624j600ClearBit(NetInterface *interface, uint8_t address,
   uint16_t mask)
{
   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(ENC624J600_CMD_BFCU);
   //Write register address
   interface->spiDriver->transfer(address);

   //Write bit mask
   interface->spiDriver->transfer(LSB(mask));
   interface->spiDriver->transfer(MSB(mask));

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();
}


/**
 * @brief CRC calculation using the polynomial 0x4C11DB7
 *
 * Bit-serial CRC-32; each input byte is consumed LSB first
 * (p[i] >> j), matching the order the MAC hashes received addresses
 *
 * @param[in] data Pointer to the data over which to calculate the CRC
 * @param[in] length Number of bytes to process
 * @return Resulting CRC value
 **/

uint32_t enc624j600CalcCrc(const void *data, size_t length)
{
   uint_t i;
   uint_t j;
   uint32_t crc;
   const uint8_t *p;

   //Point to the data over which to calculate the CRC
   p = (uint8_t *) data;
   //CRC preset value
   crc = 0xFFFFFFFF;

   //Loop through data
   for(i = 0; i < length; i++)
   {
      //The message is processed bit by bit
      for(j = 0; j < 8; j++)
      {
         //Update CRC value
         if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0)
         {
            crc = (crc << 1) ^ 0x04C11DB7;
         }
         else
         {
            crc = crc << 1;
         }
      }
   }

   //Return CRC value
   return crc;
}


/**
 * @brief Dump registers for debugging purpose
 * @param[in] interface Underlying network interface
 **/

void enc624j600DumpReg(NetInterface *interface)
{
#if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG)
   uint8_t i;
   uint8_t bank;
   uint16_t address;

   //Display header
   TRACE_DEBUG("    Bank 0  Bank 1  Bank 2  Bank 3  Unbanked\r\n");

   //Loop through register addresses
   for(i = 0; i < 32; i += 2)
   {
      //Display register address
      TRACE_DEBUG("%02" PRIX8 ": ", i);

      //Loop through bank numbers
      for(bank = 0; bank < 5; bank++)
      {
         //Format register address (unbanked access window at 0x7E00)
         address = 0x7E00 | (bank << 5) | i;

         //Display register contents
         TRACE_DEBUG("0x%04" PRIX16 "  ",
            enc624j600ReadReg(interface, address));
      }

      //Jump to the following line
      TRACE_DEBUG("\r\n");
   }

   //Terminate with a line feed
   TRACE_DEBUG("\r\n");
#endif
}


/**
 * @brief Dump PHY registers for debugging purpose
 * @param[in] interface Underlying network interface
 **/

void enc624j600DumpPhyReg(NetInterface *interface)
{
#if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG)
   uint8_t i;

   //Loop through PHY registers
   for(i = 0; i < 32; i++)
   {
      //Display current PHY register
      TRACE_DEBUG("%02" PRIX8 ": 0x%04" PRIX16 "\r\n", i,
         enc624j600ReadPhyReg(interface, i));
   }

   //Terminate with a line feed
   TRACE_DEBUG("\r\n");
#endif
}
/**
 * @file enc624j600_driver.c
 * @brief ENC624J600/ENC424J600 Ethernet controller
 *
 * @section License
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.
 *
 * This file is part of CycloneTCP Open.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * @author Oryx Embedded SARL (www.oryx-embedded.com)
 * @version 2.0.2
 **/

//Switch to the appropriate trace level
#define TRACE_LEVEL NIC_TRACE_LEVEL

//Dependencies
#include "core/net.h"
#include "drivers/eth/enc624j600_driver.h"
#include "debug.h"


/**
 * @brief ENC624J600 driver
 *
 * NIC driver descriptor registered with the TCP/IP stack; the trailing
 * flags advertise broadcast/multicast/promiscuous capability
 **/

const NicDriver enc624j600Driver =
{
   NIC_TYPE_ETHERNET,
   ETH_MTU,
   enc624j600Init,
   enc624j600Tick,
   enc624j600EnableIrq,
   enc624j600DisableIrq,
   enc624j600EventHandler,
   enc624j600SendPacket,
   enc624j600UpdateMacAddrFilter,
   NULL,
   NULL,
   NULL,
   TRUE,
   TRUE,
   TRUE,
   FALSE
};


/**
 * @brief ENC624J600 controller initialization
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc624j600Init(NetInterface *interface)
{
   uint16_t temp;
   Enc624j600Context *context;

   //Debug message
   TRACE_INFO("Initializing ENC624J600 Ethernet controller...\r\n");

   //Initialize SPI
   interface->spiDriver->init();
   //Initialize external interrupt line
   interface->extIntDriver->init();

   //Point to the driver context
   context = (Enc624j600Context *) interface->nicContext;
   //Initialize driver specific variables
   context->nextPacket = ENC624J600_RX_BUFFER_START;

   //Allocate RX buffer
   context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE);
   //Failed to allocate memory?
   if(context->rxBuffer == NULL)
   {
      return ERROR_OUT_OF_MEMORY;
   }

   //Issue a system reset
   enc624j600SoftReset(interface);

   //Disable CLKOUT output
   enc624j600WriteReg(interface, ENC624J600_ECON2,
      ENC624J600_ECON2_ETHEN | ENC624J600_ECON2_STRCH);

   //Optionally set the station MAC address
   if(macCompAddr(&interface->macAddr, &MAC_UNSPECIFIED_ADDR))
   {
      //Use the factory preprogrammed station address
      temp = enc624j600ReadReg(interface, ENC624J600_MAADR1);
      interface->macAddr.w[0] = letoh16(temp);

      temp = enc624j600ReadReg(interface, ENC624J600_MAADR2);
      interface->macAddr.w[1] = letoh16(temp);

      temp = enc624j600ReadReg(interface, ENC624J600_MAADR3);
      interface->macAddr.w[2] = letoh16(temp);

      //Generate the 64-bit interface identifier
      macAddrToEui64(&interface->macAddr, &interface->eui64);
   }
   else
   {
      //Override the factory preprogrammed address
      temp = htole16(interface->macAddr.w[0]);
      enc624j600WriteReg(interface, ENC624J600_MAADR1, temp);

      temp = htole16(interface->macAddr.w[1]);
      enc624j600WriteReg(interface, ENC624J600_MAADR2, temp);

      temp = htole16(interface->macAddr.w[2]);
      enc624j600WriteReg(interface, ENC624J600_MAADR3, temp);
   }

   //Set receive buffer location
   enc624j600WriteReg(interface, ENC624J600_ERXST, ENC624J600_RX_BUFFER_START);
   //Program the tail pointer ERXTAIL to the last even address of the buffer
   enc624j600WriteReg(interface, ENC624J600_ERXTAIL, ENC624J600_RX_BUFFER_STOP);

   //Configure the receive filters
   enc624j600WriteReg(interface, ENC624J600_ERXFCON, ENC624J600_ERXFCON_HTEN |
      ENC624J600_ERXFCON_CRCEN | ENC624J600_ERXFCON_RUNTEN |
      ENC624J600_ERXFCON_UCEN | ENC624J600_ERXFCON_BCEN);

   //Initialize the hash table
   enc624j600WriteReg(interface, ENC624J600_EHT1, 0x0000);
   enc624j600WriteReg(interface, ENC624J600_EHT2, 0x0000);
   enc624j600WriteReg(interface, ENC624J600_EHT3, 0x0000);
   enc624j600WriteReg(interface, ENC624J600_EHT4, 0x0000);

   //All short frames will be zero-padded to 60 bytes and a valid CRC is then
   //appended
   enc624j600WriteReg(interface, ENC624J600_MACON2,
      ENC624J600_MACON2_DEFER | ENC624J600_MACON2_PADCFG_AUTO |
      ENC624J600_MACON2_TXCRCEN | ENC624J600_MACON2_R1_DEFAULT);

   //Program the MAMXFL register with the maximum frame length to be accepted
   enc624j600WriteReg(interface, ENC624J600_MAMXFL, ETH_MAX_FRAME_SIZE);

   //PHY initialization (advertise 10/100, half/full duplex and pause)
   enc624j600WritePhyReg(interface, ENC624J600_PHANA,
      ENC624J600_PHANA_ADPAUS0 | ENC624J600_PHANA_AD100FD |
      ENC624J600_PHANA_AD100 | ENC624J600_PHANA_AD10FD |
      ENC624J600_PHANA_AD10 | ENC624J600_PHANA_ADIEEE_DEFAULT);

   //Clear interrupt flags
   enc624j600WriteReg(interface, ENC624J600_EIR, 0x0000);

   //Configure interrupts as desired
   enc624j600WriteReg(interface, ENC624J600_EIE, ENC624J600_EIE_INTIE |
      ENC624J600_EIE_LINKIE | ENC624J600_EIE_PKTIE | ENC624J600_EIE_TXIE |
      ENC624J600_EIE_TXABTIE);

   //Set RXEN to enable reception
   enc624j600SetBit(interface, ENC624J600_ECON1, ENC624J600_ECON1_RXEN);

   //Dump registers for debugging purpose
   enc624j600DumpReg(interface);
   enc624j600DumpPhyReg(interface);

   //Accept any packets from the upper layer
   osSetEvent(&interface->nicTxEvent);

   //Force the TCP/IP stack to poll the link state at startup
   interface->nicEvent = TRUE;
   //Notify the TCP/IP stack of the event
   osSetEvent(&netEvent);

   //Successful initialization
   return NO_ERROR;
}


/**
 * @brief ENC624J600 timer handler
 *
 * Intentionally empty: this driver is fully interrupt-driven and needs
 * no periodic work
 *
 * @param[in] interface Underlying network interface
 **/

void enc624j600Tick(NetInterface *interface)
{
}


/**
 * @brief Enable interrupts
 * @param[in] interface Underlying network interface
 **/

void enc624j600EnableIrq(NetInterface *interface)
{
   //Enable interrupts
   interface->extIntDriver->enableIrq();
}


/**
 * @brief Disable interrupts
 * @param[in] interface Underlying network interface
 **/

void enc624j600DisableIrq(NetInterface *interface)
{
   //Disable interrupts
   interface->extIntDriver->disableIrq();
}


/**
 * @brief ENC624J600 interrupt service routine
 * @param[in] interface Underlying network interface
 * @return TRUE if a higher priority task must be woken. Else FALSE is returned
 **/

bool_t enc624j600IrqHandler(NetInterface *interface)
{
   bool_t flag;
   uint16_t status;

   //This flag will be set if a higher priority task must be woken
   flag = FALSE;

   //Clear the INTIE bit, immediately after an interrupt event
   //(runs in interrupt context: only *FromIsr primitives are used here)
   enc624j600ClearBit(interface, ENC624J600_EIE, ENC624J600_EIE_INTIE);

   //Read interrupt status register
   status = enc624j600ReadReg(interface, ENC624J600_EIR);

   //Link status change?
   if((status & ENC624J600_EIR_LINKIF) != 0)
   {
      //Disable LINKIE interrupt (re-enabled by the event handler)
      enc624j600ClearBit(interface, ENC624J600_EIE, ENC624J600_EIE_LINKIE);

      //Set event flag
      interface->nicEvent = TRUE;
      //Notify the TCP/IP stack of the event
      flag |= osSetEventFromIsr(&netEvent);
   }

   //Packet received?
   if((status & ENC624J600_EIR_PKTIF) != 0)
   {
      //Disable PKTIE interrupt (re-enabled by the event handler)
      enc624j600ClearBit(interface, ENC624J600_EIE, ENC624J600_EIE_PKTIE);

      //Set event flag
      interface->nicEvent = TRUE;
      //Notify the TCP/IP stack of the event
      flag |= osSetEventFromIsr(&netEvent);
   }

   //Packet transmission complete?
   if((status & (ENC624J600_EIR_TXIF | ENC624J600_EIR_TXABTIF)) != 0)
   {
      //Clear interrupt flags
      enc624j600ClearBit(interface, ENC624J600_EIR,
         ENC624J600_EIR_TXIF | ENC624J600_EIR_TXABTIF);

      //Notify the TCP/IP stack that the transmitter is ready to send
      flag |= osSetEventFromIsr(&interface->nicTxEvent);
   }

   //Once the interrupt has been serviced, the INTIE bit
   //is set again to re-enable interrupts
   enc624j600SetBit(interface, ENC624J600_EIE, ENC624J600_EIE_INTIE);

   //A higher priority task must be woken?
   return flag;
}


/**
 * @brief ENC624J600 event handler
 *
 * Deferred (task-context) processing for the events flagged by the ISR:
 * link changes and received packets
 *
 * @param[in] interface Underlying network interface
 **/

void enc624j600EventHandler(NetInterface *interface)
{
   error_t error;
   uint16_t status;
   uint16_t value;

   //Read interrupt status register
   status = enc624j600ReadReg(interface, ENC624J600_EIR);

   //Check whether the link state has changed
   if((status & ENC624J600_EIR_LINKIF) != 0)
   {
      //Clear interrupt flag
      enc624j600ClearBit(interface, ENC624J600_EIR, ENC624J600_EIR_LINKIF);
      //Read Ethernet status register
      value = enc624j600ReadReg(interface, ENC624J600_ESTAT);

      //Check link state
      if((value & ENC624J600_ESTAT_PHYLNK) != 0)
      {
         //Read PHY status register 3
         value = enc624j600ReadPhyReg(interface, ENC624J600_PHSTAT3);

         //Get current speed
         if((value & ENC624J600_PHSTAT3_SPDDPX1) != 0)
         {
            interface->linkSpeed = NIC_LINK_SPEED_100MBPS;
         }
         else
         {
            interface->linkSpeed = NIC_LINK_SPEED_10MBPS;
         }

         //Determine the new duplex mode
         if((value & ENC624J600_PHSTAT3_SPDDPX2) != 0)
         {
            interface->duplexMode = NIC_FULL_DUPLEX_MODE;
         }
         else
         {
            interface->duplexMode = NIC_HALF_DUPLEX_MODE;
         }

         //Link is up
         interface->linkState = TRUE;

         //Update MAC configuration parameters for proper operation
         enc624j600UpdateMacConfig(interface);
      }
      else
      {
         //Link is down
         interface->linkState = FALSE;
      }

      //Process link state change event
      nicNotifyLinkChange(interface);
   }

   //Check whether a packet has been received?
   if((status & ENC624J600_EIR_PKTIF) != 0)
   {
      //Clear interrupt flag
      enc624j600ClearBit(interface, ENC624J600_EIR, ENC624J600_EIR_PKTIF);

      //Process all pending packets
      do
      {
         //Read incoming packet
         error = enc624j600ReceivePacket(interface);

         //No more data in the receive buffer?
      } while(error != ERROR_BUFFER_EMPTY);
   }

   //Re-enable LINKIE and PKTIE interrupts
   enc624j600SetBit(interface, ENC624J600_EIE,
      ENC624J600_EIE_LINKIE | ENC624J600_EIE_PKTIE);
}


/**
 * @brief Send a packet
 * @param[in] interface Underlying network interface
 * @param[in] buffer Multi-part buffer containing the data to send
 * @param[in] offset Offset to the first data byte
 * @param[in] ancillary Additional options passed to the stack along with
 *   the packet
 * @return Error code
 **/

error_t enc624j600SendPacket(NetInterface *interface,
   const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary)
{
   size_t length;

   //Retrieve the length of the packet
   length = netBufferGetLength(buffer) - offset;

   //Check the frame length (1536 is the largest frame the TX buffer accepts)
   if(length > 1536)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
      //Report an error
      return ERROR_INVALID_LENGTH;
   }

   //Make sure the link is up before transmitting the frame
   if(!interface->linkState)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
      //Drop current packet
      return NO_ERROR;
   }

   //Ensure that the transmitter is ready to send
   if(enc624j600ReadReg(interface, ENC624J600_ECON1) & ENC624J600_ECON1_TXRTS)
   {
      return ERROR_FAILURE;
   }

   //Point to the SRAM buffer
   enc624j600WriteReg(interface, ENC624J600_EGPWRPT,
      ENC624J600_TX_BUFFER_START);

   //Copy the packet to the SRAM buffer
   enc624j600WriteBuffer(interface, ENC624J600_CMD_WGPDATA, buffer, offset);

   //Program ETXST to the start address of the packet
   enc624j600WriteReg(interface, ENC624J600_ETXST,
      ENC624J600_TX_BUFFER_START);

   //Program ETXLEN with the length of data copied to the memory
   enc624j600WriteReg(interface, ENC624J600_ETXLEN, length);

   //Clear TXIF and TXABTIF interrupt flags
   enc624j600ClearBit(interface, ENC624J600_EIR,
      ENC624J600_EIR_TXIF | ENC624J600_EIR_TXABTIF);

   //Set the TXRTS bit to initiate transmission
   enc624j600SetBit(interface, ENC624J600_ECON1, ENC624J600_ECON1_TXRTS);

   //Successful processing
   return NO_ERROR;
}


/**
 * @brief Receive a packet
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc624j600ReceivePacket(NetInterface *interface)
{
   error_t error;
   uint16_t length;
   uint32_t status;
   uint8_t header[8];
   Enc624j600Context *context;

   //Point to the driver context
   context = (Enc624j600Context *) interface->nicContext;

   //Verify that a packet is waiting by ensuring that PKTCNT is non-zero
   if(enc624j600ReadReg(interface, ENC624J600_ESTAT) & ENC624J600_ESTAT_PKTCNT)
   {
      //Point to the next packet
      enc624j600WriteReg(interface, ENC624J600_ERXRDPT,
         context->nextPacket);

      //The packet is preceded by a 8-byte header
      //(read in a single SPI transaction)
      enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA, header,
         sizeof(header));

      //The first two bytes are the address of the next packet
      context->nextPacket = LOAD16LE(header);
      //Get the length of the received packet
      length = LOAD16LE(header + 2);
      //Get the receive status vector (RSV)
      status = LOAD32LE(header + 4);

      //Make sure no error occurred
      if((status & ENC624J600_RSV_RECEIVED_OK) != 0)
      {
         //Limit the number of data to read so the copy cannot overflow
         //the ETH_MAX_FRAME_SIZE byte rxBuffer
         length = MIN(length, ETH_MAX_FRAME_SIZE);

         //Read the Ethernet frame
         enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,
            context->rxBuffer, length);

         //Valid packet received
         error = NO_ERROR;
      }
      else
      {
         //The received packet contains an error
         error = ERROR_INVALID_PACKET;
      }

      //Update the ERXTAIL pointer value to the point where the packet
      //has been processed, taking care to wrap back at the end of the
      //received memory buffer
      if(context->nextPacket == ENC624J600_RX_BUFFER_START)
      {
         enc624j600WriteReg(interface, ENC624J600_ERXTAIL,
            ENC624J600_RX_BUFFER_STOP);
      }
      else
      {
         //NOTE(review): assumes nextPacket is always even so that
         //nextPacket - 2 is a valid even tail address - confirm against
         //the data sheet's RX buffer alignment guarantees
         enc624j600WriteReg(interface, ENC624J600_ERXTAIL,
            context->nextPacket - 2);
      }

      //Set PKTDEC to decrement the PKTCNT bits
      enc624j600SetBit(interface, ENC624J600_ECON1, ENC624J600_ECON1_PKTDEC);
   }
   else
   {
      //No more data in the receive buffer
      error = ERROR_BUFFER_EMPTY;
   }

   //Check whether a valid packet has been received
   if(!error)
   {
      NetRxAncillary ancillary;

      //Additional options can be passed to the stack along with the packet
      ancillary = NET_DEFAULT_RX_ANCILLARY;

      //Pass the packet to the upper layer
      nicProcessPacket(interface, context->rxBuffer, length, &ancillary);
   }

   //Return status code
   return error;
}


/**
 * @brief Configure MAC address filtering
 *
 * Rebuilds the 64-bit hash table (EHT1..EHT4) from the interface's
 * MAC address filter entries
 *
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc624j600UpdateMacAddrFilter(NetInterface *interface)
{
   uint_t i;
   uint_t k;
   uint32_t crc;
   uint16_t hashTable[4];
   MacFilterEntry *entry;

   //Debug message
   TRACE_DEBUG("Updating MAC filter...\r\n");

   //Clear hash table
   osMemset(hashTable, 0, sizeof(hashTable));

   //The MAC address filter contains the list of MAC addresses to accept
   //when receiving an Ethernet frame
   for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++)
   {
      //Point to the current entry
      entry = &interface->macAddrFilter[i];

      //Valid entry?
      if(entry->refCount > 0)
      {
         //Compute CRC over the current MAC address
         crc = enc624j600CalcCrc(&entry->addr, sizeof(MacAddr));
         //Calculate the corresponding index in the table
         //(bits 28:23 of the CRC select one of 64 hash bits)
         k = (crc >> 23) & 0x3F;
         //Update hash table contents
         hashTable[k / 16] |= (1 << (k % 16));
      }
   }

   //Write the hash table to the ENC624J600 controller
   enc624j600WriteReg(interface, ENC624J600_EHT1, hashTable[0]);
   enc624j600WriteReg(interface, ENC624J600_EHT2, hashTable[1]);
   enc624j600WriteReg(interface, ENC624J600_EHT3, hashTable[2]);
   enc624j600WriteReg(interface, ENC624J600_EHT4, hashTable[3]);

   //Debug message
   TRACE_DEBUG("  EHT1 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_EHT1));
   TRACE_DEBUG("  EHT2 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_EHT2));
   TRACE_DEBUG("  EHT3 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_EHT3));
   TRACE_DEBUG("  EHT4 = %04" PRIX16 "\r\n",
      enc624j600ReadReg(interface, ENC624J600_EHT4));

   //Successful processing
   return NO_ERROR;
}


/**
 * @brief Adjust MAC configuration parameters for proper operation
 * @param[in] interface Underlying network interface
 **/

void enc624j600UpdateMacConfig(NetInterface *interface)
{
   uint16_t duplexMode;

   //Determine the new duplex mode by reading the PHYDPX bit
   duplexMode = enc624j600ReadReg(interface, ENC624J600_ESTAT) &
      ENC624J600_ESTAT_PHYDPX;

   //Full-duplex mode?
   if(duplexMode)
   {
      //Configure the FULDPX bit to match the current duplex mode
      enc624j600WriteReg(interface, ENC624J600_MACON2,
         ENC624J600_MACON2_DEFER | ENC624J600_MACON2_PADCFG_AUTO |
         ENC624J600_MACON2_TXCRCEN | ENC624J600_MACON2_R1_DEFAULT |
         ENC624J600_MACON2_FULDPX);

      //Configure the Back-to-Back Inter-Packet Gap register
      enc624j600WriteReg(interface, ENC624J600_MABBIPG,
         ENC624J600_MABBIPG_BBIPG_DEFAULT_FD);
   }
   //Half-duplex mode?
   else
   {
      //Configure the FULDPX bit to match the current duplex mode
      enc624j600WriteReg(interface, ENC624J600_MACON2,
         ENC624J600_MACON2_DEFER | ENC624J600_MACON2_PADCFG_AUTO |
         ENC624J600_MACON2_TXCRCEN | ENC624J600_MACON2_R1_DEFAULT);

      //Configure the Back-to-Back Inter-Packet Gap register
      enc624j600WriteReg(interface, ENC624J600_MABBIPG,
         ENC624J600_MABBIPG_BBIPG_DEFAULT_HD);
   }
}


/**
 * @brief Reset ENC624J600 controller
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc624j600SoftReset(NetInterface *interface)
{
   //Wait for the SPI interface to be ready
   do
   {
      //Write 0x1234 to EUDAST
      enc624j600WriteReg(interface, ENC624J600_EUDAST, 0x1234);
      //Read back register and check contents
   } while(enc624j600ReadReg(interface, ENC624J600_EUDAST) != 0x1234);

   //Poll CLKRDY and wait for it to become set
   while((enc624j600ReadReg(interface, ENC624J600_ESTAT) &
      ENC624J600_ESTAT_CLKRDY) == 0)
   {
   }

   //Issue a system reset command by setting ETHRST
   enc624j600SetBit(interface, ENC624J600_ECON2, ENC624J600_ECON2_ETHRST);
   //Wait at least 25us for the reset to take place
   sleep(1);

   //Read EUDAST to confirm that the system reset took place.
   //EUDAST should have reverted back to its reset default
   if(enc624j600ReadReg(interface, ENC624J600_EUDAST) != 0x0000)
   {
      return ERROR_FAILURE;
   }

   //Wait at least 256us for the PHY registers and PHY
   //status bits to become available
   sleep(1);

   //The controller is now ready to accept further commands
   return NO_ERROR;
}


/**
 * @brief Write ENC624J600 register
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 * @param[in] data Register value
 **/

void enc624j600WriteReg(NetInterface *interface, uint8_t address,
   uint16_t data)
{
   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(ENC624J600_CMD_WCRU);
   //Write register address
   interface->spiDriver->transfer(address);

   //Write register value (little-endian: LSB first)
   interface->spiDriver->transfer(LSB(data));
   interface->spiDriver->transfer(MSB(data));

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();
}


/**
 * @brief Read ENC624J600 register
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 * @return Register value
 **/

uint16_t enc624j600ReadReg(NetInterface *interface, uint8_t address)
{
   uint16_t data;

   //Pull the CS pin low
   interface->spiDriver->assertCs();

   //Write opcode
   interface->spiDriver->transfer(ENC624J600_CMD_RCRU);
   //Write register address
   interface->spiDriver->transfer(address);

   //Read the lower 8 bits of data
   data = interface->spiDriver->transfer(0x00);
   //Read the upper 8 bits of data
   data |= interface->spiDriver->transfer(0x00) << 8;

   //Terminate the operation by raising the CS pin
   interface->spiDriver->deassertCs();

   //Return register contents
   return data;
}


/**
 * @brief Write PHY register
 * @param[in] interface Underlying network interface
 * @param[in] address PHY register address
 * @param[in] data Register value
 **/

void enc624j600WritePhyReg(NetInterface *interface, uint8_t address,
   uint16_t data)
{
   uint16_t status;

   //Write the address of the PHY register to write to
enc624j600WriteReg(interface, ENC624J600_MIREGADR, ENC624J600_MIREGADR_R12_8_DEFAULT | address); //Write the 16 bits of data into the MIWR register enc624j600WriteReg(interface, ENC624J600_MIWR, data); //Wait until the PHY register has been written do { //Read MII Management Status register status = enc624j600ReadReg(interface, ENC624J600_MISTAT); //Check the value of the busy status bit } while((status & ENC624J600_MISTAT_BUSY) != 0); } /** * @brief Read PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @return Register value **/ uint16_t enc624j600ReadPhyReg(NetInterface *interface, uint8_t address) { uint16_t status; //Write the address of the PHY register to read from enc624j600WriteReg(interface, ENC624J600_MIREGADR, ENC624J600_MIREGADR_R12_8_DEFAULT | address); //Start read operation enc624j600WriteReg(interface, ENC624J600_MICMD, ENC624J600_MICMD_MIIRD); //Wait at least 25.6us before polling the BUSY bit usleep(100); //Wait for the read operation to complete do { //Read MII Management Status register status = enc624j600ReadReg(interface, ENC624J600_MISTAT); //Check the value of the busy status bit } while((status & ENC624J600_MISTAT_BUSY) != 0); //Clear command register enc624j600WriteReg(interface, ENC624J600_MICMD, 0x00); //Return register contents return enc624j600ReadReg(interface, ENC624J600_MIRD); } /** * @brief Write SRAM buffer * @param[in] interface Underlying network interface * @param[in] opcode SRAM buffer operation * @param[in] buffer Multi-part buffer containing the data to be written * @param[in] offset Offset to the first data byte **/ void enc624j600WriteBuffer(NetInterface *interface, uint8_t opcode, const NetBuffer *buffer, size_t offset) { uint_t i; size_t j; size_t n; uint8_t *p; //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(opcode); //Loop through data chunks for(i = 0; i < buffer->chunkCount; i++) { //Is there any data to 
copy from the current chunk? if(offset < buffer->chunk[i].length) { //Point to the first byte to be read p = (uint8_t *) buffer->chunk[i].address + offset; //Compute the number of bytes to copy at a time n = buffer->chunk[i].length - offset; //Copy data to SRAM buffer for(j = 0; j < n; j++) { interface->spiDriver->transfer(p[j]); } //Process the next block from the start offset = 0; } else { //Skip the current chunk offset -= buffer->chunk[i].length; } } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Read SRAM buffer * @param[in] interface Underlying network interface * @param[in] opcode SRAM buffer operation * @param[in] data Buffer where to store the incoming data * @param[in] length Number of data to read **/ void enc624j600ReadBuffer(NetInterface *interface, uint8_t opcode, uint8_t *data, size_t length) { size_t i; //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(opcode); //Copy data from SRAM buffer for(i = 0; i < length; i++) { data[i] = interface->spiDriver->transfer(0x00); } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Set bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to set in the target register **/ void enc624j600SetBit(NetInterface *interface, uint8_t address, uint16_t mask) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC624J600_CMD_BFSU); //Write register address interface->spiDriver->transfer(address); //Write bit mask interface->spiDriver->transfer(LSB(mask)); interface->spiDriver->transfer(MSB(mask)); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Clear bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to clear in the target register 
**/ void enc624j600ClearBit(NetInterface *interface, uint8_t address, uint16_t mask) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC624J600_CMD_BFCU); //Write register address interface->spiDriver->transfer(address); //Write bit mask interface->spiDriver->transfer(LSB(mask)); interface->spiDriver->transfer(MSB(mask)); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief CRC calculation using the polynomial 0x4C11DB7 * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t enc624j600CalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //The message is processed bit by bit for(j = 0; j < 8; j++) { //Update CRC value if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0) { crc = (crc << 1) ^ 0x04C11DB7; } else { crc = crc << 1; } } } //Return CRC value return crc; } /** * @brief Dump registers for debugging purpose * @param[in] interface Underlying network interface **/ void enc624j600DumpReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint8_t i; uint8_t bank; uint16_t address; //Display header TRACE_DEBUG(" Bank 0 Bank 1 Bank 2 Bank 3 Unbanked\r\n"); //Loop through register addresses for(i = 0; i < 32; i += 2) { //Display register address TRACE_DEBUG("%02" PRIX8 ": ", i); //Loop through bank numbers for(bank = 0; bank < 5; bank++) { //Format register address address = 0x7E00 | (bank << 5) | i; //Display register contents TRACE_DEBUG("0x%04" PRIX16 " ", enc624j600ReadReg(interface, address)); } //Jump to the following line TRACE_DEBUG("\r\n"); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif } /** * @brief Dump PHY registers for debugging 
purpose * @param[in] interface Underlying network interface **/ void enc624j600DumpPhyReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint8_t i; //Loop through PHY registers for(i = 0; i < 32; i++) { //Display current PHY register TRACE_DEBUG("%02" PRIX8 ": 0x%04" PRIX16 "\r\n", i, enc624j600ReadPhyReg(interface, i)); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif }
uint16_t enc624j600ReadPhyReg(NetInterface *interface, uint8_t address) { //Write the address of the PHY register to read from enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR, MIREGADR_R8 | address); //Start read operation enc624j600WriteReg(interface, ENC624J600_REG_MICMD, MICMD_MIIRD); //Wait at least 25.6us before polling the BUSY bit usleep(100); //Wait for the read operation to complete while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) & MISTAT_BUSY) != 0) { } //Clear command register enc624j600WriteReg(interface, ENC624J600_REG_MICMD, 0x00); //Return register contents return enc624j600ReadReg(interface, ENC624J600_REG_MIRD); }
uint16_t enc624j600ReadPhyReg(NetInterface *interface, uint8_t address) { uint16_t status; //Write the address of the PHY register to read from enc624j600WriteReg(interface, ENC624J600_MIREGADR, ENC624J600_MIREGADR_R12_8_DEFAULT | address); //Start read operation enc624j600WriteReg(interface, ENC624J600_MICMD, ENC624J600_MICMD_MIIRD); //Wait at least 25.6us before polling the BUSY bit usleep(100); //Wait for the read operation to complete do { //Read MII Management Status register status = enc624j600ReadReg(interface, ENC624J600_MISTAT); //Check the value of the busy status bit } while((status & ENC624J600_MISTAT_BUSY) != 0); //Clear command register enc624j600WriteReg(interface, ENC624J600_MICMD, 0x00); //Return register contents return enc624j600ReadReg(interface, ENC624J600_MIRD); }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (102, ' enc624j600WriteReg(interface, ENC624J600_ECON2, ENC624J600_ECON2_ETHEN |'), (103, ' ENC624J600_ECON2_STRCH);'), (109, ' temp = enc624j600ReadReg(interface, ENC624J600_MAADR1);'), (111, ' temp = enc624j600ReadReg(interface, ENC624J600_MAADR2);'), (113, ' temp = enc624j600ReadReg(interface, ENC624J600_MAADR3);'), (123, ' enc624j600WriteReg(interface, ENC624J600_MAADR1, temp);'), (125, ' enc624j600WriteReg(interface, ENC624J600_MAADR2, temp);'), (127, ' enc624j600WriteReg(interface, ENC624J600_MAADR3, temp);'), (131, ' enc624j600WriteReg(interface, ENC624J600_ERXST, ENC624J600_RX_BUFFER_START);'), (133, ' enc624j600WriteReg(interface, ENC624J600_ERXTAIL, ENC624J600_RX_BUFFER_STOP);'), (136, ' enc624j600WriteReg(interface, ENC624J600_ERXFCON, ENC624J600_ERXFCON_HTEN |'), (137, ' ENC624J600_ERXFCON_CRCEN | ENC624J600_ERXFCON_RUNTEN |'), (138, ' ENC624J600_ERXFCON_UCEN | ENC624J600_ERXFCON_BCEN);'), (141, ' enc624j600WriteReg(interface, ENC624J600_EHT1, 0x0000);'), (142, ' enc624j600WriteReg(interface, ENC624J600_EHT2, 0x0000);'), (143, ' enc624j600WriteReg(interface, ENC624J600_EHT3, 0x0000);'), (144, ' enc624j600WriteReg(interface, ENC624J600_EHT4, 0x0000);'), (146, ' //All short frames will be zero-padded to 60 bytes and a valid CRC is then'), (147, ' //appended'), (148, ' enc624j600WriteReg(interface, ENC624J600_MACON2, ENC624J600_MACON2_DEFER |'), (149, ' ENC624J600_MACON2_PADCFG_AUTO | ENC624J600_MACON2_TXCRCEN |'), (150, ' ENC624J600_MACON2_R1_DEFAULT);'), (153, ' enc624j600WriteReg(interface, ENC624J600_MAMXFL, ETH_MAX_FRAME_SIZE);'), (156, ' enc624j600WritePhyReg(interface, ENC624J600_PHANA, ENC624J600_PHANA_ADPAUS0 |'), (157, ' ENC624J600_PHANA_AD100FD | ENC624J600_PHANA_AD100 | ENC624J600_PHANA_AD10FD |'), (158, ' ENC624J600_PHANA_AD10 | ENC624J600_PHANA_ADIEEE_DEFAULT);'), (161, ' enc624j600WriteReg(interface, ENC624J600_EIR, 0x0000);'), 
(164, ' enc624j600WriteReg(interface, ENC624J600_EIE, ENC624J600_EIE_INTIE |'), (165, ' ENC624J600_EIE_LINKIE | ENC624J600_EIE_PKTIE | ENC624J600_EIE_TXIE |'), (166, ' ENC624J600_EIE_TXABTIE);'), (169, ' enc624j600SetBit(interface, ENC624J600_ECON1, ENC624J600_ECON1_RXEN);'), (237, ' enc624j600ClearBit(interface, ENC624J600_EIE, ENC624J600_EIE_INTIE);'), (240, ' status = enc624j600ReadReg(interface, ENC624J600_EIR);'), (243, ' if((status & ENC624J600_EIR_LINKIF) != 0)'), (246, ' enc624j600ClearBit(interface, ENC624J600_EIE, ENC624J600_EIE_LINKIE);'), (255, ' if((status & ENC624J600_EIR_PKTIF) != 0)'), (258, ' enc624j600ClearBit(interface, ENC624J600_EIE, ENC624J600_EIE_PKTIE);'), (267, ' if((status & (ENC624J600_EIR_TXIF | ENC624J600_EIR_TXABTIF)) != 0)'), (270, ' enc624j600ClearBit(interface, ENC624J600_EIR, ENC624J600_EIR_TXIF |'), (271, ' ENC624J600_EIR_TXABTIF);'), (279, ' enc624j600SetBit(interface, ENC624J600_EIE, ENC624J600_EIE_INTIE);'), (298, ' status = enc624j600ReadReg(interface, ENC624J600_EIR);'), (301, ' if((status & ENC624J600_EIR_LINKIF) != 0)'), (304, ' enc624j600ClearBit(interface, ENC624J600_EIR, ENC624J600_EIR_LINKIF);'), (306, ' value = enc624j600ReadReg(interface, ENC624J600_ESTAT);'), (309, ' if((value & ENC624J600_ESTAT_PHYLNK) != 0)'), (312, ' value = enc624j600ReadPhyReg(interface, ENC624J600_PHSTAT3);'), (315, ' if((value & ENC624J600_PHSTAT3_SPDDPX1) != 0)'), (325, ' if((value & ENC624J600_PHSTAT3_SPDDPX2) != 0)'), (351, ' if((status & ENC624J600_EIR_PKTIF) != 0)'), (354, ' enc624j600ClearBit(interface, ENC624J600_EIR, ENC624J600_EIR_PKTIF);'), (367, ' enc624j600SetBit(interface, ENC624J600_EIE, ENC624J600_EIE_LINKIE |'), (368, ' ENC624J600_EIE_PKTIE);'), (409, ' if(enc624j600ReadReg(interface, ENC624J600_ECON1) & ENC624J600_ECON1_TXRTS)'), (415, ' enc624j600WriteReg(interface, ENC624J600_EGPWRPT, ENC624J600_TX_BUFFER_START);'), (420, ' enc624j600WriteReg(interface, ENC624J600_ETXST, ENC624J600_TX_BUFFER_START);'), (422, ' 
enc624j600WriteReg(interface, ENC624J600_ETXLEN, length);'), (425, ' enc624j600ClearBit(interface, ENC624J600_EIR, ENC624J600_EIR_TXIF |'), (426, ' ENC624J600_EIR_TXABTIF);'), (427, ''), (429, ' enc624j600SetBit(interface, ENC624J600_ECON1, ENC624J600_ECON1_TXRTS);'), (445, ' uint16_t length;'), (447, ' uint8_t header[8];'), (454, ' if(enc624j600ReadReg(interface, ENC624J600_ESTAT) & ENC624J600_ESTAT_PKTCNT)'), (457, ' enc624j600WriteReg(interface, ENC624J600_ERXRDPT, context->nextPacket);'), (459, ' //The packet is preceded by a 8-byte header'), (460, ' enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA, header, sizeof(header));'), (462, ' //The first two bytes are the address of the next packet'), (463, ' context->nextPacket = LOAD16LE(header);'), (464, ' //Get the length of the received packet'), (465, ' length = LOAD16LE(header + 2);'), (466, ' //Get the receive status vector (RSV)'), (467, ' status = LOAD32LE(header + 4);'), (470, ' if((status & ENC624J600_RSV_RECEIVED_OK) != 0)'), (473, ' length = MIN(length, ETH_MAX_FRAME_SIZE);'), (474, ''), (476, ' enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,'), (477, ' context->rxBuffer, length);'), (478, ''), (493, ' enc624j600WriteReg(interface, ENC624J600_ERXTAIL,'), (494, ' ENC624J600_RX_BUFFER_STOP);'), (498, ' enc624j600WriteReg(interface, ENC624J600_ERXTAIL,'), (499, ' context->nextPacket - 2);'), (503, ' enc624j600SetBit(interface, ENC624J600_ECON1, ENC624J600_ECON1_PKTDEC);'), (520, ' nicProcessPacket(interface, context->rxBuffer, length, &ancillary);'), (568, ' enc624j600WriteReg(interface, ENC624J600_EHT1, hashTable[0]);'), (569, ' enc624j600WriteReg(interface, ENC624J600_EHT2, hashTable[1]);'), (570, ' enc624j600WriteReg(interface, ENC624J600_EHT3, hashTable[2]);'), (571, ' enc624j600WriteReg(interface, ENC624J600_EHT4, hashTable[3]);'), (574, ' TRACE_DEBUG(" EHT1 = %04" PRIX16 "\\r\\n", enc624j600ReadReg(interface, ENC624J600_EHT1));'), (575, ' TRACE_DEBUG(" EHT2 = %04" PRIX16 "\\r\\n", 
enc624j600ReadReg(interface, ENC624J600_EHT2));'), (576, ' TRACE_DEBUG(" EHT3 = %04" PRIX16 "\\r\\n", enc624j600ReadReg(interface, ENC624J600_EHT3));'), (577, ' TRACE_DEBUG(" EHT4 = %04" PRIX16 "\\r\\n", enc624j600ReadReg(interface, ENC624J600_EHT4));'), (594, ' duplexMode = enc624j600ReadReg(interface, ENC624J600_ESTAT) & ENC624J600_ESTAT_PHYDPX;'), (600, ' enc624j600WriteReg(interface, ENC624J600_MACON2, ENC624J600_MACON2_DEFER |'), (601, ' ENC624J600_MACON2_PADCFG_AUTO | ENC624J600_MACON2_TXCRCEN |'), (602, ' ENC624J600_MACON2_R1_DEFAULT | ENC624J600_MACON2_FULDPX);'), (603, ''), (605, ' enc624j600WriteReg(interface, ENC624J600_MABBIPG,'), (606, ' ENC624J600_MABBIPG_BBIPG_DEFAULT_FD);'), (612, ' enc624j600WriteReg(interface, ENC624J600_MACON2, ENC624J600_MACON2_DEFER |'), (613, ' ENC624J600_MACON2_PADCFG_AUTO | ENC624J600_MACON2_TXCRCEN |'), (614, ' ENC624J600_MACON2_R1_DEFAULT);'), (615, ''), (617, ' enc624j600WriteReg(interface, ENC624J600_MABBIPG,'), (618, ' ENC624J600_MABBIPG_BBIPG_DEFAULT_HD);'), (635, ' enc624j600WriteReg(interface, ENC624J600_EUDAST, 0x1234);'), (637, ' } while(enc624j600ReadReg(interface, ENC624J600_EUDAST) != 0x1234);'), (640, ' while((enc624j600ReadReg(interface, ENC624J600_ESTAT) & ENC624J600_ESTAT_CLKRDY) == 0)'), (645, ' enc624j600SetBit(interface, ENC624J600_ECON2, ENC624J600_ECON2_ETHRST);'), (651, ' if(enc624j600ReadReg(interface, ENC624J600_EUDAST) != 0x0000)'), (732, ' uint16_t status;'), (733, ''), (735, ' enc624j600WriteReg(interface, ENC624J600_MIREGADR,'), (736, ' ENC624J600_MIREGADR_R12_8_DEFAULT | address);'), (737, ''), (739, ' enc624j600WriteReg(interface, ENC624J600_MIWR, data);'), (742, ' do'), (744, ' //Read MII Management Status register'), (745, ' status = enc624j600ReadReg(interface, ENC624J600_MISTAT);'), (746, ' //Check the value of the busy status bit'), (747, ' } while((status & ENC624J600_MISTAT_BUSY) != 0);'), (760, ' uint16_t status;'), (761, ''), (763, ' enc624j600WriteReg(interface, 
ENC624J600_MIREGADR,'), (764, ' ENC624J600_MIREGADR_R12_8_DEFAULT | address);'), (765, ''), (767, ' enc624j600WriteReg(interface, ENC624J600_MICMD, ENC624J600_MICMD_MIIRD);'), (771, ''), (773, ' do'), (775, ' //Read MII Management Status register'), (776, ' status = enc624j600ReadReg(interface, ENC624J600_MISTAT);'), (777, ' //Check the value of the busy status bit'), (778, ' } while((status & ENC624J600_MISTAT_BUSY) != 0);'), (781, ' enc624j600WriteReg(interface, ENC624J600_MICMD, 0x00);'), (784, ' return enc624j600ReadReg(interface, ENC624J600_MIRD);')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (102, ' enc624j600WriteReg(interface, ENC624J600_REG_ECON2, ECON2_ETHEN | ECON2_STRCH);'), (108, ' temp = enc624j600ReadReg(interface, ENC624J600_REG_MAADR1);'), (110, ' temp = enc624j600ReadReg(interface, ENC624J600_REG_MAADR2);'), (112, ' temp = enc624j600ReadReg(interface, ENC624J600_REG_MAADR3);'), (122, ' enc624j600WriteReg(interface, ENC624J600_REG_MAADR1, temp);'), (124, ' enc624j600WriteReg(interface, ENC624J600_REG_MAADR2, temp);'), (126, ' enc624j600WriteReg(interface, ENC624J600_REG_MAADR3, temp);'), (130, ' enc624j600WriteReg(interface, ENC624J600_REG_ERXST, ENC624J600_RX_BUFFER_START);'), (132, ' enc624j600WriteReg(interface, ENC624J600_REG_ERXTAIL, ENC624J600_RX_BUFFER_STOP);'), (135, ' enc624j600WriteReg(interface, ENC624J600_REG_ERXFCON, ERXFCON_HTEN |'), (136, ' ERXFCON_CRCEN | ERXFCON_RUNTEN | ERXFCON_UCEN | ERXFCON_BCEN);'), (139, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT1, 0x0000);'), (140, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT2, 0x0000);'), (141, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT3, 0x0000);'), (142, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT4, 0x0000);'), (144, ' //All short frames will be zero-padded to 60 bytes and a valid CRC is then appended'), (145, ' enc624j600WriteReg(interface, ENC624J600_REG_MACON2,'), (146, ' MACON2_DEFER | 
MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1);'), (149, ' enc624j600WriteReg(interface, ENC624J600_REG_MAMXFL, ETH_MAX_FRAME_SIZE);'), (152, ' enc624j600WritePhyReg(interface, ENC624J600_PHY_REG_PHANA, PHANA_ADPAUS0 |'), (153, ' PHANA_AD100FD | PHANA_AD100 | PHANA_AD10FD | PHANA_AD10 | PHANA_ADIEEE0);'), (156, ' enc624j600WriteReg(interface, ENC624J600_REG_EIR, 0x0000);'), (159, ' enc624j600WriteReg(interface, ENC624J600_REG_EIE, EIE_INTIE |'), (160, ' EIE_LINKIE | EIE_PKTIE | EIE_TXIE | EIE_TXABTIE);'), (163, ' enc624j600SetBit(interface, ENC624J600_REG_ECON1, ECON1_RXEN);'), (231, ' enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_INTIE);'), (234, ' status = enc624j600ReadReg(interface, ENC624J600_REG_EIR);'), (237, ' if((status & EIR_LINKIF) != 0)'), (240, ' enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_LINKIE);'), (249, ' if((status & EIR_PKTIF) != 0)'), (252, ' enc624j600ClearBit(interface, ENC624J600_REG_EIE, EIE_PKTIE);'), (261, ' if((status & (EIR_TXIF | EIR_TXABTIF)) != 0)'), (264, ' enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_TXIF | EIR_TXABTIF);'), (272, ' enc624j600SetBit(interface, ENC624J600_REG_EIE, EIE_INTIE);'), (291, ' status = enc624j600ReadReg(interface, ENC624J600_REG_EIR);'), (294, ' if((status & EIR_LINKIF) != 0)'), (297, ' enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_LINKIF);'), (299, ' value = enc624j600ReadReg(interface, ENC624J600_REG_ESTAT);'), (302, ' if((value & ESTAT_PHYLNK) != 0)'), (305, ' value = enc624j600ReadPhyReg(interface, ENC624J600_PHY_REG_PHSTAT3);'), (308, ' if((value & PHSTAT3_SPDDPX1) != 0)'), (318, ' if((value & PHSTAT3_SPDDPX2) != 0)'), (344, ' if((status & EIR_PKTIF) != 0)'), (347, ' enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_PKTIF);'), (360, ' enc624j600SetBit(interface, ENC624J600_REG_EIE, EIE_LINKIE | EIE_PKTIE);'), (401, ' if(enc624j600ReadReg(interface, ENC624J600_REG_ECON1) & ECON1_TXRTS)'), (407, ' enc624j600WriteReg(interface, ENC624J600_REG_EGPWRPT, 
ENC624J600_TX_BUFFER_START);'), (412, ' enc624j600WriteReg(interface, ENC624J600_REG_ETXST, ENC624J600_TX_BUFFER_START);'), (414, ' enc624j600WriteReg(interface, ENC624J600_REG_ETXLEN, length);'), (417, ' enc624j600ClearBit(interface, ENC624J600_REG_EIR, EIR_TXIF | EIR_TXABTIF);'), (419, ' enc624j600SetBit(interface, ENC624J600_REG_ECON1, ECON1_TXRTS);'), (435, ' uint16_t n;'), (443, ' if(enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) & ESTAT_PKTCNT)'), (446, ' enc624j600WriteReg(interface, ENC624J600_REG_ERXRDPT, context->nextPacket);'), (447, ''), (448, ' //Read the first two bytes, which are the address of the next packet'), (449, ' enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,'), (450, ' (uint8_t *) &context->nextPacket, sizeof(uint16_t));'), (451, ''), (452, ' //Convert the value to host byte order'), (453, ' context->nextPacket = letoh16(context->nextPacket);'), (454, ''), (455, ' //Get the length of the received frame in bytes'), (456, ' enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,'), (457, ' (uint8_t *) &n, sizeof(uint16_t));'), (458, ''), (459, ' //Convert the value to host byte order'), (460, ' n = letoh16(n);'), (462, ' //Read the receive status vector (RSV)'), (463, ' enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA,'), (464, ' (uint8_t *) &status, sizeof(uint32_t));'), (466, ' //Convert the value to host byte order'), (467, ' status = letoh32(status);'), (470, ' if((status & RSV_RECEIVED_OK) != 0)'), (473, ' n = MIN(n, ETH_MAX_FRAME_SIZE);'), (475, ' enc624j600ReadBuffer(interface, ENC624J600_CMD_RRXDATA, context->rxBuffer, n);'), (490, ' enc624j600WriteReg(interface, ENC624J600_REG_ERXTAIL, ENC624J600_RX_BUFFER_STOP);'), (494, ' enc624j600WriteReg(interface, ENC624J600_REG_ERXTAIL, context->nextPacket - 2);'), (498, ' enc624j600SetBit(interface, ENC624J600_REG_ECON1, ECON1_PKTDEC);'), (515, ' nicProcessPacket(interface, context->rxBuffer, n, &ancillary);'), (563, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT1, 
hashTable[0]);'), (564, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT2, hashTable[1]);'), (565, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT3, hashTable[2]);'), (566, ' enc624j600WriteReg(interface, ENC624J600_REG_EHT4, hashTable[3]);'), (569, ' TRACE_DEBUG(" EHT1 = %04" PRIX16 "\\r\\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT1));'), (570, ' TRACE_DEBUG(" EHT2 = %04" PRIX16 "\\r\\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT2));'), (571, ' TRACE_DEBUG(" EHT3 = %04" PRIX16 "\\r\\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT3));'), (572, ' TRACE_DEBUG(" EHT4 = %04" PRIX16 "\\r\\n", enc624j600ReadReg(interface, ENC624J600_REG_EHT4));'), (589, ' duplexMode = enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) & ESTAT_PHYDPX;'), (595, ' enc624j600WriteReg(interface, ENC624J600_REG_MACON2, MACON2_DEFER |'), (596, ' MACON2_PADCFG2 | MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1 | MACON2_FULDPX);'), (598, ' enc624j600WriteReg(interface, ENC624J600_REG_MABBIPG, 0x15);'), (604, ' enc624j600WriteReg(interface, ENC624J600_REG_MACON2, MACON2_DEFER |'), (605, ' MACON2_PADCFG2 | MACON2_PADCFG0 | MACON2_TXCRCEN | MACON2_R1);'), (607, ' enc624j600WriteReg(interface, ENC624J600_REG_MABBIPG, 0x12);'), (624, ' enc624j600WriteReg(interface, ENC624J600_REG_EUDAST, 0x1234);'), (626, ' } while(enc624j600ReadReg(interface, ENC624J600_REG_EUDAST) != 0x1234);'), (629, ' while((enc624j600ReadReg(interface, ENC624J600_REG_ESTAT) & ESTAT_CLKRDY) == 0)'), (634, ' enc624j600SetBit(interface, ENC624J600_REG_ECON2, ECON2_ETHRST);'), (640, ' if(enc624j600ReadReg(interface, ENC624J600_REG_EUDAST) != 0x0000)'), (722, ' enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR, MIREGADR_R8 | address);'), (724, ' enc624j600WriteReg(interface, ENC624J600_REG_MIWR, data);'), (727, ' while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) & MISTAT_BUSY) != 0)'), (729, ' }'), (743, ' enc624j600WriteReg(interface, ENC624J600_REG_MIREGADR, MIREGADR_R8 | address);'), (745, ' 
enc624j600WriteReg(interface, ENC624J600_REG_MICMD, MICMD_MIIRD);'), (750, ' while((enc624j600ReadReg(interface, ENC624J600_REG_MISTAT) & MISTAT_BUSY) != 0)'), (752, ' }'), (755, ' enc624j600WriteReg(interface, ENC624J600_REG_MICMD, 0x00);'), (758, ' return enc624j600ReadReg(interface, ENC624J600_REG_MIRD);')]}
138
112
478
2,621
11
70
2
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
CWE-20
1,311
wasm.c
C
r_bin_wasm_get_element_entries
/* radare2 - LGPL - Copyright 2017 - pancake, cgvwzq */ #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_bin.h> #include "wasm.h" // Consume functions static size_t consume_u32 (ut8 *buf, ut8 *max, ut32 *out, ut32 *offset) { size_t n; if (!buf || !max || !out) { return 0; } if (!(n = read_u32_leb128 (buf, max, out)) || n > 5) { return 0; } if (offset) { *offset += n; } return n; } static size_t consume_s32 (ut8 *buf, ut8 *max, st32 *out, ut32 *offset) { size_t n; if (!buf || !max || !out) { return 0; } if (!(n = read_i32_leb128 (buf, max, out)) || n > 5) { return 0; } if (offset) { *offset += n; } return n; } static size_t consume_u8 (ut8 *buf, ut8 *max, ut8 *out, ut32 *offset) { size_t n; ut32 tmp; if (!(n = consume_u32 (buf, max, &tmp, offset)) || n > 1) { return 0; } *out = tmp & 0x7f; return 1; } static size_t consume_s8 (ut8 *buf, ut8 *max, st8 *out, ut32 *offset) { size_t n; ut32 tmp; if (!(n = consume_u32 (buf, max, &tmp, offset)) || n > 1) { return 0; } *out = (st8)(tmp & 0x7f); return 1; } static size_t consume_str (ut8 *buf, ut8 *max, size_t sz, char *out, ut32 *offset) { if (!buf || !max || !out || !sz) { return 0; } if (!(buf + sz < max)) { return 0; } strncpy ((char*)out, (char*)buf, R_MIN (R_BIN_WASM_STRING_LENGTH-1, sz)); if (offset) *offset += sz; return sz; } static size_t consume_init_expr (ut8 *buf, ut8 *max, ut8 eoc, void *out, ut32 *offset) { ut32 i = 0; while (buf + i < max && buf[i] != eoc) { // TODO: calc the expresion with the bytcode (ESIL?) 
i += 1; } if (buf[i] != eoc) { return 0; } if (offset) { *offset += i + 1; } return i + 1; } static size_t consume_locals (ut8 *buf, ut8 *max, ut32 count, RBinWasmCodeEntry *out, ut32 *offset) { ut32 i = 0, j = 0; if (count < 1) return 0; // memory leak if (!(out->locals = (struct r_bin_wasm_local_entry_t*) malloc (sizeof(struct r_bin_wasm_local_entry_t) * count))) { return 0; } while (buf + i < max && j < count) { if (!(consume_u32 (buf + i, max, &out->locals[j].count, &i))) { free (out->locals); return 0; } if (!(consume_s8 (buf + i, max, (st8*)&out->locals[j].type, &i))) { free (out->locals); return 0; } j += 1; } if (offset) *offset += i; return j; } static size_t consume_limits (ut8 *buf, ut8 *max, struct r_bin_wasm_resizable_limits_t *out, ut32 *offset) { ut32 i = 0; if (!(consume_u8 (buf + i, max, &out->flags, &i))) return 0; if (!(consume_u32 (buf + i, max, &out->initial, &i))) return 0; if (out->flags && (!(consume_u32 (buf + i, max, &out->maximum, &i)))) return 0; if (offset) *offset += i; return i; } // Utils static RList *r_bin_wasm_get_sections_by_id (RList *sections, ut8 id) { RBinWasmSection *sec = NULL; RList *ret = NULL; RListIter *iter = NULL; // memory leak if (!(ret = r_list_new ())) { return NULL; } r_list_foreach (sections, iter, sec) { if (sec->id == id) { r_list_append(ret, sec); } } return ret; } #define R_BIN_WASM_VALUETYPETOSTRING(p, type, i) {\ switch(type) {\ case R_BIN_WASM_VALUETYPE_i32:\ strcpy(p, "i32");\ break;\ case R_BIN_WASM_VALUETYPE_i64:\ strcpy(p, "i64");\ break;\ case R_BIN_WASM_VALUETYPE_f32:\ strcpy(p, "f32");\ break;\ case R_BIN_WASM_VALUETYPE_f64:\ strcpy(p, "f64");\ break;\ }\ i+= 3;\ } static char *r_bin_wasm_type_entry_to_string (RBinWasmTypeEntry *ptr) { if (!ptr || ptr->to_str) { return NULL; } char *ret; int p, i = 0, sz; sz = (ptr->param_count + ptr->return_count) * 5 + 9; // memory leak if (!(ret = (char*) malloc (sz * sizeof(char)))) { return NULL; } strcpy (ret + i, "("); i++; for (p = 0; p < ptr->param_count; 
p++ ) { R_BIN_WASM_VALUETYPETOSTRING (ret+i, ptr->param_types[p], i); // i+=3 if (p < ptr->param_count - 1) { strcpy (ret+i, ", "); i += 2; } } strcpy (ret + i, ") -> ("); i += 6; if (ptr->return_count == 1) { R_BIN_WASM_VALUETYPETOSTRING (ret + i, ptr->return_type, i); } strcpy (ret + i, ")"); return ret; } // Parsing static RList *r_bin_wasm_get_type_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmTypeEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmTypeEntry))) { return ret; } if (!(consume_u8 (buf + i, buf + len, &ptr->form, &i))) { free (ptr); return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->param_count, &i))) { free (ptr); return ret; } if (!(i + ptr->param_count < len)) { free (ptr); return ret; } int j; for (j = 0; j < ptr->param_count; j++) { if (!(consume_s8 (buf + i, buf + len, (st8*)&ptr->param_types[j], &i))) { free (ptr); return ret; } } if (!(consume_s8 (buf + i, buf + len, &ptr->return_count, &i))) { free (ptr); return ret; } if (ptr->return_count > 1) { free(ptr); return ret; } if (ptr->return_count == 1) { if (!(consume_s8 (buf + i, buf + len, (st8*)&ptr->return_type, &i))) { free(ptr); return ret; } } ptr->to_str = r_bin_wasm_type_entry_to_string (ptr); r_list_append (ret, ptr); r += 1; } return ret; } static RList *r_bin_wasm_get_import_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmImportEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmImportEntry))) { return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->module_len, &i))) { goto culvert; } if 
(!(consume_str (buf + i, buf + len, ptr->module_len, ptr->module_str, &i))) { goto culvert; } if (!(consume_u32 (buf + i, buf + len, &ptr->field_len, &i))) { goto culvert; } if (!(consume_str (buf + i, buf + len, ptr->field_len, ptr->field_str, &i))) { goto culvert; } if (!(consume_u8 (buf + i, buf + len, &ptr->kind, &i))) { goto culvert; } switch (ptr->kind) { case 0: // Function if (!(consume_u32 (buf + i, buf + len, &ptr->type_f, &i))) { goto sewer; } break; case 1: // Table if (!(consume_u8 (buf + i, buf + len, (ut8*)&ptr->type_t.elem_type, &i))) { goto sewer; // varint7 } if (!(consume_limits (buf + i, buf + len, &ptr->type_t.limits, &i))) { goto sewer; } break; case 2: // Memory if (!(consume_limits (buf + i, buf + len, &ptr->type_m.limits, &i))) { goto sewer; } break; case 3: // Global if (!(consume_u8 (buf + i, buf + len, (ut8*)&ptr->type_g.content_type, &i))) { goto sewer; // varint7 } if (!(consume_u8 (buf + i, buf + len, (ut8*)&ptr->type_g.mutability, &i))) { goto sewer; // varuint1 } break; default: goto sewer; } r_list_append (ret, ptr); r++; } return ret; sewer: ret = NULL; culvert: free (ptr); return ret; } static RList *r_bin_wasm_get_export_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmExportEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmExportEntry))) { return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->field_len, &i))) { free (ptr); return ret; } if (!(consume_str (buf + i, buf + len, ptr->field_len, ptr->field_str, &i))) { free (ptr); return ret; } if (!(consume_u8 (buf + i, buf + len, &ptr->kind, &i))) { free (ptr); return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) { free (ptr); return ret; } r_list_append (ret, ptr); r++; } return ret; } static RList 
*r_bin_wasm_get_code_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmCodeEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, j = 0, r = 0; size_t n = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmCodeEntry))) { return ret; } if (!(n = consume_u32 (buf + i, buf + len, &ptr->body_size, &i))) { free (ptr); return ret; } if (!(i + ptr->body_size - 1 < len)) { free (ptr); return ret; } j = i; if (!(n = consume_u32 (buf + i, buf + len, &ptr->local_count, &i))) { free (ptr); return ret; } if ((n = consume_locals (buf + i, buf + len, ptr->local_count,ptr, &i)) < ptr->local_count) { free (ptr); return ret; } ptr->code = sec->payload_data + i; ptr->len = ptr->body_size - (i - j); i += ptr->len - 1; // consume bytecode if (!(consume_u8 (buf + i, buf + len, &ptr->byte, &i))) { free (ptr); return ret; } if (ptr->byte != R_BIN_WASM_END_OF_CODE) { free (ptr); return ret; } // search 'r' in function_space, if present get signature from types // if export get name r_list_append (ret, ptr); r += 1; } return ret; } static RList *r_bin_wasm_get_data_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmDataEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; size_t n = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmDataEntry))) { return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) { free (ptr); return ret; } if (!(n = consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) { free (ptr); return ret; } ptr->offset.len = n; if (!(consume_u32 (buf + i, buf + len, &ptr->size, &i))) { free (ptr); return ret; } ptr->data = sec->payload_data + i; r_list_append (ret, ptr); r += 
1; } return ret; } static RBinWasmStartEntry *r_bin_wasm_get_start (RBinWasmObj *bin, RBinWasmSection *sec) { RBinWasmStartEntry *ptr; if (!(ptr = R_NEW0 (RBinWasmStartEntry))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 i = 0; if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) { free (ptr); return NULL; } return ptr; } static RList *r_bin_wasm_get_memory_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmMemoryEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmMemoryEntry))) { return ret; } if (!(consume_limits (buf + i, buf + len, &ptr->limits, &i))) { free (ptr); return ret; } r_list_append (ret, ptr); r += 1; } return ret; } static RList *r_bin_wasm_get_table_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmTableEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmTableEntry))) { return ret; } if (!(consume_u8 (buf + i, buf + len, &ptr->element_type, &i))) { free (ptr); return ret; } if (!(consume_limits (buf + i, buf + len, &ptr->limits, &i))) { free (ptr); return ret; } r_list_append (ret, ptr); r += 1; } return ret; } static RList *r_bin_wasm_get_global_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmGlobalEntry *ptr = NULL; int buflen = bin->buf->length; if (sec->payload_data + 32 > buflen) { return NULL; } if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; 
while (i < len && len < buflen && r < count) { if (!(ptr = R_NEW0 (RBinWasmGlobalEntry))) { return ret; } if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, (ut8*)&ptr->content_type, &i))) { goto beach; } if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, &ptr->mutability, &i))) { goto beach; } if (len + 8 > buflen || !(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) { goto beach; } r_list_append (ret, ptr); r++; } return ret; beach: free (ptr); return ret; } static RList *r_bin_wasm_get_element_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmElementEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmElementEntry))) { return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) { free (ptr); return ret; } if (!(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) { free (ptr); return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->num_elem, &i))) { free (ptr); return ret; } ut32 j = 0; while (i < len && j < ptr->num_elem ) { // TODO: allocate space and fill entry ut32 e; if (!(consume_u32 (buf + i, buf + len, &e, &i))) { free (ptr); return ret; } } r_list_append (ret, ptr); r += 1; } return ret; } // Public functions RBinWasmObj *r_bin_wasm_init (RBinFile *arch) { RBinWasmObj *bin = R_NEW0 (RBinWasmObj); if (!bin) { return NULL; } if (!(bin->buf = r_buf_new ())) { free (bin); return NULL; } bin->size = (ut32)arch->buf->length; if (!r_buf_set_bytes (bin->buf, arch->buf->buf, bin->size)) { r_bin_wasm_destroy (arch); free (bin); return NULL; } bin->g_sections = r_bin_wasm_get_sections (bin); // TODO: recursive invocation more natural with streamed parsing // but dependency problems when sections are disordered (against spec) bin->g_types = 
r_bin_wasm_get_types (bin); bin->g_imports = r_bin_wasm_get_imports (bin); bin->g_exports = r_bin_wasm_get_exports (bin); bin->g_tables = r_bin_wasm_get_tables (bin); bin->g_memories = r_bin_wasm_get_memories (bin); bin->g_globals = r_bin_wasm_get_globals (bin); bin->g_codes = r_bin_wasm_get_codes (bin); bin->g_datas = r_bin_wasm_get_datas (bin); // entrypoint from Start section bin->entrypoint = r_bin_wasm_get_entrypoint (bin); return bin; } void r_bin_wasm_destroy (RBinFile *arch) { RBinWasmObj *bin; if (!arch || !arch->o || !arch->o->bin_obj) { return; } bin = arch->o->bin_obj; r_buf_free (bin->buf); r_list_free (bin->g_sections); r_list_free (bin->g_types); r_list_free (bin->g_imports); r_list_free (bin->g_exports); r_list_free (bin->g_tables); r_list_free (bin->g_memories); r_list_free (bin->g_globals); r_list_free (bin->g_codes); r_list_free (bin->g_datas); free (bin->g_start); free (bin); arch->o->bin_obj = NULL; } RList *r_bin_wasm_get_sections (RBinWasmObj *bin) { RList *ret = NULL; RBinWasmSection *ptr = NULL; if (!bin) { return NULL; } if (bin->g_sections) { return bin->g_sections; } if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf; ut32 len = bin->size, i = 8; // skip magic bytes + version while (i < len) { //r_buf_read_* api but it makes sense going through the array directly if (!(ptr = R_NEW0 (RBinWasmSection))) { return ret; } if (!(consume_u8 (buf + i, buf + len, &ptr->id, &i))) { return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->size, &i))) { free(ptr); return NULL; } ptr->count = 0; ptr->offset = i; switch (ptr->id) { case R_BIN_WASM_SECTION_CUSTOM: //eprintf("custom section: 0x%x, ", i); if (!(consume_u32 (buf + i, buf + len, &ptr->name_len, &i))) { free(ptr); return ret; } if (!(consume_str (buf + i, buf + len, ptr->name_len, ptr->name, &i))) { free(ptr); return ret; } //eprintf("%s\n", ptr->name); break; case R_BIN_WASM_SECTION_TYPE: //eprintf("section type: 0x%x, ", i); strcpy (ptr->name, "type"); 
ptr->name_len = 4; break; case R_BIN_WASM_SECTION_IMPORT: //eprintf("section import: 0x%x, ", i); strcpy (ptr->name, "import"); ptr->name_len = 6; break; case R_BIN_WASM_SECTION_FUNCTION: //eprintf("section function: 0x%x, ", i); strcpy (ptr->name, "function"); ptr->name_len = 8; break; case R_BIN_WASM_SECTION_TABLE: //eprintf("section table: 0x%x, ", i); strcpy (ptr->name, "table"); ptr->name_len = 5; break; case R_BIN_WASM_SECTION_MEMORY: //eprintf("section memory: 0x%x, ", i); strcpy (ptr->name, "memory"); ptr->name_len = 6; break; case R_BIN_WASM_SECTION_GLOBAL: //eprintf("section global: 0x%x, ", i); strcpy (ptr->name, "global"); ptr->name_len = 6; break; case R_BIN_WASM_SECTION_EXPORT: //eprintf("section export: 0x%x, ", i); strcpy (ptr->name, "export"); ptr->name_len = 6; break; case R_BIN_WASM_SECTION_START: //eprintf("section start: 0x%x\n", i); strcpy (ptr->name, "start"); ptr->name_len = 5; break; case R_BIN_WASM_SECTION_ELEMENT: //eprintf("section element: 0x%x, ", i); strncpy (ptr->name, "element", R_BIN_WASM_STRING_LENGTH); ptr->name_len = 7; break; case R_BIN_WASM_SECTION_CODE: //eprintf("section code: 0x%x, ", i); strncpy (ptr->name, "code", R_BIN_WASM_STRING_LENGTH); ptr->name_len = 4; break; case R_BIN_WASM_SECTION_DATA: //eprintf("section data: 0x%x, ", i); strncpy (ptr->name, "data", R_BIN_WASM_STRING_LENGTH); ptr->name_len = 4; break; default: eprintf("unkown section id: %d\n", ptr->id); i += ptr->size - 1; // next continue; } if (ptr->id != R_BIN_WASM_SECTION_START && ptr->id != R_BIN_WASM_SECTION_CUSTOM) { if (!(consume_u32 (buf + i, buf + len, &ptr->count, &i))) { free (ptr); return ret; } //eprintf("count %d\n", ptr->count); } ptr->payload_data = i; ptr->payload_len = ptr->size - (i - ptr->offset); r_list_append (ret, ptr); i += ptr->payload_len; // next } bin->g_sections = ret; return ret; } ut32 r_bin_wasm_get_entrypoint (RBinWasmObj *bin) { RList *secs = NULL; RBinWasmStartEntry *start = NULL; RBinWasmSection *sec = NULL; 
RBinWasmCodeEntry *func = NULL; if (!bin || !bin->g_sections) { return 0; } if (bin->entrypoint) { return bin->entrypoint; } if (bin->g_start) { start = bin->g_start; } else if (!(secs = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_START))) { return 0; } else if (!(sec = (RBinWasmSection*) r_list_first (secs))) { return 0; } else { start = r_bin_wasm_get_start (bin, sec); bin->g_start = start; } if (!start) { return 0; } // FIX: entrypoint can be also an import func = r_list_get_n (r_bin_wasm_get_codes (bin), start->index); return (ut32)func? func->code: 0; } RList *r_bin_wasm_get_imports (RBinWasmObj *bin) { RBinWasmSection *import = NULL; RList *imports = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_imports) { return bin->g_imports; } if (!(imports = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_IMPORT))) { return r_list_new(); } // support for multiple import sections against spec if (!(import = (RBinWasmSection*) r_list_first (imports))) { return r_list_new(); } return bin->g_imports = r_bin_wasm_get_import_entries (bin, import); } RList *r_bin_wasm_get_exports (RBinWasmObj *bin) { RBinWasmSection *export = NULL; RList *exports = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_exports) { return bin->g_exports; } if (!(exports= r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_EXPORT))) { return r_list_new(); } // support for multiple export sections against spec if (!(export = (RBinWasmSection*) r_list_first (exports))) { return r_list_new(); } bin->g_exports = r_bin_wasm_get_export_entries (bin, export); return bin->g_exports; } RList *r_bin_wasm_get_types (RBinWasmObj *bin) { RBinWasmSection *type = NULL; RList *types = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_types) { return bin->g_types; } if (!(types = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_TYPE))) { return r_list_new(); } // support for multiple export sections 
against spec if (!(type = (RBinWasmSection*) r_list_first (types))) { return r_list_new(); } bin->g_types = r_bin_wasm_get_type_entries (bin, type); return bin->g_types; } RList *r_bin_wasm_get_tables (RBinWasmObj *bin) { RBinWasmSection *table = NULL; RList *tables = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_tables) { return bin->g_tables; } if (!(tables = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_TABLE))) { return r_list_new(); } // support for multiple export sections against spec if (!(table = (RBinWasmSection*) r_list_first (tables))) { r_list_free (tables); return r_list_new(); } bin->g_tables = r_bin_wasm_get_table_entries (bin, table); r_list_free (tables); return bin->g_tables; } RList *r_bin_wasm_get_memories (RBinWasmObj *bin) { RBinWasmSection *memory; RList *memories; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_memories) { return bin->g_memories; } if (!(memories = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_MEMORY))) { return r_list_new(); } // support for multiple export sections against spec if (!(memory = (RBinWasmSection*) r_list_first (memories))) { return r_list_new(); } bin->g_memories = r_bin_wasm_get_memory_entries (bin, memory); return bin->g_memories; } RList *r_bin_wasm_get_globals (RBinWasmObj *bin) { RBinWasmSection *global = NULL; RList *globals = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_globals) { return bin->g_globals; } if (!(globals = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_GLOBAL))) { return r_list_new(); } // support for multiple export sections against spec if (!(global = (RBinWasmSection*) r_list_first (globals))) { return r_list_new(); } bin->g_globals = r_bin_wasm_get_global_entries (bin, global); return bin->g_globals; } RList *r_bin_wasm_get_elements (RBinWasmObj *bin) { RBinWasmSection *element = NULL; RList *elements = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_elements) 
{ return bin->g_elements; } if (!(elements = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_ELEMENT))) { return r_list_new(); } // support for multiple export sections against spec if (!(element = (RBinWasmSection*) r_list_first (elements))) { return r_list_new(); } bin->g_elements = r_bin_wasm_get_element_entries (bin, element); return bin->g_elements; } RList *r_bin_wasm_get_codes (RBinWasmObj *bin) { RBinWasmSection *code = NULL;; RList *codes = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_codes) { return bin->g_codes; } if (!(codes = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_CODE))) { return r_list_new(); } // support for multiple export sections against spec if (!(code = (RBinWasmSection*) r_list_first (codes))) { return r_list_new(); } bin->g_codes = r_bin_wasm_get_code_entries (bin, code); return bin->g_codes; } RList *r_bin_wasm_get_datas (RBinWasmObj *bin) { RBinWasmSection *data = NULL; RList *datas = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_datas) { return bin->g_datas; } if (!(datas = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_DATA))) { return r_list_new(); } // support for multiple export sections against spec if (!(data = (RBinWasmSection*) r_list_first (datas))) { return r_list_new(); } bin->g_datas = r_bin_wasm_get_data_entries (bin, data); return bin->g_datas; }
/* radare2 - LGPL - Copyright 2017 - pancake, cgvwzq */

#include <r_types.h>
#include <r_util.h>
#include <r_lib.h>
#include <r_bin.h>

#include "wasm.h"

// Consume functions

/* Decode a LEB128 varuint32 from buf (bounded by max) into out.
 * Returns bytes consumed (1..5) and advances *offset, or 0 on error. */
static size_t consume_u32 (ut8 *buf, ut8 *max, ut32 *out, ut32 *offset) {
	size_t n;
	if (!buf || !max || !out) {
		return 0;
	}
	if (!(n = read_u32_leb128 (buf, max, out)) || n > 5) {
		return 0;
	}
	if (offset) {
		*offset += n;
	}
	return n;
}

/* Signed LEB128 varint32 counterpart of consume_u32. */
static size_t consume_s32 (ut8 *buf, ut8 *max, st32 *out, ut32 *offset) {
	size_t n;
	if (!buf || !max || !out) {
		return 0;
	}
	if (!(n = read_i32_leb128 (buf, max, out)) || n > 5) {
		return 0;
	}
	if (offset) {
		*offset += n;
	}
	return n;
}

/* Single-byte varuint7: multi-byte encodings are rejected. */
static size_t consume_u8 (ut8 *buf, ut8 *max, ut8 *out, ut32 *offset) {
	size_t n;
	ut32 tmp;
	if (!(n = consume_u32 (buf, max, &tmp, offset)) || n > 1) {
		return 0;
	}
	*out = tmp & 0x7f;
	return 1;
}

/* Single-byte varint7 counterpart of consume_u8. */
static size_t consume_s8 (ut8 *buf, ut8 *max, st8 *out, ut32 *offset) {
	size_t n;
	ut32 tmp;
	if (!(n = consume_u32 (buf, max, &tmp, offset)) || n > 1) {
		return 0;
	}
	*out = (st8)(tmp & 0x7f);
	return 1;
}

/* Copy an sz-byte name, truncated to the fixed-size entry buffers.
 * NOTE(review): relies on out being zero-initialized (R_NEW0) for NUL
 * termination when sz >= R_BIN_WASM_STRING_LENGTH - 1. */
static size_t consume_str (ut8 *buf, ut8 *max, size_t sz, char *out, ut32 *offset) {
	if (!buf || !max || !out || !sz) {
		return 0;
	}
	if (!(buf + sz < max)) {
		return 0;
	}
	strncpy ((char*)out, (char*)buf, R_MIN (R_BIN_WASM_STRING_LENGTH - 1, sz));
	if (offset) {
		*offset += sz;
	}
	return sz;
}

/* Skip an init_expr up to and including its eoc terminator byte. */
static size_t consume_init_expr (ut8 *buf, ut8 *max, ut8 eoc, void *out, ut32 *offset) {
	ut32 i = 0;
	while (buf + i < max && buf[i] != eoc) {
		// TODO: calc the expresion with the bytcode (ESIL?)
		i++;
	}
	// the loop may stop at the buffer end without seeing eoc; re-check the
	// bound before dereferencing (buf[i] used to be read out of bounds here)
	if (buf + i >= max || buf[i] != eoc) {
		return 0;
	}
	if (offset) {
		*offset += i + 1;
	}
	return i + 1;
}

/* Read 'count' local declarations into out->locals (allocated here).
 * Returns the number of locals read; on failure out->locals is NULLed
 * so callers can free it unconditionally. */
static size_t consume_locals (ut8 *buf, ut8 *max, ut32 count, RBinWasmCodeEntry *out, ut32 *offset) {
	ut32 i = 0, j = 0;
	if (count < 1) {
		return 0;
	}
	if (!(out->locals = (struct r_bin_wasm_local_entry_t*) malloc (sizeof (struct r_bin_wasm_local_entry_t) * count))) {
		return 0;
	}
	while (buf + i < max && j < count) {
		if (!(consume_u32 (buf + i, max, &out->locals[j].count, &i))) {
			free (out->locals);
			out->locals = NULL; // avoid dangling pointer / double free
			return 0;
		}
		if (!(consume_s8 (buf + i, max, (st8*)&out->locals[j].type, &i))) {
			free (out->locals);
			out->locals = NULL;
			return 0;
		}
		j += 1;
	}
	if (offset) {
		*offset += i;
	}
	return j;
}

/* Read a resizable_limits record: flags, initial and optional maximum. */
static size_t consume_limits (ut8 *buf, ut8 *max, struct r_bin_wasm_resizable_limits_t *out, ut32 *offset) {
	ut32 i = 0;
	if (!(consume_u8 (buf + i, max, &out->flags, &i))) {
		return 0;
	}
	if (!(consume_u32 (buf + i, max, &out->initial, &i))) {
		return 0;
	}
	if (out->flags && (!(consume_u32 (buf + i, max, &out->maximum, &i)))) {
		return 0;
	}
	if (offset) {
		*offset += i;
	}
	return i;
}

// Utils

/* Collect the sections matching id into a new list (entries not owned). */
static RList *r_bin_wasm_get_sections_by_id (RList *sections, ut8 id) {
	RBinWasmSection *sec = NULL;
	RList *ret = NULL;
	RListIter *iter = NULL;
	if (!(ret = r_list_new ())) {
		return NULL;
	}
	r_list_foreach (sections, iter, sec) {
		if (sec->id == id) {
			r_list_append (ret, sec);
		}
	}
	return ret;
}

#define R_BIN_WASM_VALUETYPETOSTRING(p, type, i) {\
	switch (type) {\
	case R_BIN_WASM_VALUETYPE_i32:\
		strcpy (p, "i32");\
		break;\
	case R_BIN_WASM_VALUETYPE_i64:\
		strcpy (p, "i64");\
		break;\
	case R_BIN_WASM_VALUETYPE_f32:\
		strcpy (p, "f32");\
		break;\
	case R_BIN_WASM_VALUETYPE_f64:\
		strcpy (p, "f64");\
		break;\
	}\
	i += 3;\
}

/* Render a type entry as "(i32, i64) -> (f32)". Caller frees the result. */
static char *r_bin_wasm_type_entry_to_string (RBinWasmTypeEntry *ptr) {
	if (!ptr || ptr->to_str) {
		return NULL;
	}
	char *ret;
	int p, i = 0, sz;
	// 5 bytes per type ("i32" + ", "), plus "(", ") -> (", ")" and NUL
	sz = (ptr->param_count + ptr->return_count) * 5 + 9;
	if (!(ret = (char*) malloc (sz * sizeof (char)))) {
		return NULL;
	}
	strcpy (ret + i, "(");
	i++;
	for (p = 0; p < ptr->param_count; p++) {
		R_BIN_WASM_VALUETYPETOSTRING (ret + i, ptr->param_types[p], i); // i += 3
		if (p < ptr->param_count - 1) {
			strcpy (ret + i, ", ");
			i += 2;
		}
	}
	strcpy (ret + i, ") -> (");
	i += 6;
	if (ptr->return_count == 1) {
		R_BIN_WASM_VALUETYPETOSTRING (ret + i, ptr->return_type, i);
	}
	strcpy (ret + i, ")");
	return ret;
}

// Parsing

/* Parse the Type section payload into a list of RBinWasmTypeEntry. */
static RList *r_bin_wasm_get_type_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmTypeEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	while (i < len && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmTypeEntry))) {
			return ret;
		}
		if (!(consume_u8 (buf + i, buf + len, &ptr->form, &i))) {
			free (ptr);
			return ret;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->param_count, &i))) {
			free (ptr);
			return ret;
		}
		// ensure the declared parameter list fits in the payload
		if (!(i + ptr->param_count < len)) {
			free (ptr);
			return ret;
		}
		int j;
		for (j = 0; j < ptr->param_count; j++) {
			if (!(consume_s8 (buf + i, buf + len, (st8*)&ptr->param_types[j], &i))) {
				free (ptr);
				return ret;
			}
		}
		if (!(consume_s8 (buf + i, buf + len, &ptr->return_count, &i))) {
			free (ptr);
			return ret;
		}
		if (ptr->return_count > 1) { // MVP allows at most one return value
			free (ptr);
			return ret;
		}
		if (ptr->return_count == 1) {
			if (!(consume_s8 (buf + i, buf + len, (st8*)&ptr->return_type, &i))) {
				free (ptr);
				return ret;
			}
		}
		ptr->to_str = r_bin_wasm_type_entry_to_string (ptr);
		r_list_append (ret, ptr);
		r += 1;
	}
	return ret;
}

/* Parse the Import section. Truncated entries (culvert) return the partial
 * list; malformed entry bodies (sewer) signal hard failure with NULL. */
static RList *r_bin_wasm_get_import_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmImportEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	while (i < len && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmImportEntry))) {
			return ret;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->module_len, &i))) {
			goto culvert;
		}
		if (!(consume_str (buf + i, buf + len, ptr->module_len, ptr->module_str, &i))) {
			goto culvert;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->field_len, &i))) {
			goto culvert;
		}
		if (!(consume_str (buf + i, buf + len, ptr->field_len, ptr->field_str, &i))) {
			goto culvert;
		}
		if (!(consume_u8 (buf + i, buf + len, &ptr->kind, &i))) {
			goto culvert;
		}
		switch (ptr->kind) {
		case 0: // Function
			if (!(consume_u32 (buf + i, buf + len, &ptr->type_f, &i))) {
				goto sewer;
			}
			break;
		case 1: // Table
			if (!(consume_u8 (buf + i, buf + len, (ut8*)&ptr->type_t.elem_type, &i))) {
				goto sewer; // varint7
			}
			if (!(consume_limits (buf + i, buf + len, &ptr->type_t.limits, &i))) {
				goto sewer;
			}
			break;
		case 2: // Memory
			if (!(consume_limits (buf + i, buf + len, &ptr->type_m.limits, &i))) {
				goto sewer;
			}
			break;
		case 3: // Global
			if (!(consume_u8 (buf + i, buf + len, (ut8*)&ptr->type_g.content_type, &i))) {
				goto sewer; // varint7
			}
			if (!(consume_u8 (buf + i, buf + len, (ut8*)&ptr->type_g.mutability, &i))) {
				goto sewer; // varuint1
			}
			break;
		default:
			goto sewer;
		}
		r_list_append (ret, ptr);
		r++;
	}
	return ret;
sewer:
	// the list was previously only NULLed here, leaking it and its entries
	r_list_free (ret);
	ret = NULL;
culvert:
	free (ptr);
	return ret;
}

/* Parse the Export section into RBinWasmExportEntry items. */
static RList *r_bin_wasm_get_export_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmExportEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	while (i < len && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmExportEntry))) {
			return ret;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->field_len, &i))) {
			free (ptr);
			return ret;
		}
		if (!(consume_str (buf + i, buf + len, ptr->field_len, ptr->field_str, &i))) {
			free (ptr);
			return ret;
		}
		if (!(consume_u8 (buf + i, buf + len, &ptr->kind, &i))) {
			free (ptr);
			return ret;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) {
			free (ptr);
			return ret;
		}
		r_list_append (ret, ptr);
		r++;
	}
	return ret;
}

/* Parse the Code section: per-function body size, locals and bytecode span. */
static RList *r_bin_wasm_get_code_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmCodeEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, j = 0, r = 0;
	size_t n = 0;
	while (i < len && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmCodeEntry))) {
			return ret;
		}
		if (!(n = consume_u32 (buf + i, buf + len, &ptr->body_size, &i))) {
			free (ptr);
			return ret;
		}
		// the whole declared body must fit inside the payload
		if (!(i + ptr->body_size - 1 < len)) {
			free (ptr);
			return ret;
		}
		j = i;
		if (!(n = consume_u32 (buf + i, buf + len, &ptr->local_count, &i))) {
			free (ptr);
			return ret;
		}
		if ((n = consume_locals (buf + i, buf + len, ptr->local_count, ptr, &i)) < ptr->local_count) {
			free (ptr->locals); // NULL-safe; consume_locals NULLs it on failure
			free (ptr);
			return ret;
		}
		ptr->code = sec->payload_data + i;
		ptr->len = ptr->body_size - (i - j);
		i += ptr->len - 1; // consume bytecode
		if (!(consume_u8 (buf + i, buf + len, &ptr->byte, &i))) {
			free (ptr->locals);
			free (ptr);
			return ret;
		}
		if (ptr->byte != R_BIN_WASM_END_OF_CODE) {
			free (ptr->locals);
			free (ptr);
			return ret;
		}
		// search 'r' in function_space, if present get signature from types
		// if export get name
		r_list_append (ret, ptr);
		r += 1;
	}
	return ret;
}

/* Parse the Data section. buflen caps every read at the real end of the
 * file buffer so a crafted payload_len cannot drive out-of-bounds reads. */
static RList *r_bin_wasm_get_data_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmDataEntry *ptr = NULL;
	ut32 len = sec->payload_len;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	// bytes actually available from the payload start
	int buflen = bin->buf->length - (ut32)sec->payload_data;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	size_t n = 0;
	while (i < len && len < buflen && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmDataEntry))) {
			return ret;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) {
			goto beach;
		}
		if (i + 4 >= buflen) {
			goto beach;
		}
		if (!(n = consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) {
			goto beach;
		}
		ptr->offset.len = n;
		if (!(consume_u32 (buf + i, buf + len, &ptr->size, &i))) {
			goto beach;
		}
		if (i + 4 >= buflen) {
			goto beach;
		}
		ptr->data = sec->payload_data + i;
		r_list_append (ret, ptr);
		r += 1;
	}
	return ret;
beach:
	free (ptr);
	return ret;
}

/* Parse the Start section: a single function index. Caller owns the result. */
static RBinWasmStartEntry *r_bin_wasm_get_start (RBinWasmObj *bin, RBinWasmSection *sec) {
	RBinWasmStartEntry *ptr;
	if (!(ptr = R_NEW0 (RBinWasmStartEntry))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 i = 0;
	if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) {
		free (ptr);
		return NULL;
	}
	return ptr;
}

/* Parse the Memory section: one resizable-limits record per entry. */
static RList *r_bin_wasm_get_memory_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmMemoryEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	while (i < len && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmMemoryEntry))) {
			return ret;
		}
		if (!(consume_limits (buf + i, buf + len, &ptr->limits, &i))) {
			free (ptr);
			return ret;
		}
		r_list_append (ret, ptr);
		r += 1;
	}
	return ret;
}

/* Parse the Table section: element type plus resizable limits. */
static RList *r_bin_wasm_get_table_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmTableEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	while (i < len && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmTableEntry))) {
			return ret;
		}
		if (!(consume_u8 (buf + i, buf + len, &ptr->element_type, &i))) {
			free (ptr);
			return ret;
		}
		if (!(consume_limits (buf + i, buf + len, &ptr->limits, &i))) {
			free (ptr);
			return ret;
		}
		r_list_append (ret, ptr);
		r += 1;
	}
	return ret;
}

/* Parse the Global section, bounded by the bytes available from the
 * payload start so truncated sections cannot cause out-of-bounds reads. */
static RList *r_bin_wasm_get_global_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmGlobalEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	int buflen = bin->buf->length - (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	while (i < len && len < buflen && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmGlobalEntry))) {
			return ret;
		}
		if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, (ut8*)&ptr->content_type, &i))) {
			goto beach;
		}
		if (len + 8 > buflen || !(consume_u8 (buf + i, buf + len, &ptr->mutability, &i))) {
			goto beach;
		}
		if (len + 8 > buflen || !(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) {
			goto beach;
		}
		r_list_append (ret, ptr);
		r++;
	}
	return ret;
beach:
	free (ptr);
	return ret;
}

/* Parse the Element section, bounded by buflen like the data/global parsers. */
static RList *r_bin_wasm_get_element_entries (RBinWasmObj *bin, RBinWasmSection *sec) {
	RList *ret = NULL;
	RBinWasmElementEntry *ptr = NULL;
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf + (ut32)sec->payload_data;
	int buflen = bin->buf->length - (ut32)sec->payload_data;
	ut32 len = sec->payload_len;
	ut32 count = sec->count;
	ut32 i = 0, r = 0;
	while (i < len && len < buflen && r < count) {
		if (!(ptr = R_NEW0 (RBinWasmElementEntry))) {
			return ret;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) {
			goto beach;
		}
		if (!(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) {
			goto beach;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->num_elem, &i))) {
			goto beach;
		}
		ut32 j = 0;
		while (i < len && j < ptr->num_elem) {
			// TODO: allocate space and fill entry
			ut32 e;
			if (!(consume_u32 (buf + i, buf + len, &e, &i))) {
				free (ptr);
				return ret;
			}
		}
		r_list_append (ret, ptr);
		r += 1;
	}
	return ret;
beach:
	free (ptr);
	return ret;
}

// Public functions

/* Build the RBinWasmObj for a loaded file: copy the buffer, parse every
 * section once and cache the results on the object. */
RBinWasmObj *r_bin_wasm_init (RBinFile *arch) {
	RBinWasmObj *bin = R_NEW0 (RBinWasmObj);
	if (!bin) {
		return NULL;
	}
	if (!(bin->buf = r_buf_new ())) {
		free (bin);
		return NULL;
	}
	bin->size = (ut32)arch->buf->length;
	if (!r_buf_set_bytes (bin->buf, arch->buf->buf, bin->size)) {
		r_bin_wasm_destroy (arch);
		free (bin);
		return NULL;
	}
	bin->g_sections = r_bin_wasm_get_sections (bin);
	// TODO: recursive invocation more natural with streamed parsing
	// but dependency problems when sections are disordered (against spec)
	bin->g_types = r_bin_wasm_get_types (bin);
	bin->g_imports = r_bin_wasm_get_imports (bin);
	bin->g_exports = r_bin_wasm_get_exports (bin);
	bin->g_tables = r_bin_wasm_get_tables (bin);
	bin->g_memories = r_bin_wasm_get_memories (bin);
	bin->g_globals = r_bin_wasm_get_globals (bin);
	bin->g_codes = r_bin_wasm_get_codes (bin);
	bin->g_datas = r_bin_wasm_get_datas (bin);
	// entrypoint from Start section
	bin->entrypoint = r_bin_wasm_get_entrypoint (bin);
	return bin;
}

/* Release every cached list and the buffer owned by the bin object. */
void r_bin_wasm_destroy (RBinFile *arch) {
	RBinWasmObj *bin;
	if (!arch || !arch->o || !arch->o->bin_obj) {
		return;
	}
	bin = arch->o->bin_obj;
	r_buf_free (bin->buf);
	r_list_free (bin->g_sections);
	r_list_free (bin->g_types);
	r_list_free (bin->g_imports);
	r_list_free (bin->g_exports);
	r_list_free (bin->g_tables);
	r_list_free (bin->g_memories);
	r_list_free (bin->g_globals);
	r_list_free (bin->g_codes);
	r_list_free (bin->g_datas);
	free (bin->g_start);
	free (bin);
	arch->o->bin_obj = NULL;
}

/* Walk the file once and record every section header (id, size, name,
 * payload bounds). Results are cached in bin->g_sections. */
RList *r_bin_wasm_get_sections (RBinWasmObj *bin) {
	RList *ret = NULL;
	RBinWasmSection *ptr = NULL;
	if (!bin) {
		return NULL;
	}
	if (bin->g_sections) {
		return bin->g_sections;
	}
	if (!(ret = r_list_newf ((RListFree)free))) {
		return NULL;
	}
	ut8* buf = bin->buf->buf;
	ut32 len = bin->size, i = 8; // skip magic bytes + version
	while (i < len) {
		//r_buf_read_* api but it makes sense going through the array directly
		if (!(ptr = R_NEW0 (RBinWasmSection))) {
			return ret;
		}
		if (!(consume_u8 (buf + i, buf + len, &ptr->id, &i))) {
			free (ptr); // was leaked on this path
			return ret;
		}
		if (!(consume_u32 (buf + i, buf + len, &ptr->size, &i))) {
			free (ptr);
			return NULL;
		}
		ptr->count = 0;
		ptr->offset = i;
		switch (ptr->id) {
		case R_BIN_WASM_SECTION_CUSTOM:
			if (!(consume_u32 (buf + i, buf + len, &ptr->name_len, &i))) {
				free (ptr);
				return ret;
			}
			if (!(consume_str (buf + i, buf + len, ptr->name_len, ptr->name, &i))) {
				free (ptr);
				return ret;
			}
			break;
		case R_BIN_WASM_SECTION_TYPE:
			strcpy (ptr->name, "type");
			ptr->name_len = 4;
			break;
		case R_BIN_WASM_SECTION_IMPORT:
			strcpy (ptr->name, "import");
			ptr->name_len = 6;
			break;
		case R_BIN_WASM_SECTION_FUNCTION:
			strcpy (ptr->name, "function");
			ptr->name_len = 8;
			break;
		case R_BIN_WASM_SECTION_TABLE:
			strcpy (ptr->name, "table");
			ptr->name_len = 5;
			break;
		case R_BIN_WASM_SECTION_MEMORY:
			strcpy (ptr->name, "memory");
			ptr->name_len = 6;
			break;
		case R_BIN_WASM_SECTION_GLOBAL:
			strcpy (ptr->name, "global");
			ptr->name_len = 6;
			break;
		case R_BIN_WASM_SECTION_EXPORT:
			strcpy (ptr->name, "export");
			ptr->name_len = 6;
			break;
		case R_BIN_WASM_SECTION_START:
			strcpy (ptr->name, "start");
			ptr->name_len = 5;
			break;
		case R_BIN_WASM_SECTION_ELEMENT:
			strncpy (ptr->name, "element", R_BIN_WASM_STRING_LENGTH);
			ptr->name_len = 7;
			break;
		case R_BIN_WASM_SECTION_CODE:
			strncpy (ptr->name, "code", R_BIN_WASM_STRING_LENGTH);
			ptr->name_len = 4;
			break;
		case R_BIN_WASM_SECTION_DATA:
			strncpy (ptr->name, "data", R_BIN_WASM_STRING_LENGTH);
			ptr->name_len = 4;
			break;
		default:
			eprintf("unkown section id: %d\n", ptr->id);
			i += ptr->size - 1; // next
			free (ptr); // was leaked for every unknown section id
			continue;
		}
		if (ptr->id != R_BIN_WASM_SECTION_START
				&& ptr->id != R_BIN_WASM_SECTION_CUSTOM) {
			if (!(consume_u32 (buf + i, buf + len, &ptr->count, &i))) {
				free (ptr);
				return ret;
			}
		}
		ptr->payload_data = i;
		ptr->payload_len = ptr->size - (i - ptr->offset);
		r_list_append (ret, ptr);
		i += ptr->payload_len; // next
	}
	bin->g_sections = ret;
	return ret;
}

/* Resolve the entrypoint: code offset of the function referenced by the
 * Start section (cached). Returns 0 when there is no start function. */
ut32 r_bin_wasm_get_entrypoint (RBinWasmObj *bin) {
	RList *secs = NULL;
	RBinWasmStartEntry *start = NULL;
	RBinWasmSection *sec = NULL;
	RBinWasmCodeEntry *func = NULL;
	if (!bin || !bin->g_sections) {
		return 0;
	}
	if (bin->entrypoint) {
		return bin->entrypoint;
	}
	if (bin->g_start) {
		start = bin->g_start;
	} else if (!(secs = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_START))) {
		return 0;
	} else if (!(sec = (RBinWasmSection*) r_list_first (secs))) {
		return 0;
	} else {
		start = r_bin_wasm_get_start (bin, sec);
		bin->g_start = start;
	}
	if (!start) {
		return 0;
	}
	// FIX: entrypoint can be also an import
	func = r_list_get_n (r_bin_wasm_get_codes (bin), start->index);
	return (ut32)func? func->code: 0;
}

/* Cached accessor for the Import section entries. */
RList *r_bin_wasm_get_imports (RBinWasmObj *bin) {
	RBinWasmSection *import = NULL;
	RList *imports = NULL;
	if (!bin || !bin->g_sections) {
		return NULL;
	}
	if (bin->g_imports) {
		return bin->g_imports;
	}
	if (!(imports = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_IMPORT))) {
		return r_list_new();
	}
	// support for multiple import sections against spec
	if (!(import = (RBinWasmSection*) r_list_first (imports))) {
		return r_list_new();
	}
	return bin->g_imports = r_bin_wasm_get_import_entries (bin, import);
}

/* Cached accessor for the Export section entries. */
RList *r_bin_wasm_get_exports (RBinWasmObj *bin) {
	RBinWasmSection *export = NULL;
	RList *exports = NULL;
	if (!bin || !bin->g_sections) {
		return NULL;
	}
	if (bin->g_exports) {
		return bin->g_exports;
	}
	if (!(exports= r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_EXPORT))) {
		return r_list_new();
	}
	// support for multiple export sections against spec
	if (!(export = (RBinWasmSection*) r_list_first (exports))) {
		return r_list_new();
	}
	bin->g_exports = r_bin_wasm_get_export_entries (bin, export);
	return bin->g_exports;
}

/* Cached accessor for the Type section entries. */
RList *r_bin_wasm_get_types (RBinWasmObj *bin) {
	RBinWasmSection *type = NULL;
	RList *types = NULL;
	if (!bin || !bin->g_sections) {
		return NULL;
	}
	if (bin->g_types)
{ return bin->g_types; } if (!(types = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_TYPE))) { return r_list_new(); } // support for multiple export sections against spec if (!(type = (RBinWasmSection*) r_list_first (types))) { return r_list_new(); } bin->g_types = r_bin_wasm_get_type_entries (bin, type); return bin->g_types; } RList *r_bin_wasm_get_tables (RBinWasmObj *bin) { RBinWasmSection *table = NULL; RList *tables = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_tables) { return bin->g_tables; } if (!(tables = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_TABLE))) { return r_list_new(); } // support for multiple export sections against spec if (!(table = (RBinWasmSection*) r_list_first (tables))) { r_list_free (tables); return r_list_new(); } bin->g_tables = r_bin_wasm_get_table_entries (bin, table); r_list_free (tables); return bin->g_tables; } RList *r_bin_wasm_get_memories (RBinWasmObj *bin) { RBinWasmSection *memory; RList *memories; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_memories) { return bin->g_memories; } if (!(memories = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_MEMORY))) { return r_list_new(); } // support for multiple export sections against spec if (!(memory = (RBinWasmSection*) r_list_first (memories))) { return r_list_new(); } bin->g_memories = r_bin_wasm_get_memory_entries (bin, memory); return bin->g_memories; } RList *r_bin_wasm_get_globals (RBinWasmObj *bin) { RBinWasmSection *global = NULL; RList *globals = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_globals) { return bin->g_globals; } if (!(globals = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_GLOBAL))) { return r_list_new(); } // support for multiple export sections against spec if (!(global = (RBinWasmSection*) r_list_first (globals))) { return r_list_new(); } bin->g_globals = r_bin_wasm_get_global_entries (bin, global); return bin->g_globals; 
} RList *r_bin_wasm_get_elements (RBinWasmObj *bin) { RBinWasmSection *element = NULL; RList *elements = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_elements) { return bin->g_elements; } if (!(elements = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_ELEMENT))) { return r_list_new(); } // support for multiple export sections against spec if (!(element = (RBinWasmSection*) r_list_first (elements))) { return r_list_new(); } bin->g_elements = r_bin_wasm_get_element_entries (bin, element); return bin->g_elements; } RList *r_bin_wasm_get_codes (RBinWasmObj *bin) { RBinWasmSection *code = NULL;; RList *codes = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_codes) { return bin->g_codes; } if (!(codes = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_CODE))) { return r_list_new(); } // support for multiple export sections against spec if (!(code = (RBinWasmSection*) r_list_first (codes))) { return r_list_new(); } bin->g_codes = r_bin_wasm_get_code_entries (bin, code); return bin->g_codes; } RList *r_bin_wasm_get_datas (RBinWasmObj *bin) { RBinWasmSection *data = NULL; RList *datas = NULL; if (!bin || !bin->g_sections) { return NULL; } if (bin->g_datas) { return bin->g_datas; } if (!(datas = r_bin_wasm_get_sections_by_id (bin->g_sections, R_BIN_WASM_SECTION_DATA))) { return r_list_new(); } // support for multiple export sections against spec if (!(data = (RBinWasmSection*) r_list_first (datas))) { return r_list_new(); } bin->g_datas = r_bin_wasm_get_data_entries (bin, data); return bin->g_datas; }
static RList *r_bin_wasm_get_element_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmElementEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && r < count) { if (!(ptr = R_NEW0 (RBinWasmElementEntry))) { return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) { free (ptr); return ret; } if (!(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) { free (ptr); return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->num_elem, &i))) { free (ptr); return ret; } ut32 j = 0; while (i < len && j < ptr->num_elem ) { // TODO: allocate space and fill entry ut32 e; if (!(consume_u32 (buf + i, buf + len, &e, &i))) { free (ptr); return ret; } } r_list_append (ret, ptr); r += 1; } return ret; }
static RList *r_bin_wasm_get_element_entries (RBinWasmObj *bin, RBinWasmSection *sec) { RList *ret = NULL; RBinWasmElementEntry *ptr = NULL; if (!(ret = r_list_newf ((RListFree)free))) { return NULL; } ut8* buf = bin->buf->buf + (ut32)sec->payload_data; int buflen = bin->buf->length - (ut32)sec->payload_data; ut32 len = sec->payload_len; ut32 count = sec->count; ut32 i = 0, r = 0; while (i < len && len < buflen && r < count) { if (!(ptr = R_NEW0 (RBinWasmElementEntry))) { return ret; } if (!(consume_u32 (buf + i, buf + len, &ptr->index, &i))) { goto beach; } if (!(consume_init_expr (buf + i, buf + len, R_BIN_WASM_END_OF_CODE, NULL, &i))) { goto beach; } if (!(consume_u32 (buf + i, buf + len, &ptr->num_elem, &i))) { goto beach; } ut32 j = 0; while (i < len && j < ptr->num_elem) { // TODO: allocate space and fill entry ut32 e; if (!(consume_u32 (buf + i, buf + len, &e, &i))) { free (ptr); return ret; } } r_list_append (ret, ptr); r += 1; } return ret; beach: free (ptr); return ret; }
{'added': [(70, ''), (75, '\t\ti++;'), (454, '\tut32 len = sec->payload_len;'), (461, '\tint buflen = bin->buf->length - (ut32)sec->payload_data;'), (466, '\twhile (i < len && len < buflen && r < count) {'), (471, '\t\t\tgoto beach;'), (472, '\t\t}'), (473, '\t\tif (i + 4 >= buflen) {'), (474, '\t\t\tgoto beach;'), (477, '\t\t\tgoto beach;'), (481, '\t\t\tgoto beach;'), (482, '\t\t}'), (483, '\t\tif (i + 4 >= buflen) {'), (484, '\t\t\tgoto beach;'), (493, '\treturn ret;'), (494, 'beach:'), (495, '\tfree (ptr);'), (602, '\tint buflen = bin->buf->length - (ut32)sec->payload_data;'), (639, '\tint buflen = bin->buf->length - (ut32)sec->payload_data;'), (644, '\twhile (i < len && len < buflen && r < count) {'), (649, '\t\t\tgoto beach;'), (652, '\t\t\tgoto beach;'), (655, '\t\t\tgoto beach;'), (658, '\t\twhile (i < len && j < ptr->num_elem) {'), (670, '\treturn ret;'), (671, 'beach:'), (672, '\tfree (ptr);')], 'deleted': [(74, '\t\ti += 1;'), (451, ''), (460, '\tut32 len = sec->payload_len;'), (465, '\twhile (i < len && r < count) {'), (466, ''), (470, ''), (472, '\t\t\tfree (ptr);'), (473, '\t\t\treturn ret;'), (475, ''), (477, '\t\t\tfree (ptr);'), (478, '\t\t\treturn ret;'), (480, ''), (482, ''), (484, '\t\t\tfree (ptr);'), (485, '\t\t\treturn ret;'), (487, ''), (495, ''), (596, '\tint buflen = bin->buf->length;'), (597, '\tif (sec->payload_data + 32 > buflen) {'), (598, '\t\treturn NULL;'), (599, '\t}'), (634, ''), (647, '\twhile (i < len && r < count) {'), (648, ''), (652, ''), (654, '\t\t\tfree (ptr);'), (655, '\t\t\treturn ret;'), (657, ''), (659, '\t\t\tfree (ptr);'), (660, '\t\t\treturn ret;'), (662, ''), (664, '\t\t\tfree (ptr);'), (665, '\t\t\treturn ret;'), (667, ''), (669, '\t\twhile (i < len && j < ptr->num_elem\t) {'), (677, ''), (681, ''), (683, '')]}
27
38
873
6,063
39
282
11
https://github.com/radare/radare2
CVE-2017-7854
CWE-125
2,655
http.c
C
http_buf_read
/* * HTTP protocol for ffmpeg client * Copyright (c) 2000, 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #if CONFIG_ZLIB #include <zlib.h> #endif /* CONFIG_ZLIB */ #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" #include "libavutil/time.h" #include "avformat.h" #include "http.h" #include "httpauth.h" #include "internal.h" #include "network.h" #include "os_support.h" #include "url.h" /* XXX: POST protocol is not completely implemented because ffmpeg uses * only a subset of it. */ /* The IO buffer size is unrelated to the max URL size in itself, but needs * to be large enough to fit the full request headers (including long * path names). */ #define BUFFER_SIZE MAX_URL_SIZE #define MAX_REDIRECTS 8 #define HTTP_SINGLE 1 #define HTTP_MUTLI 2 typedef enum { LOWER_PROTO, READ_HEADERS, WRITE_REPLY_HEADERS, FINISH }HandshakeState; typedef struct HTTPContext { const AVClass *class; URLContext *hd; unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end; int line_count; int http_code; /* Used if "Transfer-Encoding: chunked" otherwise -1. 
*/ int64_t chunksize; int64_t off, end_off, filesize; char *location; HTTPAuthState auth_state; HTTPAuthState proxy_auth_state; char *http_proxy; char *headers; char *mime_type; char *user_agent; #if FF_API_HTTP_USER_AGENT char *user_agent_deprecated; #endif char *content_type; /* Set if the server correctly handles Connection: close and will close * the connection after feeding us the content. */ int willclose; int seekable; /**< Control seekability, 0 = disable, 1 = enable, -1 = probe. */ int chunked_post; /* A flag which indicates if the end of chunked encoding has been sent. */ int end_chunked_post; /* A flag which indicates we have finished to read POST reply. */ int end_header; /* A flag which indicates if we use persistent connections. */ int multiple_requests; uint8_t *post_data; int post_datalen; int is_akamai; int is_mediagateway; char *cookies; ///< holds newline (\n) delimited Set-Cookie header field values (without the "Set-Cookie: " field name) /* A dictionary containing cookies keyed by cookie name */ AVDictionary *cookie_dict; int icy; /* how much data was read since the last ICY metadata packet */ int icy_data_read; /* after how many bytes of read data a new metadata packet will be found */ int icy_metaint; char *icy_metadata_headers; char *icy_metadata_packet; AVDictionary *metadata; #if CONFIG_ZLIB int compressed; z_stream inflate_stream; uint8_t *inflate_buffer; #endif /* CONFIG_ZLIB */ AVDictionary *chained_options; int send_expect_100; char *method; int reconnect; int reconnect_at_eof; int reconnect_streamed; int reconnect_delay; int reconnect_delay_max; int listen; char *resource; int reply_code; int is_multi_client; HandshakeState handshake_step; int is_connected_server; } HTTPContext; #define OFFSET(x) offsetof(HTTPContext, x) #define D AV_OPT_FLAG_DECODING_PARAM #define E AV_OPT_FLAG_ENCODING_PARAM #define DEFAULT_USER_AGENT "Lavf/" AV_STRINGIFY(LIBAVFORMAT_VERSION) static const AVOption options[] = { { "seekable", "control seekability of 
connection", OFFSET(seekable), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, D }, { "chunked_post", "use chunked transfer-encoding for posts", OFFSET(chunked_post), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E }, { "http_proxy", "set HTTP proxy to tunnel through", OFFSET(http_proxy), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "headers", "set custom HTTP headers, can override built in default headers", OFFSET(headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "content_type", "set a specific content type for the POST messages", OFFSET(content_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "user_agent", "override User-Agent header", OFFSET(user_agent), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #if FF_API_HTTP_USER_AGENT { "user-agent", "override User-Agent header", OFFSET(user_agent_deprecated), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #endif { "multiple_requests", "use persistent connections", OFFSET(multiple_requests), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D | E }, { "post_data", "set custom HTTP post data", OFFSET(post_data), AV_OPT_TYPE_BINARY, .flags = D | E }, { "mime_type", "export the MIME type", OFFSET(mime_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY }, { "cookies", "set cookies to be sent in applicable future requests, use newline delimited Set-Cookie HTTP field value syntax", OFFSET(cookies), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D }, { "icy", "request ICY metadata", OFFSET(icy), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, D }, { "icy_metadata_headers", "return ICY metadata headers", OFFSET(icy_metadata_headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "icy_metadata_packet", "return current ICY metadata packet", OFFSET(icy_metadata_packet), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "metadata", "metadata read from the bitstream", OFFSET(metadata), AV_OPT_TYPE_DICT, {0}, 0, 0, AV_OPT_FLAG_EXPORT 
}, { "auth_type", "HTTP authentication type", OFFSET(auth_state.auth_type), AV_OPT_TYPE_INT, { .i64 = HTTP_AUTH_NONE }, HTTP_AUTH_NONE, HTTP_AUTH_BASIC, D | E, "auth_type"}, { "none", "No auth method set, autodetect", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_NONE }, 0, 0, D | E, "auth_type"}, { "basic", "HTTP basic authentication", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_BASIC }, 0, 0, D | E, "auth_type"}, { "send_expect_100", "Force sending an Expect: 100-continue header for POST", OFFSET(send_expect_100), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "location", "The actual location of the data received", OFFSET(location), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "offset", "initial byte offset", OFFSET(off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "end_offset", "try to limit the request to bytes preceding this offset", OFFSET(end_off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "method", "Override the HTTP method or set the expected HTTP method from a client", OFFSET(method), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "reconnect", "auto reconnect after disconnect before EOF", OFFSET(reconnect), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_at_eof", "auto reconnect at EOF", OFFSET(reconnect_at_eof), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_streamed", "auto reconnect streamed / non seekable streams", OFFSET(reconnect_streamed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_delay_max", "max reconnect delay in seconds after which to give up", OFFSET(reconnect_delay_max), AV_OPT_TYPE_INT, { .i64 = 120 }, 0, UINT_MAX/1000/1000, D }, { "listen", "listen on HTTP", OFFSET(listen), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, D | E }, { "resource", "The resource requested by a client", OFFSET(resource), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E }, { "reply_code", "The http status code to return to a client", OFFSET(reply_code), AV_OPT_TYPE_INT, { .i64 = 200}, INT_MIN, 599, E}, { NULL } }; static 
int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location); static int http_read_header(URLContext *h, int *new_location); void ff_http_init_auth_state(URLContext *dest, const URLContext *src) { memcpy(&((HTTPContext *)dest->priv_data)->auth_state, &((HTTPContext *)src->priv_data)->auth_state, sizeof(HTTPAuthState)); memcpy(&((HTTPContext *)dest->priv_data)->proxy_auth_state, &((HTTPContext *)src->priv_data)->proxy_auth_state, sizeof(HTTPAuthState)); } static int http_open_cnx_internal(URLContext *h, AVDictionary **options) { const char *path, *proxy_path, *lower_proto = "tcp", *local_path; char hostname[1024], hoststr[1024], proto[10]; char auth[1024], proxyauth[1024] = ""; char path1[MAX_URL_SIZE]; char buf[1024], urlbuf[MAX_URL_SIZE]; int port, use_proxy, err, location_changed = 0; HTTPContext *s = h->priv_data; av_url_split(proto, sizeof(proto), auth, sizeof(auth), hostname, sizeof(hostname), &port, path1, sizeof(path1), s->location); ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL); proxy_path = s->http_proxy ? s->http_proxy : getenv("http_proxy"); use_proxy = !ff_http_match_no_proxy(getenv("no_proxy"), hostname) && proxy_path && av_strstart(proxy_path, "http://", NULL); if (!strcmp(proto, "https")) { lower_proto = "tls"; use_proxy = 0; if (port < 0) port = 443; } if (port < 0) port = 80; if (path1[0] == '\0') path = "/"; else path = path1; local_path = path; if (use_proxy) { /* Reassemble the request URL without auth string - we don't * want to leak the auth to the proxy. 
*/ ff_url_join(urlbuf, sizeof(urlbuf), proto, NULL, hostname, port, "%s", path1); path = urlbuf; av_url_split(NULL, 0, proxyauth, sizeof(proxyauth), hostname, sizeof(hostname), &port, NULL, 0, proxy_path); } ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL); if (!s->hd) { err = ffurl_open_whitelist(&s->hd, buf, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h); if (err < 0) return err; } err = http_connect(h, path, local_path, hoststr, auth, proxyauth, &location_changed); if (err < 0) return err; return location_changed; } /* return non zero if error */ static int http_open_cnx(URLContext *h, AVDictionary **options) { HTTPAuthType cur_auth_type, cur_proxy_auth_type; HTTPContext *s = h->priv_data; int location_changed, attempts = 0, redirects = 0; redo: av_dict_copy(options, s->chained_options, 0); cur_auth_type = s->auth_state.auth_type; cur_proxy_auth_type = s->auth_state.auth_type; location_changed = http_open_cnx_internal(h, options); if (location_changed < 0) goto fail; attempts++; if (s->http_code == 401) { if ((cur_auth_type == HTTP_AUTH_NONE || s->auth_state.stale) && s->auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if (s->http_code == 407) { if ((cur_proxy_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) && s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if ((s->http_code == 301 || s->http_code == 302 || s->http_code == 303 || s->http_code == 307) && location_changed == 1) { /* url moved, get next */ ffurl_closep(&s->hd); if (redirects++ >= MAX_REDIRECTS) return AVERROR(EIO); /* Restart the authentication process with the new target, which * might use a different auth mechanism. 
*/ memset(&s->auth_state, 0, sizeof(s->auth_state)); attempts = 0; location_changed = 0; goto redo; } return 0; fail: if (s->hd) ffurl_closep(&s->hd); if (location_changed < 0) return location_changed; return ff_http_averror(s->http_code, AVERROR(EIO)); } int ff_http_do_new_request(URLContext *h, const char *uri) { HTTPContext *s = h->priv_data; AVDictionary *options = NULL; int ret; s->off = 0; s->icy_data_read = 0; av_free(s->location); s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); ret = http_open_cnx(h, &options); av_dict_free(&options); return ret; } int ff_http_averror(int status_code, int default_averror) { switch (status_code) { case 400: return AVERROR_HTTP_BAD_REQUEST; case 401: return AVERROR_HTTP_UNAUTHORIZED; case 403: return AVERROR_HTTP_FORBIDDEN; case 404: return AVERROR_HTTP_NOT_FOUND; default: break; } if (status_code >= 400 && status_code <= 499) return AVERROR_HTTP_OTHER_4XX; else if (status_code >= 500) return AVERROR_HTTP_SERVER_ERROR; else return default_averror; } static int http_write_reply(URLContext* h, int status_code) { int ret, body = 0, reply_code, message_len; const char *reply_text, *content_type; HTTPContext *s = h->priv_data; char message[BUFFER_SIZE]; content_type = "text/plain"; if (status_code < 0) body = 1; switch (status_code) { case AVERROR_HTTP_BAD_REQUEST: case 400: reply_code = 400; reply_text = "Bad Request"; break; case AVERROR_HTTP_FORBIDDEN: case 403: reply_code = 403; reply_text = "Forbidden"; break; case AVERROR_HTTP_NOT_FOUND: case 404: reply_code = 404; reply_text = "Not Found"; break; case 200: reply_code = 200; reply_text = "OK"; content_type = s->content_type ? 
s->content_type : "application/octet-stream"; break; case AVERROR_HTTP_SERVER_ERROR: case 500: reply_code = 500; reply_text = "Internal server error"; break; default: return AVERROR(EINVAL); } if (body) { s->chunked_post = 0; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Content-Length: %"SIZE_SPECIFIER"\r\n" "%s" "\r\n" "%03d %s\r\n", reply_code, reply_text, content_type, strlen(reply_text) + 6, // 3 digit status code + space + \r\n s->headers ? s->headers : "", reply_code, reply_text); } else { s->chunked_post = 1; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Transfer-Encoding: chunked\r\n" "%s" "\r\n", reply_code, reply_text, content_type, s->headers ? s->headers : ""); } av_log(h, AV_LOG_TRACE, "HTTP reply header: \n%s----\n", message); if ((ret = ffurl_write(s->hd, message, message_len)) < 0) return ret; return 0; } static void handle_http_errors(URLContext *h, int error) { av_assert0(error < 0); http_write_reply(h, error); } static int http_handshake(URLContext *c) { int ret, err, new_location; HTTPContext *ch = c->priv_data; URLContext *cl = ch->hd; switch (ch->handshake_step) { case LOWER_PROTO: av_log(c, AV_LOG_TRACE, "Lower protocol\n"); if ((ret = ffurl_handshake(cl)) > 0) return 2 + ret; if (ret < 0) return ret; ch->handshake_step = READ_HEADERS; ch->is_connected_server = 1; return 2; case READ_HEADERS: av_log(c, AV_LOG_TRACE, "Read headers\n"); if ((err = http_read_header(c, &new_location)) < 0) { handle_http_errors(c, err); return err; } ch->handshake_step = WRITE_REPLY_HEADERS; return 1; case WRITE_REPLY_HEADERS: av_log(c, AV_LOG_TRACE, "Reply code: %d\n", ch->reply_code); if ((err = http_write_reply(c, ch->reply_code)) < 0) return err; ch->handshake_step = FINISH; return 1; case FINISH: return 0; } // this should never be reached. 
return AVERROR(EINVAL); } static int http_listen(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; char hostname[1024], proto[10]; char lower_url[100]; const char *lower_proto = "tcp"; int port; av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri); if (!strcmp(proto, "https")) lower_proto = "tls"; ff_url_join(lower_url, sizeof(lower_url), lower_proto, NULL, hostname, port, NULL); if ((ret = av_dict_set_int(options, "listen", s->listen, 0)) < 0) goto fail; if ((ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h )) < 0) goto fail; s->handshake_step = LOWER_PROTO; if (s->listen == HTTP_SINGLE) { /* single client */ s->reply_code = 200; while ((ret = http_handshake(h)) > 0); } fail: av_dict_free(&s->chained_options); return ret; } static int http_open(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; s->filesize = -1; s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); if (options) av_dict_copy(&s->chained_options, *options, 0); if (s->headers) { int len = strlen(s->headers); if (len < 2 || strcmp("\r\n", s->headers + len - 2)) { av_log(h, AV_LOG_WARNING, "No trailing CRLF found in HTTP header.\n"); ret = av_reallocp(&s->headers, len + 3); if (ret < 0) return ret; s->headers[len] = '\r'; s->headers[len + 1] = '\n'; s->headers[len + 2] = '\0'; } } if (s->listen) { return http_listen(h, uri, flags, options); } ret = http_open_cnx(h, options); if (ret < 0) av_dict_free(&s->chained_options); return ret; } static int http_accept(URLContext *s, URLContext **c) { int ret; HTTPContext *sc = s->priv_data; HTTPContext *cc; URLContext *sl = sc->hd; URLContext *cl = NULL; av_assert0(sc->listen); if ((ret = ffurl_alloc(c, s->filename, s->flags, 
&sl->interrupt_callback)) < 0) goto fail; cc = (*c)->priv_data; if ((ret = ffurl_accept(sl, &cl)) < 0) goto fail; cc->hd = cl; cc->is_multi_client = 1; fail: return ret; } static int http_getc(HTTPContext *s) { int len; if (s->buf_ptr >= s->buf_end) { len = ffurl_read(s->hd, s->buffer, BUFFER_SIZE); if (len < 0) { return len; } else if (len == 0) { return AVERROR_EOF; } else { s->buf_ptr = s->buffer; s->buf_end = s->buffer + len; } } return *s->buf_ptr++; } static int http_get_line(HTTPContext *s, char *line, int line_size) { int ch; char *q; q = line; for (;;) { ch = http_getc(s); if (ch < 0) return ch; if (ch == '\n') { /* process line */ if (q > line && q[-1] == '\r') q--; *q = '\0'; return 0; } else { if ((q - line) < line_size - 1) *q++ = ch; } } } static int check_http_code(URLContext *h, int http_code, const char *end) { HTTPContext *s = h->priv_data; /* error codes are 4xx and 5xx, but regard 401 as a success, so we * don't abort until all headers have been parsed. */ if (http_code >= 400 && http_code < 600 && (http_code != 401 || s->auth_state.auth_type != HTTP_AUTH_NONE) && (http_code != 407 || s->proxy_auth_state.auth_type != HTTP_AUTH_NONE)) { end += strspn(end, SPACE_CHARS); av_log(h, AV_LOG_WARNING, "HTTP error %d %s\n", http_code, end); return ff_http_averror(http_code, AVERROR(EIO)); } return 0; } static int parse_location(HTTPContext *s, const char *p) { char redirected_location[MAX_URL_SIZE], *new_loc; ff_make_absolute_url(redirected_location, sizeof(redirected_location), s->location, p); new_loc = av_strdup(redirected_location); if (!new_loc) return AVERROR(ENOMEM); av_free(s->location); s->location = new_loc; return 0; } /* "bytes $from-$to/$document_size" */ static void parse_content_range(URLContext *h, const char *p) { HTTPContext *s = h->priv_data; const char *slash; if (!strncmp(p, "bytes ", 6)) { p += 6; s->off = strtoll(p, NULL, 10); if ((slash = strchr(p, '/')) && strlen(slash) > 0) s->filesize = strtoll(slash + 1, NULL, 10); } if 
(s->seekable == -1 && (!s->is_akamai || s->filesize != 2147483647)) h->is_streamed = 0; /* we _can_ in fact seek */ } static int parse_content_encoding(URLContext *h, const char *p) { if (!av_strncasecmp(p, "gzip", 4) || !av_strncasecmp(p, "deflate", 7)) { #if CONFIG_ZLIB HTTPContext *s = h->priv_data; s->compressed = 1; inflateEnd(&s->inflate_stream); if (inflateInit2(&s->inflate_stream, 32 + 15) != Z_OK) { av_log(h, AV_LOG_WARNING, "Error during zlib initialisation: %s\n", s->inflate_stream.msg); return AVERROR(ENOSYS); } if (zlibCompileFlags() & (1 << 17)) { av_log(h, AV_LOG_WARNING, "Your zlib was compiled without gzip support.\n"); return AVERROR(ENOSYS); } #else av_log(h, AV_LOG_WARNING, "Compressed (%s) content, need zlib with gzip support\n", p); return AVERROR(ENOSYS); #endif /* CONFIG_ZLIB */ } else if (!av_strncasecmp(p, "identity", 8)) { // The normal, no-encoding case (although servers shouldn't include // the header at all if this is the case). } else { av_log(h, AV_LOG_WARNING, "Unknown content coding: %s\n", p); } return 0; } // Concat all Icy- header lines static int parse_icy(HTTPContext *s, const char *tag, const char *p) { int len = 4 + strlen(p) + strlen(tag); int is_first = !s->icy_metadata_headers; int ret; av_dict_set(&s->metadata, tag, p, 0); if (s->icy_metadata_headers) len += strlen(s->icy_metadata_headers); if ((ret = av_reallocp(&s->icy_metadata_headers, len)) < 0) return ret; if (is_first) *s->icy_metadata_headers = '\0'; av_strlcatf(s->icy_metadata_headers, len, "%s: %s\n", tag, p); return 0; } static int parse_cookie(HTTPContext *s, const char *p, AVDictionary **cookies) { char *eql, *name; // duplicate the cookie name (dict will dupe the value) if (!(eql = strchr(p, '='))) return AVERROR(EINVAL); if (!(name = av_strndup(p, eql - p))) return AVERROR(ENOMEM); // add the cookie to the dictionary av_dict_set(cookies, name, eql, AV_DICT_DONT_STRDUP_KEY); return 0; } static int cookie_string(AVDictionary *dict, char **cookies) { 
AVDictionaryEntry *e = NULL; int len = 1; // determine how much memory is needed for the cookies string while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) len += strlen(e->key) + strlen(e->value) + 1; // reallocate the cookies e = NULL; if (*cookies) av_free(*cookies); *cookies = av_malloc(len); if (!*cookies) return AVERROR(ENOMEM); *cookies[0] = '\0'; // write out the cookies while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) av_strlcatf(*cookies, len, "%s%s\n", e->key, e->value); return 0; } static int process_line(URLContext *h, char *line, int line_count, int *new_location) { HTTPContext *s = h->priv_data; const char *auto_method = h->flags & AVIO_FLAG_READ ? "POST" : "GET"; char *tag, *p, *end, *method, *resource, *version; int ret; /* end of header */ if (line[0] == '\0') { s->end_header = 1; return 0; } p = line; if (line_count == 0) { if (s->is_connected_server) { // HTTP method method = p; while (*p && !av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Received method: %s\n", method); if (s->method) { if (av_strcasecmp(s->method, method)) { av_log(h, AV_LOG_ERROR, "Received and expected HTTP method do not match. 
(%s expected, %s received)\n", s->method, method); return ff_http_averror(400, AVERROR(EIO)); } } else { // use autodetected HTTP method to expect av_log(h, AV_LOG_TRACE, "Autodetected %s HTTP method\n", auto_method); if (av_strcasecmp(auto_method, method)) { av_log(h, AV_LOG_ERROR, "Received and autodetected HTTP method did not match " "(%s autodetected %s received)\n", auto_method, method); return ff_http_averror(400, AVERROR(EIO)); } if (!(s->method = av_strdup(method))) return AVERROR(ENOMEM); } // HTTP resource while (av_isspace(*p)) p++; resource = p; while (!av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Requested resource: %s\n", resource); if (!(s->resource = av_strdup(resource))) return AVERROR(ENOMEM); // HTTP version while (av_isspace(*p)) p++; version = p; while (*p && !av_isspace(*p)) p++; *p = '\0'; if (av_strncasecmp(version, "HTTP/", 5)) { av_log(h, AV_LOG_ERROR, "Malformed HTTP version string.\n"); return ff_http_averror(400, AVERROR(EIO)); } av_log(h, AV_LOG_TRACE, "HTTP version string: %s\n", version); } else { while (!av_isspace(*p) && *p != '\0') p++; while (av_isspace(*p)) p++; s->http_code = strtol(p, &end, 10); av_log(h, AV_LOG_TRACE, "http_code=%d\n", s->http_code); if ((ret = check_http_code(h, s->http_code, end)) < 0) return ret; } } else { while (*p != '\0' && *p != ':') p++; if (*p != ':') return 1; *p = '\0'; tag = line; p++; while (av_isspace(*p)) p++; if (!av_strcasecmp(tag, "Location")) { if ((ret = parse_location(s, p)) < 0) return ret; *new_location = 1; } else if (!av_strcasecmp(tag, "Content-Length") && s->filesize == -1) { s->filesize = strtoll(p, NULL, 10); } else if (!av_strcasecmp(tag, "Content-Range")) { parse_content_range(h, p); } else if (!av_strcasecmp(tag, "Accept-Ranges") && !strncmp(p, "bytes", 5) && s->seekable == -1) { h->is_streamed = 0; } else if (!av_strcasecmp(tag, "Transfer-Encoding") && !av_strncasecmp(p, "chunked", 7)) { s->filesize = -1; s->chunksize = 0; } else if (!av_strcasecmp(tag, 
"WWW-Authenticate")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Authentication-Info")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Proxy-Authenticate")) { ff_http_auth_handle_header(&s->proxy_auth_state, tag, p); } else if (!av_strcasecmp(tag, "Connection")) { if (!strcmp(p, "close")) s->willclose = 1; } else if (!av_strcasecmp(tag, "Server")) { if (!av_strcasecmp(p, "AkamaiGHost")) { s->is_akamai = 1; } else if (!av_strncasecmp(p, "MediaGateway", 12)) { s->is_mediagateway = 1; } } else if (!av_strcasecmp(tag, "Content-Type")) { av_free(s->mime_type); s->mime_type = av_strdup(p); } else if (!av_strcasecmp(tag, "Set-Cookie")) { if (parse_cookie(s, p, &s->cookie_dict)) av_log(h, AV_LOG_WARNING, "Unable to parse '%s'\n", p); } else if (!av_strcasecmp(tag, "Icy-MetaInt")) { s->icy_metaint = strtoll(p, NULL, 10); } else if (!av_strncasecmp(tag, "Icy-", 4)) { if ((ret = parse_icy(s, tag, p)) < 0) return ret; } else if (!av_strcasecmp(tag, "Content-Encoding")) { if ((ret = parse_content_encoding(h, p)) < 0) return ret; } } return 1; } /** * Create a string containing cookie values for use as a HTTP cookie header * field value for a particular path and domain from the cookie values stored in * the HTTP protocol context. The cookie string is stored in *cookies. * * @return a negative value if an error condition occurred, 0 otherwise */ static int get_cookies(HTTPContext *s, char **cookies, const char *path, const char *domain) { // cookie strings will look like Set-Cookie header field values. Multiple // Set-Cookie fields will result in multiple values delimited by a newline int ret = 0; char *next, *cookie, *set_cookies = av_strdup(s->cookies), *cset_cookies = set_cookies; if (!set_cookies) return AVERROR(EINVAL); // destroy any cookies in the dictionary. 
    av_dict_free(&s->cookie_dict);

    *cookies = NULL;
    // iterate over each newline-delimited Set-Cookie value stored in s->cookies
    while ((cookie = av_strtok(set_cookies, "\n", &next))) {
        int domain_offset = 0;
        char *param, *next_param, *cdomain = NULL, *cpath = NULL, *cvalue = NULL;
        set_cookies = NULL;

        // store the cookie in a dict in case it is updated in the response
        if (parse_cookie(s, cookie, &s->cookie_dict))
            av_log(s, AV_LOG_WARNING, "Unable to parse '%s'\n", cookie);

        // split the cookie into its "; "-separated attributes
        while ((param = av_strtok(cookie, "; ", &next_param))) {
            if (cookie) {
                // first key-value pair is the actual cookie value
                cvalue = av_strdup(param);
                cookie = NULL;
            } else if (!av_strncasecmp("path=", param, 5)) {
                av_free(cpath);
                cpath = av_strdup(&param[5]);
            } else if (!av_strncasecmp("domain=", param, 7)) {
                // if the cookie specifies a sub-domain, skip the leading dot thereby
                // supporting URLs that point to sub-domains and the master domain
                int leading_dot = (param[7] == '.');
                av_free(cdomain);
                cdomain = av_strdup(&param[7+leading_dot]);
            } else {
                // ignore unknown attributes
            }
        }
        // a cookie without an explicit domain attribute defaults to the request domain
        if (!cdomain)
            cdomain = av_strdup(domain);

        // ensure all of the necessary values are valid
        if (!cdomain || !cpath || !cvalue) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid cookie found, no value, path or domain specified\n");
            goto done_cookie;
        }

        // check if the request path matches the cookie path
        if (av_strncasecmp(path, cpath, strlen(cpath)))
            goto done_cookie;

        // the domain should be at least the size of our cookie domain
        domain_offset = strlen(domain) - strlen(cdomain);
        if (domain_offset < 0)
            goto done_cookie;

        // match the cookie domain (suffix match against the request domain)
        if (av_strcasecmp(&domain[domain_offset], cdomain))
            goto done_cookie;

        // cookie parameters match, so copy the value
        if (!*cookies) {
            // first matching cookie: just duplicate its value
            if (!(*cookies = av_strdup(cvalue))) {
                ret = AVERROR(ENOMEM);
                goto done_cookie;
            }
        } else {
            // subsequent matches: append as "existing; value"
            char *tmp = *cookies;
            size_t str_size = strlen(cvalue) + strlen(*cookies) + 3;
            if (!(*cookies = av_malloc(str_size))) {
                ret = AVERROR(ENOMEM);
                goto done_cookie;
            }
            snprintf(*cookies, str_size, "%s; %s", tmp, cvalue);
            av_free(tmp);
        }

        // per-iteration cleanup; falls through from both success and failure paths
        done_cookie:
av_freep(&cdomain); av_freep(&cpath); av_freep(&cvalue); if (ret < 0) { if (*cookies) av_freep(cookies); av_free(cset_cookies); return ret; } } av_free(cset_cookies); return 0; } static inline int has_header(const char *str, const char *header) { /* header + 2 to skip over CRLF prefix. (make sure you have one!) */ if (!str) return 0; return av_stristart(str, header + 2, NULL) || av_stristr(str, header); } static int http_read_header(URLContext *h, int *new_location) { HTTPContext *s = h->priv_data; char line[MAX_URL_SIZE]; int err = 0; s->chunksize = -1; for (;;) { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; av_log(h, AV_LOG_TRACE, "header='%s'\n", line); err = process_line(h, line, s->line_count, new_location); if (err < 0) return err; if (err == 0) break; s->line_count++; } if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000) h->is_streamed = 1; /* we can in fact _not_ seek */ // add any new cookies into the existing cookie string cookie_string(s->cookie_dict, &s->cookies); av_dict_free(&s->cookie_dict); return err; } static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char headers[HTTP_HEADERS_SIZE] = ""; char *authstr = NULL, *proxyauthstr = NULL; int64_t off = s->off; int len = 0; const char *method; int send_expect_100 = 0; /* send http header */ post = h->flags & AVIO_FLAG_WRITE; if (s->post_data) { /* force POST method and disable chunked encoding when * custom HTTP post data is set */ post = 1; s->chunked_post = 0; } if (s->method) method = s->method; else method = post ? 
"POST" : "GET"; authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path, method); proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth, local_path, method); if (post && !s->post_data) { send_expect_100 = s->send_expect_100; /* The user has supplied authentication but we don't know the auth type, * send Expect: 100-continue to get the 401 response including the * WWW-Authenticate header, or an 100 continue if no auth actually * is needed. */ if (auth && *auth && s->auth_state.auth_type == HTTP_AUTH_NONE && s->http_code != 401) send_expect_100 = 1; } #if FF_API_HTTP_USER_AGENT if (strcmp(s->user_agent_deprecated, DEFAULT_USER_AGENT)) { av_log(s, AV_LOG_WARNING, "the user-agent option is deprecated, please use user_agent option\n"); s->user_agent = av_strdup(s->user_agent_deprecated); } #endif /* set default headers if needed */ if (!has_header(s->headers, "\r\nUser-Agent: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "User-Agent: %s\r\n", s->user_agent); if (!has_header(s->headers, "\r\nAccept: ")) len += av_strlcpy(headers + len, "Accept: */*\r\n", sizeof(headers) - len); // Note: we send this on purpose even when s->off is 0 when we're probing, // since it allows us to detect more reliably if a (non-conforming) // server supports seeking by analysing the reply headers. 
if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Range: bytes=%"PRId64"-", s->off); if (s->end_off) len += av_strlcatf(headers + len, sizeof(headers) - len, "%"PRId64, s->end_off - 1); len += av_strlcpy(headers + len, "\r\n", sizeof(headers) - len); } if (send_expect_100 && !has_header(s->headers, "\r\nExpect: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Expect: 100-continue\r\n"); if (!has_header(s->headers, "\r\nConnection: ")) { if (s->multiple_requests) len += av_strlcpy(headers + len, "Connection: keep-alive\r\n", sizeof(headers) - len); else len += av_strlcpy(headers + len, "Connection: close\r\n", sizeof(headers) - len); } if (!has_header(s->headers, "\r\nHost: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Host: %s\r\n", hoststr); if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Length: %d\r\n", s->post_datalen); if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Type: %s\r\n", s->content_type); if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) { char *cookies = NULL; if (!get_cookies(s, &cookies, path, hoststr) && cookies) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Cookie: %s\r\n", cookies); av_free(cookies); } } if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy) len += av_strlcatf(headers + len, sizeof(headers) - len, "Icy-MetaData: %d\r\n", 1); /* now add in custom headers */ if (s->headers) av_strlcpy(headers + len, s->headers, sizeof(headers) - len); snprintf(s->buffer, sizeof(s->buffer), "%s %s HTTP/1.1\r\n" "%s" "%s" "%s" "%s%s" "\r\n", method, path, post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "", headers, authstr ? authstr : "", proxyauthstr ? "Proxy-" : "", proxyauthstr ? 
proxyauthstr : ""); av_log(h, AV_LOG_DEBUG, "request: %s\n", s->buffer); if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto done; if (s->post_data) if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0) goto done; /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->icy_data_read = 0; s->filesize = -1; s->willclose = 0; s->end_chunked_post = 0; s->end_header = 0; if (post && !s->post_data && !send_expect_100) { /* Pretend that it did work. We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; err = 0; goto done; } /* wait for header */ err = http_read_header(h, new_location); if (err < 0) goto done; if (*new_location) s->off = off; err = (off == s->off) ? 0 : -1; done: av_freep(&authstr); av_freep(&proxyauthstr); return err; } static int http_buf_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int len; /* read bytes from input buffer first */ len = s->buf_end - s->buf_ptr; if (len > 0) { if (len > size) len = size; memcpy(buf, s->buf_ptr, len); s->buf_ptr += len; } else { int64_t target_end = s->end_off ? 
s->end_off : s->filesize; if ((!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off >= target_end) return AVERROR_EOF; len = ffurl_read(s->hd, buf, size); if (!len && (!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off < target_end) { av_log(h, AV_LOG_ERROR, "Stream ends prematurely at %"PRId64", should be %"PRId64"\n", s->off, target_end ); return AVERROR(EIO); } } if (len > 0) { s->off += len; if (s->chunksize > 0) s->chunksize -= len; } return len; } #if CONFIG_ZLIB #define DECOMPRESS_BUF_SIZE (256 * 1024) static int http_buf_read_compressed(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int ret; if (!s->inflate_buffer) { s->inflate_buffer = av_malloc(DECOMPRESS_BUF_SIZE); if (!s->inflate_buffer) return AVERROR(ENOMEM); } if (s->inflate_stream.avail_in == 0) { int read = http_buf_read(h, s->inflate_buffer, DECOMPRESS_BUF_SIZE); if (read <= 0) return read; s->inflate_stream.next_in = s->inflate_buffer; s->inflate_stream.avail_in = read; } s->inflate_stream.avail_out = size; s->inflate_stream.next_out = buf; ret = inflate(&s->inflate_stream, Z_SYNC_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) av_log(h, AV_LOG_WARNING, "inflate return value: %d, %s\n", ret, s->inflate_stream.msg); return size - s->inflate_stream.avail_out; } #endif /* CONFIG_ZLIB */ static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect); static int http_read_stream(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int err, new_location, read_ret; int64_t seek_ret; if (!s->hd) return AVERROR_EOF; if (s->end_chunked_post && !s->end_header) { err = http_read_header(h, &new_location); if (err < 0) return err; } if (s->chunksize >= 0) { if (!s->chunksize) { char line[32]; do { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; } while (!*line); /* skip CR LF from last chunk */ s->chunksize = strtoll(line, NULL, 16); av_log(NULL, AV_LOG_TRACE, "Chunked encoding data size: 
%"PRId64"'\n", s->chunksize); if (!s->chunksize) return 0; } size = FFMIN(size, s->chunksize); } #if CONFIG_ZLIB if (s->compressed) return http_buf_read_compressed(h, buf, size); #endif /* CONFIG_ZLIB */ read_ret = http_buf_read(h, buf, size); if ( (read_ret < 0 && s->reconnect && (!h->is_streamed || s->reconnect_streamed) && s->filesize > 0 && s->off < s->filesize) || (read_ret == 0 && s->reconnect_at_eof && (!h->is_streamed || s->reconnect_streamed))) { int64_t target = h->is_streamed ? 0 : s->off; if (s->reconnect_delay > s->reconnect_delay_max) return AVERROR(EIO); av_log(h, AV_LOG_INFO, "Will reconnect at %"PRId64" error=%s.\n", s->off, av_err2str(read_ret)); av_usleep(1000U*1000*s->reconnect_delay); s->reconnect_delay = 1 + 2*s->reconnect_delay; seek_ret = http_seek_internal(h, target, SEEK_SET, 1); if (seek_ret != target) { av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRId64".\n", target); return read_ret; } read_ret = http_buf_read(h, buf, size); } else s->reconnect_delay = 0; return read_ret; } // Like http_read_stream(), but no short reads. // Assumes partial reads are an error. static int http_read_stream_all(URLContext *h, uint8_t *buf, int size) { int pos = 0; while (pos < size) { int len = http_read_stream(h, buf + pos, size - pos); if (len < 0) return len; pos += len; } return pos; } static void update_metadata(HTTPContext *s, char *data) { char *key; char *val; char *end; char *next = data; while (*next) { key = next; val = strstr(key, "='"); if (!val) break; end = strstr(val, "';"); if (!end) break; *val = '\0'; *end = '\0'; val += 2; av_dict_set(&s->metadata, key, val, 0); next = end + 2; } } static int store_icy(URLContext *h, int size) { HTTPContext *s = h->priv_data; /* until next metadata packet */ int remaining = s->icy_metaint - s->icy_data_read; if (remaining < 0) return AVERROR_INVALIDDATA; if (!remaining) { /* The metadata packet is variable sized. It has a 1 byte header * which sets the length of the packet (divided by 16). 
If it's 0, * the metadata doesn't change. After the packet, icy_metaint bytes * of normal data follows. */ uint8_t ch; int len = http_read_stream_all(h, &ch, 1); if (len < 0) return len; if (ch > 0) { char data[255 * 16 + 1]; int ret; len = ch * 16; ret = http_read_stream_all(h, data, len); if (ret < 0) return ret; data[len + 1] = 0; if ((ret = av_opt_set(s, "icy_metadata_packet", data, 0)) < 0) return ret; update_metadata(s, data); } s->icy_data_read = 0; remaining = s->icy_metaint; } return FFMIN(size, remaining); } static int http_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; if (s->icy_metaint > 0) { size = store_icy(h, size); if (size < 0) return size; } size = http_read_stream(h, buf, size); if (size > 0) s->icy_data_read += size; return size; } /* used only when posting data */ static int http_write(URLContext *h, const uint8_t *buf, int size) { char temp[11] = ""; /* 32-bit hex + CRLF + nul */ int ret; char crlf[] = "\r\n"; HTTPContext *s = h->priv_data; if (!s->chunked_post) { /* non-chunked data is sent without any special encoding */ return ffurl_write(s->hd, buf, size); } /* silently ignore zero-size data since chunk encoding that would * signal EOF */ if (size > 0) { /* upload data using chunked encoding */ snprintf(temp, sizeof(temp), "%x\r\n", size); if ((ret = ffurl_write(s->hd, temp, strlen(temp))) < 0 || (ret = ffurl_write(s->hd, buf, size)) < 0 || (ret = ffurl_write(s->hd, crlf, sizeof(crlf) - 1)) < 0) return ret; } return size; } static int http_shutdown(URLContext *h, int flags) { int ret = 0; char footer[] = "0\r\n\r\n"; HTTPContext *s = h->priv_data; /* signal end of chunked encoding if used */ if (((flags & AVIO_FLAG_WRITE) && s->chunked_post) || ((flags & AVIO_FLAG_READ) && s->chunked_post && s->listen)) { ret = ffurl_write(s->hd, footer, sizeof(footer) - 1); ret = ret > 0 ? 
0 : ret; s->end_chunked_post = 1; } return ret; } static int http_close(URLContext *h) { int ret = 0; HTTPContext *s = h->priv_data; #if CONFIG_ZLIB inflateEnd(&s->inflate_stream); av_freep(&s->inflate_buffer); #endif /* CONFIG_ZLIB */ if (!s->end_chunked_post) /* Close the write direction by sending the end of chunked encoding. */ ret = http_shutdown(h, h->flags); if (s->hd) ffurl_closep(&s->hd); av_dict_free(&s->chained_options); return ret; } static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect) { HTTPContext *s = h->priv_data; URLContext *old_hd = s->hd; int64_t old_off = s->off; uint8_t old_buf[BUFFER_SIZE]; int old_buf_size, ret; AVDictionary *options = NULL; if (whence == AVSEEK_SIZE) return s->filesize; else if (!force_reconnect && ((whence == SEEK_CUR && off == 0) || (whence == SEEK_SET && off == s->off))) return s->off; else if ((s->filesize == -1 && whence == SEEK_END)) return AVERROR(ENOSYS); if (whence == SEEK_CUR) off += s->off; else if (whence == SEEK_END) off += s->filesize; else if (whence != SEEK_SET) return AVERROR(EINVAL); if (off < 0) return AVERROR(EINVAL); s->off = off; if (s->off && h->is_streamed) return AVERROR(ENOSYS); /* we save the old context in case the seek fails */ old_buf_size = s->buf_end - s->buf_ptr; memcpy(old_buf, s->buf_ptr, old_buf_size); s->hd = NULL; /* if it fails, continue on old connection */ if ((ret = http_open_cnx(h, &options)) < 0) { av_dict_free(&options); memcpy(s->buffer, old_buf, old_buf_size); s->buf_ptr = s->buffer; s->buf_end = s->buffer + old_buf_size; s->hd = old_hd; s->off = old_off; return ret; } av_dict_free(&options); ffurl_close(old_hd); return off; } static int64_t http_seek(URLContext *h, int64_t off, int whence) { return http_seek_internal(h, off, whence, 0); } static int http_get_file_handle(URLContext *h) { HTTPContext *s = h->priv_data; return ffurl_get_file_handle(s->hd); } #define HTTP_CLASS(flavor) \ static const AVClass flavor ## _context_class = { \ 
.class_name = # flavor, \ .item_name = av_default_item_name, \ .option = options, \ .version = LIBAVUTIL_VERSION_INT, \ } #if CONFIG_HTTP_PROTOCOL HTTP_CLASS(http); const URLProtocol ff_http_protocol = { .name = "http", .url_open2 = http_open, .url_accept = http_accept, .url_handshake = http_handshake, .url_read = http_read, .url_write = http_write, .url_seek = http_seek, .url_close = http_close, .url_get_file_handle = http_get_file_handle, .url_shutdown = http_shutdown, .priv_data_size = sizeof(HTTPContext), .priv_data_class = &http_context_class, .flags = URL_PROTOCOL_FLAG_NETWORK, .default_whitelist = "http,https,tls,rtp,tcp,udp,crypto,httpproxy" }; #endif /* CONFIG_HTTP_PROTOCOL */ #if CONFIG_HTTPS_PROTOCOL HTTP_CLASS(https); const URLProtocol ff_https_protocol = { .name = "https", .url_open2 = http_open, .url_read = http_read, .url_write = http_write, .url_seek = http_seek, .url_close = http_close, .url_get_file_handle = http_get_file_handle, .url_shutdown = http_shutdown, .priv_data_size = sizeof(HTTPContext), .priv_data_class = &https_context_class, .flags = URL_PROTOCOL_FLAG_NETWORK, .default_whitelist = "http,https,tls,rtp,tcp,udp,crypto,httpproxy" }; #endif /* CONFIG_HTTPS_PROTOCOL */ #if CONFIG_HTTPPROXY_PROTOCOL static int http_proxy_close(URLContext *h) { HTTPContext *s = h->priv_data; if (s->hd) ffurl_closep(&s->hd); return 0; } static int http_proxy_open(URLContext *h, const char *uri, int flags) { HTTPContext *s = h->priv_data; char hostname[1024], hoststr[1024]; char auth[1024], pathbuf[1024], *path; char lower_url[100]; int port, ret = 0, attempts = 0; HTTPAuthType cur_auth_type; char *authstr; int new_loc; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port, pathbuf, sizeof(pathbuf), uri); ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL); path = pathbuf; if (*path == '/') path++; ff_url_join(lower_url, sizeof(lower_url), "tcp", NULL, 
hostname, port, NULL); redo: ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, NULL, h->protocol_whitelist, h->protocol_blacklist, h); if (ret < 0) return ret; authstr = ff_http_auth_create_response(&s->proxy_auth_state, auth, path, "CONNECT"); snprintf(s->buffer, sizeof(s->buffer), "CONNECT %s HTTP/1.1\r\n" "Host: %s\r\n" "Connection: close\r\n" "%s%s" "\r\n", path, hoststr, authstr ? "Proxy-" : "", authstr ? authstr : ""); av_freep(&authstr); if ((ret = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto fail; s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->filesize = -1; cur_auth_type = s->proxy_auth_state.auth_type; /* Note: This uses buffering, potentially reading more than the * HTTP header. If tunneling a protocol where the server starts * the conversation, we might buffer part of that here, too. * Reading that requires using the proper ffurl_read() function * on this URLContext, not using the fd directly (as the tls * protocol does). This shouldn't be an issue for tls though, * since the client starts the conversation there, so there * is no extra data that we might buffer up here. 
*/ ret = http_read_header(h, &new_loc); if (ret < 0) goto fail; attempts++; if (s->http_code == 407 && (cur_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) && s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 2) { ffurl_closep(&s->hd); goto redo; } if (s->http_code < 400) return 0; ret = ff_http_averror(s->http_code, AVERROR(EIO)); fail: http_proxy_close(h); return ret; } static int http_proxy_write(URLContext *h, const uint8_t *buf, int size) { HTTPContext *s = h->priv_data; return ffurl_write(s->hd, buf, size); } const URLProtocol ff_httpproxy_protocol = { .name = "httpproxy", .url_open = http_proxy_open, .url_read = http_buf_read, .url_write = http_proxy_write, .url_close = http_proxy_close, .url_get_file_handle = http_get_file_handle, .priv_data_size = sizeof(HTTPContext), .flags = URL_PROTOCOL_FLAG_NETWORK, }; #endif /* CONFIG_HTTPPROXY_PROTOCOL */
/* * HTTP protocol for ffmpeg client * Copyright (c) 2000, 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #if CONFIG_ZLIB #include <zlib.h> #endif /* CONFIG_ZLIB */ #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" #include "libavutil/time.h" #include "avformat.h" #include "http.h" #include "httpauth.h" #include "internal.h" #include "network.h" #include "os_support.h" #include "url.h" /* XXX: POST protocol is not completely implemented because ffmpeg uses * only a subset of it. */ /* The IO buffer size is unrelated to the max URL size in itself, but needs * to be large enough to fit the full request headers (including long * path names). */ #define BUFFER_SIZE MAX_URL_SIZE #define MAX_REDIRECTS 8 #define HTTP_SINGLE 1 #define HTTP_MUTLI 2 typedef enum { LOWER_PROTO, READ_HEADERS, WRITE_REPLY_HEADERS, FINISH }HandshakeState; typedef struct HTTPContext { const AVClass *class; URLContext *hd; unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end; int line_count; int http_code; /* Used if "Transfer-Encoding: chunked" otherwise -1. 
*/ uint64_t chunksize; uint64_t off, end_off, filesize; char *location; HTTPAuthState auth_state; HTTPAuthState proxy_auth_state; char *http_proxy; char *headers; char *mime_type; char *user_agent; #if FF_API_HTTP_USER_AGENT char *user_agent_deprecated; #endif char *content_type; /* Set if the server correctly handles Connection: close and will close * the connection after feeding us the content. */ int willclose; int seekable; /**< Control seekability, 0 = disable, 1 = enable, -1 = probe. */ int chunked_post; /* A flag which indicates if the end of chunked encoding has been sent. */ int end_chunked_post; /* A flag which indicates we have finished to read POST reply. */ int end_header; /* A flag which indicates if we use persistent connections. */ int multiple_requests; uint8_t *post_data; int post_datalen; int is_akamai; int is_mediagateway; char *cookies; ///< holds newline (\n) delimited Set-Cookie header field values (without the "Set-Cookie: " field name) /* A dictionary containing cookies keyed by cookie name */ AVDictionary *cookie_dict; int icy; /* how much data was read since the last ICY metadata packet */ uint64_t icy_data_read; /* after how many bytes of read data a new metadata packet will be found */ uint64_t icy_metaint; char *icy_metadata_headers; char *icy_metadata_packet; AVDictionary *metadata; #if CONFIG_ZLIB int compressed; z_stream inflate_stream; uint8_t *inflate_buffer; #endif /* CONFIG_ZLIB */ AVDictionary *chained_options; int send_expect_100; char *method; int reconnect; int reconnect_at_eof; int reconnect_streamed; int reconnect_delay; int reconnect_delay_max; int listen; char *resource; int reply_code; int is_multi_client; HandshakeState handshake_step; int is_connected_server; } HTTPContext; #define OFFSET(x) offsetof(HTTPContext, x) #define D AV_OPT_FLAG_DECODING_PARAM #define E AV_OPT_FLAG_ENCODING_PARAM #define DEFAULT_USER_AGENT "Lavf/" AV_STRINGIFY(LIBAVFORMAT_VERSION) static const AVOption options[] = { { "seekable", "control 
seekability of connection", OFFSET(seekable), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, D }, { "chunked_post", "use chunked transfer-encoding for posts", OFFSET(chunked_post), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E }, { "http_proxy", "set HTTP proxy to tunnel through", OFFSET(http_proxy), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "headers", "set custom HTTP headers, can override built in default headers", OFFSET(headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "content_type", "set a specific content type for the POST messages", OFFSET(content_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "user_agent", "override User-Agent header", OFFSET(user_agent), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #if FF_API_HTTP_USER_AGENT { "user-agent", "override User-Agent header", OFFSET(user_agent_deprecated), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #endif { "multiple_requests", "use persistent connections", OFFSET(multiple_requests), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D | E }, { "post_data", "set custom HTTP post data", OFFSET(post_data), AV_OPT_TYPE_BINARY, .flags = D | E }, { "mime_type", "export the MIME type", OFFSET(mime_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY }, { "cookies", "set cookies to be sent in applicable future requests, use newline delimited Set-Cookie HTTP field value syntax", OFFSET(cookies), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D }, { "icy", "request ICY metadata", OFFSET(icy), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, D }, { "icy_metadata_headers", "return ICY metadata headers", OFFSET(icy_metadata_headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "icy_metadata_packet", "return current ICY metadata packet", OFFSET(icy_metadata_packet), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "metadata", "metadata read from the bitstream", OFFSET(metadata), AV_OPT_TYPE_DICT, {0}, 0, 0, 
AV_OPT_FLAG_EXPORT }, { "auth_type", "HTTP authentication type", OFFSET(auth_state.auth_type), AV_OPT_TYPE_INT, { .i64 = HTTP_AUTH_NONE }, HTTP_AUTH_NONE, HTTP_AUTH_BASIC, D | E, "auth_type"}, { "none", "No auth method set, autodetect", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_NONE }, 0, 0, D | E, "auth_type"}, { "basic", "HTTP basic authentication", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_BASIC }, 0, 0, D | E, "auth_type"}, { "send_expect_100", "Force sending an Expect: 100-continue header for POST", OFFSET(send_expect_100), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "location", "The actual location of the data received", OFFSET(location), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "offset", "initial byte offset", OFFSET(off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "end_offset", "try to limit the request to bytes preceding this offset", OFFSET(end_off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "method", "Override the HTTP method or set the expected HTTP method from a client", OFFSET(method), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "reconnect", "auto reconnect after disconnect before EOF", OFFSET(reconnect), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_at_eof", "auto reconnect at EOF", OFFSET(reconnect_at_eof), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_streamed", "auto reconnect streamed / non seekable streams", OFFSET(reconnect_streamed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_delay_max", "max reconnect delay in seconds after which to give up", OFFSET(reconnect_delay_max), AV_OPT_TYPE_INT, { .i64 = 120 }, 0, UINT_MAX/1000/1000, D }, { "listen", "listen on HTTP", OFFSET(listen), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, D | E }, { "resource", "The resource requested by a client", OFFSET(resource), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E }, { "reply_code", "The http status code to return to a client", OFFSET(reply_code), AV_OPT_TYPE_INT, { .i64 = 200}, INT_MIN, 599, E}, 
{ NULL } }; static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location); static int http_read_header(URLContext *h, int *new_location); void ff_http_init_auth_state(URLContext *dest, const URLContext *src) { memcpy(&((HTTPContext *)dest->priv_data)->auth_state, &((HTTPContext *)src->priv_data)->auth_state, sizeof(HTTPAuthState)); memcpy(&((HTTPContext *)dest->priv_data)->proxy_auth_state, &((HTTPContext *)src->priv_data)->proxy_auth_state, sizeof(HTTPAuthState)); } static int http_open_cnx_internal(URLContext *h, AVDictionary **options) { const char *path, *proxy_path, *lower_proto = "tcp", *local_path; char hostname[1024], hoststr[1024], proto[10]; char auth[1024], proxyauth[1024] = ""; char path1[MAX_URL_SIZE]; char buf[1024], urlbuf[MAX_URL_SIZE]; int port, use_proxy, err, location_changed = 0; HTTPContext *s = h->priv_data; av_url_split(proto, sizeof(proto), auth, sizeof(auth), hostname, sizeof(hostname), &port, path1, sizeof(path1), s->location); ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL); proxy_path = s->http_proxy ? s->http_proxy : getenv("http_proxy"); use_proxy = !ff_http_match_no_proxy(getenv("no_proxy"), hostname) && proxy_path && av_strstart(proxy_path, "http://", NULL); if (!strcmp(proto, "https")) { lower_proto = "tls"; use_proxy = 0; if (port < 0) port = 443; } if (port < 0) port = 80; if (path1[0] == '\0') path = "/"; else path = path1; local_path = path; if (use_proxy) { /* Reassemble the request URL without auth string - we don't * want to leak the auth to the proxy. 
*/ ff_url_join(urlbuf, sizeof(urlbuf), proto, NULL, hostname, port, "%s", path1); path = urlbuf; av_url_split(NULL, 0, proxyauth, sizeof(proxyauth), hostname, sizeof(hostname), &port, NULL, 0, proxy_path); } ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL); if (!s->hd) { err = ffurl_open_whitelist(&s->hd, buf, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h); if (err < 0) return err; } err = http_connect(h, path, local_path, hoststr, auth, proxyauth, &location_changed); if (err < 0) return err; return location_changed; } /* return non zero if error */ static int http_open_cnx(URLContext *h, AVDictionary **options) { HTTPAuthType cur_auth_type, cur_proxy_auth_type; HTTPContext *s = h->priv_data; int location_changed, attempts = 0, redirects = 0; redo: av_dict_copy(options, s->chained_options, 0); cur_auth_type = s->auth_state.auth_type; cur_proxy_auth_type = s->auth_state.auth_type; location_changed = http_open_cnx_internal(h, options); if (location_changed < 0) goto fail; attempts++; if (s->http_code == 401) { if ((cur_auth_type == HTTP_AUTH_NONE || s->auth_state.stale) && s->auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if (s->http_code == 407) { if ((cur_proxy_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) && s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if ((s->http_code == 301 || s->http_code == 302 || s->http_code == 303 || s->http_code == 307) && location_changed == 1) { /* url moved, get next */ ffurl_closep(&s->hd); if (redirects++ >= MAX_REDIRECTS) return AVERROR(EIO); /* Restart the authentication process with the new target, which * might use a different auth mechanism. 
*/ memset(&s->auth_state, 0, sizeof(s->auth_state)); attempts = 0; location_changed = 0; goto redo; } return 0; fail: if (s->hd) ffurl_closep(&s->hd); if (location_changed < 0) return location_changed; return ff_http_averror(s->http_code, AVERROR(EIO)); } int ff_http_do_new_request(URLContext *h, const char *uri) { HTTPContext *s = h->priv_data; AVDictionary *options = NULL; int ret; s->off = 0; s->icy_data_read = 0; av_free(s->location); s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); ret = http_open_cnx(h, &options); av_dict_free(&options); return ret; } int ff_http_averror(int status_code, int default_averror) { switch (status_code) { case 400: return AVERROR_HTTP_BAD_REQUEST; case 401: return AVERROR_HTTP_UNAUTHORIZED; case 403: return AVERROR_HTTP_FORBIDDEN; case 404: return AVERROR_HTTP_NOT_FOUND; default: break; } if (status_code >= 400 && status_code <= 499) return AVERROR_HTTP_OTHER_4XX; else if (status_code >= 500) return AVERROR_HTTP_SERVER_ERROR; else return default_averror; } static int http_write_reply(URLContext* h, int status_code) { int ret, body = 0, reply_code, message_len; const char *reply_text, *content_type; HTTPContext *s = h->priv_data; char message[BUFFER_SIZE]; content_type = "text/plain"; if (status_code < 0) body = 1; switch (status_code) { case AVERROR_HTTP_BAD_REQUEST: case 400: reply_code = 400; reply_text = "Bad Request"; break; case AVERROR_HTTP_FORBIDDEN: case 403: reply_code = 403; reply_text = "Forbidden"; break; case AVERROR_HTTP_NOT_FOUND: case 404: reply_code = 404; reply_text = "Not Found"; break; case 200: reply_code = 200; reply_text = "OK"; content_type = s->content_type ? 
s->content_type : "application/octet-stream"; break; case AVERROR_HTTP_SERVER_ERROR: case 500: reply_code = 500; reply_text = "Internal server error"; break; default: return AVERROR(EINVAL); } if (body) { s->chunked_post = 0; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Content-Length: %"SIZE_SPECIFIER"\r\n" "%s" "\r\n" "%03d %s\r\n", reply_code, reply_text, content_type, strlen(reply_text) + 6, // 3 digit status code + space + \r\n s->headers ? s->headers : "", reply_code, reply_text); } else { s->chunked_post = 1; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Transfer-Encoding: chunked\r\n" "%s" "\r\n", reply_code, reply_text, content_type, s->headers ? s->headers : ""); } av_log(h, AV_LOG_TRACE, "HTTP reply header: \n%s----\n", message); if ((ret = ffurl_write(s->hd, message, message_len)) < 0) return ret; return 0; } static void handle_http_errors(URLContext *h, int error) { av_assert0(error < 0); http_write_reply(h, error); } static int http_handshake(URLContext *c) { int ret, err, new_location; HTTPContext *ch = c->priv_data; URLContext *cl = ch->hd; switch (ch->handshake_step) { case LOWER_PROTO: av_log(c, AV_LOG_TRACE, "Lower protocol\n"); if ((ret = ffurl_handshake(cl)) > 0) return 2 + ret; if (ret < 0) return ret; ch->handshake_step = READ_HEADERS; ch->is_connected_server = 1; return 2; case READ_HEADERS: av_log(c, AV_LOG_TRACE, "Read headers\n"); if ((err = http_read_header(c, &new_location)) < 0) { handle_http_errors(c, err); return err; } ch->handshake_step = WRITE_REPLY_HEADERS; return 1; case WRITE_REPLY_HEADERS: av_log(c, AV_LOG_TRACE, "Reply code: %d\n", ch->reply_code); if ((err = http_write_reply(c, ch->reply_code)) < 0) return err; ch->handshake_step = FINISH; return 1; case FINISH: return 0; } // this should never be reached. 
return AVERROR(EINVAL); } static int http_listen(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; char hostname[1024], proto[10]; char lower_url[100]; const char *lower_proto = "tcp"; int port; av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri); if (!strcmp(proto, "https")) lower_proto = "tls"; ff_url_join(lower_url, sizeof(lower_url), lower_proto, NULL, hostname, port, NULL); if ((ret = av_dict_set_int(options, "listen", s->listen, 0)) < 0) goto fail; if ((ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h )) < 0) goto fail; s->handshake_step = LOWER_PROTO; if (s->listen == HTTP_SINGLE) { /* single client */ s->reply_code = 200; while ((ret = http_handshake(h)) > 0); } fail: av_dict_free(&s->chained_options); return ret; } static int http_open(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; s->filesize = UINT64_MAX; s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); if (options) av_dict_copy(&s->chained_options, *options, 0); if (s->headers) { int len = strlen(s->headers); if (len < 2 || strcmp("\r\n", s->headers + len - 2)) { av_log(h, AV_LOG_WARNING, "No trailing CRLF found in HTTP header.\n"); ret = av_reallocp(&s->headers, len + 3); if (ret < 0) return ret; s->headers[len] = '\r'; s->headers[len + 1] = '\n'; s->headers[len + 2] = '\0'; } } if (s->listen) { return http_listen(h, uri, flags, options); } ret = http_open_cnx(h, options); if (ret < 0) av_dict_free(&s->chained_options); return ret; } static int http_accept(URLContext *s, URLContext **c) { int ret; HTTPContext *sc = s->priv_data; HTTPContext *cc; URLContext *sl = sc->hd; URLContext *cl = NULL; av_assert0(sc->listen); if ((ret = ffurl_alloc(c, s->filename, 
s->flags, &sl->interrupt_callback)) < 0) goto fail; cc = (*c)->priv_data; if ((ret = ffurl_accept(sl, &cl)) < 0) goto fail; cc->hd = cl; cc->is_multi_client = 1; fail: return ret; } static int http_getc(HTTPContext *s) { int len; if (s->buf_ptr >= s->buf_end) { len = ffurl_read(s->hd, s->buffer, BUFFER_SIZE); if (len < 0) { return len; } else if (len == 0) { return AVERROR_EOF; } else { s->buf_ptr = s->buffer; s->buf_end = s->buffer + len; } } return *s->buf_ptr++; } static int http_get_line(HTTPContext *s, char *line, int line_size) { int ch; char *q; q = line; for (;;) { ch = http_getc(s); if (ch < 0) return ch; if (ch == '\n') { /* process line */ if (q > line && q[-1] == '\r') q--; *q = '\0'; return 0; } else { if ((q - line) < line_size - 1) *q++ = ch; } } } static int check_http_code(URLContext *h, int http_code, const char *end) { HTTPContext *s = h->priv_data; /* error codes are 4xx and 5xx, but regard 401 as a success, so we * don't abort until all headers have been parsed. */ if (http_code >= 400 && http_code < 600 && (http_code != 401 || s->auth_state.auth_type != HTTP_AUTH_NONE) && (http_code != 407 || s->proxy_auth_state.auth_type != HTTP_AUTH_NONE)) { end += strspn(end, SPACE_CHARS); av_log(h, AV_LOG_WARNING, "HTTP error %d %s\n", http_code, end); return ff_http_averror(http_code, AVERROR(EIO)); } return 0; } static int parse_location(HTTPContext *s, const char *p) { char redirected_location[MAX_URL_SIZE], *new_loc; ff_make_absolute_url(redirected_location, sizeof(redirected_location), s->location, p); new_loc = av_strdup(redirected_location); if (!new_loc) return AVERROR(ENOMEM); av_free(s->location); s->location = new_loc; return 0; } /* "bytes $from-$to/$document_size" */ static void parse_content_range(URLContext *h, const char *p) { HTTPContext *s = h->priv_data; const char *slash; if (!strncmp(p, "bytes ", 6)) { p += 6; s->off = strtoull(p, NULL, 10); if ((slash = strchr(p, '/')) && strlen(slash) > 0) s->filesize = strtoull(slash + 1, NULL, 10); 
} if (s->seekable == -1 && (!s->is_akamai || s->filesize != 2147483647)) h->is_streamed = 0; /* we _can_ in fact seek */ } static int parse_content_encoding(URLContext *h, const char *p) { if (!av_strncasecmp(p, "gzip", 4) || !av_strncasecmp(p, "deflate", 7)) { #if CONFIG_ZLIB HTTPContext *s = h->priv_data; s->compressed = 1; inflateEnd(&s->inflate_stream); if (inflateInit2(&s->inflate_stream, 32 + 15) != Z_OK) { av_log(h, AV_LOG_WARNING, "Error during zlib initialisation: %s\n", s->inflate_stream.msg); return AVERROR(ENOSYS); } if (zlibCompileFlags() & (1 << 17)) { av_log(h, AV_LOG_WARNING, "Your zlib was compiled without gzip support.\n"); return AVERROR(ENOSYS); } #else av_log(h, AV_LOG_WARNING, "Compressed (%s) content, need zlib with gzip support\n", p); return AVERROR(ENOSYS); #endif /* CONFIG_ZLIB */ } else if (!av_strncasecmp(p, "identity", 8)) { // The normal, no-encoding case (although servers shouldn't include // the header at all if this is the case). } else { av_log(h, AV_LOG_WARNING, "Unknown content coding: %s\n", p); } return 0; } // Concat all Icy- header lines static int parse_icy(HTTPContext *s, const char *tag, const char *p) { int len = 4 + strlen(p) + strlen(tag); int is_first = !s->icy_metadata_headers; int ret; av_dict_set(&s->metadata, tag, p, 0); if (s->icy_metadata_headers) len += strlen(s->icy_metadata_headers); if ((ret = av_reallocp(&s->icy_metadata_headers, len)) < 0) return ret; if (is_first) *s->icy_metadata_headers = '\0'; av_strlcatf(s->icy_metadata_headers, len, "%s: %s\n", tag, p); return 0; } static int parse_cookie(HTTPContext *s, const char *p, AVDictionary **cookies) { char *eql, *name; // duplicate the cookie name (dict will dupe the value) if (!(eql = strchr(p, '='))) return AVERROR(EINVAL); if (!(name = av_strndup(p, eql - p))) return AVERROR(ENOMEM); // add the cookie to the dictionary av_dict_set(cookies, name, eql, AV_DICT_DONT_STRDUP_KEY); return 0; } static int cookie_string(AVDictionary *dict, char **cookies) { 
AVDictionaryEntry *e = NULL; int len = 1; // determine how much memory is needed for the cookies string while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) len += strlen(e->key) + strlen(e->value) + 1; // reallocate the cookies e = NULL; if (*cookies) av_free(*cookies); *cookies = av_malloc(len); if (!*cookies) return AVERROR(ENOMEM); *cookies[0] = '\0'; // write out the cookies while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) av_strlcatf(*cookies, len, "%s%s\n", e->key, e->value); return 0; } static int process_line(URLContext *h, char *line, int line_count, int *new_location) { HTTPContext *s = h->priv_data; const char *auto_method = h->flags & AVIO_FLAG_READ ? "POST" : "GET"; char *tag, *p, *end, *method, *resource, *version; int ret; /* end of header */ if (line[0] == '\0') { s->end_header = 1; return 0; } p = line; if (line_count == 0) { if (s->is_connected_server) { // HTTP method method = p; while (*p && !av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Received method: %s\n", method); if (s->method) { if (av_strcasecmp(s->method, method)) { av_log(h, AV_LOG_ERROR, "Received and expected HTTP method do not match. 
(%s expected, %s received)\n", s->method, method); return ff_http_averror(400, AVERROR(EIO)); } } else { // use autodetected HTTP method to expect av_log(h, AV_LOG_TRACE, "Autodetected %s HTTP method\n", auto_method); if (av_strcasecmp(auto_method, method)) { av_log(h, AV_LOG_ERROR, "Received and autodetected HTTP method did not match " "(%s autodetected %s received)\n", auto_method, method); return ff_http_averror(400, AVERROR(EIO)); } if (!(s->method = av_strdup(method))) return AVERROR(ENOMEM); } // HTTP resource while (av_isspace(*p)) p++; resource = p; while (!av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Requested resource: %s\n", resource); if (!(s->resource = av_strdup(resource))) return AVERROR(ENOMEM); // HTTP version while (av_isspace(*p)) p++; version = p; while (*p && !av_isspace(*p)) p++; *p = '\0'; if (av_strncasecmp(version, "HTTP/", 5)) { av_log(h, AV_LOG_ERROR, "Malformed HTTP version string.\n"); return ff_http_averror(400, AVERROR(EIO)); } av_log(h, AV_LOG_TRACE, "HTTP version string: %s\n", version); } else { while (!av_isspace(*p) && *p != '\0') p++; while (av_isspace(*p)) p++; s->http_code = strtol(p, &end, 10); av_log(h, AV_LOG_TRACE, "http_code=%d\n", s->http_code); if ((ret = check_http_code(h, s->http_code, end)) < 0) return ret; } } else { while (*p != '\0' && *p != ':') p++; if (*p != ':') return 1; *p = '\0'; tag = line; p++; while (av_isspace(*p)) p++; if (!av_strcasecmp(tag, "Location")) { if ((ret = parse_location(s, p)) < 0) return ret; *new_location = 1; } else if (!av_strcasecmp(tag, "Content-Length") && s->filesize == UINT64_MAX) { s->filesize = strtoull(p, NULL, 10); } else if (!av_strcasecmp(tag, "Content-Range")) { parse_content_range(h, p); } else if (!av_strcasecmp(tag, "Accept-Ranges") && !strncmp(p, "bytes", 5) && s->seekable == -1) { h->is_streamed = 0; } else if (!av_strcasecmp(tag, "Transfer-Encoding") && !av_strncasecmp(p, "chunked", 7)) { s->filesize = UINT64_MAX; s->chunksize = 0; } else if 
(!av_strcasecmp(tag, "WWW-Authenticate")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Authentication-Info")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Proxy-Authenticate")) { ff_http_auth_handle_header(&s->proxy_auth_state, tag, p); } else if (!av_strcasecmp(tag, "Connection")) { if (!strcmp(p, "close")) s->willclose = 1; } else if (!av_strcasecmp(tag, "Server")) { if (!av_strcasecmp(p, "AkamaiGHost")) { s->is_akamai = 1; } else if (!av_strncasecmp(p, "MediaGateway", 12)) { s->is_mediagateway = 1; } } else if (!av_strcasecmp(tag, "Content-Type")) { av_free(s->mime_type); s->mime_type = av_strdup(p); } else if (!av_strcasecmp(tag, "Set-Cookie")) { if (parse_cookie(s, p, &s->cookie_dict)) av_log(h, AV_LOG_WARNING, "Unable to parse '%s'\n", p); } else if (!av_strcasecmp(tag, "Icy-MetaInt")) { s->icy_metaint = strtoull(p, NULL, 10); } else if (!av_strncasecmp(tag, "Icy-", 4)) { if ((ret = parse_icy(s, tag, p)) < 0) return ret; } else if (!av_strcasecmp(tag, "Content-Encoding")) { if ((ret = parse_content_encoding(h, p)) < 0) return ret; } } return 1; } /** * Create a string containing cookie values for use as a HTTP cookie header * field value for a particular path and domain from the cookie values stored in * the HTTP protocol context. The cookie string is stored in *cookies. * * @return a negative value if an error condition occurred, 0 otherwise */ static int get_cookies(HTTPContext *s, char **cookies, const char *path, const char *domain) { // cookie strings will look like Set-Cookie header field values. Multiple // Set-Cookie fields will result in multiple values delimited by a newline int ret = 0; char *next, *cookie, *set_cookies = av_strdup(s->cookies), *cset_cookies = set_cookies; if (!set_cookies) return AVERROR(EINVAL); // destroy any cookies in the dictionary. 
av_dict_free(&s->cookie_dict); *cookies = NULL; while ((cookie = av_strtok(set_cookies, "\n", &next))) { int domain_offset = 0; char *param, *next_param, *cdomain = NULL, *cpath = NULL, *cvalue = NULL; set_cookies = NULL; // store the cookie in a dict in case it is updated in the response if (parse_cookie(s, cookie, &s->cookie_dict)) av_log(s, AV_LOG_WARNING, "Unable to parse '%s'\n", cookie); while ((param = av_strtok(cookie, "; ", &next_param))) { if (cookie) { // first key-value pair is the actual cookie value cvalue = av_strdup(param); cookie = NULL; } else if (!av_strncasecmp("path=", param, 5)) { av_free(cpath); cpath = av_strdup(&param[5]); } else if (!av_strncasecmp("domain=", param, 7)) { // if the cookie specifies a sub-domain, skip the leading dot thereby // supporting URLs that point to sub-domains and the master domain int leading_dot = (param[7] == '.'); av_free(cdomain); cdomain = av_strdup(&param[7+leading_dot]); } else { // ignore unknown attributes } } if (!cdomain) cdomain = av_strdup(domain); // ensure all of the necessary values are valid if (!cdomain || !cpath || !cvalue) { av_log(s, AV_LOG_WARNING, "Invalid cookie found, no value, path or domain specified\n"); goto done_cookie; } // check if the request path matches the cookie path if (av_strncasecmp(path, cpath, strlen(cpath))) goto done_cookie; // the domain should be at least the size of our cookie domain domain_offset = strlen(domain) - strlen(cdomain); if (domain_offset < 0) goto done_cookie; // match the cookie domain if (av_strcasecmp(&domain[domain_offset], cdomain)) goto done_cookie; // cookie parameters match, so copy the value if (!*cookies) { if (!(*cookies = av_strdup(cvalue))) { ret = AVERROR(ENOMEM); goto done_cookie; } } else { char *tmp = *cookies; size_t str_size = strlen(cvalue) + strlen(*cookies) + 3; if (!(*cookies = av_malloc(str_size))) { ret = AVERROR(ENOMEM); goto done_cookie; } snprintf(*cookies, str_size, "%s; %s", tmp, cvalue); av_free(tmp); } done_cookie: 
av_freep(&cdomain); av_freep(&cpath); av_freep(&cvalue); if (ret < 0) { if (*cookies) av_freep(cookies); av_free(cset_cookies); return ret; } } av_free(cset_cookies); return 0; } static inline int has_header(const char *str, const char *header) { /* header + 2 to skip over CRLF prefix. (make sure you have one!) */ if (!str) return 0; return av_stristart(str, header + 2, NULL) || av_stristr(str, header); } static int http_read_header(URLContext *h, int *new_location) { HTTPContext *s = h->priv_data; char line[MAX_URL_SIZE]; int err = 0; s->chunksize = UINT64_MAX; for (;;) { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; av_log(h, AV_LOG_TRACE, "header='%s'\n", line); err = process_line(h, line, s->line_count, new_location); if (err < 0) return err; if (err == 0) break; s->line_count++; } if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000) h->is_streamed = 1; /* we can in fact _not_ seek */ // add any new cookies into the existing cookie string cookie_string(s->cookie_dict, &s->cookies); av_dict_free(&s->cookie_dict); return err; } static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char headers[HTTP_HEADERS_SIZE] = ""; char *authstr = NULL, *proxyauthstr = NULL; uint64_t off = s->off; int len = 0; const char *method; int send_expect_100 = 0; /* send http header */ post = h->flags & AVIO_FLAG_WRITE; if (s->post_data) { /* force POST method and disable chunked encoding when * custom HTTP post data is set */ post = 1; s->chunked_post = 0; } if (s->method) method = s->method; else method = post ? 
"POST" : "GET"; authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path, method); proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth, local_path, method); if (post && !s->post_data) { send_expect_100 = s->send_expect_100; /* The user has supplied authentication but we don't know the auth type, * send Expect: 100-continue to get the 401 response including the * WWW-Authenticate header, or an 100 continue if no auth actually * is needed. */ if (auth && *auth && s->auth_state.auth_type == HTTP_AUTH_NONE && s->http_code != 401) send_expect_100 = 1; } #if FF_API_HTTP_USER_AGENT if (strcmp(s->user_agent_deprecated, DEFAULT_USER_AGENT)) { av_log(s, AV_LOG_WARNING, "the user-agent option is deprecated, please use user_agent option\n"); s->user_agent = av_strdup(s->user_agent_deprecated); } #endif /* set default headers if needed */ if (!has_header(s->headers, "\r\nUser-Agent: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "User-Agent: %s\r\n", s->user_agent); if (!has_header(s->headers, "\r\nAccept: ")) len += av_strlcpy(headers + len, "Accept: */*\r\n", sizeof(headers) - len); // Note: we send this on purpose even when s->off is 0 when we're probing, // since it allows us to detect more reliably if a (non-conforming) // server supports seeking by analysing the reply headers. 
if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Range: bytes=%"PRIu64"-", s->off); if (s->end_off) len += av_strlcatf(headers + len, sizeof(headers) - len, "%"PRId64, s->end_off - 1); len += av_strlcpy(headers + len, "\r\n", sizeof(headers) - len); } if (send_expect_100 && !has_header(s->headers, "\r\nExpect: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Expect: 100-continue\r\n"); if (!has_header(s->headers, "\r\nConnection: ")) { if (s->multiple_requests) len += av_strlcpy(headers + len, "Connection: keep-alive\r\n", sizeof(headers) - len); else len += av_strlcpy(headers + len, "Connection: close\r\n", sizeof(headers) - len); } if (!has_header(s->headers, "\r\nHost: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Host: %s\r\n", hoststr); if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Length: %d\r\n", s->post_datalen); if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Type: %s\r\n", s->content_type); if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) { char *cookies = NULL; if (!get_cookies(s, &cookies, path, hoststr) && cookies) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Cookie: %s\r\n", cookies); av_free(cookies); } } if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy) len += av_strlcatf(headers + len, sizeof(headers) - len, "Icy-MetaData: %d\r\n", 1); /* now add in custom headers */ if (s->headers) av_strlcpy(headers + len, s->headers, sizeof(headers) - len); snprintf(s->buffer, sizeof(s->buffer), "%s %s HTTP/1.1\r\n" "%s" "%s" "%s" "%s%s" "\r\n", method, path, post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "", headers, authstr ? authstr : "", proxyauthstr ? "Proxy-" : "", proxyauthstr ? 
proxyauthstr : ""); av_log(h, AV_LOG_DEBUG, "request: %s\n", s->buffer); if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto done; if (s->post_data) if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0) goto done; /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->icy_data_read = 0; s->filesize = UINT64_MAX; s->willclose = 0; s->end_chunked_post = 0; s->end_header = 0; if (post && !s->post_data && !send_expect_100) { /* Pretend that it did work. We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; err = 0; goto done; } /* wait for header */ err = http_read_header(h, new_location); if (err < 0) goto done; if (*new_location) s->off = off; err = (off == s->off) ? 0 : -1; done: av_freep(&authstr); av_freep(&proxyauthstr); return err; } static int http_buf_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int len; /* read bytes from input buffer first */ len = s->buf_end - s->buf_ptr; if (len > 0) { if (len > size) len = size; memcpy(buf, s->buf_ptr, len); s->buf_ptr += len; } else { uint64_t target_end = s->end_off ? 
s->end_off : s->filesize; if ((!s->willclose || s->chunksize == UINT64_MAX) && s->off >= target_end) return AVERROR_EOF; len = ffurl_read(s->hd, buf, size); if (!len && (!s->willclose || s->chunksize == UINT64_MAX) && s->off < target_end) { av_log(h, AV_LOG_ERROR, "Stream ends prematurely at %"PRIu64", should be %"PRIu64"\n", s->off, target_end ); return AVERROR(EIO); } } if (len > 0) { s->off += len; if (s->chunksize > 0) s->chunksize -= len; } return len; } #if CONFIG_ZLIB #define DECOMPRESS_BUF_SIZE (256 * 1024) static int http_buf_read_compressed(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int ret; if (!s->inflate_buffer) { s->inflate_buffer = av_malloc(DECOMPRESS_BUF_SIZE); if (!s->inflate_buffer) return AVERROR(ENOMEM); } if (s->inflate_stream.avail_in == 0) { int read = http_buf_read(h, s->inflate_buffer, DECOMPRESS_BUF_SIZE); if (read <= 0) return read; s->inflate_stream.next_in = s->inflate_buffer; s->inflate_stream.avail_in = read; } s->inflate_stream.avail_out = size; s->inflate_stream.next_out = buf; ret = inflate(&s->inflate_stream, Z_SYNC_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) av_log(h, AV_LOG_WARNING, "inflate return value: %d, %s\n", ret, s->inflate_stream.msg); return size - s->inflate_stream.avail_out; } #endif /* CONFIG_ZLIB */ static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect); static int http_read_stream(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int err, new_location, read_ret; int64_t seek_ret; if (!s->hd) return AVERROR_EOF; if (s->end_chunked_post && !s->end_header) { err = http_read_header(h, &new_location); if (err < 0) return err; } if (s->chunksize != UINT64_MAX) { if (!s->chunksize) { char line[32]; do { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; } while (!*line); /* skip CR LF from last chunk */ s->chunksize = strtoull(line, NULL, 16); av_log(h, AV_LOG_TRACE, "Chunked encoding data size: %"PRIu64"'\n", 
s->chunksize); if (!s->chunksize) return 0; else if (s->chunksize == UINT64_MAX) { av_log(h, AV_LOG_ERROR, "Invalid chunk size %"PRIu64"\n", s->chunksize); return AVERROR(EINVAL); } } size = FFMIN(size, s->chunksize); } #if CONFIG_ZLIB if (s->compressed) return http_buf_read_compressed(h, buf, size); #endif /* CONFIG_ZLIB */ read_ret = http_buf_read(h, buf, size); if ( (read_ret < 0 && s->reconnect && (!h->is_streamed || s->reconnect_streamed) && s->filesize > 0 && s->off < s->filesize) || (read_ret == 0 && s->reconnect_at_eof && (!h->is_streamed || s->reconnect_streamed))) { uint64_t target = h->is_streamed ? 0 : s->off; if (s->reconnect_delay > s->reconnect_delay_max) return AVERROR(EIO); av_log(h, AV_LOG_INFO, "Will reconnect at %"PRIu64" error=%s.\n", s->off, av_err2str(read_ret)); av_usleep(1000U*1000*s->reconnect_delay); s->reconnect_delay = 1 + 2*s->reconnect_delay; seek_ret = http_seek_internal(h, target, SEEK_SET, 1); if (seek_ret != target) { av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRIu64".\n", target); return read_ret; } read_ret = http_buf_read(h, buf, size); } else s->reconnect_delay = 0; return read_ret; } // Like http_read_stream(), but no short reads. // Assumes partial reads are an error. 
/* Read exactly 'size' bytes through http_read_stream(), looping over short
 * reads. Returns 'size' on success or the first negative error; any bytes
 * already copied before the error are not reported to the caller. */
static int http_read_stream_all(URLContext *h, uint8_t *buf, int size)
{
    int pos = 0;
    while (pos < size) {
        int len = http_read_stream(h, buf + pos, size - pos);
        if (len < 0)
            return len;
        pos += len;
    }
    return pos;
}

/* Parse an ICY metadata packet of the form key='value';key='value';...
 * The buffer is modified in place (the quotes/semicolons are overwritten
 * with NULs) and each pair is stored into s->metadata. Parsing stops at
 * the first pair that is not properly delimited. */
static void update_metadata(HTTPContext *s, char *data)
{
    char *key;
    char *val;
    char *end;
    char *next = data;

    while (*next) {
        key = next;
        val = strstr(key, "='");
        if (!val)
            break;
        end = strstr(val, "';");
        if (!end)
            break;

        *val = '\0';   /* terminate the key */
        *end = '\0';   /* terminate the value */
        val += 2;      /* skip the  ='  delimiter */

        av_dict_set(&s->metadata, key, val, 0);

        next = end + 2; /* continue after the  ';  delimiter */
    }
}

/* Handle ICY (SHOUTcast) interleaved metadata: consume the metadata packet
 * when one is due, then return how many plain stream bytes (at most 'size')
 * may be read before the next metadata packet. */
static int store_icy(URLContext *h, int size)
{
    HTTPContext *s = h->priv_data;
    /* until next metadata packet */
    uint64_t remaining;

    /* icy_data_read must never exceed the advertised metadata interval */
    if (s->icy_metaint < s->icy_data_read)
        return AVERROR_INVALIDDATA;
    remaining = s->icy_metaint - s->icy_data_read;

    if (!remaining) {
        /* The metadata packet is variable sized. It has a 1 byte header
         * which sets the length of the packet (divided by 16). If it's 0,
         * the metadata doesn't change. After the packet, icy_metaint bytes
         * of normal data follows. */
        uint8_t ch;
        int len = http_read_stream_all(h, &ch, 1);
        if (len < 0)
            return len;
        if (ch > 0) {
            char data[255 * 16 + 1];
            int ret;
            len = ch * 16;
            ret = http_read_stream_all(h, data, len);
            if (ret < 0)
                return ret;
            /* NOTE(review): data[len + 1] leaves data[len] unwritten and,
             * when ch == 255, indexes data[4081] in a 4081-byte buffer —
             * this looks one past the end; data[len] = 0 appears intended.
             * Confirm against upstream before changing. */
            data[len + 1] = 0;
            if ((ret = av_opt_set(s, "icy_metadata_packet", data, 0)) < 0)
                return ret;
            update_metadata(s, data);
        }
        s->icy_data_read = 0;
        remaining = s->icy_metaint;
    }

    return FFMIN(size, remaining);
}

/* URLProtocol read callback: strips ICY metadata when present, otherwise a
 * plain pass-through to http_read_stream(). Tracks icy_data_read so
 * store_icy() knows when the next metadata packet is due. */
static int http_read(URLContext *h, uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;

    if (s->icy_metaint > 0) {
        /* clamp the read so it stops at the next metadata packet */
        size = store_icy(h, size);
        if (size < 0)
            return size;
    }

    size = http_read_stream(h, buf, size);
    if (size > 0)
        s->icy_data_read += size;
    return size;
}

/* used only when posting data */
static int http_write(URLContext *h, const uint8_t *buf, int size)
{
    char temp[11] = ""; /* 32-bit hex + CRLF + nul */
    int ret;
    char crlf[] = "\r\n";
    HTTPContext *s = h->priv_data;

    if (!s->chunked_post) {
        /* non-chunked data is sent without any special encoding */
        return ffurl_write(s->hd, buf, size);
    }

    /* silently ignore zero-size data since chunk encoding that would
     * signal EOF */
    if (size > 0) {
        /* upload data using chunked encoding:
         * <size-in-hex>CRLF <payload> CRLF */
        snprintf(temp, sizeof(temp), "%x\r\n", size);

        if ((ret = ffurl_write(s->hd, temp, strlen(temp))) < 0 ||
            (ret = ffurl_write(s->hd, buf, size)) < 0 ||
            (ret = ffurl_write(s->hd, crlf, sizeof(crlf) - 1)) < 0)
            return ret;
    }
    return size;
}

/* Terminate a chunked POST by sending the zero-length final chunk.
 * Safe to call when chunked encoding is not in use (no-op then). */
static int http_shutdown(URLContext *h, int flags)
{
    int ret = 0;
    char footer[] = "0\r\n\r\n";
    HTTPContext *s = h->priv_data;

    /* signal end of chunked encoding if used */
    if (((flags & AVIO_FLAG_WRITE) && s->chunked_post) ||
        ((flags & AVIO_FLAG_READ) && s->chunked_post && s->listen)) {
        ret = ffurl_write(s->hd, footer, sizeof(footer) - 1);
        /* a positive byte count means success; normalize it to 0 */
        ret = ret > 0 ? 0 : ret;
        s->end_chunked_post = 1;
    }

    return ret;
}

/* URLProtocol close callback: tears down zlib state, finishes any pending
 * chunked POST, closes the transport and frees the chained options. */
static int http_close(URLContext *h)
{
    int ret = 0;
    HTTPContext *s = h->priv_data;

#if CONFIG_ZLIB
    inflateEnd(&s->inflate_stream);
    av_freep(&s->inflate_buffer);
#endif /* CONFIG_ZLIB */

    if (!s->end_chunked_post)
        /* Close the write direction by sending the end of chunked encoding. */
        ret = http_shutdown(h, h->flags);

    if (s->hd)
        ffurl_closep(&s->hd);
    av_dict_free(&s->chained_options);
    return ret;
}

/* Seek by reconnecting with a new Range request. The old connection and
 * buffered bytes are kept so the state can be restored if the reconnect
 * fails. force_reconnect bypasses the "already at that offset" shortcut
 * (used by the read-path reconnect logic). */
static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect)
{
    HTTPContext *s = h->priv_data;
    URLContext *old_hd = s->hd;
    uint64_t old_off = s->off;
    uint8_t old_buf[BUFFER_SIZE];
    int old_buf_size, ret;
    AVDictionary *options = NULL;

    if (whence == AVSEEK_SIZE)
        return s->filesize;
    else if (!force_reconnect &&
             ((whence == SEEK_CUR && off == 0) ||
              (whence == SEEK_SET && off == s->off)))
        return s->off;
    else if ((s->filesize == UINT64_MAX && whence == SEEK_END))
        /* SEEK_END is impossible while the size is still unknown */
        return AVERROR(ENOSYS);

    if (whence == SEEK_CUR)
        off += s->off;
    else if (whence == SEEK_END)
        off += s->filesize;
    else if (whence != SEEK_SET)
        return AVERROR(EINVAL);
    if (off < 0)
        return AVERROR(EINVAL);
    s->off = off;

    if (s->off && h->is_streamed)
        return AVERROR(ENOSYS);

    /* we save the old context in case the seek fails */
    old_buf_size = s->buf_end - s->buf_ptr;
    memcpy(old_buf, s->buf_ptr, old_buf_size);
    s->hd = NULL;

    /* if it fails, continue on old connection */
    if ((ret = http_open_cnx(h, &options)) < 0) {
        av_dict_free(&options);
        memcpy(s->buffer, old_buf, old_buf_size);
        s->buf_ptr = s->buffer;
        s->buf_end = s->buffer + old_buf_size;
        s->hd = old_hd;
        s->off = old_off;
        return ret;
    }
    av_dict_free(&options);
    ffurl_close(old_hd);
    return off;
}

/* URLProtocol seek callback: plain seek, never forces a reconnect. */
static int64_t http_seek(URLContext *h, int64_t off, int whence)
{
    return http_seek_internal(h, off, whence, 0);
}

/* Expose the underlying transport's file descriptor. */
static int http_get_file_handle(URLContext *h)
{
    HTTPContext *s = h->priv_data;
    return ffurl_get_file_handle(s->hd);
}

/* Declare the AVClass for one protocol flavor (http / https), binding it
 * to the shared 'options' table. */
#define HTTP_CLASS(flavor) \
static const AVClass flavor ## _context_class = { \
    .class_name = # flavor, \
    .item_name  = av_default_item_name, \
    .option     = options, \
    .version    = LIBAVUTIL_VERSION_INT, \
}

#if CONFIG_HTTP_PROTOCOL
HTTP_CLASS(http);

const URLProtocol ff_http_protocol = {
    .name                = "http",
    .url_open2           = http_open,
    .url_accept          = http_accept,
    .url_handshake       = http_handshake,
    .url_read            = http_read,
    .url_write           = http_write,
    .url_seek            = http_seek,
    .url_close           = http_close,
    .url_get_file_handle = http_get_file_handle,
    .url_shutdown        = http_shutdown,
    .priv_data_size      = sizeof(HTTPContext),
    .priv_data_class     = &http_context_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
    .default_whitelist   = "http,https,tls,rtp,tcp,udp,crypto,httpproxy"
};
#endif /* CONFIG_HTTP_PROTOCOL */

#if CONFIG_HTTPS_PROTOCOL
HTTP_CLASS(https);

const URLProtocol ff_https_protocol = {
    .name                = "https",
    .url_open2           = http_open,
    .url_read            = http_read,
    .url_write           = http_write,
    .url_seek            = http_seek,
    .url_close           = http_close,
    .url_get_file_handle = http_get_file_handle,
    .url_shutdown        = http_shutdown,
    .priv_data_size      = sizeof(HTTPContext),
    .priv_data_class     = &https_context_class,
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
    .default_whitelist   = "http,https,tls,rtp,tcp,udp,crypto,httpproxy"
};
#endif /* CONFIG_HTTPS_PROTOCOL */

#if CONFIG_HTTPPROXY_PROTOCOL
/* Close the proxy tunnel's transport connection. */
static int http_proxy_close(URLContext *h)
{
    HTTPContext *s = h->priv_data;
    if (s->hd)
        ffurl_closep(&s->hd);
    return 0;
}

/* Establish a CONNECT tunnel through an HTTP proxy. Retries once with
 * proxy authentication if the first attempt gets a 407. */
static int http_proxy_open(URLContext *h, const char *uri, int flags)
{
    HTTPContext *s = h->priv_data;
    char hostname[1024], hoststr[1024];
    char auth[1024], pathbuf[1024], *path;
    char lower_url[100];
    int port, ret = 0, attempts = 0;
    HTTPAuthType cur_auth_type;
    char *authstr;
    int new_loc;

    if( s->seekable == 1 )
        h->is_streamed = 0;
    else
        h->is_streamed = 1;

    av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                 pathbuf, sizeof(pathbuf), uri);
    ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);
    path = pathbuf;
    if (*path == '/')
        path++;

    ff_url_join(lower_url, sizeof(lower_url), "tcp", NULL, hostname, port,
                NULL);

redo:
    ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE,
                               &h->interrupt_callback, NULL,
                               h->protocol_whitelist, h->protocol_blacklist, h);
    if (ret < 0)
        return ret;

    authstr = ff_http_auth_create_response(&s->proxy_auth_state, auth,
                                           path, "CONNECT");
    snprintf(s->buffer, sizeof(s->buffer),
             "CONNECT %s HTTP/1.1\r\n"
             "Host: %s\r\n"
             "Connection: close\r\n"
             "%s%s"
             "\r\n",
             path,
             hoststr,
             authstr ? "Proxy-" : "", authstr ? authstr : "");
    av_freep(&authstr);

    if ((ret = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0)
        goto fail;

    /* reset the header-parsing state before reading the proxy's reply */
    s->buf_ptr    = s->buffer;
    s->buf_end    = s->buffer;
    s->line_count = 0;
    s->filesize   = UINT64_MAX;
    cur_auth_type = s->proxy_auth_state.auth_type;

    /* Note: This uses buffering, potentially reading more than the
     * HTTP header. If tunneling a protocol where the server starts
     * the conversation, we might buffer part of that here, too.
     * Reading that requires using the proper ffurl_read() function
     * on this URLContext, not using the fd directly (as the tls
     * protocol does). This shouldn't be an issue for tls though,
     * since the client starts the conversation there, so there
     * is no extra data that we might buffer up here. */
    ret = http_read_header(h, &new_loc);
    if (ret < 0)
        goto fail;

    attempts++;
    if (s->http_code == 407 &&
        (cur_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) &&
        s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 2) {
        /* the proxy wants credentials we now have — reconnect and retry */
        ffurl_closep(&s->hd);
        goto redo;
    }

    if (s->http_code < 400)
        return 0;
    ret = ff_http_averror(s->http_code, AVERROR(EIO));

fail:
    http_proxy_close(h);
    return ret;
}

/* Write raw bytes through the established tunnel. */
static int http_proxy_write(URLContext *h, const uint8_t *buf, int size)
{
    HTTPContext *s = h->priv_data;
    return ffurl_write(s->hd, buf, size);
}

const URLProtocol ff_httpproxy_protocol = {
    .name                = "httpproxy",
    .url_open            = http_proxy_open,
    .url_read            = http_buf_read,
    .url_write           = http_proxy_write,
    .url_close           = http_proxy_close,
    .url_get_file_handle = http_get_file_handle,
    .priv_data_size      = sizeof(HTTPContext),
    .flags               = URL_PROTOCOL_FLAG_NETWORK,
};
#endif /* CONFIG_HTTPPROXY_PROTOCOL */
static int http_buf_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int len; /* read bytes from input buffer first */ len = s->buf_end - s->buf_ptr; if (len > 0) { if (len > size) len = size; memcpy(buf, s->buf_ptr, len); s->buf_ptr += len; } else { int64_t target_end = s->end_off ? s->end_off : s->filesize; if ((!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off >= target_end) return AVERROR_EOF; len = ffurl_read(s->hd, buf, size); if (!len && (!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off < target_end) { av_log(h, AV_LOG_ERROR, "Stream ends prematurely at %"PRId64", should be %"PRId64"\n", s->off, target_end ); return AVERROR(EIO); } } if (len > 0) { s->off += len; if (s->chunksize > 0) s->chunksize -= len; } return len; }
static int http_buf_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int len; /* read bytes from input buffer first */ len = s->buf_end - s->buf_ptr; if (len > 0) { if (len > size) len = size; memcpy(buf, s->buf_ptr, len); s->buf_ptr += len; } else { uint64_t target_end = s->end_off ? s->end_off : s->filesize; if ((!s->willclose || s->chunksize == UINT64_MAX) && s->off >= target_end) return AVERROR_EOF; len = ffurl_read(s->hd, buf, size); if (!len && (!s->willclose || s->chunksize == UINT64_MAX) && s->off < target_end) { av_log(h, AV_LOG_ERROR, "Stream ends prematurely at %"PRIu64", should be %"PRIu64"\n", s->off, target_end ); return AVERROR(EIO); } } if (len > 0) { s->off += len; if (s->chunksize > 0) s->chunksize -= len; } return len; }
{'added': [(65, ' uint64_t chunksize;'), (66, ' uint64_t off, end_off, filesize;'), (98, ' uint64_t icy_data_read;'), (100, ' uint64_t icy_metaint;'), (492, ' s->filesize = UINT64_MAX;'), (619, ' s->off = strtoull(p, NULL, 10);'), (621, ' s->filesize = strtoull(slash + 1, NULL, 10);'), (811, ' } else if (!av_strcasecmp(tag, "Content-Length") &&'), (812, ' s->filesize == UINT64_MAX) {'), (813, ' s->filesize = strtoull(p, NULL, 10);'), (822, ' s->filesize = UINT64_MAX;'), (846, ' s->icy_metaint = strtoull(p, NULL, 10);'), (976, ' s->chunksize = UINT64_MAX;'), (1010, ' uint64_t off = s->off;'), (1064, ' "Range: bytes=%"PRIu64"-", s->off);'), (1139, ' s->filesize = UINT64_MAX;'), (1179, ' uint64_t target_end = s->end_off ? s->end_off : s->filesize;'), (1180, ' if ((!s->willclose || s->chunksize == UINT64_MAX) && s->off >= target_end)'), (1183, ' if (!len && (!s->willclose || s->chunksize == UINT64_MAX) && s->off < target_end) {'), (1185, ' "Stream ends prematurely at %"PRIu64", should be %"PRIu64"\\n",'), (1249, ' if (s->chunksize != UINT64_MAX) {'), (1258, ' s->chunksize = strtoull(line, NULL, 16);'), (1260, ' av_log(h, AV_LOG_TRACE,'), (1261, ' "Chunked encoding data size: %"PRIu64"\'\\n",'), (1266, ' else if (s->chunksize == UINT64_MAX) {'), (1267, ' av_log(h, AV_LOG_ERROR, "Invalid chunk size %"PRIu64"\\n",'), (1268, ' s->chunksize);'), (1269, ' return AVERROR(EINVAL);'), (1270, ' }'), (1281, ' uint64_t target = h->is_streamed ? 
0 : s->off;'), (1286, ' av_log(h, AV_LOG_INFO, "Will reconnect at %"PRIu64" error=%s.\\n", s->off, av_err2str(read_ret));'), (1291, ' av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRIu64".\\n", target);'), (1346, ' uint64_t remaining;'), (1348, ' if (s->icy_metaint < s->icy_data_read)'), (1350, ' remaining = s->icy_metaint - s->icy_data_read;'), (1464, ' uint64_t old_off = s->off;'), (1475, ' else if ((s->filesize == UINT64_MAX && whence == SEEK_END))'), (1630, ' s->filesize = UINT64_MAX;')], 'deleted': [(65, ' int64_t chunksize;'), (66, ' int64_t off, end_off, filesize;'), (98, ' int icy_data_read;'), (100, ' int icy_metaint;'), (492, ' s->filesize = -1;'), (619, ' s->off = strtoll(p, NULL, 10);'), (621, ' s->filesize = strtoll(slash + 1, NULL, 10);'), (811, ' } else if (!av_strcasecmp(tag, "Content-Length") && s->filesize == -1) {'), (812, ' s->filesize = strtoll(p, NULL, 10);'), (821, ' s->filesize = -1;'), (845, ' s->icy_metaint = strtoll(p, NULL, 10);'), (975, ' s->chunksize = -1;'), (1009, ' int64_t off = s->off;'), (1063, ' "Range: bytes=%"PRId64"-", s->off);'), (1138, ' s->filesize = -1;'), (1178, ' int64_t target_end = s->end_off ? s->end_off : s->filesize;'), (1179, ' if ((!s->willclose || s->chunksize < 0) &&'), (1180, ' target_end >= 0 && s->off >= target_end)'), (1183, ' if (!len && (!s->willclose || s->chunksize < 0) &&'), (1184, ' target_end >= 0 && s->off < target_end) {'), (1186, ' "Stream ends prematurely at %"PRId64", should be %"PRId64"\\n",'), (1250, ' if (s->chunksize >= 0) {'), (1259, ' s->chunksize = strtoll(line, NULL, 16);'), (1261, ' av_log(NULL, AV_LOG_TRACE, "Chunked encoding data size: %"PRId64"\'\\n",'), (1276, ' int64_t target = h->is_streamed ? 
0 : s->off;'), (1281, ' av_log(h, AV_LOG_INFO, "Will reconnect at %"PRId64" error=%s.\\n", s->off, av_err2str(read_ret));'), (1286, ' av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRId64".\\n", target);'), (1341, ' int remaining = s->icy_metaint - s->icy_data_read;'), (1343, ' if (remaining < 0)'), (1458, ' int64_t old_off = s->off;'), (1469, ' else if ((s->filesize == -1 && whence == SEEK_END))'), (1624, ' s->filesize = -1;')]}
38
32
1,328
9,692
32
216
15
https://github.com/FFmpeg/FFmpeg
CVE-2016-10190
CWE-119
2,565
batch_matmul.cc
C
tflite::ops::builtin::batch_matmul::Prepare
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/batch_matmul.h" #include <stddef.h> #include <algorithm> #include <cstdint> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/optimized/batch_matmul.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/tensor_utils.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace batch_matmul { static const int kInputLHSTensor = 0; static const int kInputRHSTensor = 1; static const int kOutputTensor = 0; static const int kNumTempTensorsForAdjoints = 2; static const int kNumTempTensorsForHybrid = 5; // This file has two implementations of Transpose. 
enum KernelType { kReference, kGenericOptimized, }; struct OpData { // The scaling factor from input to output (aka the 'real multiplier') can // be represented as a fixed point multiplier plus a left shift. int32_t output_multiplier; int output_shift; // The range of the fused activation layer. For example for kNone and // uint8_t these would be 0 and 255. int32_t output_activation_min; int32_t output_activation_max; // The index of the temporary tensors where we store transposed LHS/RHS. int scratch_tensor_index; bool rhs_transposed; bool compute_row_sums = false; }; struct OpContext { OpContext(TfLiteContext* context, TfLiteNode* node) { params = reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data); lhs = GetInput(context, node, kInputLHSTensor); rhs = GetInput(context, node, kInputRHSTensor); output = GetOutput(context, node, 0); } TfLiteBatchMatMulParams* params; const TfLiteTensor* lhs; const TfLiteTensor* rhs; TfLiteTensor* output; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* op_data = new OpData(); // If the RHS is constant, we only transpose once. op_data->rhs_transposed = false; // Creates the temp tensors to store the transposed LHS and/or RHS, and // extra buffers for the quantized case. context->AddTensors(context, kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid, &op_data->scratch_tensor_index); return op_data; } void Free(TfLiteContext* context, void* buffer) { delete static_cast<OpData*>(buffer); } TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const RuntimeShape& extended_lhs_shape, const RuntimeShape& extended_rhs_shape, bool adj_x, bool adj_y, int output_rank, TfLiteTensor* output) { TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); // Fill in any broadcast dimensions. 
for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); int broadcast_dim = lhs_dim; if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) { broadcast_dim = rhs_dim; } output_shape->data[i] = broadcast_dim; } // Fill in the matmul dimensions. int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2; int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1; output_shape->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index); output_shape->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index); TfLiteStatus stat = context->ResizeTensor(context, output, output_shape); return stat; } // Initializes temp tensors to store transposed operands. TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node, OpContext* op_context) { // Create temporary tensors to hold transposed LHS/RHS. OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs = op_context->lhs; const TfLiteTensor* rhs = op_context->rhs; TfLiteIntArrayFree(node->temporaries); // For "hybrid" quantization, we impose the constraint that the LHS // is float (typically an activation from a prior layer) and the RHS // is quantized int8. bool is_hybrid = (op_context->lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8); if (is_hybrid) { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid); } else { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints); } const int lhs_rank = NumDimensions(lhs); const int rhs_rank = NumDimensions(rhs); const int batch_size = op_context->params->adj_x ? lhs->dims->data[lhs_rank - 2] : lhs->dims->data[lhs_rank - 1]; const int num_units = op_context->params->adj_x ? 
lhs->dims->data[lhs_rank - 1] : lhs->dims->data[lhs_rank - 2]; // Temp tensor for Transposed LHS; { node->temporaries->data[0] = op_data->scratch_tensor_index; TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(lhs_rank); for (int i = 0; i < lhs_rank - 2; ++i) { scratch_buffer_size->data[i] = lhs->dims->data[i]; } // Swap last two dimensions. scratch_buffer_size->data[lhs_rank - 2] = lhs->dims->data[lhs_rank - 1]; scratch_buffer_size->data[lhs_rank - 1] = lhs->dims->data[lhs_rank - 2]; scratch_buffer->type = op_context->lhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // We need a temp buffer for the RHS if we need to transpose the RHS. We // transpose by default, so that the two inputs (LHS and RHS) are in a proper // layout for our fast matrix multiplication routines. If the transpose flag // is set by the caller, the data is already in the desired layout. { node->temporaries->data[1] = op_data->scratch_tensor_index + 1; TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/1); const TfLiteTensor* rhs = op_context->rhs; int rhs_rank = NumDimensions(rhs); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(rhs_rank); for (int i = 0; i < rhs_rank - 2; ++i) { scratch_buffer_size->data[i] = rhs->dims->data[i]; } // Swap last two dimensions. 
scratch_buffer_size->data[rhs_rank - 2] = rhs->dims->data[rhs_rank - 1]; scratch_buffer_size->data[rhs_rank - 1] = rhs->dims->data[rhs_rank - 2]; if (IsConstantTensor(op_context->rhs)) { scratch_buffer->allocation_type = kTfLiteArenaRwPersistent; } else { scratch_buffer->allocation_type = kTfLiteArenaRw; } scratch_buffer->type = op_context->rhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // If we have to perform on-the-fly quantization (with quantized weights and // float inputs) first we need to quantize the inputs. Allocate temporary // buffer to store the intermediate quantized values, the batch scaling // factors, the accumulator buffer (optimized version), the input offsets, // and the sums of the rows for each weights matrix. // RHS = weights, LHS = inputs if (is_hybrid) { // Calculate the total number of LHS batches. int num_batches = 1; for (int i = 0; i < lhs_rank - 2; ++i) { num_batches *= lhs->dims->data[i]; } int num_weights_matrices = 1; for (int i = 0; i < rhs_rank - 2; ++i) { num_weights_matrices *= rhs->dims->data[i]; } op_data->compute_row_sums = true; node->temporaries->data[2] = op_data->scratch_tensor_index + 2; TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2); input_quantized->type = op_context->rhs->type; input_quantized->allocation_type = kTfLiteArenaRw; TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(op_context->lhs->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); node->temporaries->data[3] = op_data->scratch_tensor_index + 3; TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; // Total size of scaling factors is batch size * number of total batches int scaling_dims[1] = {num_batches * batch_size}; if 
(!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[4] = op_data->scratch_tensor_index + 4; TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {num_units, batch_size}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2); accum_size->data[0] = num_units; accum_size->data[1] = batch_size; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, accum_scratch, accum_size)); } node->temporaries->data[5] = op_data->scratch_tensor_index + 5; TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = num_batches * batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } node->temporaries->data[6] = op_data->scratch_tensor_index + 6; TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; int row_sums_dims[1] = {num_weights_matrices * num_units}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1); row_sums_size->data[0] = row_sums_dims[0]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } } return kTfLiteOk; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, 
NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); bool adj_x = op_context.params->adj_x; bool adj_y = op_context.params->adj_y; const TfLiteTensor* lhs_data = GetInput(context, node, kInputLHSTensor); const TfLiteTensor* rhs_data = GetInput(context, node, kInputRHSTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training. if (lhs_data->type == kTfLiteInt8) { double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, lhs_data, rhs_data, output, &real_multiplier)); int exponent; QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent); op_data->output_shift = exponent; // BatchMatMul has no fused activation functions. Therefore, set // output activation min and max to min and max of int8_t type, // respecitvely. op_data->output_activation_min = std::numeric_limits<int8_t>::min(); op_data->output_activation_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || lhs_data->type == kTfLiteInt8); TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 || rhs_data->type == kTfLiteInt8); // Support dimensions between 2 and 4, inclusive. 
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4); TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4); const int lhs_rank = NumDimensions(lhs_data); const int rhs_rank = NumDimensions(rhs_data); const int output_rank = std::max(lhs_rank, rhs_rank); const RuntimeShape extended_lhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data)); const RuntimeShape extended_rhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data)); // Ensure any batch dimensions obey broacasting rules. for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); if (lhs_dim != rhs_dim) { if (lhs_dim != 1) { TF_LITE_ENSURE_EQ(context, rhs_dim, 1); } } } // Ensure other dimensions work for matrix multiplication. int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) : extended_lhs_shape.Dims(output_rank - 1); int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) : extended_rhs_shape.Dims(output_rank - 2); TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); TfLiteStatus status = ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x, adj_y, output_rank, output); return status; } template <typename scalar> void TransposeRowsColumnsImpl(const TfLiteTensor* tensor_in, const scalar* input, TfLiteTensor* tensor_out, scalar* output) { RuntimeShape transposed_shape(GetTensorShape(tensor_in)); RuntimeShape shape(GetTensorShape(tensor_in)); TransposeParams params; int rank = NumDimensions(tensor_in); params.perm_count = rank; for (int i = 0; i < rank - 2; ++i) { params.perm[i] = i; } // Transpose the last two dimensions. 
params.perm[rank - 2] = rank - 1; params.perm[rank - 1] = rank - 2; transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2)); transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1)); optimized_ops::Transpose(params, shape, input, transposed_shape, output); } TfLiteStatus TransposeRowsColumns(TfLiteContext* context, const TfLiteTensor* tensor_in, TfLiteTensor* tensor_out) { if (tensor_in->type == kTfLiteFloat32) { TransposeRowsColumnsImpl<float>(tensor_in, GetTensorData<float>(tensor_in), tensor_out, GetTensorData<float>(tensor_out)); return kTfLiteOk; } else if (tensor_in->type == kTfLiteInt8) { TransposeRowsColumnsImpl<int8_t>( tensor_in, GetTensorData<int8_t>(tensor_in), tensor_out, GetTensorData<int8_t>(tensor_out)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Can only transpose tensors with float and int8 type."); return kTfLiteError; } } RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) { RuntimeShape swapped_shape(shape); const int32_t dims = shape.DimensionsCount(); swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1)); swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2)); return swapped_shape; } template <KernelType kernel_type> TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, OpData* data, const RuntimeShape& input_shape, const TfLiteTensor* input, const RuntimeShape& filter_shape, const TfLiteTensor* filter, TfLiteTensor* input_quantized, TfLiteTensor* scaling_factors, TfLiteTensor* accum_scratch, TfLiteTensor* row_sums, TfLiteTensor* input_offsets, TfLiteTensor* output) { const int32_t num_input_dims = input_shape.DimensionsCount(); // Input row/cols have been swapped at this point, so dims are // {input_size, num_batches} const int input_size = input_shape.Dims(num_input_dims - 2); const int batch_size = input_shape.Dims(num_input_dims - 1); int num_batches_to_quantize = batch_size; for (int i = 0; i < input_shape.DimensionsCount() - 2; ++i) { num_batches_to_quantize *= input_shape.Dims(i); } // Quantize input from 
float to uint8 + quantization params (scaling factor). float* scaling_factors_ptr = GetTensorData<float>(scaling_factors); int32_t* input_offset_ptr = nullptr; int32_t* row_sums_ptr = nullptr; // Only asymmetric quantization is supported. input_offset_ptr = GetTensorData<int32_t>(input_offsets); row_sums_ptr = GetTensorData<int32_t>(row_sums); int8_t* quant_data = GetTensorData<int8_t>(input_quantized); const int8_t* filter_data = GetTensorData<int8_t>(filter); const float* input_ptr = GetTensorData<float>(input); // Quantize each batch independently. for (int b = 0; b < num_batches_to_quantize; ++b) { const int offset = b * input_size; tensor_utils::AsymmetricQuantizeFloats( input_ptr + offset, input_size, quant_data + offset, &scaling_factors_ptr[b], &input_offset_ptr[b]); // Incorporate scaling of the filter. scaling_factors_ptr[b] *= filter->params.scale; } RuntimeShape output_shape = GetTensorShape(output); int output_size = 1; for (int i = 0; i < output_shape.DimensionsCount(); ++i) { output_size *= output_shape.Dims(i); } std::fill_n(GetTensorData<float>(output), output_size, 0.0f); if (kernel_type == kGenericOptimized) { optimized_ops::BatchMatMul( filter_shape, filter_data, input_shape, quant_data, scaling_factors_ptr, input_offset_ptr, row_sums_ptr, GetTensorShape(output), GetTensorData<int32_t>(accum_scratch), GetTensorData<float>(output), &(data->compute_row_sums), CpuBackendContext::GetFromContext(context)); } else { reference_ops::BatchMatMul( filter_shape, filter_data, input_shape, quant_data, scaling_factors_ptr, input_offset_ptr, row_sums_ptr, GetTensorShape(output), GetTensorData<float>(output), &(data->compute_row_sums)); } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus EvalInt8(TfLiteContext* context, const OpData* data, const RuntimeShape& lhs_shape, const TfLiteTensor* lhs, const RuntimeShape& rhs_shape, const TfLiteTensor* rhs, const RuntimeShape& output_shape, TfLiteTensor* output) { // Reuse params struct from 
FullyConnected Op. FullyConnectedParams op_params; int32_t input_offset = -lhs->params.zero_point; int32_t filter_offset = -rhs->params.zero_point; int32_t output_offset = output->params.zero_point; op_params.input_offset = input_offset; op_params.weights_offset = filter_offset; op_params.output_offset = output_offset; op_params.output_multiplier = data->output_multiplier; op_params.output_shift = data->output_shift; op_params.quantized_activation_min = data->output_activation_min; op_params.quantized_activation_max = data->output_activation_max; op_params.lhs_cacheable = IsConstantTensor(lhs); op_params.rhs_cacheable = IsConstantTensor(rhs); if (kernel_type == kReference) { reference_ops::BatchMatMul(op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape, GetTensorData<int8_t>(lhs), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::BatchMatMul(op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape, GetTensorData<int8_t>(lhs), GetTensorShape(output), GetTensorData<int8_t>(output), CpuBackendContext::GetFromContext(context)); } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, OpData* data, const RuntimeShape& lhs_shape, const TfLiteTensor* lhs, const RuntimeShape& rhs_shape, const TfLiteTensor* rhs, TfLiteTensor* output) { if (lhs->type == kTfLiteFloat32) { TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2); TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3); TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4); TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5); TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6); return EvalHybrid<kernel_type>( context, node, data, lhs_shape, lhs, rhs_shape, rhs, input_quantized, scaling_factors, accum_scratch, row_sums, input_offsets, output); } else if (lhs->type == kTfLiteInt8) { return EvalInt8<kernel_type>(context, 
data, lhs_shape, lhs, rhs_shape, rhs, GetTensorShape(output), output); } else { TF_LITE_KERNEL_LOG( context, "Currently only hybrid and int8 quantization is supported.\n"); return kTfLiteError; } return kTfLiteOk; } TfLiteTensor* GetTempRhs(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* rhs) { TfLiteTensor* transposed_rhs = GetTemporary(context, node, 1); if (rhs->type == kTfLiteInt8) { // Get the quantization params from the RHS tensor. transposed_rhs->params.scale = rhs->params.scale; transposed_rhs->params.zero_point = rhs->params.zero_point; } return transposed_rhs; } TfLiteTensor* GetTempLhs(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* lhs) { TfLiteTensor* transposed_lhs = GetTemporary(context, node, 0); if (lhs->type == kTfLiteInt8) { // Get the quantization params from the LHS tensor. transposed_lhs->params.scale = lhs->params.scale; transposed_lhs->params.zero_point = lhs->params.zero_point; } return transposed_lhs; } // Perform a batch matrix multiply on // LHS <..., A, B> X RHS<..., B, C> // where the leading dimensions of LHS and RHS obey broadcasting rules // (this Op will apply broadcasting rules). // We assume that LHS and RHS are both row oriented (adjacent values in memory // are in the same row) and will output in the same memory layout. However, // our fast GEMM libraries assume RCC layout (LHS row oriented, // RHS column oriented, output column oriented). Therefore, we perform // RHS <..., C, B> X LHS <..., B, A> // where output is a C X A column-oriented, which is equivalent to // A X C row-oriented. 
template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs = GetInput(context, node, kInputLHSTensor); const TfLiteTensor* rhs = GetInput(context, node, kInputRHSTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); RuntimeShape orig_lhs_shape = GetTensorShape(lhs); RuntimeShape orig_rhs_shape = GetTensorShape(rhs); bool adj_y = op_context.params->adj_y; bool adj_x = op_context.params->adj_x; const TfLiteTensor* rhs_tensor = adj_y ? rhs : GetTempRhs(context, node, rhs); const TfLiteTensor* lhs_tensor = adj_x ? GetTempLhs(context, node, lhs) : lhs; if (!adj_y) { // TODO(b/154760341) Constant tensors should already be transposed, but // we transpose once if necessary for now. if (!(IsConstantTensor(rhs) && op_data->rhs_transposed)) { TransposeRowsColumns(context, rhs, GetTemporary(context, node, 1)); op_data->rhs_transposed = true; } } if (adj_x) { TransposeRowsColumns(context, lhs, GetTemporary(context, node, 0)); } RuntimeShape rhs_shape = adj_y ? orig_rhs_shape : SwapRowColumnDims(orig_rhs_shape); RuntimeShape lhs_shape = adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape); switch (rhs->type) { case kTfLiteFloat32: // Note we pass RHS args first, LHS args second. See note above. 
if (kernel_type == kGenericOptimized) { optimized_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); } else { reference_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output)); } break; case kTfLiteInt8: EvalQuantized<kernel_type>(context, node, op_data, lhs_shape, lhs_tensor, rhs_shape, rhs_tensor, output); break; default: TF_LITE_KERNEL_LOG(context, "Currently BatchMatMul doesn't support type: %s", TfLiteTypeGetName(lhs->type)); return kTfLiteError; } return kTfLiteOk; } } // namespace batch_matmul TfLiteRegistration* Register_BATCH_MATMUL_REF() { static TfLiteRegistration r = {batch_matmul::Init, batch_matmul::Free, batch_matmul::Prepare, batch_matmul::Eval<batch_matmul::kReference>}; return &r; } TfLiteRegistration* Register_BATCH_MATMUL_GENERIC_OPTIMIZED() { static TfLiteRegistration r = { batch_matmul::Init, batch_matmul::Free, batch_matmul::Prepare, batch_matmul::Eval<batch_matmul::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_BATCH_MATMUL() { return Register_BATCH_MATMUL_GENERIC_OPTIMIZED(); } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/batch_matmul.h" #include <stddef.h> #include <algorithm> #include <cstdint> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/optimized/batch_matmul.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/tensor_utils.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace batch_matmul { static const int kInputLHSTensor = 0; static const int kInputRHSTensor = 1; static const int kOutputTensor = 0; static const int kNumTempTensorsForAdjoints = 2; static const int kNumTempTensorsForHybrid = 5; // This file has two implementations of Transpose. 
enum KernelType { kReference, kGenericOptimized, }; struct OpData { // The scaling factor from input to output (aka the 'real multiplier') can // be represented as a fixed point multiplier plus a left shift. int32_t output_multiplier; int output_shift; // The range of the fused activation layer. For example for kNone and // uint8_t these would be 0 and 255. int32_t output_activation_min; int32_t output_activation_max; // The index of the temporary tensors where we store transposed LHS/RHS. int scratch_tensor_index; bool rhs_transposed; bool compute_row_sums = false; }; struct OpContext { OpContext(TfLiteContext* context, TfLiteNode* node) { params = reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data); lhs = GetInput(context, node, kInputLHSTensor); rhs = GetInput(context, node, kInputRHSTensor); output = GetOutput(context, node, 0); } TfLiteBatchMatMulParams* params; const TfLiteTensor* lhs; const TfLiteTensor* rhs; TfLiteTensor* output; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* op_data = new OpData(); // If the RHS is constant, we only transpose once. op_data->rhs_transposed = false; // Creates the temp tensors to store the transposed LHS and/or RHS, and // extra buffers for the quantized case. context->AddTensors(context, kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid, &op_data->scratch_tensor_index); return op_data; } void Free(TfLiteContext* context, void* buffer) { delete static_cast<OpData*>(buffer); } TfLiteStatus ResizeOutputTensor(TfLiteContext* context, const RuntimeShape& extended_lhs_shape, const RuntimeShape& extended_rhs_shape, bool adj_x, bool adj_y, int output_rank, TfLiteTensor* output) { TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); // Fill in any broadcast dimensions. 
for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); int broadcast_dim = lhs_dim; if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) { broadcast_dim = rhs_dim; } output_shape->data[i] = broadcast_dim; } // Fill in the matmul dimensions. int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2; int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1; output_shape->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index); output_shape->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index); TfLiteStatus stat = context->ResizeTensor(context, output, output_shape); return stat; } // Initializes temp tensors to store transposed operands. TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node, OpContext* op_context) { // Create temporary tensors to hold transposed LHS/RHS. OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs = op_context->lhs; const TfLiteTensor* rhs = op_context->rhs; TfLiteIntArrayFree(node->temporaries); // For "hybrid" quantization, we impose the constraint that the LHS // is float (typically an activation from a prior layer) and the RHS // is quantized int8. bool is_hybrid = (op_context->lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8); if (is_hybrid) { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid); } else { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints); } const int lhs_rank = NumDimensions(lhs); const int rhs_rank = NumDimensions(rhs); const int batch_size = op_context->params->adj_x ? lhs->dims->data[lhs_rank - 2] : lhs->dims->data[lhs_rank - 1]; const int num_units = op_context->params->adj_x ? 
lhs->dims->data[lhs_rank - 1] : lhs->dims->data[lhs_rank - 2]; // Temp tensor for Transposed LHS; { node->temporaries->data[0] = op_data->scratch_tensor_index; TfLiteTensor* scratch_buffer; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, /*index=*/0, &scratch_buffer)); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(lhs_rank); for (int i = 0; i < lhs_rank - 2; ++i) { scratch_buffer_size->data[i] = lhs->dims->data[i]; } // Swap last two dimensions. scratch_buffer_size->data[lhs_rank - 2] = lhs->dims->data[lhs_rank - 1]; scratch_buffer_size->data[lhs_rank - 1] = lhs->dims->data[lhs_rank - 2]; scratch_buffer->type = op_context->lhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // We need a temp buffer for the RHS if we need to transpose the RHS. We // transpose by default, so that the two inputs (LHS and RHS) are in a proper // layout for our fast matrix multiplication routines. If the transpose flag // is set by the caller, the data is already in the desired layout. { node->temporaries->data[1] = op_data->scratch_tensor_index + 1; TfLiteTensor* scratch_buffer; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, /*index=*/1, &scratch_buffer)); const TfLiteTensor* rhs = op_context->rhs; int rhs_rank = NumDimensions(rhs); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(rhs_rank); for (int i = 0; i < rhs_rank - 2; ++i) { scratch_buffer_size->data[i] = rhs->dims->data[i]; } // Swap last two dimensions. 
scratch_buffer_size->data[rhs_rank - 2] = rhs->dims->data[rhs_rank - 1]; scratch_buffer_size->data[rhs_rank - 1] = rhs->dims->data[rhs_rank - 2]; if (IsConstantTensor(op_context->rhs)) { scratch_buffer->allocation_type = kTfLiteArenaRwPersistent; } else { scratch_buffer->allocation_type = kTfLiteArenaRw; } scratch_buffer->type = op_context->rhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // If we have to perform on-the-fly quantization (with quantized weights and // float inputs) first we need to quantize the inputs. Allocate temporary // buffer to store the intermediate quantized values, the batch scaling // factors, the accumulator buffer (optimized version), the input offsets, // and the sums of the rows for each weights matrix. // RHS = weights, LHS = inputs if (is_hybrid) { // Calculate the total number of LHS batches. int num_batches = 1; for (int i = 0; i < lhs_rank - 2; ++i) { num_batches *= lhs->dims->data[i]; } int num_weights_matrices = 1; for (int i = 0; i < rhs_rank - 2; ++i) { num_weights_matrices *= rhs->dims->data[i]; } op_data->compute_row_sums = true; node->temporaries->data[2] = op_data->scratch_tensor_index + 2; TfLiteTensor* input_quantized; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/2, &input_quantized)); input_quantized->type = op_context->rhs->type; input_quantized->allocation_type = kTfLiteArenaRw; TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(op_context->lhs->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); node->temporaries->data[3] = op_data->scratch_tensor_index + 3; TfLiteTensor* scaling_factors; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/3, &scaling_factors)); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; // Total size of scaling factors is batch size * number 
of total batches int scaling_dims[1] = {num_batches * batch_size}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[4] = op_data->scratch_tensor_index + 4; TfLiteTensor* accum_scratch; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, /*index=*/4, &accum_scratch)); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {num_units, batch_size}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2); accum_size->data[0] = num_units; accum_size->data[1] = batch_size; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, accum_scratch, accum_size)); } node->temporaries->data[5] = op_data->scratch_tensor_index + 5; TfLiteTensor* input_offsets; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, /*index=*/5, &input_offsets)); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = num_batches * batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } node->temporaries->data[6] = op_data->scratch_tensor_index + 6; TfLiteTensor* row_sums; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/6, &row_sums)); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; int row_sums_dims[1] = {num_weights_matrices * num_units}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1); row_sums_size->data[0] = 
row_sums_dims[0]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } } return kTfLiteOk; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); bool adj_x = op_context.params->adj_x; bool adj_y = op_context.params->adj_y; const TfLiteTensor* lhs_data; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputLHSTensor, &lhs_data)); const TfLiteTensor* rhs_data; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputRHSTensor, &rhs_data)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training. if (lhs_data->type == kTfLiteInt8) { double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, lhs_data, rhs_data, output, &real_multiplier)); int exponent; QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent); op_data->output_shift = exponent; // BatchMatMul has no fused activation functions. Therefore, set // output activation min and max to min and max of int8_t type, // respecitvely. op_data->output_activation_min = std::numeric_limits<int8_t>::min(); op_data->output_activation_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || lhs_data->type == kTfLiteInt8); TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 || rhs_data->type == kTfLiteInt8); // Support dimensions between 2 and 4, inclusive. 
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4); TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4); const int lhs_rank = NumDimensions(lhs_data); const int rhs_rank = NumDimensions(rhs_data); const int output_rank = std::max(lhs_rank, rhs_rank); const RuntimeShape extended_lhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data)); const RuntimeShape extended_rhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data)); // Ensure any batch dimensions obey broacasting rules. for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); if (lhs_dim != rhs_dim) { if (lhs_dim != 1) { TF_LITE_ENSURE_EQ(context, rhs_dim, 1); } } } // Ensure other dimensions work for matrix multiplication. int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) : extended_lhs_shape.Dims(output_rank - 1); int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) : extended_rhs_shape.Dims(output_rank - 2); TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); TfLiteStatus status = ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x, adj_y, output_rank, output); return status; } template <typename scalar> void TransposeRowsColumnsImpl(const TfLiteTensor* tensor_in, const scalar* input, TfLiteTensor* tensor_out, scalar* output) { RuntimeShape transposed_shape(GetTensorShape(tensor_in)); RuntimeShape shape(GetTensorShape(tensor_in)); TransposeParams params; int rank = NumDimensions(tensor_in); params.perm_count = rank; for (int i = 0; i < rank - 2; ++i) { params.perm[i] = i; } // Transpose the last two dimensions. 
params.perm[rank - 2] = rank - 1; params.perm[rank - 1] = rank - 2; transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2)); transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1)); optimized_ops::Transpose(params, shape, input, transposed_shape, output); } TfLiteStatus TransposeRowsColumns(TfLiteContext* context, const TfLiteTensor* tensor_in, TfLiteTensor* tensor_out) { if (tensor_in->type == kTfLiteFloat32) { TransposeRowsColumnsImpl<float>(tensor_in, GetTensorData<float>(tensor_in), tensor_out, GetTensorData<float>(tensor_out)); return kTfLiteOk; } else if (tensor_in->type == kTfLiteInt8) { TransposeRowsColumnsImpl<int8_t>( tensor_in, GetTensorData<int8_t>(tensor_in), tensor_out, GetTensorData<int8_t>(tensor_out)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Can only transpose tensors with float and int8 type."); return kTfLiteError; } } RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) { RuntimeShape swapped_shape(shape); const int32_t dims = shape.DimensionsCount(); swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1)); swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2)); return swapped_shape; } template <KernelType kernel_type> TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, OpData* data, const RuntimeShape& input_shape, const TfLiteTensor* input, const RuntimeShape& filter_shape, const TfLiteTensor* filter, TfLiteTensor* input_quantized, TfLiteTensor* scaling_factors, TfLiteTensor* accum_scratch, TfLiteTensor* row_sums, TfLiteTensor* input_offsets, TfLiteTensor* output) { const int32_t num_input_dims = input_shape.DimensionsCount(); // Input row/cols have been swapped at this point, so dims are // {input_size, num_batches} const int input_size = input_shape.Dims(num_input_dims - 2); const int batch_size = input_shape.Dims(num_input_dims - 1); int num_batches_to_quantize = batch_size; for (int i = 0; i < input_shape.DimensionsCount() - 2; ++i) { num_batches_to_quantize *= input_shape.Dims(i); } // Quantize input from 
float to uint8 + quantization params (scaling factor). float* scaling_factors_ptr = GetTensorData<float>(scaling_factors); int32_t* input_offset_ptr = nullptr; int32_t* row_sums_ptr = nullptr; // Only asymmetric quantization is supported. input_offset_ptr = GetTensorData<int32_t>(input_offsets); row_sums_ptr = GetTensorData<int32_t>(row_sums); int8_t* quant_data = GetTensorData<int8_t>(input_quantized); const int8_t* filter_data = GetTensorData<int8_t>(filter); const float* input_ptr = GetTensorData<float>(input); // Quantize each batch independently. for (int b = 0; b < num_batches_to_quantize; ++b) { const int offset = b * input_size; tensor_utils::AsymmetricQuantizeFloats( input_ptr + offset, input_size, quant_data + offset, &scaling_factors_ptr[b], &input_offset_ptr[b]); // Incorporate scaling of the filter. scaling_factors_ptr[b] *= filter->params.scale; } RuntimeShape output_shape = GetTensorShape(output); int output_size = 1; for (int i = 0; i < output_shape.DimensionsCount(); ++i) { output_size *= output_shape.Dims(i); } std::fill_n(GetTensorData<float>(output), output_size, 0.0f); if (kernel_type == kGenericOptimized) { optimized_ops::BatchMatMul( filter_shape, filter_data, input_shape, quant_data, scaling_factors_ptr, input_offset_ptr, row_sums_ptr, GetTensorShape(output), GetTensorData<int32_t>(accum_scratch), GetTensorData<float>(output), &(data->compute_row_sums), CpuBackendContext::GetFromContext(context)); } else { reference_ops::BatchMatMul( filter_shape, filter_data, input_shape, quant_data, scaling_factors_ptr, input_offset_ptr, row_sums_ptr, GetTensorShape(output), GetTensorData<float>(output), &(data->compute_row_sums)); } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus EvalInt8(TfLiteContext* context, const OpData* data, const RuntimeShape& lhs_shape, const TfLiteTensor* lhs, const RuntimeShape& rhs_shape, const TfLiteTensor* rhs, const RuntimeShape& output_shape, TfLiteTensor* output) { // Reuse params struct from 
FullyConnected Op. FullyConnectedParams op_params; int32_t input_offset = -lhs->params.zero_point; int32_t filter_offset = -rhs->params.zero_point; int32_t output_offset = output->params.zero_point; op_params.input_offset = input_offset; op_params.weights_offset = filter_offset; op_params.output_offset = output_offset; op_params.output_multiplier = data->output_multiplier; op_params.output_shift = data->output_shift; op_params.quantized_activation_min = data->output_activation_min; op_params.quantized_activation_max = data->output_activation_max; op_params.lhs_cacheable = IsConstantTensor(lhs); op_params.rhs_cacheable = IsConstantTensor(rhs); if (kernel_type == kReference) { reference_ops::BatchMatMul(op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape, GetTensorData<int8_t>(lhs), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::BatchMatMul(op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape, GetTensorData<int8_t>(lhs), GetTensorShape(output), GetTensorData<int8_t>(output), CpuBackendContext::GetFromContext(context)); } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, OpData* data, const RuntimeShape& lhs_shape, const TfLiteTensor* lhs, const RuntimeShape& rhs_shape, const TfLiteTensor* rhs, TfLiteTensor* output) { if (lhs->type == kTfLiteFloat32) { TfLiteTensor* input_quantized; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/2, &input_quantized)); TfLiteTensor* scaling_factors; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/3, &scaling_factors)); TfLiteTensor* accum_scratch; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, /*index=*/4, &accum_scratch)); TfLiteTensor* input_offsets; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, /*index=*/5, &input_offsets)); TfLiteTensor* row_sums; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/6, &row_sums)); return 
EvalHybrid<kernel_type>( context, node, data, lhs_shape, lhs, rhs_shape, rhs, input_quantized, scaling_factors, accum_scratch, row_sums, input_offsets, output); } else if (lhs->type == kTfLiteInt8) { return EvalInt8<kernel_type>(context, data, lhs_shape, lhs, rhs_shape, rhs, GetTensorShape(output), output); } else { TF_LITE_KERNEL_LOG( context, "Currently only hybrid and int8 quantization is supported.\n"); return kTfLiteError; } return kTfLiteOk; } TfLiteTensor* GetTempRhs(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* rhs) { TfLiteTensor* transposed_rhs = GetTemporary(context, node, 1); if (transposed_rhs == nullptr) { return nullptr; } if (rhs->type == kTfLiteInt8) { // Get the quantization params from the RHS tensor. transposed_rhs->params.scale = rhs->params.scale; transposed_rhs->params.zero_point = rhs->params.zero_point; } return transposed_rhs; } TfLiteTensor* GetTempLhs(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* lhs) { TfLiteTensor* transposed_lhs = GetTemporary(context, node, 0); if (transposed_lhs == nullptr) { return nullptr; } if (lhs->type == kTfLiteInt8) { // Get the quantization params from the LHS tensor. transposed_lhs->params.scale = lhs->params.scale; transposed_lhs->params.zero_point = lhs->params.zero_point; } return transposed_lhs; } // Perform a batch matrix multiply on // LHS <..., A, B> X RHS<..., B, C> // where the leading dimensions of LHS and RHS obey broadcasting rules // (this Op will apply broadcasting rules). // We assume that LHS and RHS are both row oriented (adjacent values in memory // are in the same row) and will output in the same memory layout. However, // our fast GEMM libraries assume RCC layout (LHS row oriented, // RHS column oriented, output column oriented). Therefore, we perform // RHS <..., C, B> X LHS <..., B, A> // where output is a C X A column-oriented, which is equivalent to // A X C row-oriented. 
template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputLHSTensor, &lhs)); const TfLiteTensor* rhs; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputRHSTensor, &rhs)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); RuntimeShape orig_lhs_shape = GetTensorShape(lhs); RuntimeShape orig_rhs_shape = GetTensorShape(rhs); bool adj_y = op_context.params->adj_y; bool adj_x = op_context.params->adj_x; const TfLiteTensor* rhs_tensor = adj_y ? rhs : GetTempRhs(context, node, rhs); const TfLiteTensor* lhs_tensor = adj_x ? GetTempLhs(context, node, lhs) : lhs; if (!adj_y) { // TODO(b/154760341) Constant tensors should already be transposed, but // we transpose once if necessary for now. if (!(IsConstantTensor(rhs) && op_data->rhs_transposed)) { TransposeRowsColumns(context, rhs, GetTemporary(context, node, 1)); op_data->rhs_transposed = true; } } if (adj_x) { TransposeRowsColumns(context, lhs, GetTemporary(context, node, 0)); } RuntimeShape rhs_shape = adj_y ? orig_rhs_shape : SwapRowColumnDims(orig_rhs_shape); RuntimeShape lhs_shape = adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape); switch (rhs->type) { case kTfLiteFloat32: // Note we pass RHS args first, LHS args second. See note above. 
if (kernel_type == kGenericOptimized) { optimized_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); } else { reference_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output)); } break; case kTfLiteInt8: EvalQuantized<kernel_type>(context, node, op_data, lhs_shape, lhs_tensor, rhs_shape, rhs_tensor, output); break; default: TF_LITE_KERNEL_LOG(context, "Currently BatchMatMul doesn't support type: %s", TfLiteTypeGetName(lhs->type)); return kTfLiteError; } return kTfLiteOk; } } // namespace batch_matmul TfLiteRegistration* Register_BATCH_MATMUL_REF() { static TfLiteRegistration r = {batch_matmul::Init, batch_matmul::Free, batch_matmul::Prepare, batch_matmul::Eval<batch_matmul::kReference>}; return &r; } TfLiteRegistration* Register_BATCH_MATMUL_GENERIC_OPTIMIZED() { static TfLiteRegistration r = { batch_matmul::Init, batch_matmul::Free, batch_matmul::Prepare, batch_matmul::Eval<batch_matmul::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_BATCH_MATMUL() { return Register_BATCH_MATMUL_GENERIC_OPTIMIZED(); } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); bool adj_x = op_context.params->adj_x; bool adj_y = op_context.params->adj_y; const TfLiteTensor* lhs_data = GetInput(context, node, kInputLHSTensor); const TfLiteTensor* rhs_data = GetInput(context, node, kInputRHSTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training. if (lhs_data->type == kTfLiteInt8) { double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, lhs_data, rhs_data, output, &real_multiplier)); int exponent; QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent); op_data->output_shift = exponent; // BatchMatMul has no fused activation functions. Therefore, set // output activation min and max to min and max of int8_t type, // respecitvely. op_data->output_activation_min = std::numeric_limits<int8_t>::min(); op_data->output_activation_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || lhs_data->type == kTfLiteInt8); TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 || rhs_data->type == kTfLiteInt8); // Support dimensions between 2 and 4, inclusive. 
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4); TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4); const int lhs_rank = NumDimensions(lhs_data); const int rhs_rank = NumDimensions(rhs_data); const int output_rank = std::max(lhs_rank, rhs_rank); const RuntimeShape extended_lhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data)); const RuntimeShape extended_rhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data)); // Ensure any batch dimensions obey broacasting rules. for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); if (lhs_dim != rhs_dim) { if (lhs_dim != 1) { TF_LITE_ENSURE_EQ(context, rhs_dim, 1); } } } // Ensure other dimensions work for matrix multiplication. int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) : extended_lhs_shape.Dims(output_rank - 1); int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) : extended_rhs_shape.Dims(output_rank - 2); TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); TfLiteStatus status = ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x, adj_y, output_rank, output); return status; }
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); bool adj_x = op_context.params->adj_x; bool adj_y = op_context.params->adj_y; const TfLiteTensor* lhs_data; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputLHSTensor, &lhs_data)); const TfLiteTensor* rhs_data; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputRHSTensor, &rhs_data)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training. if (lhs_data->type == kTfLiteInt8) { double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, lhs_data, rhs_data, output, &real_multiplier)); int exponent; QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent); op_data->output_shift = exponent; // BatchMatMul has no fused activation functions. Therefore, set // output activation min and max to min and max of int8_t type, // respecitvely. op_data->output_activation_min = std::numeric_limits<int8_t>::min(); op_data->output_activation_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || lhs_data->type == kTfLiteInt8); TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 || rhs_data->type == kTfLiteInt8); // Support dimensions between 2 and 4, inclusive. 
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4); TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4); const int lhs_rank = NumDimensions(lhs_data); const int rhs_rank = NumDimensions(rhs_data); const int output_rank = std::max(lhs_rank, rhs_rank); const RuntimeShape extended_lhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data)); const RuntimeShape extended_rhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data)); // Ensure any batch dimensions obey broacasting rules. for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); if (lhs_dim != rhs_dim) { if (lhs_dim != 1) { TF_LITE_ENSURE_EQ(context, rhs_dim, 1); } } } // Ensure other dimensions work for matrix multiplication. int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) : extended_lhs_shape.Dims(output_rank - 1); int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) : extended_rhs_shape.Dims(output_rank - 2); TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); TfLiteStatus status = ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x, adj_y, output_rank, output); return status; }
{'added': [(157, ' TfLiteTensor* scratch_buffer;'), (158, ' TF_LITE_ENSURE_OK('), (159, ' context, GetTemporarySafe(context, node, /*index=*/0, &scratch_buffer));'), (180, ' TfLiteTensor* scratch_buffer;'), (181, ' TF_LITE_ENSURE_OK('), (182, ' context, GetTemporarySafe(context, node, /*index=*/1, &scratch_buffer));'), (222, ' TfLiteTensor* input_quantized;'), (223, ' TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/2,'), (224, ' &input_quantized));'), (234, ' TfLiteTensor* scaling_factors;'), (235, ' TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/3,'), (236, ' &scaling_factors));'), (249, ' TfLiteTensor* accum_scratch;'), (250, ' TF_LITE_ENSURE_OK('), (251, ' context, GetTemporarySafe(context, node, /*index=*/4, &accum_scratch));'), (265, ' TfLiteTensor* input_offsets;'), (266, ' TF_LITE_ENSURE_OK('), (267, ' context, GetTemporarySafe(context, node, /*index=*/5, &input_offsets));'), (277, ' TfLiteTensor* row_sums;'), (278, ' TF_LITE_ENSURE_OK(context,'), (279, ' GetTemporarySafe(context, node, /*index=*/6, &row_sums));'), (305, ' const TfLiteTensor* lhs_data;'), (306, ' TF_LITE_ENSURE_OK(context,'), (307, ' GetInputSafe(context, node, kInputLHSTensor, &lhs_data));'), (308, ' const TfLiteTensor* rhs_data;'), (309, ' TF_LITE_ENSURE_OK(context,'), (310, ' GetInputSafe(context, node, kInputRHSTensor, &rhs_data));'), (311, ' TfLiteTensor* output;'), (312, ' TF_LITE_ENSURE_OK(context,'), (313, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (525, ' TfLiteTensor* input_quantized;'), (526, ' TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/2,'), (527, ' &input_quantized));'), (528, ' TfLiteTensor* scaling_factors;'), (529, ' TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/3,'), (530, ' &scaling_factors));'), (531, ' TfLiteTensor* accum_scratch;'), (532, ' TF_LITE_ENSURE_OK('), (533, ' context, GetTemporarySafe(context, node, /*index=*/4, &accum_scratch));'), (534, ' 
TfLiteTensor* input_offsets;'), (535, ' TF_LITE_ENSURE_OK('), (536, ' context, GetTemporarySafe(context, node, /*index=*/5, &input_offsets));'), (537, ' TfLiteTensor* row_sums;'), (538, ' TF_LITE_ENSURE_OK(context,'), (539, ' GetTemporarySafe(context, node, /*index=*/6, &row_sums));'), (557, ' if (transposed_rhs == nullptr) {'), (558, ' return nullptr;'), (559, ' }'), (560, ''), (572, ' if (transposed_lhs == nullptr) {'), (573, ' return nullptr;'), (574, ' }'), (575, ''), (599, ' const TfLiteTensor* lhs;'), (600, ' TF_LITE_ENSURE_OK(context,'), (601, ' GetInputSafe(context, node, kInputLHSTensor, &lhs));'), (602, ' const TfLiteTensor* rhs;'), (603, ' TF_LITE_ENSURE_OK(context,'), (604, ' GetInputSafe(context, node, kInputRHSTensor, &rhs));'), (605, ' TfLiteTensor* output;'), (606, ' TF_LITE_ENSURE_OK(context,'), (607, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(157, ' TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0);'), (178, ' TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/1);'), (218, ' TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2);'), (228, ' TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3);'), (241, ' TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4);'), (255, ' TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5);'), (265, ' TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6);'), (291, ' const TfLiteTensor* lhs_data = GetInput(context, node, kInputLHSTensor);'), (292, ' const TfLiteTensor* rhs_data = GetInput(context, node, kInputRHSTensor);'), (293, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (505, ' TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2);'), (506, ' TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3);'), (507, ' TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4);'), (508, ' 
TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5);'), (509, ' TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6);'), (561, ' const TfLiteTensor* lhs = GetInput(context, node, kInputLHSTensor);'), (562, ' const TfLiteTensor* rhs = GetInput(context, node, kInputRHSTensor);'), (563, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
62
18
547
4,104
55
507
9
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,241
mach0.c
C
MACH0_( iterate_chained_fixups)
/* radare - LGPL - Copyright 2010-2021 - nibble, mrmacete, pancake */ #include <stdio.h> #include <r_types.h> #include <r_util.h> #include "mach0.h" #include <r_hash.h> // TODO: deprecate bprintf and use Eprintf (bin->self) #define bprintf if (bin->verbose) eprintf #define Eprintf if (mo->verbose) eprintf #define IS_PTR_AUTH(x) ((x & (1ULL << 63)) != 0) #define IS_PTR_BIND(x) ((x & (1ULL << 62)) != 0) typedef struct { struct symbol_t *symbols; int j; int symbols_count; HtPP *hash; } RSymCtx; typedef void (*RExportsIterator)(struct MACH0_(obj_t) *bin, const char *name, ut64 flags, ut64 offset, void *ctx); typedef struct { ut8 *node; char *label; int i; ut8 *next_child; } RTrieState; typedef struct { ut8 * imports; RSkipList *relocs; } RWalkBindChainsContext; // OMG; THIS SHOULD BE KILLED; this var exposes the local native endian, which is completely unnecessary // USE THIS: int ws = bf->o->info->big_endian; #define mach0_endian 1 static ut64 read_uleb128(ut8 **p, ut8 *end) { const char *error = NULL; ut64 v; *p = (ut8 *)r_uleb128 (*p, end - *p, &v, &error); if (error) { eprintf ("%s", error); R_FREE (error); return UT64_MAX; } return v; } static ut64 entry_to_vaddr(struct MACH0_(obj_t) *bin) { switch (bin->main_cmd.cmd) { case LC_MAIN: return bin->entry + bin->baddr; case LC_UNIXTHREAD: case LC_THREAD: return bin->entry; default: return 0; } } static ut64 addr_to_offset(struct MACH0_(obj_t) *bin, ut64 addr) { if (bin->segs) { size_t i; for (i = 0; i < bin->nsegs; i++) { const ut64 segment_base = (ut64)bin->segs[i].vmaddr; const ut64 segment_size = (ut64)bin->segs[i].vmsize; if (addr >= segment_base && addr < segment_base + segment_size) { return bin->segs[i].fileoff + (addr - segment_base); } } } return 0; } static ut64 offset_to_vaddr(struct MACH0_(obj_t) *bin, ut64 offset) { if (bin->segs) { size_t i; for (i = 0; i < bin->nsegs; i++) { ut64 segment_base = (ut64)bin->segs[i].fileoff; ut64 segment_size = (ut64)bin->segs[i].filesize; if (offset >= segment_base && 
offset < segment_base + segment_size) { return bin->segs[i].vmaddr + (offset - segment_base); } } } return 0; } static ut64 pa2va(RBinFile *bf, ut64 offset) { r_return_val_if_fail (bf && bf->rbin, offset); RIO *io = bf->rbin->iob.io; if (!io || !io->va) { return offset; } struct MACH0_(obj_t) *bin = bf->o->bin_obj; return bin? offset_to_vaddr (bin, offset): offset; } static void init_sdb_formats(struct MACH0_(obj_t) *bin) { /* * These definitions are used by r2 -nn * must be kept in sync with libr/bin/d/macho */ sdb_set (bin->kv, "mach0_build_platform.cparse", "enum mach0_build_platform" "{MACOS=1, IOS=2, TVOS=3, WATCHOS=4, BRIDGEOS=5, IOSMAC=6, IOSSIMULATOR=7, TVOSSIMULATOR=8, WATCHOSSIMULATOR=9};", 0); sdb_set (bin->kv, "mach0_build_tool.cparse", "enum mach0_build_tool" "{CLANG=1, SWIFT=2, LD=3};", 0); sdb_set (bin->kv, "mach0_load_command_type.cparse", "enum mach0_load_command_type" "{ LC_SEGMENT=0x00000001ULL, LC_SYMTAB=0x00000002ULL, LC_SYMSEG=0x00000003ULL, LC_THREAD=0x00000004ULL, LC_UNIXTHREAD=0x00000005ULL, LC_LOADFVMLIB=0x00000006ULL, LC_IDFVMLIB=0x00000007ULL, LC_IDENT=0x00000008ULL, LC_FVMFILE=0x00000009ULL, LC_PREPAGE=0x0000000aULL, LC_DYSYMTAB=0x0000000bULL, LC_LOAD_DYLIB=0x0000000cULL, LC_ID_DYLIB=0x0000000dULL, LC_LOAD_DYLINKER=0x0000000eULL, LC_ID_DYLINKER=0x0000000fULL, LC_PREBOUND_DYLIB=0x00000010ULL, LC_ROUTINES=0x00000011ULL, LC_SUB_FRAMEWORK=0x00000012ULL, LC_SUB_UMBRELLA=0x00000013ULL, LC_SUB_CLIENT=0x00000014ULL, LC_SUB_LIBRARY=0x00000015ULL, LC_TWOLEVEL_HINTS=0x00000016ULL, LC_PREBIND_CKSUM=0x00000017ULL, LC_LOAD_WEAK_DYLIB=0x80000018ULL, LC_SEGMENT_64=0x00000019ULL, LC_ROUTINES_64=0x0000001aULL, LC_UUID=0x0000001bULL, LC_RPATH=0x8000001cULL, LC_CODE_SIGNATURE=0x0000001dULL, LC_SEGMENT_SPLIT_INFO=0x0000001eULL, LC_REEXPORT_DYLIB=0x8000001fULL, LC_LAZY_LOAD_DYLIB=0x00000020ULL, LC_ENCRYPTION_INFO=0x00000021ULL, LC_DYLD_INFO=0x00000022ULL, LC_DYLD_INFO_ONLY=0x80000022ULL, LC_LOAD_UPWARD_DYLIB=0x80000023ULL, 
LC_VERSION_MIN_MACOSX=0x00000024ULL, LC_VERSION_MIN_IPHONEOS=0x00000025ULL, LC_FUNCTION_STARTS=0x00000026ULL, LC_DYLD_ENVIRONMENT=0x00000027ULL, LC_MAIN=0x80000028ULL, LC_DATA_IN_CODE=0x00000029ULL, LC_SOURCE_VERSION=0x0000002aULL, LC_DYLIB_CODE_SIGN_DRS=0x0000002bULL, LC_ENCRYPTION_INFO_64=0x0000002cULL, LC_LINKER_OPTION=0x0000002dULL, LC_LINKER_OPTIMIZATION_HINT=0x0000002eULL, LC_VERSION_MIN_TVOS=0x0000002fULL, LC_VERSION_MIN_WATCHOS=0x00000030ULL, LC_NOTE=0x00000031ULL, LC_BUILD_VERSION=0x00000032ULL };", 0); sdb_set (bin->kv, "mach0_header_filetype.cparse", "enum mach0_header_filetype" "{MH_OBJECT=1, MH_EXECUTE=2, MH_FVMLIB=3, MH_CORE=4, MH_PRELOAD=5, MH_DYLIB=6, MH_DYLINKER=7, MH_BUNDLE=8, MH_DYLIB_STUB=9, MH_DSYM=10, MH_KEXT_BUNDLE=11};", 0); sdb_set (bin->kv, "mach0_header_flags.cparse", "enum mach0_header_flags" "{MH_NOUNDEFS=1, MH_INCRLINK=2,MH_DYLDLINK=4,MH_BINDATLOAD=8,MH_PREBOUND=0x10, MH_SPLIT_SEGS=0x20,MH_LAZY_INIT=0x40,MH_TWOLEVEL=0x80, MH_FORCE_FLAT=0x100,MH_NOMULTIDEFS=0x200,MH_NOFIXPREBINDING=0x400, MH_PREBINDABLE=0x800, MH_ALLMODSBOUND=0x1000, MH_SUBSECTIONS_VIA_SYMBOLS=0x2000, MH_CANONICAL=0x4000,MH_WEAK_DEFINES=0x8000, MH_BINDS_TO_WEAK=0x10000,MH_ALLOW_STACK_EXECUTION=0x20000, MH_ROOT_SAFE=0x40000,MH_SETUID_SAFE=0x80000, MH_NO_REEXPORTED_DYLIBS=0x100000,MH_PIE=0x200000, MH_DEAD_STRIPPABLE_DYLIB=0x400000, MH_HAS_TLV_DESCRIPTORS=0x800000, MH_NO_HEAP_EXECUTION=0x1000000};", 0); sdb_set (bin->kv, "mach0_section_types.cparse", "enum mach0_section_types" "{S_REGULAR=0, S_ZEROFILL=1, S_CSTRING_LITERALS=2, S_4BYTE_LITERALS=3, S_8BYTE_LITERALS=4, S_LITERAL_POINTERS=5, S_NON_LAZY_SYMBOL_POINTERS=6, S_LAZY_SYMBOL_POINTERS=7, S_SYMBOL_STUBS=8, S_MOD_INIT_FUNC_POINTERS=9, S_MOD_TERM_FUNC_POINTERS=0xa, S_COALESCED=0xb, S_GB_ZEROFILL=0xc, S_INTERPOSING=0xd, S_16BYTE_LITERALS=0xe, S_DTRACE_DOF=0xf, S_LAZY_DYLIB_SYMBOL_POINTERS=0x10, S_THREAD_LOCAL_REGULAR=0x11, S_THREAD_LOCAL_ZEROFILL=0x12, S_THREAD_LOCAL_VARIABLES=0x13, S_THREAD_LOCAL_VARIABLE_POINTERS=0x14, 
S_THREAD_LOCAL_INIT_FUNCTION_POINTERS=0x15, S_INIT_FUNC_OFFSETS=0x16};", 0); sdb_set (bin->kv, "mach0_section_attrs.cparse", "enum mach0_section_attrs" "{S_ATTR_PURE_INSTRUCTIONS=0x800000ULL, S_ATTR_NO_TOC=0x400000ULL, S_ATTR_STRIP_STATIC_SYMS=0x200000ULL, S_ATTR_NO_DEAD_STRIP=0x100000ULL, S_ATTR_LIVE_SUPPORT=0x080000ULL, S_ATTR_SELF_MODIFYING_CODE=0x040000ULL, S_ATTR_DEBUG=0x020000ULL, S_ATTR_SOME_INSTRUCTIONS=0x000004ULL, S_ATTR_EXT_RELOC=0x000002ULL, S_ATTR_LOC_RELOC=0x000001ULL};", 0); sdb_set (bin->kv, "mach0_header.format", "xxx[4]Edd[4]B " "magic cputype cpusubtype (mach0_header_filetype)filetype ncmds sizeofcmds (mach0_header_flags)flags", 0); sdb_set (bin->kv, "mach0_segment.format", "[4]Ed[16]zxxxxoodx " "(mach0_load_command_type)cmd cmdsize segname vmaddr vmsize fileoff filesize maxprot initprot nsects flags", 0); sdb_set (bin->kv, "mach0_segment64.format", "[4]Ed[16]zqqqqoodx " "(mach0_load_command_type)cmd cmdsize segname vmaddr vmsize fileoff filesize maxprot initprot nsects flags", 0); sdb_set (bin->kv, "mach0_symtab_command.format", "[4]Edxdxd " "(mach0_load_command_type)cmd cmdsize symoff nsyms stroff strsize", 0); sdb_set (bin->kv, "mach0_dysymtab_command.format", "[4]Edddddddddddxdxdxxxd " "(mach0_load_command_type)cmd cmdsize ilocalsym nlocalsym iextdefsym nextdefsym iundefsym nundefsym tocoff ntoc moddtaboff nmodtab extrefsymoff nextrefsyms inddirectsymoff nindirectsyms extreloff nextrel locreloff nlocrel", 0); sdb_set (bin->kv, "mach0_section.format", "[16]z[16]zxxxxxx[1]E[3]Bxx " "sectname segname addr size offset align reloff nreloc (mach0_section_types)flags_type (mach0_section_attrs)flags_attr reserved1 reserved2", 0); sdb_set (bin->kv, "mach0_section64.format", "[16]z[16]zqqxxxx[1]E[3]Bxxx " "sectname segname addr size offset align reloff nreloc (mach0_section_types)flags_type (mach0_section_attrs)flags_attr reserved1 reserved2 reserved3", 0); sdb_set (bin->kv, "mach0_dylib.format", "xxxxz " "name_offset timestamp current_version 
compatibility_version name", 0); sdb_set (bin->kv, "mach0_dylib_command.format", "[4]Ed? " "(mach0_load_command_type)cmd cmdsize (mach0_dylib)dylib", 0); sdb_set (bin->kv, "mach0_id_dylib_command.format", "[4]Ed? " "(mach0_load_command_type)cmd cmdsize (mach0_dylib)dylib", 0); sdb_set (bin->kv, "mach0_uuid_command.format", "[4]Ed[16]b " "(mach0_load_command_type)cmd cmdsize uuid", 0); sdb_set (bin->kv, "mach0_rpath_command.format", "[4]Edxz " "(mach0_load_command_type)cmd cmdsize path_offset path", 0); sdb_set (bin->kv, "mach0_entry_point_command.format", "[4]Edqq " "(mach0_load_command_type)cmd cmdsize entryoff stacksize", 0); sdb_set (bin->kv, "mach0_encryption_info64_command.format", "[4]Edxddx " "(mach0_load_command_type)cmd cmdsize offset size id padding", 0); sdb_set (bin->kv, "mach0_encryption_info_command.format", "[4]Edxdd " "(mach0_load_command_type)cmd cmdsize offset size id", 0); sdb_set (bin->kv, "mach0_code_signature_command.format", "[4]Edxd " "(mach0_load_command_type)cmd cmdsize offset size", 0); sdb_set (bin->kv, "mach0_dyld_info_only_command.format", "[4]Edxdxdxdxdxd " "(mach0_load_command_type)cmd cmdsize rebase_off rebase_size bind_off bind_size weak_bind_off weak_bind_size lazy_bind_off lazy_bind_size export_off export_size", 0); sdb_set (bin->kv, "mach0_load_dylinker_command.format", "[4]Edxz " "(mach0_load_command_type)cmd cmdsize name_offset name", 0); sdb_set (bin->kv, "mach0_id_dylinker_command.format", "[4]Edxzi " "(mach0_load_command_type)cmd cmdsize name_offset name", 0); sdb_set (bin->kv, "mach0_build_version_command.format", "[4]Ed[4]Exxd " "(mach0_load_command_type)cmd cmdsize (mach0_build_platform)platform minos sdk ntools", 0); sdb_set (bin->kv, "mach0_build_version_tool.format", "[4]Ex " "(mach0_build_tool)tool version", 0); sdb_set (bin->kv, "mach0_source_version_command.format", "[4]Edq " "(mach0_load_command_type)cmd cmdsize version", 0); sdb_set (bin->kv, "mach0_function_starts_command.format", "[4]Edxd " 
"(mach0_load_command_type)cmd cmdsize offset size", 0); sdb_set (bin->kv, "mach0_data_in_code_command.format", "[4]Edxd " "(mach0_load_command_type)cmd cmdsize offset size", 0); sdb_set (bin->kv, "mach0_version_min_command.format", "[4]Edxx " "(mach0_load_command_type)cmd cmdsize version reserved", 0); sdb_set (bin->kv, "mach0_segment_split_info_command.format", "[4]Edxd " "(mach0_load_command_type)cmd cmdsize offset size", 0); sdb_set (bin->kv, "mach0_unixthread_command.format", "[4]Eddd " "(mach0_load_command_type)cmd cmdsize flavor count", 0); } static bool init_hdr(struct MACH0_(obj_t) *bin) { ut8 magicbytes[4] = {0}; ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0}; int len; if (r_buf_read_at (bin->b, 0 + bin->header_at, magicbytes, 4) < 1) { return false; } if (r_read_le32 (magicbytes) == 0xfeedface) { bin->big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedface) { bin->big_endian = true; } else if (r_read_le32 (magicbytes) == FAT_MAGIC) { bin->big_endian = false; } else if (r_read_be32 (magicbytes) == FAT_MAGIC) { bin->big_endian = true; } else if (r_read_le32 (magicbytes) == 0xfeedfacf) { bin->big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedfacf) { bin->big_endian = true; } else { return false; // object files are magic == 0, but body is different :? 
} len = r_buf_read_at (bin->b, 0 + bin->header_at, machohdrbytes, sizeof (machohdrbytes)); if (len != sizeof (machohdrbytes)) { bprintf ("Error: read (hdr)\n"); return false; } bin->hdr.magic = r_read_ble (&machohdrbytes[0], bin->big_endian, 32); bin->hdr.cputype = r_read_ble (&machohdrbytes[4], bin->big_endian, 32); bin->hdr.cpusubtype = r_read_ble (&machohdrbytes[8], bin->big_endian, 32); bin->hdr.filetype = r_read_ble (&machohdrbytes[12], bin->big_endian, 32); bin->hdr.ncmds = r_read_ble (&machohdrbytes[16], bin->big_endian, 32); bin->hdr.sizeofcmds = r_read_ble (&machohdrbytes[20], bin->big_endian, 32); bin->hdr.flags = r_read_ble (&machohdrbytes[24], bin->big_endian, 32); #if R_BIN_MACH064 bin->hdr.reserved = r_read_ble (&machohdrbytes[28], bin->big_endian, 32); #endif init_sdb_formats (bin); sdb_num_set (bin->kv, "mach0_header.offset", 0, 0); // wat about fatmach0? return true; } static bool parse_segments(struct MACH0_(obj_t) *bin, ut64 off) { size_t i, j, k, sect, len; ut32 size_sects; ut8 segcom[sizeof (struct MACH0_(segment_command))] = {0}; ut8 sec[sizeof (struct MACH0_(section))] = {0}; char section_flagname[128]; if (!UT32_MUL (&size_sects, bin->nsegs, sizeof (struct MACH0_(segment_command)))) { return false; } if (!size_sects || size_sects > bin->size) { return false; } if (off > bin->size || off + sizeof (struct MACH0_(segment_command)) > bin->size) { return false; } if (!(bin->segs = realloc (bin->segs, bin->nsegs * sizeof(struct MACH0_(segment_command))))) { r_sys_perror ("realloc (seg)"); return false; } j = bin->nsegs - 1; len = r_buf_read_at (bin->b, off, segcom, sizeof (struct MACH0_(segment_command))); if (len != sizeof (struct MACH0_(segment_command))) { bprintf ("Error: read (seg)\n"); return false; } i = 0; bin->segs[j].cmd = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].cmdsize = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); memcpy (&bin->segs[j].segname, &segcom[i], 16); i += 16; #if 
R_BIN_MACH064 bin->segs[j].vmaddr = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); bin->segs[j].vmsize = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); bin->segs[j].fileoff = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); bin->segs[j].filesize = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); #else bin->segs[j].vmaddr = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].vmsize = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].fileoff = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].filesize = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); #endif bin->segs[j].maxprot = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].initprot = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].nsects = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].flags = r_read_ble32 (&segcom[i], bin->big_endian); char *segment_flagname = NULL; #if R_BIN_MACH064 segment_flagname = r_str_newf ("mach0_segment64_%u.offset", (ut32)j); #else segment_flagname = r_str_newf ("mach0_segment_%u.offset", (ut32)j); #endif sdb_num_set (bin->kv, segment_flagname, off, 0); free (segment_flagname); sdb_num_set (bin->kv, "mach0_segments.count", 0, 0); if (bin->segs[j].nsects > 0) { sect = bin->nsects; bin->nsects += bin->segs[j].nsects; if (bin->nsects > 128) { int new_nsects = bin->nsects & 0xf; bprintf ("WARNING: mach0 header contains too many sections (%d). 
Wrapping to %d\n", bin->nsects, new_nsects); bin->nsects = new_nsects; } if ((int)bin->nsects < 1) { bprintf ("Warning: Invalid number of sections\n"); bin->nsects = sect; return false; } if (!UT32_MUL (&size_sects, bin->nsects-sect, sizeof (struct MACH0_(section)))){ bin->nsects = sect; return false; } if (!size_sects || size_sects > bin->size){ bin->nsects = sect; return false; } if (bin->segs[j].cmdsize != sizeof (struct MACH0_(segment_command)) \ + (sizeof (struct MACH0_(section))*bin->segs[j].nsects)){ bin->nsects = sect; return false; } if (off + sizeof (struct MACH0_(segment_command)) > bin->size ||\ off + sizeof (struct MACH0_(segment_command)) + size_sects > bin->size){ bin->nsects = sect; return false; } if (!(bin->sects = realloc (bin->sects, bin->nsects * sizeof (struct MACH0_(section))))) { r_sys_perror ("realloc (sects)"); bin->nsects = sect; return false; } for (k = sect, j = 0; k < bin->nsects; k++, j++) { ut64 offset = off + sizeof (struct MACH0_(segment_command)) + j * sizeof (struct MACH0_(section)); len = r_buf_read_at (bin->b, offset, sec, sizeof (struct MACH0_(section))); if (len != sizeof (struct MACH0_(section))) { bprintf ("Error: read (sects)\n"); bin->nsects = sect; return false; } i = 0; memcpy (&bin->sects[k].sectname, &sec[i], 16); i += 16; memcpy (&bin->sects[k].segname, &sec[i], 16); i += 16; snprintf (section_flagname, sizeof (section_flagname), "mach0_section_%.16s_%.16s.offset", bin->sects[k].segname, bin->sects[k].sectname); sdb_num_set (bin->kv, section_flagname, offset, 0); #if R_BIN_MACH064 snprintf (section_flagname, sizeof (section_flagname), "mach0_section_%.16s_%.16s.format", bin->sects[k].segname, bin->sects[k].sectname); sdb_set (bin->kv, section_flagname, "mach0_section64", 0); #else snprintf (section_flagname, sizeof (section_flagname), "mach0_section_%.16s_%.16s.format", bin->sects[k].segname, bin->sects[k].sectname); sdb_set (bin->kv, section_flagname, "mach0_section", 0); #endif #if R_BIN_MACH064 bin->sects[k].addr 
= r_read_ble64 (&sec[i], bin->big_endian); i += sizeof (ut64); bin->sects[k].size = r_read_ble64 (&sec[i], bin->big_endian); i += sizeof (ut64); #else bin->sects[k].addr = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].size = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); #endif bin->sects[k].offset = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].align = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].reloff = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].nreloc = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].flags = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].reserved1 = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].reserved2 = r_read_ble32 (&sec[i], bin->big_endian); #if R_BIN_MACH064 i += sizeof (ut32); bin->sects[k].reserved3 = r_read_ble32 (&sec[i], bin->big_endian); #endif } } return true; } #define Error(x) error_message = x; goto error; static bool parse_symtab(struct MACH0_(obj_t) *mo, ut64 off) { struct symtab_command st; ut32 size_sym; size_t i; const char *error_message = ""; ut8 symt[sizeof (struct symtab_command)] = {0}; ut8 nlst[sizeof (struct MACH0_(nlist))] = {0}; const bool be = mo->big_endian; if (off > (ut64)mo->size || off + sizeof (struct symtab_command) > (ut64)mo->size) { return false; } int len = r_buf_read_at (mo->b, off, symt, sizeof (struct symtab_command)); if (len != sizeof (struct symtab_command)) { Eprintf ("Error: read (symtab)\n"); return false; } st.cmd = r_read_ble32 (symt, be); st.cmdsize = r_read_ble32 (symt + 4, be); st.symoff = r_read_ble32 (symt + 8, be) + mo->symbols_off; st.nsyms = r_read_ble32 (symt + 12, be); st.stroff = r_read_ble32 (symt + 16, be) + mo->symbols_off; st.strsize = r_read_ble32 (symt + 20, be); mo->symtab = NULL; mo->nsymtab = 0; if (st.strsize > 0 && st.strsize < mo->size && st.nsyms > 0) { 
mo->nsymtab = st.nsyms; if (st.stroff > mo->size || st.stroff + st.strsize > mo->size) { Error ("fail"); } if (!UT32_MUL (&size_sym, mo->nsymtab, sizeof (struct MACH0_(nlist)))) { Error ("fail2"); } if (!size_sym) { Error ("symbol size is zero"); } if (st.symoff > mo->size || st.symoff + size_sym > mo->size) { Error ("symoff is out of bounds"); } if (!(mo->symstr = calloc (1, st.strsize + 2))) { Error ("symoff is out of bounds"); } mo->symstrlen = st.strsize; len = r_buf_read_at (mo->b, st.stroff, (ut8*)mo->symstr, st.strsize); if (len != st.strsize) { Error ("Error: read (symstr)"); } ut64 max_nsymtab = (r_buf_size (mo->b) - st.symoff) / sizeof (struct MACH0_(nlist)); if (mo->nsymtab > max_nsymtab || !(mo->symtab = calloc (mo->nsymtab, sizeof (struct MACH0_(nlist))))) { goto error; } for (i = 0; i < mo->nsymtab; i++) { ut64 at = st.symoff + (i * sizeof (struct MACH0_(nlist))); len = r_buf_read_at (mo->b, at, nlst, sizeof (struct MACH0_(nlist))); if (len != sizeof (struct MACH0_(nlist))) { Error ("read (nlist)"); } //XXX not very safe what if is n_un.n_name instead? 
mo->symtab[i].n_strx = r_read_ble32 (nlst, be); mo->symtab[i].n_type = r_read_ble8 (nlst + 4); mo->symtab[i].n_sect = r_read_ble8 (nlst + 5); mo->symtab[i].n_desc = r_read_ble16 (nlst + 6, be); #if R_BIN_MACH064 mo->symtab[i].n_value = r_read_ble64 (&nlst[8], be); #else mo->symtab[i].n_value = r_read_ble32 (&nlst[8], be); #endif } } return true; error: R_FREE (mo->symstr); R_FREE (mo->symtab); Eprintf ("%s\n", error_message); return false; } static bool parse_dysymtab(struct MACH0_(obj_t) *bin, ut64 off) { size_t len, i; ut32 size_tab; ut8 dysym[sizeof (struct dysymtab_command)] = {0}; ut8 dytoc[sizeof (struct dylib_table_of_contents)] = {0}; ut8 dymod[sizeof (struct MACH0_(dylib_module))] = {0}; ut8 idsyms[sizeof (ut32)] = {0}; if (off > bin->size || off + sizeof (struct dysymtab_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, dysym, sizeof (struct dysymtab_command)); if (len != sizeof (struct dysymtab_command)) { bprintf ("Error: read (dysymtab)\n"); return false; } bin->dysymtab.cmd = r_read_ble32 (&dysym[0], bin->big_endian); bin->dysymtab.cmdsize = r_read_ble32 (&dysym[4], bin->big_endian); bin->dysymtab.ilocalsym = r_read_ble32 (&dysym[8], bin->big_endian); bin->dysymtab.nlocalsym = r_read_ble32 (&dysym[12], bin->big_endian); bin->dysymtab.iextdefsym = r_read_ble32 (&dysym[16], bin->big_endian); bin->dysymtab.nextdefsym = r_read_ble32 (&dysym[20], bin->big_endian); bin->dysymtab.iundefsym = r_read_ble32 (&dysym[24], bin->big_endian); bin->dysymtab.nundefsym = r_read_ble32 (&dysym[28], bin->big_endian); bin->dysymtab.tocoff = r_read_ble32 (&dysym[32], bin->big_endian); bin->dysymtab.ntoc = r_read_ble32 (&dysym[36], bin->big_endian); bin->dysymtab.modtaboff = r_read_ble32 (&dysym[40], bin->big_endian); bin->dysymtab.nmodtab = r_read_ble32 (&dysym[44], bin->big_endian); bin->dysymtab.extrefsymoff = r_read_ble32 (&dysym[48], bin->big_endian); bin->dysymtab.nextrefsyms = r_read_ble32 (&dysym[52], bin->big_endian); 
bin->dysymtab.indirectsymoff = r_read_ble32 (&dysym[56], bin->big_endian); bin->dysymtab.nindirectsyms = r_read_ble32 (&dysym[60], bin->big_endian); bin->dysymtab.extreloff = r_read_ble32 (&dysym[64], bin->big_endian); bin->dysymtab.nextrel = r_read_ble32 (&dysym[68], bin->big_endian); bin->dysymtab.locreloff = r_read_ble32 (&dysym[72], bin->big_endian); bin->dysymtab.nlocrel = r_read_ble32 (&dysym[76], bin->big_endian); bin->ntoc = bin->dysymtab.ntoc; if (bin->ntoc > 0) { if (!(bin->toc = calloc (bin->ntoc, sizeof (struct dylib_table_of_contents)))) { r_sys_perror ("calloc (toc)"); return false; } if (!UT32_MUL (&size_tab, bin->ntoc, sizeof (struct dylib_table_of_contents))){ R_FREE (bin->toc); return false; } if (!size_tab){ R_FREE (bin->toc); return false; } if (bin->dysymtab.tocoff > bin->size || bin->dysymtab.tocoff + size_tab > bin->size){ R_FREE (bin->toc); return false; } for (i = 0; i < bin->ntoc; i++) { len = r_buf_read_at (bin->b, bin->dysymtab.tocoff + i * sizeof (struct dylib_table_of_contents), dytoc, sizeof (struct dylib_table_of_contents)); if (len != sizeof (struct dylib_table_of_contents)) { bprintf ("Error: read (toc)\n"); R_FREE (bin->toc); return false; } bin->toc[i].symbol_index = r_read_ble32 (&dytoc[0], bin->big_endian); bin->toc[i].module_index = r_read_ble32 (&dytoc[4], bin->big_endian); } } bin->nmodtab = bin->dysymtab.nmodtab; ut64 max_nmodtab = (bin->size - bin->dysymtab.modtaboff) / sizeof (struct MACH0_(dylib_module)); if (bin->nmodtab > 0 && bin->nmodtab <= max_nmodtab) { if (!(bin->modtab = calloc (bin->nmodtab, sizeof (struct MACH0_(dylib_module))))) { r_sys_perror ("calloc (modtab)"); return false; } if (!UT32_MUL (&size_tab, bin->nmodtab, sizeof (struct MACH0_(dylib_module)))){ R_FREE (bin->modtab); return false; } if (!size_tab){ R_FREE (bin->modtab); return false; } if (bin->dysymtab.modtaboff > bin->size || \ bin->dysymtab.modtaboff + size_tab > bin->size){ R_FREE (bin->modtab); return false; } for (i = 0; i < bin->nmodtab; 
i++) { len = r_buf_read_at (bin->b, bin->dysymtab.modtaboff + i * sizeof (struct MACH0_(dylib_module)), dymod, sizeof (struct MACH0_(dylib_module))); if (len == -1) { bprintf ("Error: read (modtab)\n"); R_FREE (bin->modtab); return false; } bin->modtab[i].module_name = r_read_ble32 (&dymod[0], bin->big_endian); bin->modtab[i].iextdefsym = r_read_ble32 (&dymod[4], bin->big_endian); bin->modtab[i].nextdefsym = r_read_ble32 (&dymod[8], bin->big_endian); bin->modtab[i].irefsym = r_read_ble32 (&dymod[12], bin->big_endian); bin->modtab[i].nrefsym = r_read_ble32 (&dymod[16], bin->big_endian); bin->modtab[i].ilocalsym = r_read_ble32 (&dymod[20], bin->big_endian); bin->modtab[i].nlocalsym = r_read_ble32 (&dymod[24], bin->big_endian); bin->modtab[i].iextrel = r_read_ble32 (&dymod[28], bin->big_endian); bin->modtab[i].nextrel = r_read_ble32 (&dymod[32], bin->big_endian); bin->modtab[i].iinit_iterm = r_read_ble32 (&dymod[36], bin->big_endian); bin->modtab[i].ninit_nterm = r_read_ble32 (&dymod[40], bin->big_endian); #if R_BIN_MACH064 bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[44], bin->big_endian); bin->modtab[i].objc_module_info_addr = r_read_ble64 (&dymod[48], bin->big_endian); #else bin->modtab[i].objc_module_info_addr = r_read_ble32 (&dymod[44], bin->big_endian); bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[48], bin->big_endian); #endif } } bin->nindirectsyms = bin->dysymtab.nindirectsyms; if (bin->nindirectsyms > 0) { if (!(bin->indirectsyms = calloc (bin->nindirectsyms, sizeof (ut32)))) { r_sys_perror ("calloc (indirectsyms)"); return false; } if (!UT32_MUL (&size_tab, bin->nindirectsyms, sizeof (ut32))){ R_FREE (bin->indirectsyms); return false; } if (!size_tab){ R_FREE (bin->indirectsyms); return false; } if (bin->dysymtab.indirectsymoff > bin->size || \ bin->dysymtab.indirectsymoff + size_tab > bin->size){ R_FREE (bin->indirectsyms); return false; } for (i = 0; i < bin->nindirectsyms; i++) { len = r_buf_read_at (bin->b, 
bin->dysymtab.indirectsymoff + i * sizeof (ut32), idsyms, 4); if (len == -1) { bprintf ("Error: read (indirect syms)\n"); R_FREE (bin->indirectsyms); return false; } bin->indirectsyms[i] = r_read_ble32 (&idsyms[0], bin->big_endian); } } /* TODO extrefsyms, extrel, locrel */ return true; } static char *readString(ut8 *p, int off, int len) { if (off < 0 || off >= len) { return NULL; } return r_str_ndup ((const char *)p + off, len - off); } static void parseCodeDirectory(RBuffer *b, int offset, int datasize) { typedef struct __CodeDirectory { uint32_t magic; /* magic number (CSMAGIC_CODEDIRECTORY) */ uint32_t length; /* total length of CodeDirectory blob */ uint32_t version; /* compatibility version */ uint32_t flags; /* setup and mode flags */ uint32_t hashOffset; /* offset of hash slot element at index zero */ uint32_t identOffset; /* offset of identifier string */ uint32_t nSpecialSlots; /* number of special hash slots */ uint32_t nCodeSlots; /* number of ordinary (code) hash slots */ uint32_t codeLimit; /* limit to main image signature range */ uint8_t hashSize; /* size of each hash in bytes */ uint8_t hashType; /* type of hash (cdHashType* constants) */ uint8_t platform; /* unused (must be zero) */ uint8_t pageSize; /* log2(page size in bytes); 0 => infinite */ uint32_t spare2; /* unused (must be zero) */ /* followed by dynamic content as located by offset fields above */ uint32_t scatterOffset; uint32_t teamIDOffset; uint32_t spare3; ut64 codeLimit64; ut64 execSegBase; ut64 execSegLimit; ut64 execSegFlags; } CS_CodeDirectory; ut64 off = offset; int psize = datasize; ut8 *p = calloc (1, psize); if (!p) { return; } eprintf ("Offset: 0x%08"PFMT64x"\n", off); r_buf_read_at (b, off, p, datasize); CS_CodeDirectory cscd = {0}; #define READFIELD(x) cscd.x = r_read_ble32 (p + r_offsetof (CS_CodeDirectory, x), 1) #define READFIELD8(x) cscd.x = p[r_offsetof (CS_CodeDirectory, x)] READFIELD (length); READFIELD (version); READFIELD (flags); READFIELD (hashOffset); READFIELD 
(identOffset); READFIELD (nSpecialSlots); READFIELD (nCodeSlots); READFIELD (hashSize); READFIELD (teamIDOffset); READFIELD8 (hashType); READFIELD (pageSize); READFIELD (codeLimit); eprintf ("Version: %x\n", cscd.version); eprintf ("Flags: %x\n", cscd.flags); eprintf ("Length: %d\n", cscd.length); eprintf ("PageSize: %d\n", cscd.pageSize); eprintf ("hashOffset: %d\n", cscd.hashOffset); eprintf ("codeLimit: %d\n", cscd.codeLimit); eprintf ("hashSize: %d\n", cscd.hashSize); eprintf ("hashType: %d\n", cscd.hashType); char *identity = readString (p, cscd.identOffset, psize); eprintf ("Identity: %s\n", identity); char *teamId = readString (p, cscd.teamIDOffset, psize); eprintf ("TeamID: %s\n", teamId); eprintf ("CodeSlots: %d\n", cscd.nCodeSlots); free (identity); free (teamId); int hashSize = 20; // SHA1 is default int algoType = R_HASH_SHA1; const char *hashName = "sha1"; switch (cscd.hashType) { case 0: // SHA1 == 20 bytes case 1: // SHA1 == 20 bytes hashSize = 20; hashName = "sha1"; algoType = R_HASH_SHA1; break; case 2: // SHA256 == 32 bytes hashSize = 32; algoType = R_HASH_SHA256; hashName = "sha256"; break; } // computed cdhash RHash *ctx = r_hash_new (true, algoType); int fofsz = cscd.length; if (fofsz > 0 && fofsz < (r_buf_size (b) - off)) { ut8 *fofbuf = calloc (fofsz, 1); if (fofbuf) { int i; if (r_buf_read_at (b, off, fofbuf, fofsz) != fofsz) { eprintf ("Invalid cdhash offset/length values\n"); } r_hash_do_begin (ctx, algoType); if (algoType == R_HASH_SHA1) { r_hash_do_sha1 (ctx, fofbuf, fofsz); } else { r_hash_do_sha256 (ctx, fofbuf, fofsz); } r_hash_do_end (ctx, algoType); eprintf ("ph %s @ 0x%"PFMT64x"!%d\n", hashName, off, fofsz); eprintf ("ComputedCDHash: "); for (i = 0; i < hashSize;i++) { eprintf ("%02x", ctx->digest[i]); } eprintf ("\n"); free (fofbuf); } } // show and check the rest of hashes ut8 *hash = p + cscd.hashOffset; int j = 0; int k = 0; eprintf ("Hashed region: 0x%08"PFMT64x" - 0x%08"PFMT64x"\n", (ut64)0, (ut64)cscd.codeLimit); for (j = 0; 
j < cscd.nCodeSlots; j++) { int fof = 4096 * j; int idx = j * hashSize; eprintf ("0x%08"PFMT64x" ", off + cscd.hashOffset + idx); for (k = 0; k < hashSize; k++) { eprintf ("%02x", hash[idx + k]); } ut8 fofbuf[4096]; int fofsz = R_MIN (sizeof (fofbuf), cscd.codeLimit - fof); r_buf_read_at (b, fof, fofbuf, sizeof (fofbuf)); r_hash_do_begin (ctx, algoType); if (algoType == R_HASH_SHA1) { r_hash_do_sha1 (ctx, fofbuf, fofsz); } else { r_hash_do_sha256 (ctx, fofbuf, fofsz); } r_hash_do_end (ctx, algoType); if (memcmp (hash + idx, ctx->digest, hashSize)) { eprintf (" wx "); int i; for (i = 0; i < hashSize;i++) { eprintf ("%02x", ctx->digest[i]); } } else { eprintf (" OK"); } eprintf ("\n"); } r_hash_free (ctx); free (p); } // parse the Load Command static bool parse_signature(struct MACH0_(obj_t) *bin, ut64 off) { int i,len; ut32 data; bin->signature = NULL; struct linkedit_data_command link = {0}; ut8 lit[sizeof (struct linkedit_data_command)] = {0}; struct blob_index_t idx = {0}; struct super_blob_t super = {{0}}; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, lit, sizeof (struct linkedit_data_command)); if (len != sizeof (struct linkedit_data_command)) { bprintf ("Failed to get data while parsing LC_CODE_SIGNATURE command\n"); return false; } link.cmd = r_read_ble32 (&lit[0], bin->big_endian); link.cmdsize = r_read_ble32 (&lit[4], bin->big_endian); link.dataoff = r_read_ble32 (&lit[8], bin->big_endian); link.datasize = r_read_ble32 (&lit[12], bin->big_endian); data = link.dataoff; if (data > bin->size || data + sizeof (struct super_blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); return true; } super.blob.magic = r_buf_read_ble32_at (bin->b, data, mach0_endian); super.blob.length = r_buf_read_ble32_at (bin->b, data + 4, mach0_endian); super.count = r_buf_read_ble32_at (bin->b, data + 8, mach0_endian); char *verbose = r_sys_getenv 
("RABIN2_CODESIGN_VERBOSE"); bool isVerbose = false; if (verbose) { isVerbose = *verbose; free (verbose); } // to dump all certificates // [0x00053f75]> b 5K;/x 30800609;wtf @@ hit* // then do this: // $ openssl asn1parse -inform der -in a|less // $ openssl pkcs7 -inform DER -print_certs -text -in a for (i = 0; i < super.count; i++) { if (data + i > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_index_t bi; if (r_buf_read_at (bin->b, data + 12 + (i * sizeof (struct blob_index_t)), (ut8*)&bi, sizeof (struct blob_index_t)) < sizeof (struct blob_index_t)) { break; } idx.type = r_read_ble32 (&bi.type, mach0_endian); idx.offset = r_read_ble32 (&bi.offset, mach0_endian); switch (idx.type) { case CSSLOT_ENTITLEMENTS: if (true || isVerbose) { ut64 off = data + idx.offset; if (off > bin->size || off + sizeof (struct blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_t entitlements = {0}; entitlements.magic = r_buf_read_ble32_at (bin->b, off, mach0_endian); entitlements.length = r_buf_read_ble32_at (bin->b, off + 4, mach0_endian); len = entitlements.length - sizeof (struct blob_t); if (len <= bin->size && len > 1) { bin->signature = calloc (1, len + 1); if (!bin->signature) { break; } if (off + sizeof (struct blob_t) + len < r_buf_size (bin->b)) { r_buf_read_at (bin->b, off + sizeof (struct blob_t), (ut8 *)bin->signature, len); if (len >= 0) { bin->signature[len] = '\0'; } } else { bin->signature = (ut8 *)strdup ("Malformed entitlement"); } } else { bin->signature = (ut8 *)strdup ("Malformed entitlement"); } } break; case CSSLOT_CODEDIRECTORY: if (isVerbose) { parseCodeDirectory (bin->b, data + idx.offset, link.datasize); } break; case 0x1000: // unknown break; case CSSLOT_CMS_SIGNATURE: // ASN1/DER certificate if (isVerbose) { ut8 header[8] = {0}; r_buf_read_at (bin->b, data + idx.offset, header, sizeof (header)); ut32 length = R_MIN (UT16_MAX, r_read_ble32 (header + 4, 1)); ut8 
*p = calloc (length, 1); if (p) { r_buf_read_at (bin->b, data + idx.offset + 0, p, length); ut32 *words = (ut32*)p; eprintf ("Magic: %x\n", words[0]); eprintf ("wtf DUMP @%d!%d\n", (int)data + idx.offset + 8, (int)length); eprintf ("openssl pkcs7 -print_certs -text -inform der -in DUMP\n"); eprintf ("openssl asn1parse -offset %d -length %d -inform der -in /bin/ls\n", (int)data + idx.offset + 8, (int)length); eprintf ("pFp@%d!%d\n", (int)data + idx.offset + 8, (int)length); free (p); } } break; case CSSLOT_REQUIREMENTS: // 2 { ut8 p[256]; r_buf_read_at (bin->b, data + idx.offset + 16, p, sizeof (p)); p[sizeof (p) - 1] = 0; ut32 slot_size = r_read_ble32 (p + 8, 1); if (slot_size < sizeof (p)) { ut32 ident_size = r_read_ble32 (p + 8, 1); if (!ident_size || ident_size > sizeof (p) - 28) { break; } char *ident = r_str_ndup ((const char *)p + 28, ident_size); if (ident) { sdb_set (bin->kv, "mach0.ident", ident, 0); free (ident); } } else { if (bin->verbose) { eprintf ("Invalid code slot size\n"); } } } break; case CSSLOT_INFOSLOT: // 1; case CSSLOT_RESOURCEDIR: // 3; case CSSLOT_APPLICATION: // 4; // TODO: parse those codesign slots if (bin->verbose) { eprintf ("TODO: Some codesign slots are not yet supported\n"); } break; default: if (bin->verbose) { eprintf ("Unknown Code signature slot %d\n", idx.type); } break; } } if (!bin->signature) { bin->signature = (ut8 *)strdup ("No entitlement found"); } return true; } static int parse_thread(struct MACH0_(obj_t) *bin, struct load_command *lc, ut64 off, bool is_first_thread) { ut64 ptr_thread, pc = UT64_MAX, pc_offset = UT64_MAX; ut32 flavor, count; ut8 *arw_ptr = NULL; int arw_sz, len = 0; ut8 thc[sizeof (struct thread_command)] = {0}; ut8 tmp[4]; if (off > bin->size || off + sizeof (struct thread_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, thc, 8); if (len < 1) { goto wrong_read; } bin->thread.cmd = r_read_ble32 (&thc[0], bin->big_endian); bin->thread.cmdsize = r_read_ble32 (&thc[4], 
bin->big_endian); if (r_buf_read_at (bin->b, off + sizeof (struct thread_command), tmp, 4) < 4) { goto wrong_read; } flavor = r_read_ble32 (tmp, bin->big_endian); if (off + sizeof (struct thread_command) + sizeof (flavor) > bin->size || off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (ut32) > bin->size) { return false; } // TODO: use count for checks if (r_buf_read_at (bin->b, off + sizeof (struct thread_command) + sizeof (flavor), tmp, 4) < 4) { goto wrong_read; } count = r_read_ble32 (tmp, bin->big_endian); ptr_thread = off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (count); if (ptr_thread > bin->size) { return false; } switch (bin->hdr.cputype) { case CPU_TYPE_I386: case CPU_TYPE_X86_64: switch (flavor) { case X86_THREAD_STATE32: if (ptr_thread + sizeof (struct x86_thread_state32) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.x86_32, "16i", 1) == -1) { bprintf ("Error: read (thread state x86_32)\n"); return false; } pc = bin->thread_state.x86_32.eip; pc_offset = ptr_thread + r_offsetof(struct x86_thread_state32, eip); arw_ptr = (ut8 *)&bin->thread_state.x86_32; arw_sz = sizeof (struct x86_thread_state32); break; case X86_THREAD_STATE64: if (ptr_thread + sizeof (struct x86_thread_state64) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.x86_64, "32l", 1) == -1) { bprintf ("Error: read (thread state x86_64)\n"); return false; } pc = bin->thread_state.x86_64.rip; pc_offset = ptr_thread + r_offsetof(struct x86_thread_state64, rip); arw_ptr = (ut8 *)&bin->thread_state.x86_64; arw_sz = sizeof (struct x86_thread_state64); break; //default: bprintf ("Unknown type\n"); } break; case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: if (flavor == X86_THREAD_STATE32) { if (ptr_thread + sizeof (struct ppc_thread_state32) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.ppc_32, bin->big_endian ? 
"40I" : "40i", 1) == -1) { bprintf ("Error: read (thread state ppc_32)\n"); return false; } pc = bin->thread_state.ppc_32.srr0; pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state32, srr0); arw_ptr = (ut8 *)&bin->thread_state.ppc_32; arw_sz = sizeof (struct ppc_thread_state32); } else if (flavor == X86_THREAD_STATE64) { if (ptr_thread + sizeof (struct ppc_thread_state64) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.ppc_64, bin->big_endian ? "34LI3LI" : "34li3li", 1) == -1) { bprintf ("Error: read (thread state ppc_64)\n"); return false; } pc = bin->thread_state.ppc_64.srr0; pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state64, srr0); arw_ptr = (ut8 *)&bin->thread_state.ppc_64; arw_sz = sizeof (struct ppc_thread_state64); } break; case CPU_TYPE_ARM: if (ptr_thread + sizeof (struct arm_thread_state32) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.arm_32, bin->big_endian ? "17I" : "17i", 1) == -1) { bprintf ("Error: read (thread state arm)\n"); return false; } pc = bin->thread_state.arm_32.r15; pc_offset = ptr_thread + r_offsetof (struct arm_thread_state32, r15); arw_ptr = (ut8 *)&bin->thread_state.arm_32; arw_sz = sizeof (struct arm_thread_state32); break; case CPU_TYPE_ARM64: if (ptr_thread + sizeof (struct arm_thread_state64) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.arm_64, bin->big_endian ? "34LI1I" : "34Li1i", 1) == -1) { bprintf ("Error: read (thread state arm)\n"); return false; } pc = r_read_be64 (&bin->thread_state.arm_64.pc); pc_offset = ptr_thread + r_offsetof (struct arm_thread_state64, pc); arw_ptr = (ut8*)&bin->thread_state.arm_64; arw_sz = sizeof (struct arm_thread_state64); break; default: bprintf ("Error: read (unknown thread state structure)\n"); return false; } // TODO: this shouldnt be an bprintf... 
if (arw_ptr && arw_sz > 0) { int i; ut8 *p = arw_ptr; bprintf ("arw "); for (i = 0; i < arw_sz; i++) { bprintf ("%02x", 0xff & p[i]); } bprintf ("\n"); } if (is_first_thread) { bin->main_cmd = *lc; if (pc != UT64_MAX) { bin->entry = pc; } if (pc_offset != UT64_MAX) { sdb_num_set (bin->kv, "mach0.entry.offset", pc_offset, 0); } } return true; wrong_read: bprintf ("Error: read (thread)\n"); return false; } static int parse_function_starts(struct MACH0_(obj_t) *bin, ut64 off) { struct linkedit_data_command fc; ut8 sfc[sizeof (struct linkedit_data_command)] = {0}; int len; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { bprintf ("Likely overflow while parsing" " LC_FUNCTION_STARTS command\n"); } bin->func_start = NULL; len = r_buf_read_at (bin->b, off, sfc, sizeof (struct linkedit_data_command)); if (len < 1) { bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS command\n"); } fc.cmd = r_read_ble32 (&sfc[0], bin->big_endian); fc.cmdsize = r_read_ble32 (&sfc[4], bin->big_endian); fc.dataoff = r_read_ble32 (&sfc[8], bin->big_endian); fc.datasize = r_read_ble32 (&sfc[12], bin->big_endian); if ((int)fc.datasize > 0) { ut8 *buf = calloc (1, fc.datasize + 1); if (!buf) { bprintf ("Failed to allocate buffer\n"); return false; } bin->func_size = fc.datasize; if (fc.dataoff > bin->size || fc.dataoff + fc.datasize > bin->size) { free (buf); bprintf ("Likely overflow while parsing " "LC_FUNCTION_STARTS command\n"); return false; } len = r_buf_read_at (bin->b, fc.dataoff, buf, fc.datasize); if (len != fc.datasize) { free (buf); bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS\n"); return false; } buf[fc.datasize] = 0; // null-terminated buffer bin->func_start = buf; return true; } bin->func_start = NULL; return false; } static int parse_dylib(struct MACH0_(obj_t) *bin, ut64 off) { struct dylib_command dl; int lib, len; ut8 sdl[sizeof (struct dylib_command)] = {0}; if (off > bin->size || off + sizeof (struct 
dylib_command) > bin->size) { return false; } lib = bin->nlibs - 1; void *relibs = realloc (bin->libs, bin->nlibs * R_BIN_MACH0_STRING_LENGTH); if (!relibs) { r_sys_perror ("realloc (libs)"); return false; } bin->libs = relibs; len = r_buf_read_at (bin->b, off, sdl, sizeof (struct dylib_command)); if (len < 1) { bprintf ("Error: read (dylib)\n"); return false; } dl.cmd = r_read_ble32 (&sdl[0], bin->big_endian); dl.cmdsize = r_read_ble32 (&sdl[4], bin->big_endian); dl.dylib.name = r_read_ble32 (&sdl[8], bin->big_endian); dl.dylib.timestamp = r_read_ble32 (&sdl[12], bin->big_endian); dl.dylib.current_version = r_read_ble32 (&sdl[16], bin->big_endian); dl.dylib.compatibility_version = r_read_ble32 (&sdl[20], bin->big_endian); if (off + dl.dylib.name > bin->size ||\ off + dl.dylib.name + R_BIN_MACH0_STRING_LENGTH > bin->size) { return false; } memset (bin->libs[lib], 0, R_BIN_MACH0_STRING_LENGTH); len = r_buf_read_at (bin->b, off + dl.dylib.name, (ut8*)bin->libs[lib], R_BIN_MACH0_STRING_LENGTH - 1); bin->libs[lib][R_BIN_MACH0_STRING_LENGTH - 1] = 0; if (len < 1) { bprintf ("Error: read (dylib str)"); return false; } return true; } static const char *cmd_to_string(ut32 cmd) { switch (cmd) { case LC_DATA_IN_CODE: return "LC_DATA_IN_CODE"; case LC_CODE_SIGNATURE: return "LC_CODE_SIGNATURE"; case LC_RPATH: return "LC_RPATH"; case LC_TWOLEVEL_HINTS: return "LC_TWOLEVEL_HINTS"; case LC_PREBIND_CKSUM: return "LC_PREBIND_CKSUM"; case LC_SEGMENT: return "LC_SEGMENT"; case LC_SEGMENT_64: return "LC_SEGMENT_64"; case LC_SYMTAB: return "LC_SYMTAB"; case LC_SYMSEG: return "LC_SYMSEG"; case LC_DYSYMTAB: return "LC_DYSYMTAB"; case LC_PREBOUND_DYLIB: return "LC_PREBOUND_DYLIB"; case LC_ROUTINES: return "LC_ROUTINES"; case LC_ROUTINES_64: return "LC_ROUTINES_64"; case LC_SUB_FRAMEWORK: return "LC_SUB_FRAMEWORK"; case LC_SUB_UMBRELLA: return "LC_SUB_UMBRELLA"; case LC_SUB_CLIENT: return "LC_SUB_CLIENT"; case LC_SUB_LIBRARY: return "LC_SUB_LIBRARY"; case LC_FUNCTION_STARTS: return 
"LC_FUNCTION_STARTS"; case LC_DYLIB_CODE_SIGN_DRS: return "LC_DYLIB_CODE_SIGN_DRS"; case LC_NOTE: return "LC_NOTE"; case LC_BUILD_VERSION: return "LC_BUILD_VERSION"; case LC_VERSION_MIN_MACOSX: return "LC_VERSION_MIN_MACOSX"; case LC_VERSION_MIN_IPHONEOS: return "LC_VERSION_MIN_IPHONEOS"; case LC_VERSION_MIN_TVOS: return "LC_VERSION_MIN_TVOS"; case LC_VERSION_MIN_WATCHOS: return "LC_VERSION_MIN_WATCHOS"; case LC_DYLD_INFO: return "LC_DYLD_INFO"; case LC_DYLD_INFO_ONLY: return "LC_DYLD_INFO_ONLY"; case LC_DYLD_ENVIRONMENT: return "LC_DYLD_ENVIRONMENT"; case LC_SOURCE_VERSION: return "LC_SOURCE_VERSION"; case LC_MAIN: return "LC_MAIN"; case LC_UUID: return "LC_UUID"; case LC_ID_DYLIB: return "LC_ID_DYLIB"; case LC_ID_DYLINKER: return "LC_ID_DYLINKER"; case LC_LAZY_LOAD_DYLIB: return "LC_LAZY_LOAD_DYLIB"; case LC_ENCRYPTION_INFO: return "LC_ENCRYPTION_INFO"; case LC_ENCRYPTION_INFO_64: return "LC_ENCRYPTION_INFO_64"; case LC_SEGMENT_SPLIT_INFO: return "LC_SEGMENT_SPLIT_INFO"; case LC_REEXPORT_DYLIB: return "LC_REEXPORT_DYLIB"; case LC_LINKER_OPTION: return "LC_LINKER_OPTION"; case LC_LINKER_OPTIMIZATION_HINT: return "LC_LINKER_OPTIMIZATION_HINT"; case LC_LOAD_DYLINKER: return "LC_LOAD_DYLINKER"; case LC_LOAD_DYLIB: return "LC_LOAD_DYLIB"; case LC_LOAD_WEAK_DYLIB: return "LC_LOAD_WEAK_DYLIB"; case LC_THREAD: return "LC_THREAD"; case LC_UNIXTHREAD: return "LC_UNIXTHREAD"; case LC_LOADFVMLIB: return "LC_LOADFVMLIB"; case LC_IDFVMLIB: return "LC_IDFVMLIB"; case LC_IDENT: return "LC_IDENT"; case LC_FVMFILE: return "LC_FVMFILE"; case LC_PREPAGE: return "LC_PREPAGE"; } return ""; } static const char *cmd_to_pf_definition(ut32 cmd) { switch (cmd) { case LC_BUILD_VERSION: return "mach0_build_version_command"; case LC_CODE_SIGNATURE: return "mach0_code_signature_command"; case LC_DATA_IN_CODE: return "mach0_data_in_code_command"; case LC_DYLD_INFO: case LC_DYLD_INFO_ONLY: return "mach0_dyld_info_only_command"; case LC_DYLD_ENVIRONMENT: return NULL; case LC_DYLIB_CODE_SIGN_DRS: 
return NULL; case LC_DYSYMTAB: return "mach0_dysymtab_command"; case LC_ENCRYPTION_INFO: return "mach0_encryption_info_command"; case LC_ENCRYPTION_INFO_64: return "mach0_encryption_info64_command"; case LC_FUNCTION_STARTS: return "mach0_function_starts_command"; case LC_FVMFILE: return NULL; case LC_ID_DYLIB: return "mach0_id_dylib_command"; case LC_ID_DYLINKER: return "mach0_id_dylinker_command"; case LC_IDENT: return NULL; case LC_IDFVMLIB: return NULL; case LC_LINKER_OPTION: return NULL; case LC_LINKER_OPTIMIZATION_HINT: return NULL; case LC_LOAD_DYLINKER: return "mach0_load_dylinker_command"; case LC_LAZY_LOAD_DYLIB: case LC_LOAD_WEAK_DYLIB: case LC_LOAD_DYLIB: return "mach0_dylib_command"; case LC_LOADFVMLIB: return NULL; case LC_MAIN: return "mach0_entry_point_command"; case LC_NOTE: return NULL; case LC_PREBIND_CKSUM: return NULL; case LC_PREBOUND_DYLIB: return NULL; case LC_PREPAGE: return NULL; case LC_REEXPORT_DYLIB: return NULL; case LC_ROUTINES: return NULL; case LC_ROUTINES_64: return NULL; case LC_RPATH: return "mach0_rpath_command"; case LC_SEGMENT: return "mach0_segment"; case LC_SEGMENT_64: return "mach0_segment64"; case LC_SEGMENT_SPLIT_INFO: return "mach0_segment_split_info_command"; case LC_SOURCE_VERSION: return "mach0_source_version_command"; case LC_SUB_FRAMEWORK: return NULL; case LC_SUB_UMBRELLA: return NULL; case LC_SUB_CLIENT: return NULL; case LC_SUB_LIBRARY: return NULL; case LC_SYMTAB: return "mach0_symtab_command"; case LC_SYMSEG: return NULL; case LC_TWOLEVEL_HINTS: return NULL; case LC_UUID: return "mach0_uuid_command"; case LC_VERSION_MIN_MACOSX: case LC_VERSION_MIN_IPHONEOS: case LC_VERSION_MIN_TVOS: case LC_VERSION_MIN_WATCHOS: return "mach0_version_min_command"; case LC_THREAD: return NULL; case LC_UNIXTHREAD: return "mach0_unixthread_command"; } return NULL; } static const char *build_version_platform_to_string(ut32 platform) { switch (platform) { case 1: return "macOS"; case 2: return "iOS"; case 3: return "tvOS"; case 4: 
/* remaining PLATFORM_* ids of build_version_platform_to_string() */
		return "watchOS";
	case 5:
		return "bridgeOS";
	case 6:
		return "iOSmac";
	case 7:
		return "iOS Simulator";
	case 8:
		return "tvOS Simulator";
	case 9:
		return "watchOS Simulator";
	default:
		return "unknown";
	}
}

/* Map an LC_BUILD_VERSION tool id to a human-readable tool name. */
static const char *build_version_tool_to_string(ut32 tool) {
	switch (tool) {
	case 1:
		return "clang";
	case 2:
		return "swift";
	case 3:
		return "ld";
	default:
		return "unknown";
	}
}

/* Target word size in bytes, clamped to a minimum of 4. */
static size_t get_word_size(struct MACH0_(obj_t) *bin) {
	const size_t word_size = MACH0_(get_bits)(bin) / 8;
	return R_MAX (word_size, 4);
}

/* Parse an LC_DYLD_CHAINED_FIXUPS payload at `offset` (`size` bytes):
 * read the fixups header, then one r_dyld_chained_starts_in_segment
 * record (plus its page_start table) per segment into
 * bin->chained_starts. Imports/symbols are handled elsewhere (TODO). */
static bool parse_chained_fixups(struct MACH0_(obj_t) *bin, ut32 offset, ut32 size) {
	struct dyld_chained_fixups_header header;
	if (size < sizeof (header)) {
		return false;
	}
	// "7i": seven 32-bit fields, matching the on-disk header layout
	if (r_buf_fread_at (bin->b, offset, (ut8 *)&header, "7i", 1) != sizeof (header)) {
		return false;
	}
	if (header.fixups_version > 0) {
		eprintf ("Unsupported fixups version: %u\n", header.fixups_version);
		return false;
	}
	ut64 starts_at = offset + header.starts_offset;
	if (header.starts_offset > size) {
		return false;
	}
	ut32 segs_count;
	if ((segs_count = r_buf_read_le32_at (bin->b, starts_at)) == UT32_MAX) {
		return false;
	}
	bin->chained_starts = R_NEWS0 (struct r_dyld_chained_starts_in_segment *, segs_count);
	if (!bin->chained_starts) {
		return false;
	}
	bin->fixups_header = header;
	bin->fixups_offset = offset;
	bin->fixups_size = size;
	size_t i;
	ut64 cursor = starts_at + sizeof (ut32);
	ut64 bsize = r_buf_size (bin->b);
	for (i = 0; i < segs_count && cursor + 4 < bsize; i++) {
		ut32 seg_off;
		// a zero seg_off means "no fixups in this segment"
		if ((seg_off = r_buf_read_le32_at (bin->b, cursor)) == UT32_MAX || !seg_off) {
			cursor += sizeof (ut32);
			continue;
		}
		if (i >= bin->nsegs) {
			break;
		}
		struct r_dyld_chained_starts_in_segment *cur_seg = R_NEW0 (struct r_dyld_chained_starts_in_segment);
		if (!cur_seg) {
			return false;
		}
		bin->chained_starts[i] = cur_seg;
		// "isslis": packed on-disk record, 22 bytes — NOTE(review): the
		// struct is read without validating size/page_count fields first
		if (r_buf_fread_at (bin->b, starts_at + seg_off, (ut8 *)cur_seg, "isslis", 1) != 22) {
			return false;
		}
		if (cur_seg->page_count > 0) {
			ut16 *page_start = malloc (sizeof (ut16) * cur_seg->page_count);
if (!page_start) { return false; } if (r_buf_fread_at (bin->b, starts_at + seg_off + 22, (ut8 *)page_start, "s", cur_seg->page_count) != cur_seg->page_count * 2) { return false; } cur_seg->page_start = page_start; } cursor += sizeof (ut32); } /* TODO: handle also imports, symbols and multiple starts (32-bit only) */ return true; } static bool reconstruct_chained_fixup(struct MACH0_(obj_t) *bin) { if (!bin->dyld_info) { return false; } if (!bin->nsegs) { return false; } bin->chained_starts = R_NEWS0 (struct r_dyld_chained_starts_in_segment *, bin->nsegs); if (!bin->chained_starts) { return false; } size_t wordsize = get_word_size (bin); ut8 *p = NULL; size_t j, count, skip, bind_size; int seg_idx = 0; ut64 seg_off = 0; bind_size = bin->dyld_info->bind_size; if (!bind_size || bind_size < 1) { return false; } if (bin->dyld_info->bind_off > bin->size) { return false; } if (bin->dyld_info->bind_off + bind_size > bin->size) { return false; } ut8 *opcodes = calloc (1, bind_size + 1); if (!opcodes) { return false; } if (r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size) != bind_size) { bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n", (ut64)(size_t)bin->dyld_info->bind_off); R_FREE (opcodes); return false; } struct r_dyld_chained_starts_in_segment *cur_seg = NULL; size_t cur_seg_idx = 0; ut8 *end; bool done = false; for (p = opcodes, end = opcodes + bind_size; !done && p < end;) { ut8 imm = *p & BIND_IMMEDIATE_MASK, op = *p & BIND_OPCODE_MASK; p++; switch (op) { case BIND_OPCODE_DONE: done = true; break; case BIND_OPCODE_THREADED: { switch (imm) { case BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB: { read_uleb128 (&p, end); break; } case BIND_SUBOPCODE_THREADED_APPLY: { const size_t ps = 0x1000; if (!cur_seg || cur_seg_idx != seg_idx) { cur_seg_idx = seg_idx; cur_seg = bin->chained_starts[seg_idx]; if (!cur_seg) { cur_seg = R_NEW0 (struct r_dyld_chained_starts_in_segment); if (!cur_seg) { break; } bin->chained_starts[seg_idx] = 
cur_seg; cur_seg->pointer_format = DYLD_CHAINED_PTR_ARM64E; cur_seg->page_size = ps; cur_seg->page_count = ((bin->segs[seg_idx].vmsize + (ps - 1)) & ~(ps - 1)) / ps; if (cur_seg->page_count > 0) { cur_seg->page_start = malloc (sizeof (ut16) * cur_seg->page_count); if (!cur_seg->page_start) { break; } memset (cur_seg->page_start, 0xff, sizeof (ut16) * cur_seg->page_count); } } } if (cur_seg) { ut32 page_index = (ut32)(seg_off / ps); size_t maxsize = cur_seg->page_count * sizeof (ut16); if (page_index < maxsize && cur_seg->page_start) { cur_seg->page_start[page_index] = seg_off & 0xfff; } } break; } default: bprintf ("Error: Unexpected BIND_OPCODE_THREADED sub-opcode: 0x%x\n", imm); } break; } case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: case BIND_OPCODE_SET_TYPE_IMM: break; case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: read_uleb128 (&p, end); break; case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: while (*p++ && p < end) { /* empty loop */ } break; case BIND_OPCODE_SET_ADDEND_SLEB: r_sleb128 ((const ut8 **)&p, end); break; case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: seg_idx = imm; if (seg_idx >= bin->nsegs) { bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB" " has unexistent segment %d\n", seg_idx); R_FREE (opcodes); return false; } else { seg_off = read_uleb128 (&p, end); } break; case BIND_OPCODE_ADD_ADDR_ULEB: seg_off += read_uleb128 (&p, end); break; case BIND_OPCODE_DO_BIND: break; case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: seg_off += read_uleb128 (&p, end) + wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: seg_off += (ut64)imm * (ut64)wordsize + wordsize; break; case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: count = read_uleb128 (&p, end); skip = read_uleb128 (&p, end); for (j = 0; j < count; j++) { seg_off += skip + wordsize; } break; default: bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *p); R_FREE (opcodes); return false; } } R_FREE (opcodes); return true; } static int init_items(struct 
MACH0_(obj_t) *bin) { struct load_command lc = {0, 0}; ut8 loadc[sizeof (struct load_command)] = {0}; bool is_first_thread = true; ut64 off = 0LL; int i, len; char cmd_flagname[128]; bin->uuidn = 0; bin->os = 0; bin->has_crypto = 0; if (bin->hdr.sizeofcmds > bin->size) { bprintf ("Warning: chopping hdr.sizeofcmds\n"); bin->hdr.sizeofcmds = bin->size - 128; //return false; } //bprintf ("Commands: %d\n", bin->hdr.ncmds); for (i = 0, off = sizeof (struct MACH0_(mach_header)) + bin->header_at; \ i < bin->hdr.ncmds; i++, off += lc.cmdsize) { if (off > bin->size || off + sizeof (struct load_command) > bin->size) { bprintf ("mach0: out of bounds command\n"); return false; } len = r_buf_read_at (bin->b, off, loadc, sizeof (struct load_command)); if (len < 1) { bprintf ("Error: read (lc) at 0x%08"PFMT64x"\n", off); return false; } lc.cmd = r_read_ble32 (&loadc[0], bin->big_endian); lc.cmdsize = r_read_ble32 (&loadc[4], bin->big_endian); if (lc.cmdsize < 1 || off + lc.cmdsize > bin->size) { bprintf ("Warning: mach0_header %d = cmdsize<1. 
(0x%"PFMT64x" vs 0x%"PFMT64x")\n", i, (ut64)(off + lc.cmdsize), (ut64)(bin->size)); break; } snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.offset", i); sdb_num_set (bin->kv, cmd_flagname, off, 0); const char *format_name = cmd_to_pf_definition (lc.cmd); snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.format", i); if (format_name) { sdb_set (bin->kv, cmd_flagname, format_name, 0); } else { sdb_set (bin->kv, cmd_flagname, "[4]Ed (mach_load_command_type)cmd size", 0); } snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.cmd", i); switch (lc.cmd) { case LC_DATA_IN_CODE: sdb_set (bin->kv, cmd_flagname, "data_in_code", 0); break; case LC_RPATH: sdb_set (bin->kv, cmd_flagname, "rpath", 0); //bprintf ("--->\n"); break; case LC_SEGMENT_64: case LC_SEGMENT: sdb_set (bin->kv, cmd_flagname, "segment", 0); bin->nsegs++; if (!parse_segments (bin, off)) { bprintf ("error parsing segment\n"); bin->nsegs--; return false; } break; case LC_SYMTAB: sdb_set (bin->kv, cmd_flagname, "symtab", 0); if (!parse_symtab (bin, off)) { bprintf ("error parsing symtab\n"); return false; } break; case LC_DYSYMTAB: sdb_set (bin->kv, cmd_flagname, "dysymtab", 0); if (!parse_dysymtab (bin, off)) { bprintf ("error parsing dysymtab\n"); return false; } break; case LC_DYLIB_CODE_SIGN_DRS: sdb_set (bin->kv, cmd_flagname, "dylib_code_sign_drs", 0); //bprintf ("[mach0] code is signed\n"); break; case LC_VERSION_MIN_MACOSX: sdb_set (bin->kv, cmd_flagname, "version_min_macosx", 0); bin->os = 1; // set OS = osx //bprintf ("[mach0] Requires OSX >= x\n"); break; case LC_VERSION_MIN_IPHONEOS: sdb_set (bin->kv, cmd_flagname, "version_min_iphoneos", 0); bin->os = 2; // set OS = ios //bprintf ("[mach0] Requires iOS >= x\n"); break; case LC_VERSION_MIN_TVOS: sdb_set (bin->kv, cmd_flagname, "version_min_tvos", 0); bin->os = 4; break; case LC_VERSION_MIN_WATCHOS: sdb_set (bin->kv, cmd_flagname, "version_min_watchos", 0); bin->os = 3; break; case LC_UUID: sdb_set (bin->kv, 
cmd_flagname, "uuid", 0); { struct uuid_command uc = {0}; if (off + sizeof (struct uuid_command) > bin->size) { bprintf ("UUID out of bounds\n"); return false; } if (r_buf_fread_at (bin->b, off, (ut8*)&uc, "24c", 1) != -1) { char key[128]; char val[128]; snprintf (key, sizeof (key)-1, "uuid.%d", bin->uuidn++); r_hex_bin2str ((ut8*)&uc.uuid, 16, val); sdb_set (bin->kv, key, val, 0); //for (i=0;i<16; i++) bprintf ("%02x%c", uc.uuid[i], (i==15)?'\n':'-'); } } break; case LC_ENCRYPTION_INFO_64: /* TODO: the struct is probably different here */ case LC_ENCRYPTION_INFO: sdb_set (bin->kv, cmd_flagname, "encryption_info", 0); { struct MACH0_(encryption_info_command) eic = {0}; ut8 seic[sizeof (struct MACH0_(encryption_info_command))] = {0}; if (off + sizeof (struct MACH0_(encryption_info_command)) > bin->size) { bprintf ("encryption info out of bounds\n"); return false; } if (r_buf_read_at (bin->b, off, seic, sizeof (struct MACH0_(encryption_info_command))) != -1) { eic.cmd = r_read_ble32 (&seic[0], bin->big_endian); eic.cmdsize = r_read_ble32 (&seic[4], bin->big_endian); eic.cryptoff = r_read_ble32 (&seic[8], bin->big_endian); eic.cryptsize = r_read_ble32 (&seic[12], bin->big_endian); eic.cryptid = r_read_ble32 (&seic[16], bin->big_endian); bin->has_crypto = eic.cryptid; sdb_set (bin->kv, "crypto", "true", 0); sdb_num_set (bin->kv, "cryptid", eic.cryptid, 0); sdb_num_set (bin->kv, "cryptoff", eic.cryptoff, 0); sdb_num_set (bin->kv, "cryptsize", eic.cryptsize, 0); sdb_num_set (bin->kv, "cryptheader", off, 0); } } break; case LC_LOAD_DYLINKER: { sdb_set (bin->kv, cmd_flagname, "dylinker", 0); R_FREE (bin->intrp); //bprintf ("[mach0] load dynamic linker\n"); struct dylinker_command dy = {0}; ut8 sdy[sizeof (struct dylinker_command)] = {0}; if (off + sizeof (struct dylinker_command) > bin->size){ bprintf ("Warning: Cannot parse dylinker command\n"); return false; } if (r_buf_read_at (bin->b, off, sdy, sizeof (struct dylinker_command)) == -1) { bprintf ("Warning: read 
(LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off); } else { dy.cmd = r_read_ble32 (&sdy[0], bin->big_endian); dy.cmdsize = r_read_ble32 (&sdy[4], bin->big_endian); dy.name = r_read_ble32 (&sdy[8], bin->big_endian); int len = dy.cmdsize; char *buf = malloc (len+1); if (buf) { // wtf @ off + 0xc ? r_buf_read_at (bin->b, off + 0xc, (ut8*)buf, len); buf[len] = 0; free (bin->intrp); bin->intrp = buf; } } } break; case LC_MAIN: { struct { ut64 eo; ut64 ss; } ep = {0}; ut8 sep[2 * sizeof (ut64)] = {0}; sdb_set (bin->kv, cmd_flagname, "main", 0); if (!is_first_thread) { bprintf ("Error: LC_MAIN with other threads\n"); return false; } if (off + 8 > bin->size || off + sizeof (ep) > bin->size) { bprintf ("invalid command size for main\n"); return false; } r_buf_read_at (bin->b, off + 8, sep, 2 * sizeof (ut64)); ep.eo = r_read_ble64 (&sep[0], bin->big_endian); ep.ss = r_read_ble64 (&sep[8], bin->big_endian); bin->entry = ep.eo; bin->main_cmd = lc; sdb_num_set (bin->kv, "mach0.entry.offset", off + 8, 0); sdb_num_set (bin->kv, "stacksize", ep.ss, 0); is_first_thread = false; } break; case LC_UNIXTHREAD: sdb_set (bin->kv, cmd_flagname, "unixthread", 0); if (!is_first_thread) { bprintf ("Error: LC_UNIXTHREAD with other threads\n"); return false; } case LC_THREAD: sdb_set (bin->kv, cmd_flagname, "thread", 0); if (!parse_thread (bin, &lc, off, is_first_thread)) { bprintf ("Cannot parse thread\n"); return false; } is_first_thread = false; break; case LC_LOAD_DYLIB: case LC_LOAD_WEAK_DYLIB: sdb_set (bin->kv, cmd_flagname, "load_dylib", 0); bin->nlibs++; if (!parse_dylib (bin, off)) { bprintf ("Cannot parse dylib\n"); bin->nlibs--; return false; } break; case LC_DYLD_INFO: case LC_DYLD_INFO_ONLY: { ut8 dyldi[sizeof (struct dyld_info_command)] = {0}; sdb_set (bin->kv, cmd_flagname, "dyld_info", 0); bin->dyld_info = calloc (1, sizeof (struct dyld_info_command)); if (bin->dyld_info) { if (off + sizeof (struct dyld_info_command) > bin->size){ bprintf ("Cannot parse dyldinfo\n"); R_FREE 
(bin->dyld_info); return false; } if (r_buf_read_at (bin->b, off, dyldi, sizeof (struct dyld_info_command)) == -1) { R_FREE (bin->dyld_info); bprintf ("Error: read (LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off); } else { bin->dyld_info->cmd = r_read_ble32 (&dyldi[0], bin->big_endian); bin->dyld_info->cmdsize = r_read_ble32 (&dyldi[4], bin->big_endian); bin->dyld_info->rebase_off = r_read_ble32 (&dyldi[8], bin->big_endian); bin->dyld_info->rebase_size = r_read_ble32 (&dyldi[12], bin->big_endian); bin->dyld_info->bind_off = r_read_ble32 (&dyldi[16], bin->big_endian); bin->dyld_info->bind_size = r_read_ble32 (&dyldi[20], bin->big_endian); bin->dyld_info->weak_bind_off = r_read_ble32 (&dyldi[24], bin->big_endian); bin->dyld_info->weak_bind_size = r_read_ble32 (&dyldi[28], bin->big_endian); bin->dyld_info->lazy_bind_off = r_read_ble32 (&dyldi[32], bin->big_endian); bin->dyld_info->lazy_bind_size = r_read_ble32 (&dyldi[36], bin->big_endian); bin->dyld_info->export_off = r_read_ble32 (&dyldi[40], bin->big_endian) + bin->symbols_off; bin->dyld_info->export_size = r_read_ble32 (&dyldi[44], bin->big_endian); } } } break; case LC_CODE_SIGNATURE: parse_signature (bin, off); sdb_set (bin->kv, cmd_flagname, "signature", 0); /* ut32 dataoff // ut32 datasize */ break; case LC_SOURCE_VERSION: sdb_set (bin->kv, cmd_flagname, "version", 0); /* uint64_t version; */ /* A.B.C.D.E packed as a24.b10.c10.d10.e10 */ //bprintf ("mach0: TODO: Show source version\n"); break; case LC_SEGMENT_SPLIT_INFO: sdb_set (bin->kv, cmd_flagname, "split_info", 0); /* TODO */ break; case LC_FUNCTION_STARTS: sdb_set (bin->kv, cmd_flagname, "function_starts", 0); if (!parse_function_starts (bin, off)) { bprintf ("Cannot parse LC_FUNCTION_STARTS\n"); } break; case LC_REEXPORT_DYLIB: sdb_set (bin->kv, cmd_flagname, "dylib", 0); /* TODO */ break; default: //bprintf ("mach0: Unknown header command %x\n", lc.cmd); break; } } bool has_chained_fixups = false; for (i = 0, off = sizeof (struct MACH0_(mach_header)) + 
bin->header_at; \ i < bin->hdr.ncmds; i++, off += lc.cmdsize) { len = r_buf_read_at (bin->b, off, loadc, sizeof (struct load_command)); if (len < 1) { bprintf ("Error: read (lc) at 0x%08"PFMT64x"\n", off); return false; } lc.cmd = r_read_ble32 (&loadc[0], bin->big_endian); lc.cmdsize = r_read_ble32 (&loadc[4], bin->big_endian); if (lc.cmdsize < 1 || off + lc.cmdsize > bin->size) { bprintf ("Warning: mach0_header %d = cmdsize<1. (0x%"PFMT64x" vs 0x%"PFMT64x")\n", i, (ut64)(off + lc.cmdsize), (ut64)(bin->size)); break; } snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.offset", i); sdb_num_set (bin->kv, cmd_flagname, off, 0); const char *format_name = cmd_to_pf_definition (lc.cmd); snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.format", i); if (format_name) { sdb_set (bin->kv, cmd_flagname, format_name, 0); } else { sdb_set (bin->kv, cmd_flagname, "[4]Ed (mach_load_command_type)cmd size", 0); } switch (lc.cmd) { case LC_DATA_IN_CODE: snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.cmd", i); sdb_set (bin->kv, cmd_flagname, "data_in_code", 0); if (bin->verbose) { ut8 buf[8]; r_buf_read_at (bin->b, off + 8, buf, sizeof (buf)); ut32 dataoff = r_read_ble32 (buf, bin->big_endian); ut32 datasize= r_read_ble32 (buf + 4, bin->big_endian); eprintf ("data-in-code at 0x%x size %d\n", dataoff, datasize); ut8 *db = (ut8*)malloc (datasize); if (db) { r_buf_read_at (bin->b, dataoff, db, datasize); // TODO table of non-instructions regions in __text int j; for (j = 0; j < datasize; j += 8) { ut32 dw = r_read_ble32 (db + j, bin->big_endian); // int kind = r_read_ble16 (db + i + 4 + 2, bin->big_endian); int len = r_read_ble16 (db + j + 4, bin->big_endian); ut64 va = offset_to_vaddr(bin, dw); // eprintf ("# 0x%d -> 0x%x\n", dw, va); // eprintf ("0x%x kind %d len %d\n", dw, kind, len); eprintf ("Cd 4 %d @ 0x%"PFMT64x"\n", len / 4, va); } } } break; case LC_DYLD_EXPORTS_TRIE: if (bin->verbose) { ut8 buf[8]; r_buf_read_at (bin->b, off + 8, buf, sizeof 
(buf)); ut32 dataoff = r_read_ble32 (buf, bin->big_endian); ut32 datasize= r_read_ble32 (buf + 4, bin->big_endian); eprintf ("exports trie at 0x%x size %d\n", dataoff, datasize); } break; case LC_DYLD_CHAINED_FIXUPS: { ut8 buf[8]; if (r_buf_read_at (bin->b, off + 8, buf, sizeof (buf)) == sizeof (buf)) { ut32 dataoff = r_read_ble32 (buf, bin->big_endian); ut32 datasize= r_read_ble32 (buf + 4, bin->big_endian); if (bin->verbose) { eprintf ("chained fixups at 0x%x size %d\n", dataoff, datasize); } has_chained_fixups = parse_chained_fixups (bin, dataoff, datasize); } } break; } } if (!has_chained_fixups && bin->hdr.cputype == CPU_TYPE_ARM64 && (bin->hdr.cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E) { #if 0 if (bin->verbose) { eprintf ("reconstructing chained fixups\n"); } #endif reconstruct_chained_fixup (bin); } return true; } static bool init(struct MACH0_(obj_t) *mo) { if (!init_hdr (mo)) { return false; } if (!init_items (mo)) { Eprintf ("Warning: Cannot initialize items\n"); } mo->baddr = MACH0_(get_baddr)(mo); return true; } void *MACH0_(mach0_free)(struct MACH0_(obj_t) *mo) { if (!mo) { return NULL; } size_t i; if (mo->symbols) { for (i = 0; !mo->symbols[i].last; i++) { free (mo->symbols[i].name); } free (mo->symbols); } free (mo->segs); free (mo->sects); free (mo->symtab); free (mo->symstr); free (mo->indirectsyms); free (mo->imports_by_ord); ht_pp_free (mo->imports_by_name); free (mo->dyld_info); free (mo->toc); free (mo->modtab); free (mo->libs); free (mo->func_start); free (mo->signature); free (mo->intrp); free (mo->compiler); if (mo->chained_starts) { for (i = 0; i < mo->nsegs; i++) { if (mo->chained_starts[i]) { free (mo->chained_starts[i]->page_start); free (mo->chained_starts[i]); } } free (mo->chained_starts); } r_buf_free (mo->b); free (mo); return NULL; } void MACH0_(opts_set_default)(struct MACH0_(opts_t) *options, RBinFile *bf) { r_return_if_fail (options && bf && bf->rbin); options->header_at = 0; options->symbols_off = 0; 
// Convert Mach-O VM protection bits (r=1, w=2, x=4) into radare2
// permission bits (r=4, w=2, x=1). The write bit occupies the same
// position in both encodings; read and execute swap places.
static int prot2perm(int x) {
	int perm = x & 2; // w -> w
	if (x & 1) {
		perm |= 4; // prot r -> perm r
	}
	if (x & 4) {
		perm |= 1; // prot x -> perm x
	}
	return perm;
}
(strstr (sect->name, "_objc_methtype")) { return true; } return false; } RList *MACH0_(get_segments)(RBinFile *bf) { struct MACH0_(obj_t) *bin = bf->o->bin_obj; RList *list = r_list_newf ((RListFree)r_bin_section_free); size_t i, j; /* for core files */ if (bin->nsegs > 0) { struct MACH0_(segment_command) *seg; for (i = 0; i < bin->nsegs; i++) { seg = &bin->segs[i]; if (!seg->initprot) { continue; } RBinSection *s = r_bin_section_new (NULL); if (!s) { break; } s->vaddr = seg->vmaddr; s->vsize = seg->vmsize; s->size = seg->vmsize; s->paddr = seg->fileoff; s->paddr += bf->o->boffset; //TODO s->flags = seg->flags; s->name = r_str_ndup (seg->segname, 16); s->is_segment = true; r_str_filter (s->name, -1); s->perm = prot2perm (seg->initprot); s->add = true; r_list_append (list, s); } } if (bin->nsects > 0) { int last_section = R_MIN (bin->nsects, 128); // maybe drop this limit? for (i = 0; i < last_section; i++) { RBinSection *s = R_NEW0 (RBinSection); if (!s) { break; } s->vaddr = (ut64)bin->sects[i].addr; s->vsize = (ut64)bin->sects[i].size; s->is_segment = false; s->size = (bin->sects[i].flags == S_ZEROFILL) ? 
0 : (ut64)bin->sects[i].size; // XXX flags s->paddr = (ut64)bin->sects[i].offset; int segment_index = 0; //s->perm =prot2perm (bin->segs[j].initprot); for (j = 0; j < bin->nsegs; j++) { if (s->vaddr >= bin->segs[j].vmaddr && s->vaddr < (bin->segs[j].vmaddr + bin->segs[j].vmsize)) { s->perm = prot2perm (bin->segs[j].initprot); segment_index = j; break; } } char *section_name = r_str_ndup (bin->sects[i].sectname, 16); char *segment_name = r_str_newf ("%u.%s", (ut32)i, bin->segs[segment_index].segname); s->name = r_str_newf ("%s.%s", segment_name, section_name); s->is_data = __isDataSection (s); if (strstr (section_name, "interpos") || strstr (section_name, "__mod_")) { #if R_BIN_MACH064 const int ws = 8; #else const int ws = 4; #endif s->format = r_str_newf ("Cd %d[%"PFMT64d"]", ws, s->vsize / ws); } r_list_append (list, s); free (segment_name); free (section_name); } } return list; } // XXX this function is called so many times struct section_t *MACH0_(get_sections)(struct MACH0_(obj_t) *bin) { r_return_val_if_fail (bin, NULL); struct section_t *sections; char sectname[64], raw_segname[17]; size_t i, j, to; /* for core files */ if (bin->nsects < 1 && bin->nsegs > 0) { struct MACH0_(segment_command) *seg; if (!(sections = calloc ((bin->nsegs + 1), sizeof (struct section_t)))) { return NULL; } for (i = 0; i < bin->nsegs; i++) { seg = &bin->segs[i]; sections[i].addr = seg->vmaddr; sections[i].offset = seg->fileoff; sections[i].size = seg->vmsize; sections[i].vsize = seg->vmsize; sections[i].align = 4096; sections[i].flags = seg->flags; r_str_ncpy (sectname, seg->segname, 16); sectname[16] = 0; r_str_filter (sectname, -1); // hack to support multiple sections with same name sections[i].perm = prot2perm (seg->initprot); sections[i].last = 0; } sections[i].last = 1; return sections; } if (!bin->sects) { return NULL; } to = R_MIN (bin->nsects, 128); // limit number of sections here to avoid fuzzed bins if (to < 1) { return NULL; } if (!(sections = calloc (bin->nsects + 1, 
sizeof (struct section_t)))) { return NULL; } for (i = 0; i < to; i++) { sections[i].offset = (ut64)bin->sects[i].offset; sections[i].addr = (ut64)bin->sects[i].addr; sections[i].size = (bin->sects[i].flags == S_ZEROFILL) ? 0 : (ut64)bin->sects[i].size; sections[i].vsize = (ut64)bin->sects[i].size; sections[i].align = bin->sects[i].align; sections[i].flags = bin->sects[i].flags; r_str_ncpy (sectname, bin->sects[i].sectname, 17); r_str_filter (sectname, -1); r_str_ncpy (raw_segname, bin->sects[i].segname, 16); for (j = 0; j < bin->nsegs; j++) { if (sections[i].addr >= bin->segs[j].vmaddr && sections[i].addr < (bin->segs[j].vmaddr + bin->segs[j].vmsize)) { sections[i].perm = prot2perm (bin->segs[j].initprot); break; } } snprintf (sections[i].name, sizeof (sections[i].name), "%d.%s.%s", (int)i, raw_segname, sectname); sections[i].last = 0; } sections[i].last = 1; return sections; } static bool parse_import_stub(struct MACH0_(obj_t) *bin, struct symbol_t *symbol, int idx) { size_t i, j, nsyms, stridx; const char *symstr; if (idx < 0) { return false; } symbol->offset = 0LL; symbol->addr = 0LL; symbol->name = NULL; symbol->is_imported = true; if (!bin || !bin->sects) { return false; } for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == S_SYMBOL_STUBS && bin->sects[i].reserved2 > 0) { ut64 sect_size = bin->sects[i].size; ut32 sect_fragment = bin->sects[i].reserved2; if (bin->sects[i].offset > bin->size) { bprintf ("mach0: section offset starts way beyond the end of the file\n"); continue; } if (sect_size > bin->size) { bprintf ("mach0: Invalid symbol table size\n"); sect_size = bin->size - bin->sects[i].offset; } nsyms = (int)(sect_size / sect_fragment); for (j = 0; j < nsyms; j++) { if (bin->sects) { if (bin->sects[i].reserved1 + j >= bin->nindirectsyms) { continue; } } if (bin->indirectsyms) { if (idx != bin->indirectsyms[bin->sects[i].reserved1 + j]) { continue; } } if (idx > bin->nsymtab) { continue; } symbol->type = 
R_BIN_MACH0_SYMBOL_TYPE_LOCAL; int delta = j * bin->sects[i].reserved2; if (delta < 0) { bprintf ("mach0: corrupted reserved2 value leads to int overflow.\n"); continue; } symbol->offset = bin->sects[i].offset + delta; symbol->addr = bin->sects[i].addr + delta; symbol->size = 0; stridx = bin->symtab[idx].n_strx; if (stridx < bin->symstrlen) { symstr = (char *)bin->symstr + stridx; } else { symstr = "???"; } // Remove the extra underscore that every import seems to have in Mach-O. if (*symstr == '_') { symstr++; } symbol->name = strdup (symstr); return true; } } } return false; } static int inSymtab(HtPP *hash, const char *name, ut64 addr) { bool found = false; char *key = r_str_newf ("%"PFMT64x".%s", addr, name); ht_pp_find (hash, key, &found); if (found) { free (key); return true; } ht_pp_insert (hash, key, "1"); free (key); return false; } static char *get_name(struct MACH0_(obj_t) *mo, ut32 stridx, bool filter) { size_t i = 0; if (!mo->symstr || stridx >= mo->symstrlen) { return NULL; } int len = mo->symstrlen - stridx; const char *symstr = (const char*)mo->symstr + stridx; for (i = 0; i < len; i++) { if ((ut8)(symstr[i] & 0xff) == 0xff || !symstr[i]) { len = i; break; } } if (len > 0) { char *res = r_str_ndup (symstr, len); if (filter) { r_str_filter (res, -1); } return res; } return NULL; } static int walk_exports(struct MACH0_(obj_t) *bin, RExportsIterator iterator, void *ctx) { RList *states = NULL; r_return_val_if_fail (bin, 0); if (!bin->dyld_info) { return 0; } size_t count = 0; ut8 *p = NULL; ut64 size = bin->dyld_info->export_size; if (!size || size >= SIZE_MAX) { return 0; } ut8 *trie = calloc (size + 1, 1); if (!trie) { return 0; } ut8 *end = trie + size; if (r_buf_read_at (bin->b, bin->dyld_info->export_off, trie, bin->dyld_info->export_size) != size) { goto beach; } states = r_list_newf ((RListFree)free); if (!states) { goto beach; } RTrieState *root = R_NEW0 (RTrieState); if (!root) { goto beach; } root->node = trie; root->i = 0; root->label = 
NULL; r_list_push (states, root); do { RTrieState * state = r_list_get_top (states); p = state->node; ut64 len = read_uleb128 (&p, end); if (len == UT64_MAX) { break; } if (len) { ut64 flags = read_uleb128 (&p, end); if (flags == UT64_MAX) { break; } ut64 offset = read_uleb128 (&p, end); if (offset == UT64_MAX) { break; } ut64 resolver = 0; bool isReexport = flags & EXPORT_SYMBOL_FLAGS_REEXPORT; bool hasResolver = flags & EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER; if (hasResolver) { ut64 res = read_uleb128 (&p, end); if (res == UT64_MAX) { break; } resolver = res + bin->header_at; } else if (isReexport) { p += strlen ((char*) p) + 1; // TODO: handle this } if (!isReexport) { offset += bin->header_at; } if (iterator && !isReexport) { char * name = NULL; RListIter *iter; RTrieState *s; r_list_foreach (states, iter, s) { if (!s->label) { continue; } name = r_str_append (name, s->label); } if (!name) { bprintf ("malformed export trie %d\n", __LINE__); goto beach; } if (hasResolver) { char * stub_name = r_str_newf ("stub.%s", name); iterator (bin, stub_name, flags, offset, ctx); iterator (bin, name, flags, resolver, ctx); R_FREE (stub_name); } else { iterator (bin, name, flags, offset, ctx); } R_FREE (name); } if (!isReexport) { if (hasResolver) { count++; } count++; } } ut64 child_count = read_uleb128 (&p, end); if (child_count == UT64_MAX) { goto beach; } if (state->i == child_count) { free (r_list_pop (states)); continue; } if (!state->next_child) { state->next_child = p; } else { p = state->next_child; } RTrieState * next = R_NEW0 (RTrieState); if (!next) { goto beach; } next->label = (char *) p; p += strlen (next->label) + 1; if (p >= end) { bprintf ("malformed export trie %d\n", __LINE__); R_FREE (next); goto beach; } ut64 tr = read_uleb128 (&p, end); if (tr == UT64_MAX || tr >= size) { R_FREE (next); goto beach; } next->node = trie + (size_t)tr; if (next->node >= end) { bprintf ("malformed export trie %d\n", __LINE__); R_FREE (next); goto beach; } { // avoid loops 
RListIter *it; RTrieState *s; r_list_foreach (states, it, s) { if (s->node == next->node) { bprintf ("malformed export trie %d\n", __LINE__); R_FREE (next); goto beach; } } } next->i = 0; state->i++; state->next_child = p; r_list_push (states, next); } while (r_list_length (states)); beach: r_list_free (states); R_FREE (trie); return count; } static void fill_exports_list(struct MACH0_(obj_t) *bin, const char *name, ut64 flags, ut64 offset, void *ctx) { RList *list = (RList*) ctx; RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { return; } sym->vaddr = offset_to_vaddr (bin, offset); sym->paddr = offset; sym->type = "EXT"; sym->name = strdup (name); sym->bind = R_BIN_BIND_GLOBAL_STR; r_list_append (list, sym); } // TODO: Return RList<RBinSymbol> // 2x speedup const RList *MACH0_(get_symbols_list)(struct MACH0_(obj_t) *bin) { static RList * cache = NULL; // XXX DONT COMMIT WITH THIS struct symbol_t *symbols; size_t j, s, symbols_size, symbols_count; ut32 to, from; size_t i; r_return_val_if_fail (bin, NULL); if (cache) { return cache; } RList *list = r_list_newf ((RListFree)r_bin_symbol_free); cache = list; HtPP *hash = ht_pp_new0 (); if (!hash) { return NULL; } walk_exports (bin, fill_exports_list, list); if (r_list_length (list)) { RListIter *it; RBinSymbol *s; r_list_foreach (list, it, s) { inSymtab (hash, s->name, s->vaddr); } } if (!bin->symtab || !bin->symstr) { ht_pp_free (hash); return list; } /* parse dynamic symbol table */ symbols_count = (bin->dysymtab.nextdefsym + \ bin->dysymtab.nlocalsym + \ bin->dysymtab.nundefsym ); symbols_count += bin->nsymtab; symbols_size = (symbols_count + 1) * 2 * sizeof (struct symbol_t); if (symbols_size < 1 || !(symbols = calloc (1, symbols_size))) { ht_pp_free (hash); return NULL; } j = 0; // symbol_idx bin->main_addr = 0; int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); for (s = 0; s < 2; s++) { switch (s) { case 0: from = bin->dysymtab.iextdefsym; to = from + bin->dysymtab.nextdefsym; break; case 1: from = 
bin->dysymtab.ilocalsym; to = from + bin->dysymtab.nlocalsym; break; #if NOT_USED case 2: from = bin->dysymtab.iundefsym; to = from + bin->dysymtab.nundefsym; break; #endif } if (from == to) { continue; } from = R_MIN (from, symbols_size / sizeof (struct symbol_t)); to = R_MIN (R_MIN (to, bin->nsymtab), symbols_size / sizeof (struct symbol_t)); ut32 maxsymbols = symbols_size / sizeof (struct symbol_t); if (symbols_count >= maxsymbols) { symbols_count = maxsymbols - 1; eprintf ("macho warning: Symbol table truncated\n"); } for (i = from; i < to && j < symbols_count; i++, j++) { RBinSymbol *sym = R_NEW0 (RBinSymbol); sym->vaddr = bin->symtab[i].n_value; sym->paddr = addr_to_offset (bin, sym->vaddr); symbols[j].size = 0; /* TODO: Is it anywhere? */ sym->bits = bin->symtab[i].n_desc & N_ARM_THUMB_DEF ? 16 : bits; if (bin->symtab[i].n_type & N_EXT) { sym->type = "EXT"; } else { sym->type = "LOCAL"; } int stridx = bin->symtab[i].n_strx; char *sym_name = get_name (bin, stridx, false); if (sym_name) { sym->name = sym_name; if (!bin->main_addr || bin->main_addr == UT64_MAX) { const char *name = sym->name; if (!strcmp (name, "__Dmain")) { bin->main_addr = symbols[j].addr; } else if (strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "_main")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "main")) { bin->main_addr = symbols[j].addr; } } } else { sym->name = r_str_newf ("unk%u", (ut32)i); } if (!inSymtab (hash, sym->name, sym->vaddr)) { r_list_append (list, sym); } else { r_bin_symbol_free (sym); } } } to = R_MIN ((ut32)bin->nsymtab, bin->dysymtab.iundefsym + bin->dysymtab.nundefsym); for (i = bin->dysymtab.iundefsym; i < to; i++) { struct symbol_t symbol; if (j > symbols_count) { bprintf ("mach0-get-symbols: error\n"); break; } if (parse_import_stub (bin, &symbol, i)) { j++; RBinSymbol *sym = R_NEW0 (RBinSymbol); sym->vaddr = symbol.addr; sym->paddr = symbol.offset; sym->name = symbol.name; if 
(!sym->name) { sym->name = r_str_newf ("unk%u", (ut32)i); } sym->is_imported = symbol.is_imported; r_list_append (list, sym); } } for (i = 0; i < bin->nsymtab && i < symbols_count; i++) { struct MACH0_(nlist) *st = &bin->symtab[i]; // 0 is for imports // 1 is for symbols // 2 is for func.eh (exception handlers?) int section = st->n_sect; if (section == 1 && j < symbols_count) { // text ??st->n_type == 1) maybe wrong RBinSymbol *sym = R_NEW0(RBinSymbol); /* is symbol */ sym->vaddr = st->n_value; sym->paddr = addr_to_offset (bin, symbols[j].addr); sym->is_imported = symbols[j].is_imported; if (st->n_type & N_EXT) { sym->type = "EXT"; } else { sym->type = "LOCAL"; } char *sym_name = get_name (bin, st->n_strx, false); if (sym_name) { sym->name = sym_name; if (inSymtab (hash, sym->name, sym->vaddr)) { r_bin_symbol_free (sym); continue; } if (!bin->main_addr || bin->main_addr == UT64_MAX) { const char *name = sym->name; if (!strcmp (name, "__Dmain")) { bin->main_addr = symbols[i].addr; } else if (strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[i].addr; } else if (!strcmp (symbols[i].name, "_main")) { bin->main_addr = symbols[i].addr; } } } else { sym->name = r_str_newf ("unk%u", (ut32)i); } r_list_append (list, sym); j++; } } ht_pp_free (hash); // bin->symbols = symbols; free (symbols); return list; } static void assign_export_symbol_t(struct MACH0_(obj_t) *bin, const char *name, ut64 flags, ut64 offset, void *ctx) { RSymCtx *sym_ctx = (RSymCtx*) ctx; int j = sym_ctx->j; if (j < sym_ctx->symbols_count) { sym_ctx->symbols[j].offset = offset; sym_ctx->symbols[j].addr = offset_to_vaddr (bin, offset); if (inSymtab (sym_ctx->hash, name, sym_ctx->symbols[j].addr)) { return; } sym_ctx->symbols[j].size = 0; sym_ctx->symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT; sym_ctx->symbols[j].name = strdup (name); sym_ctx->j++; } } const struct symbol_t *MACH0_(get_symbols)(struct MACH0_(obj_t) *bin) { struct symbol_t *symbols; int j, s, stridx, symbols_size, 
symbols_count; ut32 to, from, i; if (bin->symbols) { return bin->symbols; } HtPP *hash = ht_pp_new0 (); if (!hash) { return NULL; } r_return_val_if_fail (bin, NULL); int n_exports = walk_exports (bin, NULL, NULL); symbols_count = n_exports; j = 0; // symbol_idx int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); if (bin->symtab && bin->symstr) { /* parse dynamic symbol table */ symbols_count = (bin->dysymtab.nextdefsym + \ bin->dysymtab.nlocalsym + \ bin->dysymtab.nundefsym ); symbols_count += bin->nsymtab; if (symbols_count < 0 || ((st64)symbols_count * 2) > ST32_MAX) { eprintf ("Symbols count overflow\n"); ht_pp_free (hash); return NULL; } symbols_size = (symbols_count + 1) * 2 * sizeof (struct symbol_t); if (symbols_size < 1) { ht_pp_free (hash); return NULL; } if (!(symbols = calloc (1, symbols_size))) { ht_pp_free (hash); return NULL; } bin->main_addr = 0; for (s = 0; s < 2; s++) { switch (s) { case 0: from = bin->dysymtab.iextdefsym; to = from + bin->dysymtab.nextdefsym; break; case 1: from = bin->dysymtab.ilocalsym; to = from + bin->dysymtab.nlocalsym; break; #if NOT_USED case 2: from = bin->dysymtab.iundefsym; to = from + bin->dysymtab.nundefsym; break; #endif } if (from == to) { continue; } from = R_MIN (from, symbols_size / sizeof (struct symbol_t)); to = R_MIN (R_MIN (to, bin->nsymtab), symbols_size / sizeof (struct symbol_t)); ut32 maxsymbols = symbols_size / sizeof (struct symbol_t); if (symbols_count >= maxsymbols) { symbols_count = maxsymbols - 1; eprintf ("macho warning: Symbol table truncated\n"); } for (i = from; i < to && j < symbols_count; i++, j++) { symbols[j].offset = addr_to_offset (bin, bin->symtab[i].n_value); symbols[j].addr = bin->symtab[i].n_value; symbols[j].size = 0; /* TODO: Is it anywhere? */ symbols[j].bits = bin->symtab[i].n_desc & N_ARM_THUMB_DEF ? 16 : bits; symbols[j].is_imported = false; symbols[j].type = (bin->symtab[i].n_type & N_EXT) ? 
R_BIN_MACH0_SYMBOL_TYPE_EXT : R_BIN_MACH0_SYMBOL_TYPE_LOCAL; stridx = bin->symtab[i].n_strx; symbols[j].name = get_name (bin, stridx, false); symbols[j].last = false; const char *name = symbols[j].name; if (bin->main_addr == 0 && name) { if (!strcmp (name, "__Dmain")) { bin->main_addr = symbols[j].addr; } else if (strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "_main")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "main")) { bin->main_addr = symbols[j].addr; } } if (inSymtab (hash, symbols[j].name, symbols[j].addr)) { free (symbols[j].name); symbols[j].name = NULL; j--; } } } to = R_MIN ((ut32)bin->nsymtab, bin->dysymtab.iundefsym + bin->dysymtab.nundefsym); for (i = bin->dysymtab.iundefsym; i < to; i++) { if (j > symbols_count) { bprintf ("mach0-get-symbols: error\n"); break; } if (parse_import_stub (bin, &symbols[j], i)) { symbols[j++].last = false; } } for (i = 0; i < bin->nsymtab; i++) { struct MACH0_(nlist) *st = &bin->symtab[i]; if (st->n_type & N_STAB) { continue; } // 0 is for imports // 1 is for symbols // 2 is for func.eh (exception handlers?) int section = st->n_sect; if (section == 1 && j < symbols_count) { // check if symbol exists already /* is symbol */ symbols[j].addr = st->n_value; symbols[j].offset = addr_to_offset (bin, symbols[j].addr); symbols[j].size = 0; /* find next symbol and crop */ symbols[j].type = (st->n_type & N_EXT) ? 
R_BIN_MACH0_SYMBOL_TYPE_EXT : R_BIN_MACH0_SYMBOL_TYPE_LOCAL; char *sym_name = get_name (bin, st->n_strx, false); if (sym_name) { symbols[j].name = sym_name; } else { symbols[j].name = r_str_newf ("entry%d", i); } symbols[j].last = 0; if (inSymtab (hash, symbols[j].name, symbols[j].addr)) { R_FREE (symbols[j].name); } else { j++; } const char *name = symbols[i].name; if (bin->main_addr == 0 && name) { if (name && !strcmp (name, "__Dmain")) { bin->main_addr = symbols[i].addr; } else if (name && strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[i].addr; } else if (symbols[i].name && !strcmp (symbols[i].name, "_main")) { bin->main_addr = symbols[i].addr; } } } } } else if (!n_exports) { ht_pp_free (hash); return NULL; } else { symbols_size = (symbols_count + 1) * sizeof (struct symbol_t); if (symbols_size < 1) { ht_pp_free (hash); return NULL; } if (!(symbols = calloc (1, symbols_size))) { ht_pp_free (hash); return NULL; } } if (n_exports && (symbols_count - j) >= n_exports) { RSymCtx sym_ctx; sym_ctx.symbols = symbols; sym_ctx.j = j; sym_ctx.symbols_count = symbols_count; sym_ctx.hash = hash; walk_exports (bin, assign_export_symbol_t, &sym_ctx); j = sym_ctx.j; } ht_pp_free (hash); symbols[j].last = true; bin->symbols = symbols; return symbols; } static int parse_import_ptr(struct MACH0_(obj_t) *bin, struct reloc_t *reloc, int idx) { int i, j, sym; size_t wordsize; ut32 stype; wordsize = get_word_size (bin); if (idx < 0 || idx >= bin->nsymtab) { return 0; } if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) { stype = S_LAZY_SYMBOL_POINTERS; } else { stype = S_NON_LAZY_SYMBOL_POINTERS; } reloc->offset = 0; reloc->addr = 0; reloc->addend = 0; #define CASE(T) case ((T) / 8): reloc->type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return false; } #undef CASE for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == stype) { for (j = 0, sym = 
-1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) { int indidx = bin->sects[i].reserved1 + j; if (indidx < 0 || indidx >= bin->nindirectsyms) { break; } if (idx == bin->indirectsyms[indidx]) { sym = j; break; } } reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize; reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize; return true; } } return false; } struct import_t *MACH0_(get_imports)(struct MACH0_(obj_t) *bin) { r_return_val_if_fail (bin, NULL); int i, j, idx, stridx; if (!bin->sects || !bin->symtab || !bin->symstr || !bin->indirectsyms) { return NULL; } if (bin->dysymtab.nundefsym < 1 || bin->dysymtab.nundefsym > 0xfffff) { return NULL; } struct import_t *imports = calloc (bin->dysymtab.nundefsym + 1, sizeof (struct import_t)); if (!imports) { return NULL; } for (i = j = 0; i < bin->dysymtab.nundefsym; i++) { idx = bin->dysymtab.iundefsym + i; if (idx < 0 || idx >= bin->nsymtab) { bprintf ("WARNING: Imports index out of bounds. Ignoring relocs\n"); free (imports); return NULL; } stridx = bin->symtab[idx].n_strx; char *imp_name = get_name (bin, stridx, false); if (imp_name) { r_str_ncpy (imports[j].name, imp_name, R_BIN_MACH0_STRING_LENGTH - 1); free (imp_name); } else { //imports[j].name[0] = 0; continue; } imports[j].ord = i; imports[j++].last = 0; } imports[j].last = 1; if (!bin->imports_by_ord_size) { if (j > 0) { bin->imports_by_ord_size = j; bin->imports_by_ord = (RBinImport**)calloc (j, sizeof (RBinImport*)); } else { bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; } } return imports; } static int reloc_comparator(struct reloc_t *a, struct reloc_t *b) { return a->addr - b->addr; } static void parse_relocation_info(struct MACH0_(obj_t) *bin, RSkipList *relocs, ut32 offset, ut32 num) { if (!num || !offset || (st32)num < 0) { return; } ut64 total_size = num * sizeof (struct relocation_info); if (offset > bin->size) { return; } if (total_size > bin->size) { total_size = bin->size - offset; num = total_size /= 
sizeof (struct relocation_info); } struct relocation_info *info = calloc (num, sizeof (struct relocation_info)); if (!info) { return; } if (r_buf_read_at (bin->b, offset, (ut8 *) info, total_size) < total_size) { free (info); return; } size_t i; for (i = 0; i < num; i++) { struct relocation_info a_info = info[i]; ut32 sym_num = a_info.r_symbolnum; if (sym_num > bin->nsymtab) { continue; } ut32 stridx = bin->symtab[sym_num].n_strx; char *sym_name = get_name (bin, stridx, false); if (!sym_name) { continue; } struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { free (info); free (sym_name); return; } reloc->addr = offset_to_vaddr (bin, a_info.r_address); reloc->offset = a_info.r_address; reloc->ord = sym_num; reloc->type = a_info.r_type; // enum RelocationInfoType reloc->external = a_info.r_extern; reloc->pc_relative = a_info.r_pcrel; reloc->size = a_info.r_length; r_str_ncpy (reloc->name, sym_name, sizeof (reloc->name) - 1); r_skiplist_insert (relocs, reloc); free (sym_name); } free (info); } static bool walk_bind_chains_callback(void * context, RFixupEventDetails * event_details) { r_return_val_if_fail (event_details->type == R_FIXUP_EVENT_BIND || event_details->type == R_FIXUP_EVENT_BIND_AUTH, false); RWalkBindChainsContext *ctx = context; ut8 *imports = ctx->imports; struct MACH0_(obj_t) *bin = event_details->bin; ut32 imports_count = bin->fixups_header.imports_count; ut32 fixups_offset = bin->fixups_offset; ut32 fixups_size = bin->fixups_size; ut32 imports_format = bin->fixups_header.imports_format; ut32 import_index = ((RFixupBindEventDetails *) event_details)->ordinal; ut64 addend = 0; if (event_details->type != R_FIXUP_EVENT_BIND_AUTH) { addend = ((RFixupBindEventDetails *) event_details)->addend; } if (import_index < imports_count) { ut64 name_offset; switch (imports_format) { case DYLD_CHAINED_IMPORT: { struct dyld_chained_import * item = &((struct dyld_chained_import *) imports)[import_index]; name_offset = item->name_offset; break; } case 
DYLD_CHAINED_IMPORT_ADDEND: { struct dyld_chained_import_addend * item = &((struct dyld_chained_import_addend *) imports)[import_index]; name_offset = item->name_offset; addend += item->addend; break; } case DYLD_CHAINED_IMPORT_ADDEND64: { struct dyld_chained_import_addend64 * item = &((struct dyld_chained_import_addend64 *) imports)[import_index]; name_offset = item->name_offset; addend += item->addend; break; } default: bprintf ("Unsupported imports format\n"); return false; } ut64 symbols_offset = bin->fixups_header.symbols_offset + fixups_offset; if (symbols_offset + name_offset + 1 < fixups_offset + fixups_size) { char *name = r_buf_get_string (bin->b, symbols_offset + name_offset); if (name) { struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { free (name); return false; } reloc->addr = offset_to_vaddr (bin, event_details->offset); reloc->offset = event_details->offset; reloc->ord = import_index; reloc->type = R_BIN_RELOC_64; reloc->size = 8; reloc->addend = addend; r_str_ncpy (reloc->name, name, sizeof (reloc->name) - 1); r_skiplist_insert_autofree (ctx->relocs, reloc); free (name); } else if (bin->verbose) { eprintf ("Malformed chained bind: failed to read name\n"); } } else if (bin->verbose) { eprintf ("Malformed chained bind: name_offset out of bounds\n"); } } else if (bin->verbose) { eprintf ("Malformed chained bind: import out of length\n"); } return true; } static void walk_bind_chains(struct MACH0_(obj_t) *bin, RSkipList *relocs) { r_return_if_fail (bin && bin->fixups_offset); ut8 *imports = NULL; ut32 imports_count = bin->fixups_header.imports_count; ut32 fixups_offset = bin->fixups_offset; ut32 imports_offset = bin->fixups_header.imports_offset; if (!imports_count || !imports_offset) { return; } if (bin->fixups_header.symbols_format != 0) { eprintf ("Compressed fixups symbols not supported yet, please file a bug with a sample attached.\n"); return; } ut32 imports_format = bin->fixups_header.imports_format; ut64 imports_size; switch 
(imports_format) { case DYLD_CHAINED_IMPORT: imports_size = sizeof (struct dyld_chained_import) * imports_count; break; case DYLD_CHAINED_IMPORT_ADDEND: imports_size = sizeof (struct dyld_chained_import_addend) * imports_count; break; case DYLD_CHAINED_IMPORT_ADDEND64: imports_size = sizeof (struct dyld_chained_import_addend64) * imports_count; break; default: eprintf ("Unsupported chained imports format: %d\n", imports_format); goto beach; } imports = malloc (imports_size); if (!imports) { goto beach; } switch (imports_format) { case DYLD_CHAINED_IMPORT: if (r_buf_fread_at (bin->b, fixups_offset + imports_offset, imports, "i", imports_count) != imports_size) { goto beach; } break; case DYLD_CHAINED_IMPORT_ADDEND: if (r_buf_fread_at (bin->b, fixups_offset + imports_offset, imports, "ii", imports_count) != imports_size) { goto beach; } break; case DYLD_CHAINED_IMPORT_ADDEND64: if (r_buf_fread_at (bin->b, fixups_offset + imports_offset, imports, "il", imports_count) != imports_size) { goto beach; } break; } RWalkBindChainsContext ctx; ctx.imports = imports; ctx.relocs = relocs; MACH0_(iterate_chained_fixups) (bin, 0, UT64_MAX, R_FIXUP_EVENT_MASK_BIND_ALL, &walk_bind_chains_callback, &ctx); beach: free (imports); } static bool is_valid_ordinal_table_size(ut64 size) { return size > 0 && size <= UT16_MAX; } RSkipList *MACH0_(get_relocs)(struct MACH0_(obj_t) *bin) { RSkipList *relocs = NULL; RPVector *threaded_binds = NULL; size_t wordsize = get_word_size (bin); if (bin->dyld_info) { ut8 *opcodes, rel_type = 0; size_t bind_size, lazy_size, weak_size; #define CASE(T) case ((T) / 8): rel_type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return NULL; } #undef CASE bind_size = bin->dyld_info->bind_size; lazy_size = bin->dyld_info->lazy_bind_size; weak_size = bin->dyld_info->weak_bind_size; if (!bind_size && !lazy_size) { return NULL; } if ((bind_size + lazy_size)<1) { return NULL; } if (bin->dyld_info->bind_off > bin->size || 
bin->dyld_info->bind_off + bind_size > bin->size) { return NULL; } if (bin->dyld_info->lazy_bind_off > bin->size || \ bin->dyld_info->lazy_bind_off + lazy_size > bin->size) { return NULL; } if (bin->dyld_info->bind_off + bind_size + lazy_size > bin->size) { return NULL; } if (bin->dyld_info->weak_bind_off + weak_size > bin->size) { return NULL; } ut64 amount = bind_size + lazy_size + weak_size; if (amount == 0 || amount > UT32_MAX) { return NULL; } if (!bin->segs) { return NULL; } relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { return NULL; } opcodes = calloc (1, amount + 1); if (!opcodes) { r_skiplist_free (relocs); return NULL; } int len = r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size); len += r_buf_read_at (bin->b, bin->dyld_info->lazy_bind_off, opcodes + bind_size, lazy_size); len += r_buf_read_at (bin->b, bin->dyld_info->weak_bind_off, opcodes + bind_size + lazy_size, weak_size); if (len < amount) { bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n", (ut64)(size_t)bin->dyld_info->bind_off); R_FREE (opcodes); r_skiplist_free (relocs); return NULL; } size_t partition_sizes[] = {bind_size, lazy_size, weak_size}; size_t pidx; int opcodes_offset = 0; for (pidx = 0; pidx < R_ARRAY_SIZE (partition_sizes); pidx++) { size_t partition_size = partition_sizes[pidx]; ut8 type = 0; int lib_ord = 0, seg_idx = -1, sym_ord = -1; char *sym_name = NULL; size_t j, count, skip; st64 addend = 0; ut64 addr = bin->segs[0].vmaddr; ut64 segment_size = bin->segs[0].filesize; if (bin->segs[0].filesize != bin->segs[0].vmsize) { // is probably invalid and we should warn the user } if (segment_size > bin->size) { // is probably invalid and we should warn the user segment_size = bin->size; } ut64 segment_end_addr = addr + segment_size; ut8 *p = opcodes + opcodes_offset; ut8 *end = p + partition_size; bool done = false; while (!done && p < end) { ut8 imm = *p & BIND_IMMEDIATE_MASK; ut8 op = *p & BIND_OPCODE_MASK; 
p++; switch (op) { case BIND_OPCODE_DONE: { bool in_lazy_binds = pidx == 1; if (!in_lazy_binds) { done = true; } break; } case BIND_OPCODE_THREADED: { switch (imm) { case BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB: { ut64 table_size = read_uleb128 (&p, end); if (!is_valid_ordinal_table_size (table_size)) { bprintf ("Error: BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB size is wrong\n"); break; } if (threaded_binds) { r_pvector_free (threaded_binds); } threaded_binds = r_pvector_new_with_len ((RPVectorFree) &free, table_size); if (threaded_binds) { sym_ord = 0; } break; } case BIND_SUBOPCODE_THREADED_APPLY: if (threaded_binds) { int cur_seg_idx = (seg_idx != -1)? seg_idx: 0; size_t n_threaded_binds = r_pvector_len (threaded_binds); while (addr < segment_end_addr) { ut8 tmp[8]; ut64 paddr = addr - bin->segs[cur_seg_idx].vmaddr + bin->segs[cur_seg_idx].fileoff; bin->rebasing_buffer = true; if (r_buf_read_at (bin->b, paddr, tmp, 8) != 8) { break; } bin->rebasing_buffer = false; ut64 raw_ptr = r_read_le64 (tmp); bool is_auth = (raw_ptr & (1ULL << 63)) != 0; bool is_bind = (raw_ptr & (1ULL << 62)) != 0; int ordinal = -1; int addend = -1; ut64 delta; if (is_auth && is_bind) { struct dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; delta = p->next; ordinal = p->ordinal; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; delta = p->next; ordinal = p->ordinal; addend = p->addend; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; delta = p->next; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; delta = p->next; } if (ordinal != -1) { if (ordinal >= n_threaded_binds) { bprintf ("Error: Malformed bind chain\n"); break; } struct reloc_t *ref = r_pvector_at (threaded_binds, ordinal); if (!ref) { 
bprintf ("Error: Inconsistent bind opcodes\n"); break; } struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { break; } *reloc = *ref; reloc->addr = addr; reloc->offset = paddr; if (addend != -1) { reloc->addend = addend; } r_skiplist_insert (relocs, reloc); } addr += delta * wordsize; if (!delta) { break; } } } break; default: bprintf ("Error: Unexpected BIND_OPCODE_THREADED sub-opcode: 0x%x\n", imm); } break; } case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: lib_ord = imm; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: lib_ord = read_uleb128 (&p, end); break; case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: lib_ord = imm? (st8)(BIND_OPCODE_MASK | imm) : 0; break; case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: { sym_name = (char*)p; while (*p++ && p < end) { /* empty loop */ } if (threaded_binds) { break; } sym_ord = -1; if (bin->symtab && bin->dysymtab.nundefsym < UT16_MAX) { for (j = 0; j < bin->dysymtab.nundefsym; j++) { size_t stridx = 0; bool found = false; int iundefsym = bin->dysymtab.iundefsym; if (iundefsym >= 0 && iundefsym < bin->nsymtab) { int sidx = iundefsym + j; if (sidx < 0 || sidx >= bin->nsymtab) { continue; } stridx = bin->symtab[sidx].n_strx; if (stridx >= bin->symstrlen) { continue; } found = true; } if (found && !strcmp ((const char *)bin->symstr + stridx, sym_name)) { sym_ord = j; break; } } } break; } case BIND_OPCODE_SET_TYPE_IMM: type = imm; break; case BIND_OPCODE_SET_ADDEND_SLEB: addend = r_sleb128 ((const ut8 **)&p, end); break; case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: seg_idx = imm; if (seg_idx >= bin->nsegs) { bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB" " has unexistent segment %d\n", seg_idx); free (opcodes); r_skiplist_free (relocs); r_pvector_free (threaded_binds); return NULL; // early exit to avoid future mayhem } addr = bin->segs[seg_idx].vmaddr + read_uleb128 (&p, end); segment_end_addr = bin->segs[seg_idx].vmaddr \ + bin->segs[seg_idx].vmsize; break; case BIND_OPCODE_ADD_ADDR_ULEB: addr += read_uleb128 (&p, end); 
break; #define DO_BIND() do {\ if (sym_ord < 0 && !sym_name) break;\ if (!threaded_binds) {\ if (seg_idx < 0 ) break;\ if (!addr) break;\ }\ struct reloc_t *reloc = R_NEW0 (struct reloc_t);\ reloc->addr = addr;\ if (seg_idx >= 0) {\ reloc->offset = addr - bin->segs[seg_idx].vmaddr + bin->segs[seg_idx].fileoff;\ if (type == BIND_TYPE_TEXT_PCREL32)\ reloc->addend = addend - (bin->baddr + addr);\ else\ reloc->addend = addend;\ } else {\ reloc->addend = addend;\ }\ /* library ordinal ??? */ \ reloc->ord = lib_ord;\ reloc->ord = sym_ord;\ reloc->type = rel_type;\ if (sym_name)\ r_str_ncpy (reloc->name, sym_name, 256);\ if (threaded_binds)\ r_pvector_set (threaded_binds, sym_ord, reloc);\ else\ r_skiplist_insert (relocs, reloc);\ } while (0) case BIND_OPCODE_DO_BIND: if (!threaded_binds && addr >= segment_end_addr) { bprintf ("Error: Malformed DO bind opcode 0x%"PFMT64x"\n", addr); goto beach; } DO_BIND (); if (!threaded_binds) { addr += wordsize; } else { sym_ord++; } break; case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: if (addr >= segment_end_addr) { bprintf ("Error: Malformed ADDR ULEB bind opcode\n"); goto beach; } DO_BIND (); addr += read_uleb128 (&p, end) + wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: if (addr >= segment_end_addr) { bprintf ("Error: Malformed IMM SCALED bind opcode\n"); goto beach; } DO_BIND (); addr += (ut64)imm * (ut64)wordsize + wordsize; break; case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: count = read_uleb128 (&p, end); skip = read_uleb128 (&p, end); for (j = 0; j < count; j++) { if (addr >= segment_end_addr) { bprintf ("Error: Malformed ULEB TIMES bind opcode\n"); goto beach; } DO_BIND (); addr += skip + wordsize; } break; #undef DO_BIND default: bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *p); R_FREE (opcodes); r_pvector_free (threaded_binds); return relocs; } } opcodes_offset += partition_size; } R_FREE (opcodes); r_pvector_free (threaded_binds); threaded_binds = NULL; } if (bin->symtab && bin->symstr && 
bin->sects && bin->indirectsyms) { int j; int amount = bin->dysymtab.nundefsym; if (amount < 0) { amount = 0; } if (!relocs) { relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { goto beach; } } for (j = 0; j < amount; j++) { struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { break; } if (parse_import_ptr (bin, reloc, bin->dysymtab.iundefsym + j)) { reloc->ord = j; r_skiplist_insert_autofree (relocs, reloc); } else { R_FREE (reloc); } } } if (bin->symtab && bin->dysymtab.extreloff && bin->dysymtab.nextrel) { if (!relocs) { relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { goto beach; } } parse_relocation_info (bin, relocs, bin->dysymtab.extreloff, bin->dysymtab.nextrel); } if (!bin->dyld_info && bin->chained_starts && bin->nsegs && bin->fixups_offset) { if (!relocs) { relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { goto beach; } } walk_bind_chains (bin, relocs); } beach: r_pvector_free (threaded_binds); return relocs; } struct addr_t *MACH0_(get_entrypoint)(struct MACH0_(obj_t) *bin) { r_return_val_if_fail (bin, NULL); ut64 ea = entry_to_vaddr (bin); if (ea == 0 || ea == UT64_MAX) { return NULL; } struct addr_t *entry = R_NEW0 (struct addr_t); if (!entry) { return NULL; } entry->addr = ea; entry->offset = addr_to_offset (bin, entry->addr); entry->haddr = sdb_num_get (bin->kv, "mach0.entry.offset", 0); sdb_num_set (bin->kv, "mach0.entry.vaddr", entry->addr, 0); sdb_num_set (bin->kv, "mach0.entry.paddr", bin->entry, 0); if (entry->offset == 0 && !bin->sects) { int i; for (i = 0; i < bin->nsects; i++) { // XXX: section name shoudnt matter .. 
just check for exec flags if (!strncmp (bin->sects[i].sectname, "__text", 6)) { entry->offset = (ut64)bin->sects[i].offset; sdb_num_set (bin->kv, "mach0.entry", entry->offset, 0); entry->addr = (ut64)bin->sects[i].addr; if (!entry->addr) { // workaround for object files eprintf ("entrypoint is 0...\n"); // XXX(lowlyw) there's technically not really entrypoints // for .o files, so ignore this... // entry->addr = entry->offset; } break; } } bin->entry = entry->addr; } return entry; } void MACH0_(kv_loadlibs)(struct MACH0_(obj_t) *bin) { int i; char lib_flagname[128]; for (i = 0; i < bin->nlibs; i++) { snprintf (lib_flagname, sizeof (lib_flagname), "libs.%d.name", i); sdb_set (bin->kv, lib_flagname, bin->libs[i], 0); } } struct lib_t *MACH0_(get_libs)(struct MACH0_(obj_t) *bin) { struct lib_t *libs; int i; char lib_flagname[128]; if (!bin->nlibs) { return NULL; } if (!(libs = calloc ((bin->nlibs + 1), sizeof (struct lib_t)))) { return NULL; } for (i = 0; i < bin->nlibs; i++) { snprintf (lib_flagname, sizeof (lib_flagname), "libs.%d.name", i); sdb_set (bin->kv, lib_flagname, bin->libs[i], 0); r_str_ncpy (libs[i].name, bin->libs[i], R_BIN_MACH0_STRING_LENGTH - 1); libs[i].last = 0; } libs[i].last = 1; return libs; } ut64 MACH0_(get_baddr)(struct MACH0_(obj_t) *bin) { int i; if (bin->hdr.filetype != MH_EXECUTE && bin->hdr.filetype != MH_DYLINKER && bin->hdr.filetype != MH_FILESET) { return 0; } for (i = 0; i < bin->nsegs; i++) { if (bin->segs[i].fileoff == 0 && bin->segs[i].filesize != 0) { return bin->segs[i].vmaddr; } } return 0; } char *MACH0_(get_class)(struct MACH0_(obj_t) *bin) { #if R_BIN_MACH064 return r_str_new ("MACH064"); #else return r_str_new ("MACH0"); #endif } //XXX we are mixing up bits from cpu and opcodes //since thumb use 16 bits opcode but run in 32 bits //cpus so here we should only return 32 or 64 int MACH0_(get_bits)(struct MACH0_(obj_t) *bin) { if (bin) { int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); if (bin->hdr.cputype == CPU_TYPE_ARM && 
bin->entry & 1) {
			return 16;
		}
		return bits;
	}
	return 32;
}

// Derive the word size from the header alone: 64 for MH_MAGIC_64/CIGAM_64
// and arm64_32, 16 for armv7k (thumb), otherwise 32.
int MACH0_(get_bits_from_hdr)(struct MACH0_(mach_header) *hdr) {
	if (hdr->magic == MH_MAGIC_64 || hdr->magic == MH_CIGAM_64) {
		return 64;
	}
	if (hdr->cputype == CPU_TYPE_ARM64_32) { // new apple watch aka arm64_32
		return 64;
	}
	if ((hdr->cpusubtype & CPU_SUBTYPE_MASK) == (CPU_SUBTYPE_ARM_V7K << 24)) {
		return 16;
	}
	return 32;
}

// Only the PowerPC flavors are big-endian among the CPU types handled here.
bool MACH0_(is_big_endian)(struct MACH0_(obj_t) *bin) {
	if (bin) {
		const int cpu = bin->hdr.cputype;
		return cpu == CPU_TYPE_POWERPC || cpu == CPU_TYPE_POWERPC64;
	}
	return false;
}

const char *MACH0_(get_intrp)(struct MACH0_(obj_t) *bin) {
	return bin? bin->intrp: NULL;
}

// Map bin->os (set elsewhere during parsing) to an OS name; darwin default.
const char *MACH0_(get_os)(struct MACH0_(obj_t) *bin) {
	if (bin) {
		switch (bin->os) {
		case 1: return "macos";
		case 2: return "ios";
		case 3: return "watchos";
		case 4: return "tvos";
		}
	}
	return "darwin";
}

// Translate the header's cputype into radare2's architecture name.
const char *MACH0_(get_cputype_from_hdr)(struct MACH0_(mach_header) *hdr) {
	const char *archstr = "unknown";
	switch (hdr->cputype) {
	case CPU_TYPE_VAX:
		archstr = "vax";
		break;
	case CPU_TYPE_MC680x0:
		archstr = "mc680x0";
		break;
	case CPU_TYPE_I386:
	case CPU_TYPE_X86_64:
		archstr = "x86";
		break;
	case CPU_TYPE_MC88000:
		archstr = "mc88000";
		break;
	case CPU_TYPE_MC98000:
		archstr = "mc98000";
		break;
	case CPU_TYPE_HPPA:
		archstr = "hppa";
		break;
	case CPU_TYPE_ARM:
	case CPU_TYPE_ARM64:
	case CPU_TYPE_ARM64_32:
		archstr = "arm";
		break;
	case CPU_TYPE_SPARC:
		archstr = "sparc";
		break;
	case CPU_TYPE_MIPS:
		archstr = "mips";
		break;
	case CPU_TYPE_I860:
		archstr = "i860";
		break;
	case CPU_TYPE_POWERPC:
	case CPU_TYPE_POWERPC64:
		archstr = "ppc";
		break;
	default:
		eprintf ("Unknown arch %d\n", hdr->cputype);
		break;
	}
	return archstr;
}

const char *MACH0_(get_cputype)(struct MACH0_(obj_t) *bin) {
	return bin? MACH0_(get_cputype_from_hdr) (&bin->hdr): "unknown";
}

// Static string for a (cputype, cpusubtype) pair. Note: the vax/mc680x0/i386
// arms switch on the raw subtype while the rest mask with 0xff to drop the
// capability bits.
static const char *cpusubtype_tostring(ut32 cputype, ut32 cpusubtype) {
	switch (cputype) {
	case CPU_TYPE_VAX:
		switch (cpusubtype) {
		case CPU_SUBTYPE_VAX_ALL: return "all";
		case CPU_SUBTYPE_VAX780: return "vax780";
		case CPU_SUBTYPE_VAX785: return "vax785";
		case CPU_SUBTYPE_VAX750: return "vax750";
		case CPU_SUBTYPE_VAX730: return "vax730";
		case CPU_SUBTYPE_UVAXI: return "uvaxI";
		case CPU_SUBTYPE_UVAXII: return "uvaxII";
		case CPU_SUBTYPE_VAX8200: return "vax8200";
		case CPU_SUBTYPE_VAX8500: return "vax8500";
		case CPU_SUBTYPE_VAX8600: return "vax8600";
		case CPU_SUBTYPE_VAX8650: return "vax8650";
		case CPU_SUBTYPE_VAX8800: return "vax8800";
		case CPU_SUBTYPE_UVAXIII: return "uvaxIII";
		default: return "Unknown vax subtype";
		}
	case CPU_TYPE_MC680x0:
		switch (cpusubtype) {
		case CPU_SUBTYPE_MC68030: return "mc68030";
		case CPU_SUBTYPE_MC68040: return "mc68040";
		case CPU_SUBTYPE_MC68030_ONLY: return "mc68030 only";
		default: return "Unknown mc680x0 subtype";
		}
	case CPU_TYPE_I386:
		switch (cpusubtype) {
		case CPU_SUBTYPE_386: return "386";
		case CPU_SUBTYPE_486: return "486";
		case CPU_SUBTYPE_486SX: return "486sx";
		case CPU_SUBTYPE_PENT: return "Pentium";
		case CPU_SUBTYPE_PENTPRO: return "Pentium Pro";
		case CPU_SUBTYPE_PENTII_M3: return "Pentium 3 M3";
		case CPU_SUBTYPE_PENTII_M5: return "Pentium 3 M5";
		case CPU_SUBTYPE_CELERON: return "Celeron";
		case CPU_SUBTYPE_CELERON_MOBILE: return "Celeron Mobile";
		case CPU_SUBTYPE_PENTIUM_3: return "Pentium 3";
		case CPU_SUBTYPE_PENTIUM_3_M: return "Pentium 3 M";
		case CPU_SUBTYPE_PENTIUM_3_XEON: return "Pentium 3 Xeon";
		case CPU_SUBTYPE_PENTIUM_M: return "Pentium Mobile";
		case CPU_SUBTYPE_PENTIUM_4: return "Pentium 4";
		case CPU_SUBTYPE_PENTIUM_4_M: return "Pentium 4 M";
		case CPU_SUBTYPE_ITANIUM: return "Itanium";
		case CPU_SUBTYPE_ITANIUM_2: return "Itanium 2";
		case CPU_SUBTYPE_XEON: return "Xeon";
		case CPU_SUBTYPE_XEON_MP: return "Xeon MP";
		default: return "Unknown i386 subtype";
		}
	case CPU_TYPE_X86_64:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_X86_64_ALL: return "x86 64 all";
		case CPU_SUBTYPE_X86_ARCH1: return "x86 arch 1";
		default: return "Unknown x86 subtype";
		}
	case CPU_TYPE_MC88000:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_MC88000_ALL: return "all";
		case CPU_SUBTYPE_MC88100: return "mc88100";
		case CPU_SUBTYPE_MC88110: return "mc88110";
		default: return "Unknown mc88000 subtype";
		}
	case CPU_TYPE_MC98000:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_MC98000_ALL: return "all";
		case CPU_SUBTYPE_MC98601: return "mc98601";
		default: return "Unknown mc98000 subtype";
		}
	case CPU_TYPE_HPPA:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_HPPA_7100: return "hppa7100";
		case CPU_SUBTYPE_HPPA_7100LC: return "hppa7100LC";
		default: return "Unknown hppa subtype";
		}
	case CPU_TYPE_ARM64:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_ARM64_ALL: return "all";
		case CPU_SUBTYPE_ARM64_V8: return "arm64v8";
		case CPU_SUBTYPE_ARM64E: return "arm64e";
		default: return "Unknown arm64 subtype";
		}
	case CPU_TYPE_ARM:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_ARM_ALL: return "all";
		case CPU_SUBTYPE_ARM_V4T: return "v4t";
		case CPU_SUBTYPE_ARM_V5: return "v5";
		case CPU_SUBTYPE_ARM_V6: return "v6";
		case CPU_SUBTYPE_ARM_XSCALE: return "xscale";
		case CPU_SUBTYPE_ARM_V7: return "v7";
		case CPU_SUBTYPE_ARM_V7F: return "v7f";
		case CPU_SUBTYPE_ARM_V7S: return "v7s";
		case CPU_SUBTYPE_ARM_V7K: return "v7k";
		case CPU_SUBTYPE_ARM_V7M: return "v7m";
		case CPU_SUBTYPE_ARM_V7EM: return "v7em";
		default:
			eprintf ("Unknown arm subtype %d\n", cpusubtype & 0xff);
			return "unknown arm subtype";
		}
	case CPU_TYPE_SPARC:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_SPARC_ALL: return "all";
		default: return "Unknown sparc subtype";
		}
	case CPU_TYPE_MIPS:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_MIPS_ALL: return "all";
		case CPU_SUBTYPE_MIPS_R2300: return "r2300";
		case CPU_SUBTYPE_MIPS_R2600: return "r2600";
		case CPU_SUBTYPE_MIPS_R2800: return "r2800";
		case CPU_SUBTYPE_MIPS_R2000a: return "r2000a";
		case CPU_SUBTYPE_MIPS_R2000: return "r2000";
		case CPU_SUBTYPE_MIPS_R3000a: return "r3000a";
		case CPU_SUBTYPE_MIPS_R3000: return "r3000";
		default: return "Unknown mips subtype";
		}
	case CPU_TYPE_I860:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_I860_ALL: return "all";
		case CPU_SUBTYPE_I860_860: return "860";
		default: return "Unknown i860 subtype";
		}
	case CPU_TYPE_POWERPC:
	case CPU_TYPE_POWERPC64:
		switch (cpusubtype & 0xff) {
		case CPU_SUBTYPE_POWERPC_ALL: return "all";
		case CPU_SUBTYPE_POWERPC_601: return "601";
		case CPU_SUBTYPE_POWERPC_602: return "602";
		case CPU_SUBTYPE_POWERPC_603: return "603";
		case CPU_SUBTYPE_POWERPC_603e: return "603e";
		case CPU_SUBTYPE_POWERPC_603ev: return "603ev";
		case CPU_SUBTYPE_POWERPC_604: return "604";
		case CPU_SUBTYPE_POWERPC_604e: return "604e";
		case CPU_SUBTYPE_POWERPC_620: return "620";
		case CPU_SUBTYPE_POWERPC_750: return "750";
		case CPU_SUBTYPE_POWERPC_7400: return "7400";
		case CPU_SUBTYPE_POWERPC_7450: return "7450";
		case CPU_SUBTYPE_POWERPC_970: return "970";
		default: return "Unknown ppc subtype";
		}
	}
	return "Unknown cputype";
}

// Heap-allocated subtype string; caller frees.
char *MACH0_(get_cpusubtype_from_hdr)(struct MACH0_(mach_header) *hdr) {
	r_return_val_if_fail (hdr, NULL);
	return strdup (cpusubtype_tostring (hdr->cputype, hdr->cpusubtype));
}

char *MACH0_(get_cpusubtype)(struct MACH0_(obj_t) *bin) {
	return bin? MACH0_(get_cpusubtype_from_hdr) (&bin->hdr): strdup ("Unknown");
}

// PIE is only reported for executables carrying the MH_PIE flag.
bool MACH0_(is_pie)(struct MACH0_(obj_t) *bin) {
	return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_PIE);
}

bool MACH0_(has_nx)(struct MACH0_(obj_t) *bin) {
	return (bin && bin->hdr.filetype == MH_EXECUTE &&
		bin->hdr.flags & MH_NO_HEAP_EXECUTION);
}

// Human-readable Mach-O filetype; always a heap copy the caller frees.
char *MACH0_(get_filetype_from_hdr)(struct MACH0_(mach_header) *hdr) {
	const char *mhtype = "Unknown";
	switch (hdr->filetype) {
	case MH_OBJECT: mhtype = "Relocatable object"; break;
	case MH_EXECUTE: mhtype = "Executable file"; break;
	case MH_FVMLIB: mhtype = "Fixed VM shared library"; break;
	case MH_CORE: mhtype = "Core file"; break;
	case MH_PRELOAD: mhtype = "Preloaded executable file"; break;
	case MH_DYLIB: mhtype = "Dynamically bound shared library"; break;
	case MH_DYLINKER: mhtype = "Dynamic link editor"; break;
	case MH_BUNDLE: mhtype = "Dynamically bound bundle file"; break;
	case MH_DYLIB_STUB: mhtype = "Shared library stub for static linking (no sections)"; break;
	case MH_DSYM: mhtype = "Companion file with only debug sections"; break;
	case MH_KEXT_BUNDLE: mhtype = "Kernel extension bundle file"; break;
	case MH_FILESET: mhtype = "Kernel cache file"; break;
	}
	return strdup (mhtype);
}

char *MACH0_(get_filetype)(struct MACH0_(obj_t) *bin) {
	return bin?
MACH0_(get_filetype_from_hdr) (&bin->hdr): strdup ("Unknown");
}

/* Locate the address of main(). Cached in bin->main_addr:
 *   UT64_MAX = not scanned yet, 0 = scanned but not found, other = found.
 * Falls back from the symbol tables to LC_MAIN, then to a byte-level scan
 * of the entrypoint for the first x86 call (0xe8) instruction. */
ut64 MACH0_(get_main)(struct MACH0_(obj_t) *bin) {
	ut64 addr = UT64_MAX;
	int i;
	// 0 = sscanned but no main found
	// -1 = not scanned, so no main
	// other = valid main addr
	if (bin->main_addr == UT64_MAX) {
		// Symbol parsing populates bin->main_addr as a side effect.
#if FEATURE_SYMLIST
		(void)MACH0_(get_symbols_list) (bin);
#else
		(void)MACH0_(get_symbols) (bin);
#endif
	}
	if (bin->main_addr != 0 && bin->main_addr != UT64_MAX) {
		return bin->main_addr;
	}
	// dummy call to initialize things
	free (MACH0_(get_entrypoint)(bin));
	bin->main_addr = 0;
	// NOTE(review): addr is still UT64_MAX here, so the first operand of
	// this && is always true at this point.
	if (addr == UT64_MAX && bin->main_cmd.cmd == LC_MAIN) {
		addr = bin->entry + bin->baddr;
	}
	if (!addr) {
		ut8 b[128];
		ut64 entry = addr_to_offset (bin, bin->entry);
		// XXX: X86 only and hacky!
		if (entry > bin->size || entry + sizeof (b) > bin->size) {
			return UT64_MAX;
		}
		i = r_buf_read_at (bin->b, entry, b, sizeof (b));
		if (i < 80) {
			return UT64_MAX;
		}
		// Look for the first near call (0xe8) whose 32-bit displacement
		// has zero upper bytes, and treat its target as main.
		for (i = 0; i < 64; i++) {
			if (b[i] == 0xe8 && !b[i + 3] && !b[i + 4]) {
				int delta = b[i + 1] | (b[i + 2] << 8) | (b[i + 3] << 16) | (b[i + 4] << 24);
				addr = bin->entry + i + 5 + delta;
				break;
			}
		}
		if (!addr) {
			addr = entry;
		}
	}
	return bin->main_addr = addr;
}

/* Pretty-print the Mach-O header and each load command through the RBin
 * printf callback, emitting `pf.*` format hints alongside raw values. */
void MACH0_(mach_headerfields)(RBinFile *bf) {
	PrintfCallback cb_printf = bf->rbin->cb_printf;
	if (!cb_printf) {
		cb_printf = printf;
	}
	RBuffer *buf = bf->buf;
	ut64 length = r_buf_size (buf);
	int n = 0;
	struct MACH0_(mach_header) *mh = MACH0_(get_hdr)(buf);
	if (!mh) {
		return;
	}
	// pvaddr tracks the virtual address of the field being printed.
	ut64 pvaddr = pa2va (bf, 0);
	cb_printf ("pf.mach0_header @ 0x%08"PFMT64x"\n", pvaddr);
	cb_printf ("0x%08"PFMT64x" Magic 0x%x\n", pvaddr, mh->magic);
	pvaddr += 4;
	cb_printf ("0x%08"PFMT64x" CpuType 0x%x\n", pvaddr, mh->cputype);
	pvaddr += 4;
	cb_printf ("0x%08"PFMT64x" CpuSubType 0x%x\n", pvaddr, mh->cpusubtype);
	pvaddr += 4;
	cb_printf ("0x%08"PFMT64x" FileType 0x%x\n", pvaddr, mh->filetype);
	pvaddr += 4;
	cb_printf ("0x%08"PFMT64x" nCmds %d\n", pvaddr, mh->ncmds);
	pvaddr += 4;
	cb_printf ("0x%08"PFMT64x" sizeOfCmds %d\n", pvaddr, mh->sizeofcmds);
	pvaddr +=
4;
	cb_printf ("0x%08"PFMT64x" Flags 0x%x\n", pvaddr, mh->flags);
	pvaddr += 4;
	bool is64 = mh->cputype >> 16;
	ut64 addr = 0x20 - 4;
	ut32 word = 0;
	ut8 wordbuf[sizeof (word)];
	bool isBe = false;
	switch (mh->cputype) {
	case CPU_TYPE_POWERPC:
	case CPU_TYPE_POWERPC64:
		isBe = true;
		break;
	}
// Read a 32-bit word at `addr`, advancing addr/pvaddr. The bare `break`
// deliberately leaves the enclosing load-command loop on short reads.
#define READWORD() \
	if (r_buf_read_at (buf, addr, (ut8*)wordbuf, 4) != 4) { \
		eprintf ("Invalid address in buffer."); \
		break; \
	} \
	addr += 4; \
	pvaddr += 4;\
	word = isBe? r_read_be32 (wordbuf): r_read_le32 (wordbuf);
	if (is64) {
		addr += 4;
		pvaddr += 4;
	}
	for (n = 0; n < mh->ncmds && addr < length; n++) {
		READWORD ();
		ut32 lcType = word;
		const char *pf_definition = cmd_to_pf_definition (lcType);
		if (pf_definition) {
			cb_printf ("pf.%s @ 0x%08"PFMT64x"\n", pf_definition, pvaddr - 4);
		}
		cb_printf ("0x%08"PFMT64x" cmd %7d 0x%x %s\n", pvaddr - 4, n, lcType, cmd_to_string (lcType));
		READWORD ();
		if (addr > length) {
			break;
		}
		int lcSize = word;
		word &= 0xFFFFFF;
		cb_printf ("0x%08"PFMT64x" cmdsize %d\n", pvaddr - 4, word);
		if (lcSize < 1) {
			eprintf ("Invalid size for a load command\n");
			break;
		}
		switch (lcType) {
		case LC_BUILD_VERSION: {
			cb_printf ("0x%08"PFMT64x" platform %s\n", pvaddr, build_version_platform_to_string (r_buf_read_le32_at (buf, addr)));
			cb_printf ("0x%08"PFMT64x" minos %d.%d.%d\n", pvaddr + 4, r_buf_read_le16_at (buf, addr + 6), r_buf_read8_at (buf, addr + 5), r_buf_read8_at (buf, addr + 4));
			cb_printf ("0x%08"PFMT64x" sdk %d.%d.%d\n", pvaddr + 8, r_buf_read_le16_at (buf, addr + 10), r_buf_read8_at (buf, addr + 9), r_buf_read8_at (buf, addr + 8));
			ut32 ntools = r_buf_read_le32_at (buf, addr + 12);
			cb_printf ("0x%08"PFMT64x" ntools %d\n", pvaddr + 12, ntools);
			ut64 off = 16;
			// Each tool entry is 8 bytes: tool id + packed version.
			while (off < (lcSize - 8) && ntools--) {
				cb_printf ("pf.mach0_build_version_tool @ 0x%08"PFMT64x"\n", pvaddr + off);
				cb_printf ("0x%08"PFMT64x" tool %s\n", pvaddr + off, build_version_tool_to_string (r_buf_read_le32_at (buf, addr + off)));
				off += 4;
				if (off >= (lcSize - 8)) {
					break;
				}
				cb_printf ("0x%08"PFMT64x" version %d.%d.%d\n", pvaddr + off, r_buf_read_le16_at (buf, addr + off + 2), r_buf_read8_at (buf, addr + off + 1), r_buf_read8_at (buf, addr + off));
				off += 4;
			}
			break;
		}
		case LC_MAIN: {
			ut8 data[64] = {0};
			r_buf_read_at (buf, addr, data, sizeof (data));
#if R_BIN_MACH064
			ut64 ep = r_read_ble64 (&data, false); // bin->big_endian);
			cb_printf ("0x%08"PFMT64x" entry0 0x%" PFMT64x "\n", pvaddr, ep);
			ut64 ss = r_read_ble64 (&data[8], false); // bin->big_endian);
			cb_printf ("0x%08"PFMT64x" stacksize 0x%" PFMT64x "\n", pvaddr + 8, ss);
#else
			ut32 ep = r_read_ble32 (&data, false); // bin->big_endian);
			cb_printf ("0x%08"PFMT32x" entry0 0x%" PFMT32x "\n", (ut32)pvaddr, ep);
			ut32 ss = r_read_ble32 (&data[4], false); // bin->big_endian);
			cb_printf ("0x%08"PFMT32x" stacksize 0x%" PFMT32x "\n", (ut32)pvaddr + 4, ss);
#endif
		}
		break;
		case LC_SYMTAB:
#if 0
		{
			char *id = r_buf_get_string (buf, addr + 20);
			cb_printf ("0x%08"PFMT64x" id 0x%x\n", addr + 20, r_str_get (id));
			cb_printf ("0x%08"PFMT64x" symooff 0x%x\n", addr + 20, r_str_get (id));
			cb_printf ("0x%08"PFMT64x" nsyms %d\n", addr + 20, r_str_get (id));
			cb_printf ("0x%08"PFMT64x" stroff 0x%x\n", addr + 20, r_str_get (id));
			cb_printf ("0x%08"PFMT64x" strsize 0x%x\n", addr + 20, r_str_get (id));
			free (id);
		}
#endif
			break;
		case LC_ID_DYLIB: { // install_name_tool
			// NOTE(review): str_off comes from the file; values < 8 would
			// underflow `addr + str_off - 8` — confirm upstream hardening.
			ut32 str_off = r_buf_read_ble32_at (buf, addr, isBe);
			char *id = r_buf_get_string (buf, addr + str_off - 8);
			cb_printf ("0x%08"PFMT64x" current %d.%d.%d\n", pvaddr + 8, r_buf_read_le16_at (buf, addr + 10), r_buf_read8_at (buf, addr + 9), r_buf_read8_at (buf, addr + 8));
			cb_printf ("0x%08"PFMT64x" compat %d.%d.%d\n", pvaddr + 12, r_buf_read_le16_at (buf, addr + 14), r_buf_read8_at (buf, addr + 13), r_buf_read8_at (buf, addr + 12));
			cb_printf ("0x%08"PFMT64x" id %s\n", pvaddr + str_off - 8, r_str_get (id));
			if (id) {
				free (id);
			}
			break;
		}
		case LC_UUID: {
			ut8 i, uuid[16];
			r_buf_read_at (buf, addr, uuid, sizeof (uuid));
			cb_printf ("0x%08"PFMT64x" uuid ", pvaddr);
			for (i = 0; i < sizeof (uuid); i++) {
				cb_printf ("%02x", uuid[i]);
			}
			cb_printf ("\n");
		}
		break;
		case LC_SEGMENT:
		case LC_SEGMENT_64: {
			ut8 name[17] = {0};
			r_buf_read_at (buf, addr, name, sizeof (name) - 1);
			cb_printf ("0x%08"PFMT64x" name %s\n", pvaddr, name);
			ut32 nsects = r_buf_read_le32_at (buf, addr - 8 + (is64 ? 64 : 48));
			ut64 off = is64 ? 72 : 56;
			// Emit one pf hint per section header within the command.
			while (off < lcSize && nsects--) {
				if (is64) {
					cb_printf ("pf.mach0_section64 @ 0x%08"PFMT64x"\n", pvaddr - 8 + off);
					off += 80;
				} else {
					cb_printf ("pf.mach0_section @ 0x%08"PFMT64x"\n", pvaddr - 8 + off);
					off += 68;
				}
			}
		}
		break;
		case LC_LOAD_DYLIB:
		case LC_LOAD_WEAK_DYLIB: {
			ut32 str_off = r_buf_read_ble32_at (buf, addr, isBe);
			char *load_dylib = r_buf_get_string (buf, addr + str_off - 8);
			cb_printf ("0x%08"PFMT64x" current %d.%d.%d\n", pvaddr + 8, r_buf_read_le16_at (buf, addr + 10), r_buf_read8_at (buf, addr + 9), r_buf_read8_at (buf, addr + 8));
			cb_printf ("0x%08"PFMT64x" compat %d.%d.%d\n", pvaddr + 12, r_buf_read_le16_at (buf, addr + 14), r_buf_read8_at (buf, addr + 13), r_buf_read8_at (buf, addr + 12));
			cb_printf ("0x%08"PFMT64x" load_dylib %s\n", pvaddr + str_off - 8, r_str_get (load_dylib));
			if (load_dylib) {
				free (load_dylib);
			}
			break;
		}
		case LC_RPATH: {
			char *rpath = r_buf_get_string (buf, addr + 4);
			cb_printf ("0x%08" PFMT64x " rpath %s\n", pvaddr + 4, r_str_get (rpath));
			if (rpath) {
				free (rpath);
			}
			break;
		}
		case LC_ENCRYPTION_INFO:
		case LC_ENCRYPTION_INFO_64: {
			ut32 word = r_buf_read_le32_at (buf, addr);
			cb_printf ("0x%08"PFMT64x" cryptoff 0x%08x\n", pvaddr, word);
			word = r_buf_read_le32_at (buf, addr + 4);
			cb_printf ("0x%08"PFMT64x" cryptsize %d\n", pvaddr + 4, word);
			word = r_buf_read_le32_at (buf, addr + 8);
			cb_printf ("0x%08"PFMT64x" cryptid %d\n", pvaddr + 8, word);
			break;
		}
		case LC_CODE_SIGNATURE: {
			ut32 words[2];
			r_buf_read_at (buf, addr, (ut8 *)words, sizeof (words));
			cb_printf ("0x%08"PFMT64x" dataoff 0x%08x\n", pvaddr, words[0]);
			cb_printf ("0x%08"PFMT64x" datasize %d\n", pvaddr + 4, words[1]);
			cb_printf ("# wtf mach0.sign %d @ 0x%x\n", words[1], words[0]);
			break;
		}
		}
		// NOTE(review): cmdsize (word) smaller than 8 would step addr
		// backwards here; loop is still bounded by ncmds — confirm intent.
		addr += word - 8;
		pvaddr += word - 8;
	}
	free (mh);
}

/* Build the RBinField list for the header and each load command, adding
 * per-tool and per-section subfields for LC_BUILD_VERSION / LC_SEGMENT*.
 * Returns a caller-owned RList of RBinField, or NULL. */
RList *MACH0_(mach_fields)(RBinFile *bf) {
	RBuffer *buf = bf->buf;
	ut64 length = r_buf_size (buf);
	struct MACH0_(mach_header) *mh = MACH0_(get_hdr) (buf);
	if (!mh) {
		return NULL;
	}
	RList *ret = r_list_new ();
	if (!ret) {
		free (mh);
		return NULL;
	}
	ret->free = free;
	// addr = virtual address of the field, paddr = physical file offset.
	ut64 addr = pa2va (bf, 0);
	ut64 paddr = 0;
	r_list_append (ret, r_bin_field_new (addr, addr, 1, "header", "mach0_header", "mach0_header", true));
	addr += 0x20 - 4;
	paddr += 0x20 - 4;
	bool is64 = mh->cputype >> 16;
	if (is64) {
		addr += 4;
		paddr += 4;
	}
	bool isBe = false;
	switch (mh->cputype) {
	case CPU_TYPE_POWERPC:
	case CPU_TYPE_POWERPC64:
		isBe = true;
		break;
	}
	int n;
	char load_command_flagname[128];
	for (n = 0; n < mh->ncmds && paddr < length; n++) {
		ut32 lcType = r_buf_read_ble32_at (buf, paddr, isBe);
		ut32 word = r_buf_read_ble32_at (buf, paddr + 4, isBe);
		if (paddr + 8 > length) {
			break;
		}
		ut32 lcSize = word;
		word &= 0xFFFFFF;
		if (lcSize < 1) {
			eprintf ("Invalid size for a load command\n");
			break;
		}
		if (word == 0) {
			break;
		}
		const char *pf_definition = cmd_to_pf_definition (lcType);
		if (pf_definition) {
			snprintf (load_command_flagname, sizeof (load_command_flagname), "load_command_%d_%s", n, cmd_to_string (lcType));
			r_list_append (ret, r_bin_field_new (addr, addr, 1, load_command_flagname, pf_definition, pf_definition, true));
		}
		switch (lcType) {
		case LC_BUILD_VERSION: {
			ut32 ntools = r_buf_read_le32_at (buf, paddr + 20);
			ut64 off = 24;
			int j = 0;
			char tool_flagname[32];
			while (off < lcSize && ntools--) {
				snprintf (tool_flagname, sizeof (tool_flagname), "tool_%d", j++);
				r_list_append (ret, r_bin_field_new (addr + off, addr + off, 1, tool_flagname, "mach0_build_version_tool", "mach0_build_version_tool", true));
				off += 8;
			}
			break;
		}
		case LC_SEGMENT:
		case LC_SEGMENT_64: {
			ut32 nsects = r_buf_read_le32_at (buf, addr + (is64 ? 64 : 48));
			ut64 off = is64 ?
72 : 56; size_t i, j = 0; char section_flagname[128]; for (i = 0; i < nsects && (addr + off) < length && off < lcSize; i++) { const char *sname = is64? "mach0_section64": "mach0_section"; snprintf (section_flagname, sizeof (section_flagname), "section_%u", (ut32)j++); RBinField *f = r_bin_field_new (addr + off, addr + off, 1, section_flagname, sname, sname, true); r_list_append (ret, f); off += is64? 80: 68; } break; default: // TODO break; } } addr += word; paddr += word; } free (mh); return ret; } struct MACH0_(mach_header) *MACH0_(get_hdr)(RBuffer *buf) { ut8 magicbytes[sizeof (ut32)] = {0}; ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0}; int len; struct MACH0_(mach_header) *macho_hdr = R_NEW0 (struct MACH0_(mach_header)); bool big_endian = false; if (!macho_hdr) { return NULL; } if (r_buf_read_at (buf, 0, magicbytes, 4) < 1) { free (macho_hdr); return false; } if (r_read_le32 (magicbytes) == 0xfeedface) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedface) { big_endian = true; } else if (r_read_le32 (magicbytes) == FAT_MAGIC) { big_endian = false; } else if (r_read_be32 (magicbytes) == FAT_MAGIC) { big_endian = true; } else if (r_read_le32 (magicbytes) == 0xfeedfacf) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedfacf) { big_endian = true; } else { /* also extract non-mach0s */ #if 0 free (macho_hdr); return NULL; #endif } len = r_buf_read_at (buf, 0, machohdrbytes, sizeof (machohdrbytes)); if (len != sizeof (struct MACH0_(mach_header))) { free (macho_hdr); return NULL; } macho_hdr->magic = r_read_ble (&machohdrbytes[0], big_endian, 32); macho_hdr->cputype = r_read_ble (&machohdrbytes[4], big_endian, 32); macho_hdr->cpusubtype = r_read_ble (&machohdrbytes[8], big_endian, 32); macho_hdr->filetype = r_read_ble (&machohdrbytes[12], big_endian, 32); macho_hdr->ncmds = r_read_ble (&machohdrbytes[16], big_endian, 32); macho_hdr->sizeofcmds = r_read_ble (&machohdrbytes[20], big_endian, 32); macho_hdr->flags = 
r_read_ble (&machohdrbytes[24], big_endian, 32); #if R_BIN_MACH064 macho_hdr->reserved = r_read_ble (&machohdrbytes[28], big_endian, 32); #endif return macho_hdr; } void MACH0_(iterate_chained_fixups)(struct MACH0_(obj_t) *bin, ut64 limit_start, ut64 limit_end, ut32 event_mask, RFixupCallback callback, void * context) { int i = 0; for (; i < bin->nsegs; i++) { if (!bin->chained_starts[i]) { continue; } int page_size = bin->chained_starts[i]->page_size; if (page_size < 1) { page_size = 4096; } ut64 start = bin->segs[i].fileoff; ut64 end = start + bin->segs[i].filesize; if (end >= limit_start && start <= limit_end) { ut64 page_idx = (R_MAX (start, limit_start) - start) / page_size; ut64 page_end_idx = (R_MIN (limit_end, end) - start) / page_size; for (; page_idx <= page_end_idx; page_idx++) { if (page_idx >= bin->chained_starts[i]->page_count) { break; } ut16 page_start = bin->chained_starts[i]->page_start[page_idx]; if (page_start == DYLD_CHAINED_PTR_START_NONE) { continue; } ut64 cursor = start + page_idx * page_size + page_start; while (cursor < limit_end && cursor < end) { ut8 tmp[8]; bool previous_rebasing = bin->rebasing_buffer; bin->rebasing_buffer = true; if (r_buf_read_at (bin->b, cursor, tmp, 8) != 8) { bin->rebasing_buffer = previous_rebasing; break; } bin->rebasing_buffer = previous_rebasing; ut64 raw_ptr = r_read_le64 (tmp); ut64 ptr_value = raw_ptr; ut64 delta, stride, addend; ut16 pointer_format = bin->chained_starts[i]->pointer_format; RFixupEvent event = R_FIXUP_EVENT_NONE; ut8 key = 0, addr_div = 0; ut16 diversity = 0; ut32 ordinal = UT32_MAX; if (pointer_format == DYLD_CHAINED_PTR_ARM64E) { stride = 8; bool is_auth = IS_PTR_AUTH (raw_ptr); bool is_bind = IS_PTR_BIND (raw_ptr); if (is_auth && is_bind) { struct dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; delta = p->next; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else if 
(!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND; delta = p->next; ordinal = p->ordinal; addend = p->addend; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; } } else if (pointer_format == DYLD_CHAINED_PTR_ARM64E_USERLAND24) { stride = 8; struct dyld_chained_ptr_arm64e_bind24 *bind = (struct dyld_chained_ptr_arm64e_bind24 *) &raw_ptr; if (bind->bind) { delta = bind->next; if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_bind24 *p = (struct dyld_chained_ptr_arm64e_auth_bind24 *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { event = R_FIXUP_EVENT_BIND; ordinal = bind->ordinal; addend = bind->addend; } } else { if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } } else if (pointer_format == DYLD_CHAINED_PTR_64_OFFSET) { stride = 4; struct dyld_chained_ptr_64_bind *bind = (struct dyld_chained_ptr_64_bind *) &raw_ptr; if (bind->bind) { event = R_FIXUP_EVENT_BIND; delta = bind->next; ordinal = bind->ordinal; addend = 
bind->addend; } else { struct dyld_chained_ptr_64_rebase *p = (struct dyld_chained_ptr_64_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } else { eprintf ("Unsupported chained pointer format %d\n", pointer_format); return; } if (cursor >= limit_start && cursor <= limit_end - 8 && (event & event_mask) != 0) { bool carry_on; switch (event) { case R_FIXUP_EVENT_BIND: { RFixupBindEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.addend = addend; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_BIND_AUTH: { RFixupBindAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE: { RFixupRebaseEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE_AUTH: { RFixupRebaseAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } default: eprintf ("Unexpected event while iterating chained fixups\n"); carry_on = false; } if (!carry_on) { return; } } cursor += delta 
* stride; if (!delta) { break; } } } } } }
/* radare - LGPL - Copyright 2010-2021 - nibble, mrmacete, pancake */ #include <stdio.h> #include <r_types.h> #include <r_util.h> #include "mach0.h" #include <r_hash.h> // TODO: deprecate bprintf and use Eprintf (bin->self) #define bprintf if (bin->verbose) eprintf #define Eprintf if (mo->verbose) eprintf #define IS_PTR_AUTH(x) ((x & (1ULL << 63)) != 0) #define IS_PTR_BIND(x) ((x & (1ULL << 62)) != 0) typedef struct { struct symbol_t *symbols; int j; int symbols_count; HtPP *hash; } RSymCtx; typedef void (*RExportsIterator)(struct MACH0_(obj_t) *bin, const char *name, ut64 flags, ut64 offset, void *ctx); typedef struct { ut8 *node; char *label; int i; ut8 *next_child; } RTrieState; typedef struct { ut8 * imports; RSkipList *relocs; } RWalkBindChainsContext; // OMG; THIS SHOULD BE KILLED; this var exposes the local native endian, which is completely unnecessary // USE THIS: int ws = bf->o->info->big_endian; #define mach0_endian 1 static ut64 read_uleb128(ut8 **p, ut8 *end) { const char *error = NULL; ut64 v; *p = (ut8 *)r_uleb128 (*p, end - *p, &v, &error); if (error) { eprintf ("%s", error); R_FREE (error); return UT64_MAX; } return v; } static ut64 entry_to_vaddr(struct MACH0_(obj_t) *bin) { switch (bin->main_cmd.cmd) { case LC_MAIN: return bin->entry + bin->baddr; case LC_UNIXTHREAD: case LC_THREAD: return bin->entry; default: return 0; } } static ut64 addr_to_offset(struct MACH0_(obj_t) *bin, ut64 addr) { if (bin->segs) { size_t i; for (i = 0; i < bin->nsegs; i++) { const ut64 segment_base = (ut64)bin->segs[i].vmaddr; const ut64 segment_size = (ut64)bin->segs[i].vmsize; if (addr >= segment_base && addr < segment_base + segment_size) { return bin->segs[i].fileoff + (addr - segment_base); } } } return 0; } static ut64 offset_to_vaddr(struct MACH0_(obj_t) *bin, ut64 offset) { if (bin->segs) { size_t i; for (i = 0; i < bin->nsegs; i++) { ut64 segment_base = (ut64)bin->segs[i].fileoff; ut64 segment_size = (ut64)bin->segs[i].filesize; if (offset >= segment_base && 
offset < segment_base + segment_size) {
				return bin->segs[i].vmaddr + (offset - segment_base);
			}
		}
	}
	return 0;
}

// Physical-to-virtual helper honoring the RIO va setting of the owning RBinFile.
static ut64 pa2va(RBinFile *bf, ut64 offset) {
	r_return_val_if_fail (bf && bf->rbin, offset);
	RIO *io = bf->rbin->iob.io;
	if (!io || !io->va) {
		return offset;
	}
	struct MACH0_(obj_t) *bin = bf->o->bin_obj;
	return bin? offset_to_vaddr (bin, offset): offset;
}

// Register the pf (print-format) definitions for all mach0 structures in the
// per-object sdb, so headers can be pretty-printed.
static void init_sdb_formats(struct MACH0_(obj_t) *bin) {
	/*
	 * These definitions are used by r2 -nn
	 * must be kept in sync with libr/bin/d/macho
	 */
	// enums used by the format strings below
	sdb_set (bin->kv, "mach0_build_platform.cparse",
		"enum mach0_build_platform" "{MACOS=1, IOS=2, TVOS=3, WATCHOS=4, BRIDGEOS=5, IOSMAC=6, IOSSIMULATOR=7, TVOSSIMULATOR=8, WATCHOSSIMULATOR=9};",
		0);
	sdb_set (bin->kv, "mach0_build_tool.cparse",
		"enum mach0_build_tool" "{CLANG=1, SWIFT=2, LD=3};",
		0);
	sdb_set (bin->kv, "mach0_load_command_type.cparse",
		"enum mach0_load_command_type" "{ LC_SEGMENT=0x00000001ULL, LC_SYMTAB=0x00000002ULL, LC_SYMSEG=0x00000003ULL, LC_THREAD=0x00000004ULL, LC_UNIXTHREAD=0x00000005ULL, LC_LOADFVMLIB=0x00000006ULL, LC_IDFVMLIB=0x00000007ULL, LC_IDENT=0x00000008ULL, LC_FVMFILE=0x00000009ULL, LC_PREPAGE=0x0000000aULL, LC_DYSYMTAB=0x0000000bULL, LC_LOAD_DYLIB=0x0000000cULL, LC_ID_DYLIB=0x0000000dULL, LC_LOAD_DYLINKER=0x0000000eULL, LC_ID_DYLINKER=0x0000000fULL, LC_PREBOUND_DYLIB=0x00000010ULL, LC_ROUTINES=0x00000011ULL, LC_SUB_FRAMEWORK=0x00000012ULL, LC_SUB_UMBRELLA=0x00000013ULL, LC_SUB_CLIENT=0x00000014ULL, LC_SUB_LIBRARY=0x00000015ULL, LC_TWOLEVEL_HINTS=0x00000016ULL, LC_PREBIND_CKSUM=0x00000017ULL, LC_LOAD_WEAK_DYLIB=0x80000018ULL, LC_SEGMENT_64=0x00000019ULL, LC_ROUTINES_64=0x0000001aULL, LC_UUID=0x0000001bULL, LC_RPATH=0x8000001cULL, LC_CODE_SIGNATURE=0x0000001dULL, LC_SEGMENT_SPLIT_INFO=0x0000001eULL, LC_REEXPORT_DYLIB=0x8000001fULL, LC_LAZY_LOAD_DYLIB=0x00000020ULL, LC_ENCRYPTION_INFO=0x00000021ULL, LC_DYLD_INFO=0x00000022ULL, LC_DYLD_INFO_ONLY=0x80000022ULL, LC_LOAD_UPWARD_DYLIB=0x80000023ULL, LC_VERSION_MIN_MACOSX=0x00000024ULL, LC_VERSION_MIN_IPHONEOS=0x00000025ULL, LC_FUNCTION_STARTS=0x00000026ULL, LC_DYLD_ENVIRONMENT=0x00000027ULL, LC_MAIN=0x80000028ULL, LC_DATA_IN_CODE=0x00000029ULL, LC_SOURCE_VERSION=0x0000002aULL, LC_DYLIB_CODE_SIGN_DRS=0x0000002bULL, LC_ENCRYPTION_INFO_64=0x0000002cULL, LC_LINKER_OPTION=0x0000002dULL, LC_LINKER_OPTIMIZATION_HINT=0x0000002eULL, LC_VERSION_MIN_TVOS=0x0000002fULL, LC_VERSION_MIN_WATCHOS=0x00000030ULL, LC_NOTE=0x00000031ULL, LC_BUILD_VERSION=0x00000032ULL };",
		0);
	sdb_set (bin->kv, "mach0_header_filetype.cparse",
		"enum mach0_header_filetype" "{MH_OBJECT=1, MH_EXECUTE=2, MH_FVMLIB=3, MH_CORE=4, MH_PRELOAD=5, MH_DYLIB=6, MH_DYLINKER=7, MH_BUNDLE=8, MH_DYLIB_STUB=9, MH_DSYM=10, MH_KEXT_BUNDLE=11};",
		0);
	sdb_set (bin->kv, "mach0_header_flags.cparse",
		"enum mach0_header_flags" "{MH_NOUNDEFS=1, MH_INCRLINK=2,MH_DYLDLINK=4,MH_BINDATLOAD=8,MH_PREBOUND=0x10, MH_SPLIT_SEGS=0x20,MH_LAZY_INIT=0x40,MH_TWOLEVEL=0x80, MH_FORCE_FLAT=0x100,MH_NOMULTIDEFS=0x200,MH_NOFIXPREBINDING=0x400, MH_PREBINDABLE=0x800, MH_ALLMODSBOUND=0x1000, MH_SUBSECTIONS_VIA_SYMBOLS=0x2000, MH_CANONICAL=0x4000,MH_WEAK_DEFINES=0x8000, MH_BINDS_TO_WEAK=0x10000,MH_ALLOW_STACK_EXECUTION=0x20000, MH_ROOT_SAFE=0x40000,MH_SETUID_SAFE=0x80000, MH_NO_REEXPORTED_DYLIBS=0x100000,MH_PIE=0x200000, MH_DEAD_STRIPPABLE_DYLIB=0x400000, MH_HAS_TLV_DESCRIPTORS=0x800000, MH_NO_HEAP_EXECUTION=0x1000000};",
		0);
	sdb_set (bin->kv, "mach0_section_types.cparse",
		"enum mach0_section_types" "{S_REGULAR=0, S_ZEROFILL=1, S_CSTRING_LITERALS=2, S_4BYTE_LITERALS=3, S_8BYTE_LITERALS=4, S_LITERAL_POINTERS=5, S_NON_LAZY_SYMBOL_POINTERS=6, S_LAZY_SYMBOL_POINTERS=7, S_SYMBOL_STUBS=8, S_MOD_INIT_FUNC_POINTERS=9, S_MOD_TERM_FUNC_POINTERS=0xa, S_COALESCED=0xb, S_GB_ZEROFILL=0xc, S_INTERPOSING=0xd, S_16BYTE_LITERALS=0xe, S_DTRACE_DOF=0xf, S_LAZY_DYLIB_SYMBOL_POINTERS=0x10, S_THREAD_LOCAL_REGULAR=0x11, S_THREAD_LOCAL_ZEROFILL=0x12, S_THREAD_LOCAL_VARIABLES=0x13, S_THREAD_LOCAL_VARIABLE_POINTERS=0x14, S_THREAD_LOCAL_INIT_FUNCTION_POINTERS=0x15, S_INIT_FUNC_OFFSETS=0x16};",
		0);
	sdb_set (bin->kv, "mach0_section_attrs.cparse",
		"enum mach0_section_attrs" "{S_ATTR_PURE_INSTRUCTIONS=0x800000ULL, S_ATTR_NO_TOC=0x400000ULL, S_ATTR_STRIP_STATIC_SYMS=0x200000ULL, S_ATTR_NO_DEAD_STRIP=0x100000ULL, S_ATTR_LIVE_SUPPORT=0x080000ULL, S_ATTR_SELF_MODIFYING_CODE=0x040000ULL, S_ATTR_DEBUG=0x020000ULL, S_ATTR_SOME_INSTRUCTIONS=0x000004ULL, S_ATTR_EXT_RELOC=0x000002ULL, S_ATTR_LOC_RELOC=0x000001ULL};",
		0);
	// pf format strings for every load-command layout
	sdb_set (bin->kv, "mach0_header.format",
		"xxx[4]Edd[4]B " "magic cputype cpusubtype (mach0_header_filetype)filetype ncmds sizeofcmds (mach0_header_flags)flags",
		0);
	sdb_set (bin->kv, "mach0_segment.format",
		"[4]Ed[16]zxxxxoodx " "(mach0_load_command_type)cmd cmdsize segname vmaddr vmsize fileoff filesize maxprot initprot nsects flags",
		0);
	sdb_set (bin->kv, "mach0_segment64.format",
		"[4]Ed[16]zqqqqoodx " "(mach0_load_command_type)cmd cmdsize segname vmaddr vmsize fileoff filesize maxprot initprot nsects flags",
		0);
	sdb_set (bin->kv, "mach0_symtab_command.format",
		"[4]Edxdxd " "(mach0_load_command_type)cmd cmdsize symoff nsyms stroff strsize",
		0);
	sdb_set (bin->kv, "mach0_dysymtab_command.format",
		"[4]Edddddddddddxdxdxxxd " "(mach0_load_command_type)cmd cmdsize ilocalsym nlocalsym iextdefsym nextdefsym iundefsym nundefsym tocoff ntoc moddtaboff nmodtab extrefsymoff nextrefsyms inddirectsymoff nindirectsyms extreloff nextrel locreloff nlocrel",
		0);
	sdb_set (bin->kv, "mach0_section.format",
		"[16]z[16]zxxxxxx[1]E[3]Bxx " "sectname segname addr size offset align reloff nreloc (mach0_section_types)flags_type (mach0_section_attrs)flags_attr reserved1 reserved2",
		0);
	sdb_set (bin->kv, "mach0_section64.format",
		"[16]z[16]zqqxxxx[1]E[3]Bxxx " "sectname segname addr size offset align reloff nreloc (mach0_section_types)flags_type (mach0_section_attrs)flags_attr reserved1 reserved2 reserved3",
		0);
	sdb_set (bin->kv, "mach0_dylib.format",
		"xxxxz " "name_offset timestamp current_version compatibility_version name",
		0);
	sdb_set (bin->kv, "mach0_dylib_command.format",
		"[4]Ed? " "(mach0_load_command_type)cmd cmdsize (mach0_dylib)dylib",
		0);
	sdb_set (bin->kv, "mach0_id_dylib_command.format",
		"[4]Ed? " "(mach0_load_command_type)cmd cmdsize (mach0_dylib)dylib",
		0);
	sdb_set (bin->kv, "mach0_uuid_command.format",
		"[4]Ed[16]b " "(mach0_load_command_type)cmd cmdsize uuid",
		0);
	sdb_set (bin->kv, "mach0_rpath_command.format",
		"[4]Edxz " "(mach0_load_command_type)cmd cmdsize path_offset path",
		0);
	sdb_set (bin->kv, "mach0_entry_point_command.format",
		"[4]Edqq " "(mach0_load_command_type)cmd cmdsize entryoff stacksize",
		0);
	sdb_set (bin->kv, "mach0_encryption_info64_command.format",
		"[4]Edxddx " "(mach0_load_command_type)cmd cmdsize offset size id padding",
		0);
	sdb_set (bin->kv, "mach0_encryption_info_command.format",
		"[4]Edxdd " "(mach0_load_command_type)cmd cmdsize offset size id",
		0);
	sdb_set (bin->kv, "mach0_code_signature_command.format",
		"[4]Edxd " "(mach0_load_command_type)cmd cmdsize offset size",
		0);
	sdb_set (bin->kv, "mach0_dyld_info_only_command.format",
		"[4]Edxdxdxdxdxd " "(mach0_load_command_type)cmd cmdsize rebase_off rebase_size bind_off bind_size weak_bind_off weak_bind_size lazy_bind_off lazy_bind_size export_off export_size",
		0);
	sdb_set (bin->kv, "mach0_load_dylinker_command.format",
		"[4]Edxz " "(mach0_load_command_type)cmd cmdsize name_offset name",
		0);
	sdb_set (bin->kv, "mach0_id_dylinker_command.format",
		"[4]Edxzi " "(mach0_load_command_type)cmd cmdsize name_offset name",
		0);
	sdb_set (bin->kv, "mach0_build_version_command.format",
		"[4]Ed[4]Exxd " "(mach0_load_command_type)cmd cmdsize (mach0_build_platform)platform minos sdk ntools",
		0);
	sdb_set (bin->kv, "mach0_build_version_tool.format",
		"[4]Ex " "(mach0_build_tool)tool version",
		0);
	sdb_set (bin->kv, "mach0_source_version_command.format",
		"[4]Edq " "(mach0_load_command_type)cmd cmdsize version",
		0);
	sdb_set (bin->kv, "mach0_function_starts_command.format",
		"[4]Edxd " "(mach0_load_command_type)cmd cmdsize offset size",
		0);
	sdb_set (bin->kv, "mach0_data_in_code_command.format",
		"[4]Edxd " "(mach0_load_command_type)cmd cmdsize offset size",
		0);
	sdb_set (bin->kv, "mach0_version_min_command.format",
		"[4]Edxx " "(mach0_load_command_type)cmd cmdsize version reserved",
		0);
	sdb_set (bin->kv, "mach0_segment_split_info_command.format",
		"[4]Edxd " "(mach0_load_command_type)cmd cmdsize offset size",
		0);
	sdb_set (bin->kv, "mach0_unixthread_command.format",
		"[4]Eddd " "(mach0_load_command_type)cmd cmdsize flavor count",
		0);
}

// Detect endianness from the magic and decode the mach_header into bin->hdr.
// Returns false on short reads or unrecognized magic.
static bool init_hdr(struct MACH0_(obj_t) *bin) {
	ut8 magicbytes[4] = {0};
	ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0};
	int len;
	if (r_buf_read_at (bin->b, 0 + bin->header_at, magicbytes, 4) < 1) {
		return false;
	}
	if (r_read_le32 (magicbytes) == 0xfeedface) {
		bin->big_endian = false;
	} else if (r_read_be32 (magicbytes) == 0xfeedface) {
		bin->big_endian = true;
	} else if (r_read_le32 (magicbytes) == FAT_MAGIC) {
		bin->big_endian = false;
	} else if (r_read_be32 (magicbytes) == FAT_MAGIC) {
		bin->big_endian = true;
	} else if (r_read_le32 (magicbytes) == 0xfeedfacf) {
		bin->big_endian = false;
	} else if (r_read_be32 (magicbytes) == 0xfeedfacf) {
		bin->big_endian = true;
	} else {
		return false; // object files are magic == 0, but body is different :?
	}
	// decode the full header with the endianness detected above
	len = r_buf_read_at (bin->b, 0 + bin->header_at, machohdrbytes, sizeof (machohdrbytes));
	if (len != sizeof (machohdrbytes)) {
		bprintf ("Error: read (hdr)\n");
		return false;
	}
	bin->hdr.magic = r_read_ble (&machohdrbytes[0], bin->big_endian, 32);
	bin->hdr.cputype = r_read_ble (&machohdrbytes[4], bin->big_endian, 32);
	bin->hdr.cpusubtype = r_read_ble (&machohdrbytes[8], bin->big_endian, 32);
	bin->hdr.filetype = r_read_ble (&machohdrbytes[12], bin->big_endian, 32);
	bin->hdr.ncmds = r_read_ble (&machohdrbytes[16], bin->big_endian, 32);
	bin->hdr.sizeofcmds = r_read_ble (&machohdrbytes[20], bin->big_endian, 32);
	bin->hdr.flags = r_read_ble (&machohdrbytes[24], bin->big_endian, 32);
#if R_BIN_MACH064
	bin->hdr.reserved = r_read_ble (&machohdrbytes[28], bin->big_endian, 32);
#endif
	init_sdb_formats (bin);
	sdb_num_set (bin->kv, "mach0_header.offset", 0, 0); // wat about fatmach0?
	return true;
}

// Parse ONE LC_SEGMENT/LC_SEGMENT_64 command at `off` into the last slot of
// bin->segs (bin->nsegs was already incremented by the caller), then read its
// section headers into bin->sects. Returns false on any bounds violation.
static bool parse_segments(struct MACH0_(obj_t) *bin, ut64 off) {
	size_t i, j, k, sect, len;
	ut32 size_sects;
	ut8 segcom[sizeof (struct MACH0_(segment_command))] = {0};
	ut8 sec[sizeof (struct MACH0_(section))] = {0};
	char section_flagname[128];
	// overflow- and bounds-check the total segment table size
	if (!UT32_MUL (&size_sects, bin->nsegs, sizeof (struct MACH0_(segment_command)))) {
		return false;
	}
	if (!size_sects || size_sects > bin->size) {
		return false;
	}
	if (off > bin->size || off + sizeof (struct MACH0_(segment_command)) > bin->size) {
		return false;
	}
	if (!(bin->segs = realloc (bin->segs, bin->nsegs * sizeof(struct MACH0_(segment_command))))) {
		r_sys_perror ("realloc (seg)");
		return false;
	}
	j = bin->nsegs - 1; // index of the segment being filled in
	len = r_buf_read_at (bin->b, off, segcom, sizeof (struct MACH0_(segment_command)));
	if (len != sizeof (struct MACH0_(segment_command))) {
		bprintf ("Error: read (seg)\n");
		return false;
	}
	i = 0;
	bin->segs[j].cmd = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	bin->segs[j].cmdsize = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	memcpy (&bin->segs[j].segname, &segcom[i], 16);
	i += 16;
#if R_BIN_MACH064
	bin->segs[j].vmaddr = r_read_ble64 (&segcom[i], bin->big_endian);
	i += sizeof (ut64);
	bin->segs[j].vmsize = r_read_ble64 (&segcom[i], bin->big_endian);
	i += sizeof (ut64);
	bin->segs[j].fileoff = r_read_ble64 (&segcom[i], bin->big_endian);
	i += sizeof (ut64);
	bin->segs[j].filesize = r_read_ble64 (&segcom[i], bin->big_endian);
	i += sizeof (ut64);
#else
	bin->segs[j].vmaddr = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	bin->segs[j].vmsize = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	bin->segs[j].fileoff = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	bin->segs[j].filesize = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
#endif
	bin->segs[j].maxprot = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	bin->segs[j].initprot = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	bin->segs[j].nsects = r_read_ble32 (&segcom[i], bin->big_endian);
	i += sizeof (ut32);
	bin->segs[j].flags = r_read_ble32 (&segcom[i], bin->big_endian);
	char *segment_flagname = NULL;
#if R_BIN_MACH064
	segment_flagname = r_str_newf ("mach0_segment64_%u.offset", (ut32)j);
#else
	segment_flagname = r_str_newf ("mach0_segment_%u.offset", (ut32)j);
#endif
	sdb_num_set (bin->kv, segment_flagname, off, 0);
	free (segment_flagname);
	sdb_num_set (bin->kv, "mach0_segments.count", 0, 0);
	if (bin->segs[j].nsects > 0) {
		sect = bin->nsects; // first index of this segment's sections
		bin->nsects += bin->segs[j].nsects;
		if (bin->nsects > 128) {
			// NOTE(review): `& 0xf` keeps only the low nibble — presumably a
			// defensive cap for fuzzed inputs; verify against upstream intent
			int new_nsects = bin->nsects & 0xf;
			bprintf ("WARNING: mach0 header contains too many sections (%d). Wrapping to %d\n",
				bin->nsects, new_nsects);
			bin->nsects = new_nsects;
		}
		if ((int)bin->nsects < 1) {
			bprintf ("Warning: Invalid number of sections\n");
			bin->nsects = sect;
			return false;
		}
		if (!UT32_MUL (&size_sects, bin->nsects-sect, sizeof (struct MACH0_(section)))){
			bin->nsects = sect;
			return false;
		}
		if (!size_sects || size_sects > bin->size){
			bin->nsects = sect;
			return false;
		}
		// cmdsize must exactly cover the segment command plus its sections
		if (bin->segs[j].cmdsize != sizeof (struct MACH0_(segment_command)) \
				+ (sizeof (struct MACH0_(section))*bin->segs[j].nsects)){
			bin->nsects = sect;
			return false;
		}
		if (off + sizeof (struct MACH0_(segment_command)) > bin->size ||\
				off + sizeof (struct MACH0_(segment_command)) + size_sects > bin->size){
			bin->nsects = sect;
			return false;
		}
		if (!(bin->sects = realloc (bin->sects, bin->nsects * sizeof (struct MACH0_(section))))) {
			r_sys_perror ("realloc (sects)");
			bin->nsects = sect;
			return false;
		}
		for (k = sect, j = 0; k < bin->nsects; k++, j++) {
			ut64 offset = off + sizeof (struct MACH0_(segment_command)) + j * sizeof (struct MACH0_(section));
			len = r_buf_read_at (bin->b, offset, sec, sizeof (struct MACH0_(section)));
			if (len != sizeof (struct MACH0_(section))) {
				bprintf ("Error: read (sects)\n");
				bin->nsects = sect;
				return false;
			}
			i = 0;
			memcpy (&bin->sects[k].sectname, &sec[i], 16);
			i += 16;
			memcpy (&bin->sects[k].segname, &sec[i], 16);
			i += 16;
			snprintf (section_flagname, sizeof (section_flagname), "mach0_section_%.16s_%.16s.offset", bin->sects[k].segname, bin->sects[k].sectname);
			sdb_num_set (bin->kv, section_flagname, offset, 0);
#if R_BIN_MACH064
			snprintf (section_flagname, sizeof (section_flagname), "mach0_section_%.16s_%.16s.format", bin->sects[k].segname, bin->sects[k].sectname);
			sdb_set (bin->kv, section_flagname, "mach0_section64", 0);
#else
			snprintf (section_flagname, sizeof (section_flagname), "mach0_section_%.16s_%.16s.format", bin->sects[k].segname, bin->sects[k].sectname);
			sdb_set (bin->kv, section_flagname, "mach0_section", 0);
#endif
#if R_BIN_MACH064
			bin->sects[k].addr = r_read_ble64 (&sec[i], bin->big_endian);
			i += sizeof (ut64);
			bin->sects[k].size = r_read_ble64 (&sec[i], bin->big_endian);
			i += sizeof (ut64);
#else
			bin->sects[k].addr = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
			bin->sects[k].size = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
#endif
			bin->sects[k].offset = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
			bin->sects[k].align = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
			bin->sects[k].reloff = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
			bin->sects[k].nreloc = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
			bin->sects[k].flags = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
			bin->sects[k].reserved1 = r_read_ble32 (&sec[i], bin->big_endian);
			i += sizeof (ut32);
			bin->sects[k].reserved2 = r_read_ble32 (&sec[i], bin->big_endian);
#if R_BIN_MACH064
			i += sizeof (ut32);
			bin->sects[k].reserved3 = r_read_ble32 (&sec[i], bin->big_endian);
#endif
		}
	}
	return true;
}

// Error helper for parse_symtab: record message and jump to cleanup
#define Error(x) error_message = x; goto error;

// Parse LC_SYMTAB at `off`: load the string table into mo->symstr and the
// nlist array into mo->symtab. Returns false (and frees both) on failure.
static bool parse_symtab(struct MACH0_(obj_t) *mo, ut64 off) {
	struct symtab_command st;
	ut32 size_sym;
	size_t i;
	const char *error_message = "";
	ut8 symt[sizeof (struct symtab_command)] = {0};
	ut8 nlst[sizeof (struct MACH0_(nlist))] = {0};
	const bool be = mo->big_endian;
	if (off > (ut64)mo->size || off + sizeof (struct symtab_command) > (ut64)mo->size) {
		return false;
	}
	int len = r_buf_read_at (mo->b, off, symt, sizeof (struct symtab_command));
	if (len != sizeof (struct symtab_command)) {
		Eprintf ("Error: read (symtab)\n");
		return false;
	}
	st.cmd = r_read_ble32 (symt, be);
	st.cmdsize = r_read_ble32 (symt + 4, be);
	// symbols_off rebases offsets for symbols stored outside this slice (kernelcache)
	st.symoff = r_read_ble32 (symt + 8, be) + mo->symbols_off;
	st.nsyms = r_read_ble32 (symt + 12, be);
	st.stroff = r_read_ble32 (symt + 16, be) + mo->symbols_off;
	st.strsize = r_read_ble32 (symt + 20, be);
	mo->symtab = NULL;
	mo->nsymtab = 0;
	if (st.strsize > 0 && st.strsize < mo->size && st.nsyms > 0) {
		mo->nsymtab = st.nsyms;
		// all offsets/sizes must stay within the mapped object
		if (st.stroff > mo->size || st.stroff + st.strsize > mo->size) {
			Error ("fail");
		}
		if (!UT32_MUL (&size_sym, mo->nsymtab, sizeof (struct MACH0_(nlist)))) {
			Error ("fail2");
		}
		if (!size_sym) {
			Error ("symbol size is zero");
		}
		if (st.symoff > mo->size || st.symoff + size_sym > mo->size) {
			Error ("symoff is out of bounds");
		}
		// +2 guarantees NUL termination even for truncated string tables
		if (!(mo->symstr = calloc (1, st.strsize + 2))) {
			Error ("symoff is out of bounds");
		}
		mo->symstrlen = st.strsize;
		len = r_buf_read_at (mo->b, st.stroff, (ut8*)mo->symstr, st.strsize);
		if (len != st.strsize) {
			Error ("Error: read (symstr)");
		}
		// cap nsymtab by what can physically fit in the buffer past symoff
		ut64 max_nsymtab = (r_buf_size (mo->b) - st.symoff) / sizeof (struct MACH0_(nlist));
		if (mo->nsymtab > max_nsymtab || !(mo->symtab = calloc (mo->nsymtab, sizeof (struct MACH0_(nlist))))) {
			goto error;
		}
		for (i = 0; i < mo->nsymtab; i++) {
			ut64 at = st.symoff + (i * sizeof (struct MACH0_(nlist)));
			len = r_buf_read_at (mo->b, at, nlst, sizeof (struct MACH0_(nlist)));
			if (len != sizeof (struct MACH0_(nlist))) {
				Error ("read (nlist)");
			}
			//XXX not very safe what if is n_un.n_name instead?
			mo->symtab[i].n_strx = r_read_ble32 (nlst, be);
			mo->symtab[i].n_type = r_read_ble8 (nlst + 4);
			mo->symtab[i].n_sect = r_read_ble8 (nlst + 5);
			mo->symtab[i].n_desc = r_read_ble16 (nlst + 6, be);
#if R_BIN_MACH064
			mo->symtab[i].n_value = r_read_ble64 (&nlst[8], be);
#else
			mo->symtab[i].n_value = r_read_ble32 (&nlst[8], be);
#endif
		}
	}
	return true;
error:
	R_FREE (mo->symstr);
	R_FREE (mo->symtab);
	Eprintf ("%s\n", error_message);
	return false;
}

// Parse LC_DYSYMTAB at `off` into bin->dysymtab, then load the table of
// contents, module table and indirect symbol table it references.
static bool parse_dysymtab(struct MACH0_(obj_t) *bin, ut64 off) {
	size_t len, i;
	ut32 size_tab;
	ut8 dysym[sizeof (struct dysymtab_command)] = {0};
	ut8 dytoc[sizeof (struct dylib_table_of_contents)] = {0};
	ut8 dymod[sizeof (struct MACH0_(dylib_module))] = {0};
	ut8 idsyms[sizeof (ut32)] = {0};
	if (off > bin->size || off + sizeof (struct dysymtab_command) > bin->size) {
		return false;
	}
	len = r_buf_read_at (bin->b, off, dysym, sizeof (struct dysymtab_command));
	if (len != sizeof (struct dysymtab_command)) {
		bprintf ("Error: read (dysymtab)\n");
		return false;
	}
	bin->dysymtab.cmd = r_read_ble32 (&dysym[0], bin->big_endian);
	bin->dysymtab.cmdsize = r_read_ble32 (&dysym[4], bin->big_endian);
	bin->dysymtab.ilocalsym = r_read_ble32 (&dysym[8], bin->big_endian);
	bin->dysymtab.nlocalsym = r_read_ble32 (&dysym[12], bin->big_endian);
	bin->dysymtab.iextdefsym = r_read_ble32 (&dysym[16], bin->big_endian);
	bin->dysymtab.nextdefsym = r_read_ble32 (&dysym[20], bin->big_endian);
	bin->dysymtab.iundefsym = r_read_ble32 (&dysym[24], bin->big_endian);
	bin->dysymtab.nundefsym = r_read_ble32 (&dysym[28], bin->big_endian);
	bin->dysymtab.tocoff = r_read_ble32 (&dysym[32], bin->big_endian);
	bin->dysymtab.ntoc = r_read_ble32 (&dysym[36], bin->big_endian);
	bin->dysymtab.modtaboff = r_read_ble32 (&dysym[40], bin->big_endian);
	bin->dysymtab.nmodtab = r_read_ble32 (&dysym[44], bin->big_endian);
	bin->dysymtab.extrefsymoff = r_read_ble32 (&dysym[48], bin->big_endian);
	bin->dysymtab.nextrefsyms = r_read_ble32 (&dysym[52], bin->big_endian);
	bin->dysymtab.indirectsymoff = r_read_ble32 (&dysym[56], bin->big_endian);
	bin->dysymtab.nindirectsyms = r_read_ble32 (&dysym[60], bin->big_endian);
	bin->dysymtab.extreloff = r_read_ble32 (&dysym[64], bin->big_endian);
	bin->dysymtab.nextrel = r_read_ble32 (&dysym[68], bin->big_endian);
	bin->dysymtab.locreloff = r_read_ble32 (&dysym[72], bin->big_endian);
	bin->dysymtab.nlocrel = r_read_ble32 (&dysym[76], bin->big_endian);
	// table of contents (dylib_table_of_contents entries)
	bin->ntoc = bin->dysymtab.ntoc;
	if (bin->ntoc > 0) {
		if (!(bin->toc = calloc (bin->ntoc, sizeof (struct dylib_table_of_contents)))) {
			r_sys_perror ("calloc (toc)");
			return false;
		}
		if (!UT32_MUL (&size_tab, bin->ntoc, sizeof (struct dylib_table_of_contents))){
			R_FREE (bin->toc);
			return false;
		}
		if (!size_tab){
			R_FREE (bin->toc);
			return false;
		}
		if (bin->dysymtab.tocoff > bin->size || bin->dysymtab.tocoff + size_tab > bin->size){
			R_FREE (bin->toc);
			return false;
		}
		for (i = 0; i < bin->ntoc; i++) {
			len = r_buf_read_at (bin->b, bin->dysymtab.tocoff +
				i * sizeof (struct dylib_table_of_contents),
				dytoc, sizeof (struct dylib_table_of_contents));
			if (len != sizeof (struct dylib_table_of_contents)) {
				bprintf ("Error: read (toc)\n");
				R_FREE (bin->toc);
				return false;
			}
			bin->toc[i].symbol_index = r_read_ble32 (&dytoc[0], bin->big_endian);
			bin->toc[i].module_index = r_read_ble32 (&dytoc[4], bin->big_endian);
		}
	}
	// module table, capped by what fits in the file past modtaboff
	bin->nmodtab = bin->dysymtab.nmodtab;
	ut64 max_nmodtab = (bin->size - bin->dysymtab.modtaboff) / sizeof (struct MACH0_(dylib_module));
	if (bin->nmodtab > 0 && bin->nmodtab <= max_nmodtab) {
		if (!(bin->modtab = calloc (bin->nmodtab, sizeof (struct MACH0_(dylib_module))))) {
			r_sys_perror ("calloc (modtab)");
			return false;
		}
		if (!UT32_MUL (&size_tab, bin->nmodtab, sizeof (struct MACH0_(dylib_module)))){
			R_FREE (bin->modtab);
			return false;
		}
		if (!size_tab){
			R_FREE (bin->modtab);
			return false;
		}
		if (bin->dysymtab.modtaboff > bin->size || \
			bin->dysymtab.modtaboff + size_tab > bin->size){
			R_FREE (bin->modtab);
			return false;
		}
		for (i = 0; i < bin->nmodtab; i++) {
			len = r_buf_read_at (bin->b, bin->dysymtab.modtaboff +
				i * sizeof (struct MACH0_(dylib_module)),
				dymod, sizeof (struct MACH0_(dylib_module)));
			if (len == -1) {
				bprintf ("Error: read (modtab)\n");
				R_FREE (bin->modtab);
				return false;
			}
			bin->modtab[i].module_name = r_read_ble32 (&dymod[0], bin->big_endian);
			bin->modtab[i].iextdefsym = r_read_ble32 (&dymod[4], bin->big_endian);
			bin->modtab[i].nextdefsym = r_read_ble32 (&dymod[8], bin->big_endian);
			bin->modtab[i].irefsym = r_read_ble32 (&dymod[12], bin->big_endian);
			bin->modtab[i].nrefsym = r_read_ble32 (&dymod[16], bin->big_endian);
			bin->modtab[i].ilocalsym = r_read_ble32 (&dymod[20], bin->big_endian);
			bin->modtab[i].nlocalsym = r_read_ble32 (&dymod[24], bin->big_endian);
			bin->modtab[i].iextrel = r_read_ble32 (&dymod[28], bin->big_endian);
			bin->modtab[i].nextrel = r_read_ble32 (&dymod[32], bin->big_endian);
			bin->modtab[i].iinit_iterm = r_read_ble32 (&dymod[36], bin->big_endian);
			bin->modtab[i].ninit_nterm = r_read_ble32 (&dymod[40], bin->big_endian);
#if R_BIN_MACH064
			bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[44], bin->big_endian);
			bin->modtab[i].objc_module_info_addr = r_read_ble64 (&dymod[48], bin->big_endian);
#else
			bin->modtab[i].objc_module_info_addr = r_read_ble32 (&dymod[44], bin->big_endian);
			bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[48], bin->big_endian);
#endif
		}
	}
	// indirect symbol table (array of ut32 symtab indices)
	bin->nindirectsyms = bin->dysymtab.nindirectsyms;
	if (bin->nindirectsyms > 0) {
		if (!(bin->indirectsyms = calloc (bin->nindirectsyms, sizeof (ut32)))) {
			r_sys_perror ("calloc (indirectsyms)");
			return false;
		}
		if (!UT32_MUL (&size_tab, bin->nindirectsyms, sizeof (ut32))){
			R_FREE (bin->indirectsyms);
			return false;
		}
		if (!size_tab){
			R_FREE (bin->indirectsyms);
			return false;
		}
		if (bin->dysymtab.indirectsymoff > bin->size || \
			bin->dysymtab.indirectsymoff + size_tab > bin->size){
			R_FREE (bin->indirectsyms);
			return false;
		}
		for (i = 0; i < bin->nindirectsyms; i++) {
			len = r_buf_read_at (bin->b, bin->dysymtab.indirectsymoff + i * sizeof (ut32), idsyms, 4);
			if (len == -1) {
				bprintf ("Error: read (indirect syms)\n");
				R_FREE (bin->indirectsyms);
				return false;
			}
			bin->indirectsyms[i] = r_read_ble32 (&idsyms[0], bin->big_endian);
		}
	}
	/* TODO extrefsyms, extrel, locrel */
	return true;
}

// Bounded NUL-terminated string copy from a blob; NULL if off is out of range.
static char *readString(ut8 *p, int off, int len) {
	if (off < 0 || off >= len) {
		return NULL;
	}
	return r_str_ndup ((const char *)p + off, len - off);
}

// Dump and verify a code-signature CodeDirectory blob at `offset`:
// prints its fields, identity/team-id strings, and recomputes the cdhash.
static void parseCodeDirectory(RBuffer *b, int offset, int datasize) {
	typedef struct __CodeDirectory {
		uint32_t magic;		/* magic number (CSMAGIC_CODEDIRECTORY) */
		uint32_t length;	/* total length of CodeDirectory blob */
		uint32_t version;	/* compatibility version */
		uint32_t flags;		/* setup and mode flags */
		uint32_t hashOffset;	/* offset of hash slot element at index zero */
		uint32_t identOffset;	/* offset of identifier string */
		uint32_t nSpecialSlots;	/* number of special hash slots */
		uint32_t nCodeSlots;	/* number of ordinary (code) hash slots */
		uint32_t codeLimit;	/* limit to main image signature range */
		uint8_t hashSize;	/* size of each hash in bytes */
		uint8_t hashType;	/* type of hash (cdHashType* constants) */
		uint8_t platform;	/* unused (must be zero) */
		uint8_t pageSize;	/* log2(page size in bytes); 0 => infinite */
		uint32_t spare2;	/* unused (must be zero) */
		/* followed by dynamic content as located by offset fields above */
		uint32_t scatterOffset;
		uint32_t teamIDOffset;
		uint32_t spare3;
		ut64 codeLimit64;
		ut64 execSegBase;
		ut64 execSegLimit;
		ut64 execSegFlags;
	} CS_CodeDirectory;
	ut64 off = offset;
	int psize = datasize;
	ut8 *p = calloc (1, psize);
	if (!p) {
		return;
	}
	eprintf ("Offset: 0x%08"PFMT64x"\n", off);
	r_buf_read_at (b, off, p, datasize);
	CS_CodeDirectory cscd = {0};
	// fields are stored big-endian inside the blob
#define READFIELD(x) cscd.x = r_read_ble32 (p + r_offsetof (CS_CodeDirectory, x), 1)
#define READFIELD8(x) cscd.x = p[r_offsetof (CS_CodeDirectory, x)]
	READFIELD (length);
	READFIELD (version);
	READFIELD (flags);
	READFIELD (hashOffset);
	READFIELD
(identOffset); READFIELD (nSpecialSlots); READFIELD (nCodeSlots); READFIELD (hashSize); READFIELD (teamIDOffset); READFIELD8 (hashType); READFIELD (pageSize); READFIELD (codeLimit); eprintf ("Version: %x\n", cscd.version); eprintf ("Flags: %x\n", cscd.flags); eprintf ("Length: %d\n", cscd.length); eprintf ("PageSize: %d\n", cscd.pageSize); eprintf ("hashOffset: %d\n", cscd.hashOffset); eprintf ("codeLimit: %d\n", cscd.codeLimit); eprintf ("hashSize: %d\n", cscd.hashSize); eprintf ("hashType: %d\n", cscd.hashType); char *identity = readString (p, cscd.identOffset, psize); eprintf ("Identity: %s\n", identity); char *teamId = readString (p, cscd.teamIDOffset, psize); eprintf ("TeamID: %s\n", teamId); eprintf ("CodeSlots: %d\n", cscd.nCodeSlots); free (identity); free (teamId); int hashSize = 20; // SHA1 is default int algoType = R_HASH_SHA1; const char *hashName = "sha1"; switch (cscd.hashType) { case 0: // SHA1 == 20 bytes case 1: // SHA1 == 20 bytes hashSize = 20; hashName = "sha1"; algoType = R_HASH_SHA1; break; case 2: // SHA256 == 32 bytes hashSize = 32; algoType = R_HASH_SHA256; hashName = "sha256"; break; } // computed cdhash RHash *ctx = r_hash_new (true, algoType); int fofsz = cscd.length; if (fofsz > 0 && fofsz < (r_buf_size (b) - off)) { ut8 *fofbuf = calloc (fofsz, 1); if (fofbuf) { int i; if (r_buf_read_at (b, off, fofbuf, fofsz) != fofsz) { eprintf ("Invalid cdhash offset/length values\n"); } r_hash_do_begin (ctx, algoType); if (algoType == R_HASH_SHA1) { r_hash_do_sha1 (ctx, fofbuf, fofsz); } else { r_hash_do_sha256 (ctx, fofbuf, fofsz); } r_hash_do_end (ctx, algoType); eprintf ("ph %s @ 0x%"PFMT64x"!%d\n", hashName, off, fofsz); eprintf ("ComputedCDHash: "); for (i = 0; i < hashSize;i++) { eprintf ("%02x", ctx->digest[i]); } eprintf ("\n"); free (fofbuf); } } // show and check the rest of hashes ut8 *hash = p + cscd.hashOffset; int j = 0; int k = 0; eprintf ("Hashed region: 0x%08"PFMT64x" - 0x%08"PFMT64x"\n", (ut64)0, (ut64)cscd.codeLimit); for (j = 0; 
j < cscd.nCodeSlots; j++) { int fof = 4096 * j; int idx = j * hashSize; eprintf ("0x%08"PFMT64x" ", off + cscd.hashOffset + idx); for (k = 0; k < hashSize; k++) { eprintf ("%02x", hash[idx + k]); } ut8 fofbuf[4096]; int fofsz = R_MIN (sizeof (fofbuf), cscd.codeLimit - fof); r_buf_read_at (b, fof, fofbuf, sizeof (fofbuf)); r_hash_do_begin (ctx, algoType); if (algoType == R_HASH_SHA1) { r_hash_do_sha1 (ctx, fofbuf, fofsz); } else { r_hash_do_sha256 (ctx, fofbuf, fofsz); } r_hash_do_end (ctx, algoType); if (memcmp (hash + idx, ctx->digest, hashSize)) { eprintf (" wx "); int i; for (i = 0; i < hashSize;i++) { eprintf ("%02x", ctx->digest[i]); } } else { eprintf (" OK"); } eprintf ("\n"); } r_hash_free (ctx); free (p); } // parse the Load Command static bool parse_signature(struct MACH0_(obj_t) *bin, ut64 off) { int i,len; ut32 data; bin->signature = NULL; struct linkedit_data_command link = {0}; ut8 lit[sizeof (struct linkedit_data_command)] = {0}; struct blob_index_t idx = {0}; struct super_blob_t super = {{0}}; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, lit, sizeof (struct linkedit_data_command)); if (len != sizeof (struct linkedit_data_command)) { bprintf ("Failed to get data while parsing LC_CODE_SIGNATURE command\n"); return false; } link.cmd = r_read_ble32 (&lit[0], bin->big_endian); link.cmdsize = r_read_ble32 (&lit[4], bin->big_endian); link.dataoff = r_read_ble32 (&lit[8], bin->big_endian); link.datasize = r_read_ble32 (&lit[12], bin->big_endian); data = link.dataoff; if (data > bin->size || data + sizeof (struct super_blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); return true; } super.blob.magic = r_buf_read_ble32_at (bin->b, data, mach0_endian); super.blob.length = r_buf_read_ble32_at (bin->b, data + 4, mach0_endian); super.count = r_buf_read_ble32_at (bin->b, data + 8, mach0_endian); char *verbose = r_sys_getenv 
("RABIN2_CODESIGN_VERBOSE"); bool isVerbose = false; if (verbose) { isVerbose = *verbose; free (verbose); } // to dump all certificates // [0x00053f75]> b 5K;/x 30800609;wtf @@ hit* // then do this: // $ openssl asn1parse -inform der -in a|less // $ openssl pkcs7 -inform DER -print_certs -text -in a for (i = 0; i < super.count; i++) { if (data + i > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_index_t bi; if (r_buf_read_at (bin->b, data + 12 + (i * sizeof (struct blob_index_t)), (ut8*)&bi, sizeof (struct blob_index_t)) < sizeof (struct blob_index_t)) { break; } idx.type = r_read_ble32 (&bi.type, mach0_endian); idx.offset = r_read_ble32 (&bi.offset, mach0_endian); switch (idx.type) { case CSSLOT_ENTITLEMENTS: if (true || isVerbose) { ut64 off = data + idx.offset; if (off > bin->size || off + sizeof (struct blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_t entitlements = {0}; entitlements.magic = r_buf_read_ble32_at (bin->b, off, mach0_endian); entitlements.length = r_buf_read_ble32_at (bin->b, off + 4, mach0_endian); len = entitlements.length - sizeof (struct blob_t); if (len <= bin->size && len > 1) { bin->signature = calloc (1, len + 1); if (!bin->signature) { break; } if (off + sizeof (struct blob_t) + len < r_buf_size (bin->b)) { r_buf_read_at (bin->b, off + sizeof (struct blob_t), (ut8 *)bin->signature, len); if (len >= 0) { bin->signature[len] = '\0'; } } else { bin->signature = (ut8 *)strdup ("Malformed entitlement"); } } else { bin->signature = (ut8 *)strdup ("Malformed entitlement"); } } break; case CSSLOT_CODEDIRECTORY: if (isVerbose) { parseCodeDirectory (bin->b, data + idx.offset, link.datasize); } break; case 0x1000: // unknown break; case CSSLOT_CMS_SIGNATURE: // ASN1/DER certificate if (isVerbose) { ut8 header[8] = {0}; r_buf_read_at (bin->b, data + idx.offset, header, sizeof (header)); ut32 length = R_MIN (UT16_MAX, r_read_ble32 (header + 4, 1)); ut8 
*p = calloc (length, 1); if (p) { r_buf_read_at (bin->b, data + idx.offset + 0, p, length); ut32 *words = (ut32*)p; eprintf ("Magic: %x\n", words[0]); eprintf ("wtf DUMP @%d!%d\n", (int)data + idx.offset + 8, (int)length); eprintf ("openssl pkcs7 -print_certs -text -inform der -in DUMP\n"); eprintf ("openssl asn1parse -offset %d -length %d -inform der -in /bin/ls\n", (int)data + idx.offset + 8, (int)length); eprintf ("pFp@%d!%d\n", (int)data + idx.offset + 8, (int)length); free (p); } } break; case CSSLOT_REQUIREMENTS: // 2 { ut8 p[256]; r_buf_read_at (bin->b, data + idx.offset + 16, p, sizeof (p)); p[sizeof (p) - 1] = 0; ut32 slot_size = r_read_ble32 (p + 8, 1); if (slot_size < sizeof (p)) { ut32 ident_size = r_read_ble32 (p + 8, 1); if (!ident_size || ident_size > sizeof (p) - 28) { break; } char *ident = r_str_ndup ((const char *)p + 28, ident_size); if (ident) { sdb_set (bin->kv, "mach0.ident", ident, 0); free (ident); } } else { if (bin->verbose) { eprintf ("Invalid code slot size\n"); } } } break; case CSSLOT_INFOSLOT: // 1; case CSSLOT_RESOURCEDIR: // 3; case CSSLOT_APPLICATION: // 4; // TODO: parse those codesign slots if (bin->verbose) { eprintf ("TODO: Some codesign slots are not yet supported\n"); } break; default: if (bin->verbose) { eprintf ("Unknown Code signature slot %d\n", idx.type); } break; } } if (!bin->signature) { bin->signature = (ut8 *)strdup ("No entitlement found"); } return true; } static int parse_thread(struct MACH0_(obj_t) *bin, struct load_command *lc, ut64 off, bool is_first_thread) { ut64 ptr_thread, pc = UT64_MAX, pc_offset = UT64_MAX; ut32 flavor, count; ut8 *arw_ptr = NULL; int arw_sz, len = 0; ut8 thc[sizeof (struct thread_command)] = {0}; ut8 tmp[4]; if (off > bin->size || off + sizeof (struct thread_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, thc, 8); if (len < 1) { goto wrong_read; } bin->thread.cmd = r_read_ble32 (&thc[0], bin->big_endian); bin->thread.cmdsize = r_read_ble32 (&thc[4], 
bin->big_endian); if (r_buf_read_at (bin->b, off + sizeof (struct thread_command), tmp, 4) < 4) { goto wrong_read; } flavor = r_read_ble32 (tmp, bin->big_endian); if (off + sizeof (struct thread_command) + sizeof (flavor) > bin->size || off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (ut32) > bin->size) { return false; } // TODO: use count for checks if (r_buf_read_at (bin->b, off + sizeof (struct thread_command) + sizeof (flavor), tmp, 4) < 4) { goto wrong_read; } count = r_read_ble32 (tmp, bin->big_endian); ptr_thread = off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (count); if (ptr_thread > bin->size) { return false; } switch (bin->hdr.cputype) { case CPU_TYPE_I386: case CPU_TYPE_X86_64: switch (flavor) { case X86_THREAD_STATE32: if (ptr_thread + sizeof (struct x86_thread_state32) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.x86_32, "16i", 1) == -1) { bprintf ("Error: read (thread state x86_32)\n"); return false; } pc = bin->thread_state.x86_32.eip; pc_offset = ptr_thread + r_offsetof(struct x86_thread_state32, eip); arw_ptr = (ut8 *)&bin->thread_state.x86_32; arw_sz = sizeof (struct x86_thread_state32); break; case X86_THREAD_STATE64: if (ptr_thread + sizeof (struct x86_thread_state64) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.x86_64, "32l", 1) == -1) { bprintf ("Error: read (thread state x86_64)\n"); return false; } pc = bin->thread_state.x86_64.rip; pc_offset = ptr_thread + r_offsetof(struct x86_thread_state64, rip); arw_ptr = (ut8 *)&bin->thread_state.x86_64; arw_sz = sizeof (struct x86_thread_state64); break; //default: bprintf ("Unknown type\n"); } break; case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: if (flavor == X86_THREAD_STATE32) { if (ptr_thread + sizeof (struct ppc_thread_state32) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.ppc_32, bin->big_endian ? 
"40I" : "40i", 1) == -1) { bprintf ("Error: read (thread state ppc_32)\n"); return false; } pc = bin->thread_state.ppc_32.srr0; pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state32, srr0); arw_ptr = (ut8 *)&bin->thread_state.ppc_32; arw_sz = sizeof (struct ppc_thread_state32); } else if (flavor == X86_THREAD_STATE64) { if (ptr_thread + sizeof (struct ppc_thread_state64) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.ppc_64, bin->big_endian ? "34LI3LI" : "34li3li", 1) == -1) { bprintf ("Error: read (thread state ppc_64)\n"); return false; } pc = bin->thread_state.ppc_64.srr0; pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state64, srr0); arw_ptr = (ut8 *)&bin->thread_state.ppc_64; arw_sz = sizeof (struct ppc_thread_state64); } break; case CPU_TYPE_ARM: if (ptr_thread + sizeof (struct arm_thread_state32) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.arm_32, bin->big_endian ? "17I" : "17i", 1) == -1) { bprintf ("Error: read (thread state arm)\n"); return false; } pc = bin->thread_state.arm_32.r15; pc_offset = ptr_thread + r_offsetof (struct arm_thread_state32, r15); arw_ptr = (ut8 *)&bin->thread_state.arm_32; arw_sz = sizeof (struct arm_thread_state32); break; case CPU_TYPE_ARM64: if (ptr_thread + sizeof (struct arm_thread_state64) > bin->size) { return false; } if (r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.arm_64, bin->big_endian ? "34LI1I" : "34Li1i", 1) == -1) { bprintf ("Error: read (thread state arm)\n"); return false; } pc = r_read_be64 (&bin->thread_state.arm_64.pc); pc_offset = ptr_thread + r_offsetof (struct arm_thread_state64, pc); arw_ptr = (ut8*)&bin->thread_state.arm_64; arw_sz = sizeof (struct arm_thread_state64); break; default: bprintf ("Error: read (unknown thread state structure)\n"); return false; } // TODO: this shouldnt be an bprintf... 
if (arw_ptr && arw_sz > 0) { int i; ut8 *p = arw_ptr; bprintf ("arw "); for (i = 0; i < arw_sz; i++) { bprintf ("%02x", 0xff & p[i]); } bprintf ("\n"); } if (is_first_thread) { bin->main_cmd = *lc; if (pc != UT64_MAX) { bin->entry = pc; } if (pc_offset != UT64_MAX) { sdb_num_set (bin->kv, "mach0.entry.offset", pc_offset, 0); } } return true; wrong_read: bprintf ("Error: read (thread)\n"); return false; } static int parse_function_starts(struct MACH0_(obj_t) *bin, ut64 off) { struct linkedit_data_command fc; ut8 sfc[sizeof (struct linkedit_data_command)] = {0}; int len; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { bprintf ("Likely overflow while parsing" " LC_FUNCTION_STARTS command\n"); } bin->func_start = NULL; len = r_buf_read_at (bin->b, off, sfc, sizeof (struct linkedit_data_command)); if (len < 1) { bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS command\n"); } fc.cmd = r_read_ble32 (&sfc[0], bin->big_endian); fc.cmdsize = r_read_ble32 (&sfc[4], bin->big_endian); fc.dataoff = r_read_ble32 (&sfc[8], bin->big_endian); fc.datasize = r_read_ble32 (&sfc[12], bin->big_endian); if ((int)fc.datasize > 0) { ut8 *buf = calloc (1, fc.datasize + 1); if (!buf) { bprintf ("Failed to allocate buffer\n"); return false; } bin->func_size = fc.datasize; if (fc.dataoff > bin->size || fc.dataoff + fc.datasize > bin->size) { free (buf); bprintf ("Likely overflow while parsing " "LC_FUNCTION_STARTS command\n"); return false; } len = r_buf_read_at (bin->b, fc.dataoff, buf, fc.datasize); if (len != fc.datasize) { free (buf); bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS\n"); return false; } buf[fc.datasize] = 0; // null-terminated buffer bin->func_start = buf; return true; } bin->func_start = NULL; return false; } static int parse_dylib(struct MACH0_(obj_t) *bin, ut64 off) { struct dylib_command dl; int lib, len; ut8 sdl[sizeof (struct dylib_command)] = {0}; if (off > bin->size || off + sizeof (struct 
dylib_command) > bin->size) {
		return false;
	}
	/* (continuation of parse_dylib: grow the library-name table and copy
	 * the NUL-padded name of the dylib referenced by this load command) */
	lib = bin->nlibs - 1;
	void *relibs = realloc (bin->libs, bin->nlibs * R_BIN_MACH0_STRING_LENGTH);
	if (!relibs) {
		r_sys_perror ("realloc (libs)");
		return false;
	}
	bin->libs = relibs;
	len = r_buf_read_at (bin->b, off, sdl, sizeof (struct dylib_command));
	if (len < 1) {
		bprintf ("Error: read (dylib)\n");
		return false;
	}
	dl.cmd = r_read_ble32 (&sdl[0], bin->big_endian);
	dl.cmdsize = r_read_ble32 (&sdl[4], bin->big_endian);
	dl.dylib.name = r_read_ble32 (&sdl[8], bin->big_endian);
	dl.dylib.timestamp = r_read_ble32 (&sdl[12], bin->big_endian);
	dl.dylib.current_version = r_read_ble32 (&sdl[16], bin->big_endian);
	dl.dylib.compatibility_version = r_read_ble32 (&sdl[20], bin->big_endian);
	if (off + dl.dylib.name > bin->size ||\
		off + dl.dylib.name + R_BIN_MACH0_STRING_LENGTH > bin->size) {
		return false;
	}
	memset (bin->libs[lib], 0, R_BIN_MACH0_STRING_LENGTH);
	len = r_buf_read_at (bin->b, off + dl.dylib.name,
		(ut8*)bin->libs[lib], R_BIN_MACH0_STRING_LENGTH - 1);
	bin->libs[lib][R_BIN_MACH0_STRING_LENGTH - 1] = 0;
	if (len < 1) {
		bprintf ("Error: read (dylib str)");
		return false;
	}
	return true;
}

/* Map a Mach-O load-command type to its LC_* constant name.
 * Returns "" for unknown commands. */
static const char *cmd_to_string(ut32 cmd) {
	switch (cmd) {
	case LC_DATA_IN_CODE:
		return "LC_DATA_IN_CODE";
	case LC_CODE_SIGNATURE:
		return "LC_CODE_SIGNATURE";
	case LC_RPATH:
		return "LC_RPATH";
	case LC_TWOLEVEL_HINTS:
		return "LC_TWOLEVEL_HINTS";
	case LC_PREBIND_CKSUM:
		return "LC_PREBIND_CKSUM";
	case LC_SEGMENT:
		return "LC_SEGMENT";
	case LC_SEGMENT_64:
		return "LC_SEGMENT_64";
	case LC_SYMTAB:
		return "LC_SYMTAB";
	case LC_SYMSEG:
		return "LC_SYMSEG";
	case LC_DYSYMTAB:
		return "LC_DYSYMTAB";
	case LC_PREBOUND_DYLIB:
		return "LC_PREBOUND_DYLIB";
	case LC_ROUTINES:
		return "LC_ROUTINES";
	case LC_ROUTINES_64:
		return "LC_ROUTINES_64";
	case LC_SUB_FRAMEWORK:
		return "LC_SUB_FRAMEWORK";
	case LC_SUB_UMBRELLA:
		return "LC_SUB_UMBRELLA";
	case LC_SUB_CLIENT:
		return "LC_SUB_CLIENT";
	case LC_SUB_LIBRARY:
		return "LC_SUB_LIBRARY";
	case LC_FUNCTION_STARTS:
		return "LC_FUNCTION_STARTS";
	case LC_DYLIB_CODE_SIGN_DRS:
		return "LC_DYLIB_CODE_SIGN_DRS";
	case LC_NOTE:
		return "LC_NOTE";
	case LC_BUILD_VERSION:
		return "LC_BUILD_VERSION";
	case LC_VERSION_MIN_MACOSX:
		return "LC_VERSION_MIN_MACOSX";
	case LC_VERSION_MIN_IPHONEOS:
		return "LC_VERSION_MIN_IPHONEOS";
	case LC_VERSION_MIN_TVOS:
		return "LC_VERSION_MIN_TVOS";
	case LC_VERSION_MIN_WATCHOS:
		return "LC_VERSION_MIN_WATCHOS";
	case LC_DYLD_INFO:
		return "LC_DYLD_INFO";
	case LC_DYLD_INFO_ONLY:
		return "LC_DYLD_INFO_ONLY";
	case LC_DYLD_ENVIRONMENT:
		return "LC_DYLD_ENVIRONMENT";
	case LC_SOURCE_VERSION:
		return "LC_SOURCE_VERSION";
	case LC_MAIN:
		return "LC_MAIN";
	case LC_UUID:
		return "LC_UUID";
	case LC_ID_DYLIB:
		return "LC_ID_DYLIB";
	case LC_ID_DYLINKER:
		return "LC_ID_DYLINKER";
	case LC_LAZY_LOAD_DYLIB:
		return "LC_LAZY_LOAD_DYLIB";
	case LC_ENCRYPTION_INFO:
		return "LC_ENCRYPTION_INFO";
	case LC_ENCRYPTION_INFO_64:
		return "LC_ENCRYPTION_INFO_64";
	case LC_SEGMENT_SPLIT_INFO:
		return "LC_SEGMENT_SPLIT_INFO";
	case LC_REEXPORT_DYLIB:
		return "LC_REEXPORT_DYLIB";
	case LC_LINKER_OPTION:
		return "LC_LINKER_OPTION";
	case LC_LINKER_OPTIMIZATION_HINT:
		return "LC_LINKER_OPTIMIZATION_HINT";
	case LC_LOAD_DYLINKER:
		return "LC_LOAD_DYLINKER";
	case LC_LOAD_DYLIB:
		return "LC_LOAD_DYLIB";
	case LC_LOAD_WEAK_DYLIB:
		return "LC_LOAD_WEAK_DYLIB";
	case LC_THREAD:
		return "LC_THREAD";
	case LC_UNIXTHREAD:
		return "LC_UNIXTHREAD";
	case LC_LOADFVMLIB:
		return "LC_LOADFVMLIB";
	case LC_IDFVMLIB:
		return "LC_IDFVMLIB";
	case LC_IDENT:
		return "LC_IDENT";
	case LC_FVMFILE:
		return "LC_FVMFILE";
	case LC_PREPAGE:
		return "LC_PREPAGE";
	}
	return "";
}

/* Map a load-command type to the name of the `pf` (print-format) struct
 * definition used to pretty-print it; NULL when no format exists. */
static const char *cmd_to_pf_definition(ut32 cmd) {
	switch (cmd) {
	case LC_BUILD_VERSION:
		return "mach0_build_version_command";
	case LC_CODE_SIGNATURE:
		return "mach0_code_signature_command";
	case LC_DATA_IN_CODE:
		return "mach0_data_in_code_command";
	case LC_DYLD_INFO:
	case LC_DYLD_INFO_ONLY:
		return "mach0_dyld_info_only_command";
	case LC_DYLD_ENVIRONMENT:
		return NULL;
	case LC_DYLIB_CODE_SIGN_DRS:
		return NULL;
	case LC_DYSYMTAB:
		return "mach0_dysymtab_command";
	case LC_ENCRYPTION_INFO:
		return "mach0_encryption_info_command";
	case LC_ENCRYPTION_INFO_64:
		return "mach0_encryption_info64_command";
	case LC_FUNCTION_STARTS:
		return "mach0_function_starts_command";
	case LC_FVMFILE:
		return NULL;
	case LC_ID_DYLIB:
		return "mach0_id_dylib_command";
	case LC_ID_DYLINKER:
		return "mach0_id_dylinker_command";
	case LC_IDENT:
		return NULL;
	case LC_IDFVMLIB:
		return NULL;
	case LC_LINKER_OPTION:
		return NULL;
	case LC_LINKER_OPTIMIZATION_HINT:
		return NULL;
	case LC_LOAD_DYLINKER:
		return "mach0_load_dylinker_command";
	case LC_LAZY_LOAD_DYLIB:
	case LC_LOAD_WEAK_DYLIB:
	case LC_LOAD_DYLIB:
		return "mach0_dylib_command";
	case LC_LOADFVMLIB:
		return NULL;
	case LC_MAIN:
		return "mach0_entry_point_command";
	case LC_NOTE:
		return NULL;
	case LC_PREBIND_CKSUM:
		return NULL;
	case LC_PREBOUND_DYLIB:
		return NULL;
	case LC_PREPAGE:
		return NULL;
	case LC_REEXPORT_DYLIB:
		return NULL;
	case LC_ROUTINES:
		return NULL;
	case LC_ROUTINES_64:
		return NULL;
	case LC_RPATH:
		return "mach0_rpath_command";
	case LC_SEGMENT:
		return "mach0_segment";
	case LC_SEGMENT_64:
		return "mach0_segment64";
	case LC_SEGMENT_SPLIT_INFO:
		return "mach0_segment_split_info_command";
	case LC_SOURCE_VERSION:
		return "mach0_source_version_command";
	case LC_SUB_FRAMEWORK:
		return NULL;
	case LC_SUB_UMBRELLA:
		return NULL;
	case LC_SUB_CLIENT:
		return NULL;
	case LC_SUB_LIBRARY:
		return NULL;
	case LC_SYMTAB:
		return "mach0_symtab_command";
	case LC_SYMSEG:
		return NULL;
	case LC_TWOLEVEL_HINTS:
		return NULL;
	case LC_UUID:
		return "mach0_uuid_command";
	case LC_VERSION_MIN_MACOSX:
	case LC_VERSION_MIN_IPHONEOS:
	case LC_VERSION_MIN_TVOS:
	case LC_VERSION_MIN_WATCHOS:
		return "mach0_version_min_command";
	case LC_THREAD:
		return NULL;
	case LC_UNIXTHREAD:
		return "mach0_unixthread_command";
	}
	return NULL;
}

/* Map an LC_BUILD_VERSION platform id to a human-readable name.
 * (head of function — the remaining cases continue on the following lines) */
static const char *build_version_platform_to_string(ut32 platform) {
	switch (platform) {
	case 1:
		return "macOS";
	case 2:
		return "iOS";
	case 3:
		return "tvOS";
	case 4:
return "watchOS"; case 5: return "bridgeOS"; case 6: return "iOSmac"; case 7: return "iOS Simulator"; case 8: return "tvOS Simulator"; case 9: return "watchOS Simulator"; default: return "unknown"; } } static const char *build_version_tool_to_string(ut32 tool) { switch (tool) { case 1: return "clang"; case 2: return "swift"; case 3: return "ld"; default: return "unknown"; } } static size_t get_word_size(struct MACH0_(obj_t) *bin) { const size_t word_size = MACH0_(get_bits)(bin) / 8; return R_MAX (word_size, 4); } static bool parse_chained_fixups(struct MACH0_(obj_t) *bin, ut32 offset, ut32 size) { struct dyld_chained_fixups_header header; if (size < sizeof (header)) { return false; } if (r_buf_fread_at (bin->b, offset, (ut8 *)&header, "7i", 1) != sizeof (header)) { return false; } if (header.fixups_version > 0) { eprintf ("Unsupported fixups version: %u\n", header.fixups_version); return false; } ut64 starts_at = offset + header.starts_offset; if (header.starts_offset > size) { return false; } ut32 segs_count = r_buf_read_le32_at (bin->b, starts_at); if (segs_count == UT32_MAX || segs_count == 0) { return false; } bin->segs_count = segs_count; bin->chained_starts = R_NEWS0 (struct r_dyld_chained_starts_in_segment *, segs_count); if (!bin->chained_starts) { return false; } bin->fixups_header = header; bin->fixups_offset = offset; bin->fixups_size = size; size_t i; ut64 cursor = starts_at + sizeof (ut32); ut64 bsize = r_buf_size (bin->b); for (i = 0; i < segs_count && cursor + 4 < bsize; i++) { ut32 seg_off; if ((seg_off = r_buf_read_le32_at (bin->b, cursor)) == UT32_MAX || !seg_off) { cursor += sizeof (ut32); continue; } if (i >= bin->nsegs) { break; } struct r_dyld_chained_starts_in_segment *cur_seg = R_NEW0 (struct r_dyld_chained_starts_in_segment); if (!cur_seg) { return false; } bin->chained_starts[i] = cur_seg; if (r_buf_fread_at (bin->b, starts_at + seg_off, (ut8 *)cur_seg, "isslis", 1) != 22) { return false; } if (cur_seg->page_count > 0) { ut16 *page_start = 
malloc (sizeof (ut16) * cur_seg->page_count); if (!page_start) { return false; } if (r_buf_fread_at (bin->b, starts_at + seg_off + 22, (ut8 *)page_start, "s", cur_seg->page_count) != cur_seg->page_count * 2) { return false; } cur_seg->page_start = page_start; } cursor += sizeof (ut32); } /* TODO: handle also imports, symbols and multiple starts (32-bit only) */ return true; } static bool reconstruct_chained_fixup(struct MACH0_(obj_t) *bin) { if (!bin->dyld_info) { return false; } if (!bin->nsegs) { return false; } bin->chained_starts = R_NEWS0 (struct r_dyld_chained_starts_in_segment *, bin->nsegs); if (!bin->chained_starts) { return false; } size_t wordsize = get_word_size (bin); ut8 *p = NULL; size_t j, count, skip, bind_size; int seg_idx = 0; ut64 seg_off = 0; bind_size = bin->dyld_info->bind_size; if (!bind_size || bind_size < 1) { return false; } if (bin->dyld_info->bind_off > bin->size) { return false; } if (bin->dyld_info->bind_off + bind_size > bin->size) { return false; } ut8 *opcodes = calloc (1, bind_size + 1); if (!opcodes) { return false; } if (r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size) != bind_size) { bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n", (ut64)(size_t)bin->dyld_info->bind_off); R_FREE (opcodes); return false; } struct r_dyld_chained_starts_in_segment *cur_seg = NULL; size_t cur_seg_idx = 0; ut8 *end; bool done = false; for (p = opcodes, end = opcodes + bind_size; !done && p < end;) { ut8 imm = *p & BIND_IMMEDIATE_MASK, op = *p & BIND_OPCODE_MASK; p++; switch (op) { case BIND_OPCODE_DONE: done = true; break; case BIND_OPCODE_THREADED: { switch (imm) { case BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB: { read_uleb128 (&p, end); break; } case BIND_SUBOPCODE_THREADED_APPLY: { const size_t ps = 0x1000; if (!cur_seg || cur_seg_idx != seg_idx) { cur_seg_idx = seg_idx; cur_seg = bin->chained_starts[seg_idx]; if (!cur_seg) { cur_seg = R_NEW0 (struct r_dyld_chained_starts_in_segment); if (!cur_seg) { 
break; } bin->chained_starts[seg_idx] = cur_seg; cur_seg->pointer_format = DYLD_CHAINED_PTR_ARM64E; cur_seg->page_size = ps; cur_seg->page_count = ((bin->segs[seg_idx].vmsize + (ps - 1)) & ~(ps - 1)) / ps; if (cur_seg->page_count > 0) { cur_seg->page_start = malloc (sizeof (ut16) * cur_seg->page_count); if (!cur_seg->page_start) { break; } memset (cur_seg->page_start, 0xff, sizeof (ut16) * cur_seg->page_count); } } } if (cur_seg) { ut32 page_index = (ut32)(seg_off / ps); size_t maxsize = cur_seg->page_count * sizeof (ut16); if (page_index < maxsize && cur_seg->page_start) { cur_seg->page_start[page_index] = seg_off & 0xfff; } } break; } default: bprintf ("Error: Unexpected BIND_OPCODE_THREADED sub-opcode: 0x%x\n", imm); } break; } case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: case BIND_OPCODE_SET_TYPE_IMM: break; case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: read_uleb128 (&p, end); break; case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: while (*p++ && p < end) { /* empty loop */ } break; case BIND_OPCODE_SET_ADDEND_SLEB: r_sleb128 ((const ut8 **)&p, end); break; case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: seg_idx = imm; if (seg_idx >= bin->nsegs) { bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB" " has unexistent segment %d\n", seg_idx); R_FREE (opcodes); return false; } else { seg_off = read_uleb128 (&p, end); } break; case BIND_OPCODE_ADD_ADDR_ULEB: seg_off += read_uleb128 (&p, end); break; case BIND_OPCODE_DO_BIND: break; case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: seg_off += read_uleb128 (&p, end) + wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: seg_off += (ut64)imm * (ut64)wordsize + wordsize; break; case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: count = read_uleb128 (&p, end); skip = read_uleb128 (&p, end); for (j = 0; j < count; j++) { seg_off += skip + wordsize; } break; default: bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *p); R_FREE (opcodes); return false; } } R_FREE (opcodes); 
bin->segs_count = bin->nsegs; return true; } static int init_items(struct MACH0_(obj_t) *bin) { struct load_command lc = {0, 0}; ut8 loadc[sizeof (struct load_command)] = {0}; bool is_first_thread = true; ut64 off = 0LL; int i, len; char cmd_flagname[128]; bin->uuidn = 0; bin->os = 0; bin->has_crypto = 0; if (bin->hdr.sizeofcmds > bin->size) { bprintf ("Warning: chopping hdr.sizeofcmds\n"); bin->hdr.sizeofcmds = bin->size - 128; //return false; } //bprintf ("Commands: %d\n", bin->hdr.ncmds); for (i = 0, off = sizeof (struct MACH0_(mach_header)) + bin->header_at; \ i < bin->hdr.ncmds; i++, off += lc.cmdsize) { if (off > bin->size || off + sizeof (struct load_command) > bin->size) { bprintf ("mach0: out of bounds command\n"); return false; } len = r_buf_read_at (bin->b, off, loadc, sizeof (struct load_command)); if (len < 1) { bprintf ("Error: read (lc) at 0x%08"PFMT64x"\n", off); return false; } lc.cmd = r_read_ble32 (&loadc[0], bin->big_endian); lc.cmdsize = r_read_ble32 (&loadc[4], bin->big_endian); if (lc.cmdsize < 1 || off + lc.cmdsize > bin->size) { bprintf ("Warning: mach0_header %d = cmdsize<1. 
(0x%"PFMT64x" vs 0x%"PFMT64x")\n", i, (ut64)(off + lc.cmdsize), (ut64)(bin->size)); break; } snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.offset", i); sdb_num_set (bin->kv, cmd_flagname, off, 0); const char *format_name = cmd_to_pf_definition (lc.cmd); snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.format", i); if (format_name) { sdb_set (bin->kv, cmd_flagname, format_name, 0); } else { sdb_set (bin->kv, cmd_flagname, "[4]Ed (mach_load_command_type)cmd size", 0); } snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.cmd", i); switch (lc.cmd) { case LC_DATA_IN_CODE: sdb_set (bin->kv, cmd_flagname, "data_in_code", 0); break; case LC_RPATH: sdb_set (bin->kv, cmd_flagname, "rpath", 0); //bprintf ("--->\n"); break; case LC_SEGMENT_64: case LC_SEGMENT: sdb_set (bin->kv, cmd_flagname, "segment", 0); bin->nsegs++; if (!parse_segments (bin, off)) { bprintf ("error parsing segment\n"); bin->nsegs--; return false; } break; case LC_SYMTAB: sdb_set (bin->kv, cmd_flagname, "symtab", 0); if (!parse_symtab (bin, off)) { bprintf ("error parsing symtab\n"); return false; } break; case LC_DYSYMTAB: sdb_set (bin->kv, cmd_flagname, "dysymtab", 0); if (!parse_dysymtab (bin, off)) { bprintf ("error parsing dysymtab\n"); return false; } break; case LC_DYLIB_CODE_SIGN_DRS: sdb_set (bin->kv, cmd_flagname, "dylib_code_sign_drs", 0); //bprintf ("[mach0] code is signed\n"); break; case LC_VERSION_MIN_MACOSX: sdb_set (bin->kv, cmd_flagname, "version_min_macosx", 0); bin->os = 1; // set OS = osx //bprintf ("[mach0] Requires OSX >= x\n"); break; case LC_VERSION_MIN_IPHONEOS: sdb_set (bin->kv, cmd_flagname, "version_min_iphoneos", 0); bin->os = 2; // set OS = ios //bprintf ("[mach0] Requires iOS >= x\n"); break; case LC_VERSION_MIN_TVOS: sdb_set (bin->kv, cmd_flagname, "version_min_tvos", 0); bin->os = 4; break; case LC_VERSION_MIN_WATCHOS: sdb_set (bin->kv, cmd_flagname, "version_min_watchos", 0); bin->os = 3; break; case LC_UUID: sdb_set (bin->kv, 
cmd_flagname, "uuid", 0); { struct uuid_command uc = {0}; if (off + sizeof (struct uuid_command) > bin->size) { bprintf ("UUID out of bounds\n"); return false; } if (r_buf_fread_at (bin->b, off, (ut8*)&uc, "24c", 1) != -1) { char key[128]; char val[128]; snprintf (key, sizeof (key)-1, "uuid.%d", bin->uuidn++); r_hex_bin2str ((ut8*)&uc.uuid, 16, val); sdb_set (bin->kv, key, val, 0); //for (i=0;i<16; i++) bprintf ("%02x%c", uc.uuid[i], (i==15)?'\n':'-'); } } break; case LC_ENCRYPTION_INFO_64: /* TODO: the struct is probably different here */ case LC_ENCRYPTION_INFO: sdb_set (bin->kv, cmd_flagname, "encryption_info", 0); { struct MACH0_(encryption_info_command) eic = {0}; ut8 seic[sizeof (struct MACH0_(encryption_info_command))] = {0}; if (off + sizeof (struct MACH0_(encryption_info_command)) > bin->size) { bprintf ("encryption info out of bounds\n"); return false; } if (r_buf_read_at (bin->b, off, seic, sizeof (struct MACH0_(encryption_info_command))) != -1) { eic.cmd = r_read_ble32 (&seic[0], bin->big_endian); eic.cmdsize = r_read_ble32 (&seic[4], bin->big_endian); eic.cryptoff = r_read_ble32 (&seic[8], bin->big_endian); eic.cryptsize = r_read_ble32 (&seic[12], bin->big_endian); eic.cryptid = r_read_ble32 (&seic[16], bin->big_endian); bin->has_crypto = eic.cryptid; sdb_set (bin->kv, "crypto", "true", 0); sdb_num_set (bin->kv, "cryptid", eic.cryptid, 0); sdb_num_set (bin->kv, "cryptoff", eic.cryptoff, 0); sdb_num_set (bin->kv, "cryptsize", eic.cryptsize, 0); sdb_num_set (bin->kv, "cryptheader", off, 0); } } break; case LC_LOAD_DYLINKER: { sdb_set (bin->kv, cmd_flagname, "dylinker", 0); R_FREE (bin->intrp); //bprintf ("[mach0] load dynamic linker\n"); struct dylinker_command dy = {0}; ut8 sdy[sizeof (struct dylinker_command)] = {0}; if (off + sizeof (struct dylinker_command) > bin->size){ bprintf ("Warning: Cannot parse dylinker command\n"); return false; } if (r_buf_read_at (bin->b, off, sdy, sizeof (struct dylinker_command)) == -1) { bprintf ("Warning: read 
(LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off); } else { dy.cmd = r_read_ble32 (&sdy[0], bin->big_endian); dy.cmdsize = r_read_ble32 (&sdy[4], bin->big_endian); dy.name = r_read_ble32 (&sdy[8], bin->big_endian); int len = dy.cmdsize; char *buf = malloc (len+1); if (buf) { // wtf @ off + 0xc ? r_buf_read_at (bin->b, off + 0xc, (ut8*)buf, len); buf[len] = 0; free (bin->intrp); bin->intrp = buf; } } } break; case LC_MAIN: { struct { ut64 eo; ut64 ss; } ep = {0}; ut8 sep[2 * sizeof (ut64)] = {0}; sdb_set (bin->kv, cmd_flagname, "main", 0); if (!is_first_thread) { bprintf ("Error: LC_MAIN with other threads\n"); return false; } if (off + 8 > bin->size || off + sizeof (ep) > bin->size) { bprintf ("invalid command size for main\n"); return false; } r_buf_read_at (bin->b, off + 8, sep, 2 * sizeof (ut64)); ep.eo = r_read_ble64 (&sep[0], bin->big_endian); ep.ss = r_read_ble64 (&sep[8], bin->big_endian); bin->entry = ep.eo; bin->main_cmd = lc; sdb_num_set (bin->kv, "mach0.entry.offset", off + 8, 0); sdb_num_set (bin->kv, "stacksize", ep.ss, 0); is_first_thread = false; } break; case LC_UNIXTHREAD: sdb_set (bin->kv, cmd_flagname, "unixthread", 0); if (!is_first_thread) { bprintf ("Error: LC_UNIXTHREAD with other threads\n"); return false; } case LC_THREAD: sdb_set (bin->kv, cmd_flagname, "thread", 0); if (!parse_thread (bin, &lc, off, is_first_thread)) { bprintf ("Cannot parse thread\n"); return false; } is_first_thread = false; break; case LC_LOAD_DYLIB: case LC_LOAD_WEAK_DYLIB: sdb_set (bin->kv, cmd_flagname, "load_dylib", 0); bin->nlibs++; if (!parse_dylib (bin, off)) { bprintf ("Cannot parse dylib\n"); bin->nlibs--; return false; } break; case LC_DYLD_INFO: case LC_DYLD_INFO_ONLY: { ut8 dyldi[sizeof (struct dyld_info_command)] = {0}; sdb_set (bin->kv, cmd_flagname, "dyld_info", 0); bin->dyld_info = calloc (1, sizeof (struct dyld_info_command)); if (bin->dyld_info) { if (off + sizeof (struct dyld_info_command) > bin->size){ bprintf ("Cannot parse dyldinfo\n"); R_FREE 
(bin->dyld_info); return false; } if (r_buf_read_at (bin->b, off, dyldi, sizeof (struct dyld_info_command)) == -1) { R_FREE (bin->dyld_info); bprintf ("Error: read (LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off); } else { bin->dyld_info->cmd = r_read_ble32 (&dyldi[0], bin->big_endian); bin->dyld_info->cmdsize = r_read_ble32 (&dyldi[4], bin->big_endian); bin->dyld_info->rebase_off = r_read_ble32 (&dyldi[8], bin->big_endian); bin->dyld_info->rebase_size = r_read_ble32 (&dyldi[12], bin->big_endian); bin->dyld_info->bind_off = r_read_ble32 (&dyldi[16], bin->big_endian); bin->dyld_info->bind_size = r_read_ble32 (&dyldi[20], bin->big_endian); bin->dyld_info->weak_bind_off = r_read_ble32 (&dyldi[24], bin->big_endian); bin->dyld_info->weak_bind_size = r_read_ble32 (&dyldi[28], bin->big_endian); bin->dyld_info->lazy_bind_off = r_read_ble32 (&dyldi[32], bin->big_endian); bin->dyld_info->lazy_bind_size = r_read_ble32 (&dyldi[36], bin->big_endian); bin->dyld_info->export_off = r_read_ble32 (&dyldi[40], bin->big_endian) + bin->symbols_off; bin->dyld_info->export_size = r_read_ble32 (&dyldi[44], bin->big_endian); } } } break; case LC_CODE_SIGNATURE: parse_signature (bin, off); sdb_set (bin->kv, cmd_flagname, "signature", 0); /* ut32 dataoff // ut32 datasize */ break; case LC_SOURCE_VERSION: sdb_set (bin->kv, cmd_flagname, "version", 0); /* uint64_t version; */ /* A.B.C.D.E packed as a24.b10.c10.d10.e10 */ //bprintf ("mach0: TODO: Show source version\n"); break; case LC_SEGMENT_SPLIT_INFO: sdb_set (bin->kv, cmd_flagname, "split_info", 0); /* TODO */ break; case LC_FUNCTION_STARTS: sdb_set (bin->kv, cmd_flagname, "function_starts", 0); if (!parse_function_starts (bin, off)) { bprintf ("Cannot parse LC_FUNCTION_STARTS\n"); } break; case LC_REEXPORT_DYLIB: sdb_set (bin->kv, cmd_flagname, "dylib", 0); /* TODO */ break; default: //bprintf ("mach0: Unknown header command %x\n", lc.cmd); break; } } bool has_chained_fixups = false; for (i = 0, off = sizeof (struct MACH0_(mach_header)) + 
bin->header_at; \ i < bin->hdr.ncmds; i++, off += lc.cmdsize) { len = r_buf_read_at (bin->b, off, loadc, sizeof (struct load_command)); if (len < 1) { bprintf ("Error: read (lc) at 0x%08"PFMT64x"\n", off); return false; } lc.cmd = r_read_ble32 (&loadc[0], bin->big_endian); lc.cmdsize = r_read_ble32 (&loadc[4], bin->big_endian); if (lc.cmdsize < 1 || off + lc.cmdsize > bin->size) { bprintf ("Warning: mach0_header %d = cmdsize<1. (0x%"PFMT64x" vs 0x%"PFMT64x")\n", i, (ut64)(off + lc.cmdsize), (ut64)(bin->size)); break; } snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.offset", i); sdb_num_set (bin->kv, cmd_flagname, off, 0); const char *format_name = cmd_to_pf_definition (lc.cmd); snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.format", i); if (format_name) { sdb_set (bin->kv, cmd_flagname, format_name, 0); } else { sdb_set (bin->kv, cmd_flagname, "[4]Ed (mach_load_command_type)cmd size", 0); } switch (lc.cmd) { case LC_DATA_IN_CODE: snprintf (cmd_flagname, sizeof (cmd_flagname), "mach0_cmd_%d.cmd", i); sdb_set (bin->kv, cmd_flagname, "data_in_code", 0); if (bin->verbose) { ut8 buf[8]; r_buf_read_at (bin->b, off + 8, buf, sizeof (buf)); ut32 dataoff = r_read_ble32 (buf, bin->big_endian); ut32 datasize= r_read_ble32 (buf + 4, bin->big_endian); eprintf ("data-in-code at 0x%x size %d\n", dataoff, datasize); ut8 *db = (ut8*)malloc (datasize); if (db) { r_buf_read_at (bin->b, dataoff, db, datasize); // TODO table of non-instructions regions in __text int j; for (j = 0; j < datasize; j += 8) { ut32 dw = r_read_ble32 (db + j, bin->big_endian); // int kind = r_read_ble16 (db + i + 4 + 2, bin->big_endian); int len = r_read_ble16 (db + j + 4, bin->big_endian); ut64 va = offset_to_vaddr(bin, dw); // eprintf ("# 0x%d -> 0x%x\n", dw, va); // eprintf ("0x%x kind %d len %d\n", dw, kind, len); eprintf ("Cd 4 %d @ 0x%"PFMT64x"\n", len / 4, va); } } } break; case LC_DYLD_EXPORTS_TRIE: if (bin->verbose) { ut8 buf[8]; r_buf_read_at (bin->b, off + 8, buf, sizeof 
(buf)); ut32 dataoff = r_read_ble32 (buf, bin->big_endian); ut32 datasize= r_read_ble32 (buf + 4, bin->big_endian); eprintf ("exports trie at 0x%x size %d\n", dataoff, datasize); } break; case LC_DYLD_CHAINED_FIXUPS: { ut8 buf[8]; if (r_buf_read_at (bin->b, off + 8, buf, sizeof (buf)) == sizeof (buf)) { ut32 dataoff = r_read_ble32 (buf, bin->big_endian); ut32 datasize= r_read_ble32 (buf + 4, bin->big_endian); if (bin->verbose) { eprintf ("chained fixups at 0x%x size %d\n", dataoff, datasize); } has_chained_fixups = parse_chained_fixups (bin, dataoff, datasize); } } break; } } if (!has_chained_fixups && bin->hdr.cputype == CPU_TYPE_ARM64 && (bin->hdr.cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E) { #if 0 if (bin->verbose) { eprintf ("reconstructing chained fixups\n"); } #endif reconstruct_chained_fixup (bin); } return true; } static bool init(struct MACH0_(obj_t) *mo) { if (!init_hdr (mo)) { return false; } if (!init_items (mo)) { Eprintf ("Warning: Cannot initialize items\n"); } mo->baddr = MACH0_(get_baddr)(mo); return true; } void *MACH0_(mach0_free)(struct MACH0_(obj_t) *mo) { if (!mo) { return NULL; } size_t i; if (mo->symbols) { for (i = 0; !mo->symbols[i].last; i++) { free (mo->symbols[i].name); } free (mo->symbols); } free (mo->segs); free (mo->sects); free (mo->symtab); free (mo->symstr); free (mo->indirectsyms); free (mo->imports_by_ord); ht_pp_free (mo->imports_by_name); free (mo->dyld_info); free (mo->toc); free (mo->modtab); free (mo->libs); free (mo->func_start); free (mo->signature); free (mo->intrp); free (mo->compiler); if (mo->chained_starts) { for (i = 0; i < mo->nsegs && i < mo->segs_count; i++) { if (mo->chained_starts[i]) { free (mo->chained_starts[i]->page_start); free (mo->chained_starts[i]); } } free (mo->chained_starts); } r_buf_free (mo->b); free (mo); return NULL; } void MACH0_(opts_set_default)(struct MACH0_(opts_t) *options, RBinFile *bf) { r_return_if_fail (options && bf && bf->rbin); options->header_at = 0; 
options->symbols_off = 0; options->verbose = bf->rbin->verbose; } static void *duplicate_ptr(void *p) { return p; } static void free_only_key(HtPPKv *kv) { free (kv->key); } static size_t ptr_size(void *c) { // :D return 8; } // XXX should be deprecated its never called struct MACH0_(obj_t) *MACH0_(mach0_new)(const char *file, struct MACH0_(opts_t) *options) { struct MACH0_(obj_t) *bin = R_NEW0 (struct MACH0_(obj_t)); if (!bin) { return NULL; } if (options) { bin->verbose = options->verbose; bin->header_at = options->header_at; bin->symbols_off = options->symbols_off; } bin->file = file; size_t binsz; ut8 *buf = (ut8 *)r_file_slurp (file, &binsz); bin->size = binsz; if (!buf) { return MACH0_(mach0_free)(bin); } bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return MACH0_(mach0_free)(bin); } free (buf); bin->dyld_info = NULL; if (!init (bin)) { return MACH0_(mach0_free)(bin); } bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->imports_by_name = ht_pp_new ((HtPPDupValue)duplicate_ptr, free_only_key, (HtPPCalcSizeV)ptr_size); return bin; } struct MACH0_(obj_t) *MACH0_(new_buf)(RBuffer *buf, struct MACH0_(opts_t) *options) { r_return_val_if_fail (buf, NULL); struct MACH0_(obj_t) *bin = R_NEW0 (struct MACH0_(obj_t)); if (bin) { bin->b = r_buf_ref (buf); bin->main_addr = UT64_MAX; bin->kv = sdb_new (NULL, "bin.mach0", 0); bin->size = r_buf_size (bin->b); if (options) { bin->verbose = options->verbose; bin->header_at = options->header_at; bin->symbols_off = options->symbols_off; } if (!init (bin)) { return MACH0_(mach0_free)(bin); } } return bin; } // prot: r = 1, w = 2, x = 4 // perm: r = 4, w = 2, x = 1 static int prot2perm(int x) { int r = 0; if (x & 1) { r |= 4; } if (x & 2) { r |= 2; } if (x & 4) { r |= 1; } return r; } static bool __isDataSection(RBinSection *sect) { if (strstr (sect->name, "_cstring")) { return true; } if (strstr (sect->name, "_objc_methname")) { return true; } if (strstr (sect->name, 
"_objc_classname")) { return true; } if (strstr (sect->name, "_objc_methtype")) { return true; } return false; } RList *MACH0_(get_segments)(RBinFile *bf) { struct MACH0_(obj_t) *bin = bf->o->bin_obj; RList *list = r_list_newf ((RListFree)r_bin_section_free); size_t i, j; /* for core files */ if (bin->nsegs > 0) { struct MACH0_(segment_command) *seg; for (i = 0; i < bin->nsegs; i++) { seg = &bin->segs[i]; if (!seg->initprot) { continue; } RBinSection *s = r_bin_section_new (NULL); if (!s) { break; } s->vaddr = seg->vmaddr; s->vsize = seg->vmsize; s->size = seg->vmsize; s->paddr = seg->fileoff; s->paddr += bf->o->boffset; //TODO s->flags = seg->flags; s->name = r_str_ndup (seg->segname, 16); s->is_segment = true; r_str_filter (s->name, -1); s->perm = prot2perm (seg->initprot); s->add = true; r_list_append (list, s); } } if (bin->nsects > 0) { int last_section = R_MIN (bin->nsects, 128); // maybe drop this limit? for (i = 0; i < last_section; i++) { RBinSection *s = R_NEW0 (RBinSection); if (!s) { break; } s->vaddr = (ut64)bin->sects[i].addr; s->vsize = (ut64)bin->sects[i].size; s->is_segment = false; s->size = (bin->sects[i].flags == S_ZEROFILL) ? 
0 : (ut64)bin->sects[i].size; // XXX flags s->paddr = (ut64)bin->sects[i].offset; int segment_index = 0; //s->perm =prot2perm (bin->segs[j].initprot); for (j = 0; j < bin->nsegs; j++) { if (s->vaddr >= bin->segs[j].vmaddr && s->vaddr < (bin->segs[j].vmaddr + bin->segs[j].vmsize)) { s->perm = prot2perm (bin->segs[j].initprot); segment_index = j; break; } } char *section_name = r_str_ndup (bin->sects[i].sectname, 16); char *segment_name = r_str_newf ("%u.%s", (ut32)i, bin->segs[segment_index].segname); s->name = r_str_newf ("%s.%s", segment_name, section_name); s->is_data = __isDataSection (s); if (strstr (section_name, "interpos") || strstr (section_name, "__mod_")) { #if R_BIN_MACH064 const int ws = 8; #else const int ws = 4; #endif s->format = r_str_newf ("Cd %d[%"PFMT64d"]", ws, s->vsize / ws); } r_list_append (list, s); free (segment_name); free (section_name); } } return list; } // XXX this function is called so many times struct section_t *MACH0_(get_sections)(struct MACH0_(obj_t) *bin) { r_return_val_if_fail (bin, NULL); struct section_t *sections; char sectname[64], raw_segname[17]; size_t i, j, to; /* for core files */ if (bin->nsects < 1 && bin->nsegs > 0) { struct MACH0_(segment_command) *seg; if (!(sections = calloc ((bin->nsegs + 1), sizeof (struct section_t)))) { return NULL; } for (i = 0; i < bin->nsegs; i++) { seg = &bin->segs[i]; sections[i].addr = seg->vmaddr; sections[i].offset = seg->fileoff; sections[i].size = seg->vmsize; sections[i].vsize = seg->vmsize; sections[i].align = 4096; sections[i].flags = seg->flags; r_str_ncpy (sectname, seg->segname, 16); sectname[16] = 0; r_str_filter (sectname, -1); // hack to support multiple sections with same name sections[i].perm = prot2perm (seg->initprot); sections[i].last = 0; } sections[i].last = 1; return sections; } if (!bin->sects) { return NULL; } to = R_MIN (bin->nsects, 128); // limit number of sections here to avoid fuzzed bins if (to < 1) { return NULL; } if (!(sections = calloc (bin->nsects + 1, 
sizeof (struct section_t)))) { return NULL; } for (i = 0; i < to; i++) { sections[i].offset = (ut64)bin->sects[i].offset; sections[i].addr = (ut64)bin->sects[i].addr; sections[i].size = (bin->sects[i].flags == S_ZEROFILL) ? 0 : (ut64)bin->sects[i].size; sections[i].vsize = (ut64)bin->sects[i].size; sections[i].align = bin->sects[i].align; sections[i].flags = bin->sects[i].flags; r_str_ncpy (sectname, bin->sects[i].sectname, 17); r_str_filter (sectname, -1); r_str_ncpy (raw_segname, bin->sects[i].segname, 16); for (j = 0; j < bin->nsegs; j++) { if (sections[i].addr >= bin->segs[j].vmaddr && sections[i].addr < (bin->segs[j].vmaddr + bin->segs[j].vmsize)) { sections[i].perm = prot2perm (bin->segs[j].initprot); break; } } snprintf (sections[i].name, sizeof (sections[i].name), "%d.%s.%s", (int)i, raw_segname, sectname); sections[i].last = 0; } sections[i].last = 1; return sections; } static bool parse_import_stub(struct MACH0_(obj_t) *bin, struct symbol_t *symbol, int idx) { size_t i, j, nsyms, stridx; const char *symstr; if (idx < 0) { return false; } symbol->offset = 0LL; symbol->addr = 0LL; symbol->name = NULL; symbol->is_imported = true; if (!bin || !bin->sects) { return false; } for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == S_SYMBOL_STUBS && bin->sects[i].reserved2 > 0) { ut64 sect_size = bin->sects[i].size; ut32 sect_fragment = bin->sects[i].reserved2; if (bin->sects[i].offset > bin->size) { bprintf ("mach0: section offset starts way beyond the end of the file\n"); continue; } if (sect_size > bin->size) { bprintf ("mach0: Invalid symbol table size\n"); sect_size = bin->size - bin->sects[i].offset; } nsyms = (int)(sect_size / sect_fragment); for (j = 0; j < nsyms; j++) { if (bin->sects) { if (bin->sects[i].reserved1 + j >= bin->nindirectsyms) { continue; } } if (bin->indirectsyms) { if (idx != bin->indirectsyms[bin->sects[i].reserved1 + j]) { continue; } } if (idx > bin->nsymtab) { continue; } symbol->type = 
R_BIN_MACH0_SYMBOL_TYPE_LOCAL; int delta = j * bin->sects[i].reserved2; if (delta < 0) { bprintf ("mach0: corrupted reserved2 value leads to int overflow.\n"); continue; } symbol->offset = bin->sects[i].offset + delta; symbol->addr = bin->sects[i].addr + delta; symbol->size = 0; stridx = bin->symtab[idx].n_strx; if (stridx < bin->symstrlen) { symstr = (char *)bin->symstr + stridx; } else { symstr = "???"; } // Remove the extra underscore that every import seems to have in Mach-O. if (*symstr == '_') { symstr++; } symbol->name = strdup (symstr); return true; } } } return false; } static int inSymtab(HtPP *hash, const char *name, ut64 addr) { bool found = false; char *key = r_str_newf ("%"PFMT64x".%s", addr, name); ht_pp_find (hash, key, &found); if (found) { free (key); return true; } ht_pp_insert (hash, key, "1"); free (key); return false; } static char *get_name(struct MACH0_(obj_t) *mo, ut32 stridx, bool filter) { size_t i = 0; if (!mo->symstr || stridx >= mo->symstrlen) { return NULL; } int len = mo->symstrlen - stridx; const char *symstr = (const char*)mo->symstr + stridx; for (i = 0; i < len; i++) { if ((ut8)(symstr[i] & 0xff) == 0xff || !symstr[i]) { len = i; break; } } if (len > 0) { char *res = r_str_ndup (symstr, len); if (filter) { r_str_filter (res, -1); } return res; } return NULL; } static int walk_exports(struct MACH0_(obj_t) *bin, RExportsIterator iterator, void *ctx) { RList *states = NULL; r_return_val_if_fail (bin, 0); if (!bin->dyld_info) { return 0; } size_t count = 0; ut8 *p = NULL; ut64 size = bin->dyld_info->export_size; if (!size || size >= SIZE_MAX) { return 0; } ut8 *trie = calloc (size + 1, 1); if (!trie) { return 0; } ut8 *end = trie + size; if (r_buf_read_at (bin->b, bin->dyld_info->export_off, trie, bin->dyld_info->export_size) != size) { goto beach; } states = r_list_newf ((RListFree)free); if (!states) { goto beach; } RTrieState *root = R_NEW0 (RTrieState); if (!root) { goto beach; } root->node = trie; root->i = 0; root->label = 
NULL; r_list_push (states, root); do { RTrieState * state = r_list_get_top (states); p = state->node; ut64 len = read_uleb128 (&p, end); if (len == UT64_MAX) { break; } if (len) { ut64 flags = read_uleb128 (&p, end); if (flags == UT64_MAX) { break; } ut64 offset = read_uleb128 (&p, end); if (offset == UT64_MAX) { break; } ut64 resolver = 0; bool isReexport = flags & EXPORT_SYMBOL_FLAGS_REEXPORT; bool hasResolver = flags & EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER; if (hasResolver) { ut64 res = read_uleb128 (&p, end); if (res == UT64_MAX) { break; } resolver = res + bin->header_at; } else if (isReexport) { p += strlen ((char*) p) + 1; // TODO: handle this } if (!isReexport) { offset += bin->header_at; } if (iterator && !isReexport) { char * name = NULL; RListIter *iter; RTrieState *s; r_list_foreach (states, iter, s) { if (!s->label) { continue; } name = r_str_append (name, s->label); } if (!name) { bprintf ("malformed export trie %d\n", __LINE__); goto beach; } if (hasResolver) { char * stub_name = r_str_newf ("stub.%s", name); iterator (bin, stub_name, flags, offset, ctx); iterator (bin, name, flags, resolver, ctx); R_FREE (stub_name); } else { iterator (bin, name, flags, offset, ctx); } R_FREE (name); } if (!isReexport) { if (hasResolver) { count++; } count++; } } ut64 child_count = read_uleb128 (&p, end); if (child_count == UT64_MAX) { goto beach; } if (state->i == child_count) { free (r_list_pop (states)); continue; } if (!state->next_child) { state->next_child = p; } else { p = state->next_child; } RTrieState * next = R_NEW0 (RTrieState); if (!next) { goto beach; } next->label = (char *) p; p += strlen (next->label) + 1; if (p >= end) { bprintf ("malformed export trie %d\n", __LINE__); R_FREE (next); goto beach; } ut64 tr = read_uleb128 (&p, end); if (tr == UT64_MAX || tr >= size) { R_FREE (next); goto beach; } next->node = trie + (size_t)tr; if (next->node >= end) { bprintf ("malformed export trie %d\n", __LINE__); R_FREE (next); goto beach; } { // avoid loops 
RListIter *it; RTrieState *s; r_list_foreach (states, it, s) { if (s->node == next->node) { bprintf ("malformed export trie %d\n", __LINE__); R_FREE (next); goto beach; } } } next->i = 0; state->i++; state->next_child = p; r_list_push (states, next); } while (r_list_length (states)); beach: r_list_free (states); R_FREE (trie); return count; } static void fill_exports_list(struct MACH0_(obj_t) *bin, const char *name, ut64 flags, ut64 offset, void *ctx) { RList *list = (RList*) ctx; RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { return; } sym->vaddr = offset_to_vaddr (bin, offset); sym->paddr = offset; sym->type = "EXT"; sym->name = strdup (name); sym->bind = R_BIN_BIND_GLOBAL_STR; r_list_append (list, sym); } // TODO: Return RList<RBinSymbol> // 2x speedup const RList *MACH0_(get_symbols_list)(struct MACH0_(obj_t) *bin) { static RList * cache = NULL; // XXX DONT COMMIT WITH THIS struct symbol_t *symbols; size_t j, s, symbols_size, symbols_count; ut32 to, from; size_t i; r_return_val_if_fail (bin, NULL); if (cache) { return cache; } RList *list = r_list_newf ((RListFree)r_bin_symbol_free); cache = list; HtPP *hash = ht_pp_new0 (); if (!hash) { return NULL; } walk_exports (bin, fill_exports_list, list); if (r_list_length (list)) { RListIter *it; RBinSymbol *s; r_list_foreach (list, it, s) { inSymtab (hash, s->name, s->vaddr); } } if (!bin->symtab || !bin->symstr) { ht_pp_free (hash); return list; } /* parse dynamic symbol table */ symbols_count = (bin->dysymtab.nextdefsym + \ bin->dysymtab.nlocalsym + \ bin->dysymtab.nundefsym ); symbols_count += bin->nsymtab; symbols_size = (symbols_count + 1) * 2 * sizeof (struct symbol_t); if (symbols_size < 1 || !(symbols = calloc (1, symbols_size))) { ht_pp_free (hash); return NULL; } j = 0; // symbol_idx bin->main_addr = 0; int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); for (s = 0; s < 2; s++) { switch (s) { case 0: from = bin->dysymtab.iextdefsym; to = from + bin->dysymtab.nextdefsym; break; case 1: from = 
bin->dysymtab.ilocalsym; to = from + bin->dysymtab.nlocalsym; break; #if NOT_USED case 2: from = bin->dysymtab.iundefsym; to = from + bin->dysymtab.nundefsym; break; #endif } if (from == to) { continue; } from = R_MIN (from, symbols_size / sizeof (struct symbol_t)); to = R_MIN (R_MIN (to, bin->nsymtab), symbols_size / sizeof (struct symbol_t)); ut32 maxsymbols = symbols_size / sizeof (struct symbol_t); if (symbols_count >= maxsymbols) { symbols_count = maxsymbols - 1; eprintf ("macho warning: Symbol table truncated\n"); } for (i = from; i < to && j < symbols_count; i++, j++) { RBinSymbol *sym = R_NEW0 (RBinSymbol); sym->vaddr = bin->symtab[i].n_value; sym->paddr = addr_to_offset (bin, sym->vaddr); symbols[j].size = 0; /* TODO: Is it anywhere? */ sym->bits = bin->symtab[i].n_desc & N_ARM_THUMB_DEF ? 16 : bits; if (bin->symtab[i].n_type & N_EXT) { sym->type = "EXT"; } else { sym->type = "LOCAL"; } int stridx = bin->symtab[i].n_strx; char *sym_name = get_name (bin, stridx, false); if (sym_name) { sym->name = sym_name; if (!bin->main_addr || bin->main_addr == UT64_MAX) { const char *name = sym->name; if (!strcmp (name, "__Dmain")) { bin->main_addr = symbols[j].addr; } else if (strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "_main")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "main")) { bin->main_addr = symbols[j].addr; } } } else { sym->name = r_str_newf ("unk%u", (ut32)i); } if (!inSymtab (hash, sym->name, sym->vaddr)) { r_list_append (list, sym); } else { r_bin_symbol_free (sym); } } } to = R_MIN ((ut32)bin->nsymtab, bin->dysymtab.iundefsym + bin->dysymtab.nundefsym); for (i = bin->dysymtab.iundefsym; i < to; i++) { struct symbol_t symbol; if (j > symbols_count) { bprintf ("mach0-get-symbols: error\n"); break; } if (parse_import_stub (bin, &symbol, i)) { j++; RBinSymbol *sym = R_NEW0 (RBinSymbol); sym->vaddr = symbol.addr; sym->paddr = symbol.offset; sym->name = symbol.name; if 
(!sym->name) { sym->name = r_str_newf ("unk%u", (ut32)i); } sym->is_imported = symbol.is_imported; r_list_append (list, sym); } } for (i = 0; i < bin->nsymtab && i < symbols_count; i++) { struct MACH0_(nlist) *st = &bin->symtab[i]; // 0 is for imports // 1 is for symbols // 2 is for func.eh (exception handlers?) int section = st->n_sect; if (section == 1 && j < symbols_count) { // text ??st->n_type == 1) maybe wrong RBinSymbol *sym = R_NEW0(RBinSymbol); /* is symbol */ sym->vaddr = st->n_value; sym->paddr = addr_to_offset (bin, symbols[j].addr); sym->is_imported = symbols[j].is_imported; if (st->n_type & N_EXT) { sym->type = "EXT"; } else { sym->type = "LOCAL"; } char *sym_name = get_name (bin, st->n_strx, false); if (sym_name) { sym->name = sym_name; if (inSymtab (hash, sym->name, sym->vaddr)) { r_bin_symbol_free (sym); continue; } if (!bin->main_addr || bin->main_addr == UT64_MAX) { const char *name = sym->name; if (!strcmp (name, "__Dmain")) { bin->main_addr = symbols[i].addr; } else if (strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[i].addr; } else if (!strcmp (symbols[i].name, "_main")) { bin->main_addr = symbols[i].addr; } } } else { sym->name = r_str_newf ("unk%u", (ut32)i); } r_list_append (list, sym); j++; } } ht_pp_free (hash); // bin->symbols = symbols; free (symbols); return list; } static void assign_export_symbol_t(struct MACH0_(obj_t) *bin, const char *name, ut64 flags, ut64 offset, void *ctx) { RSymCtx *sym_ctx = (RSymCtx*) ctx; int j = sym_ctx->j; if (j < sym_ctx->symbols_count) { sym_ctx->symbols[j].offset = offset; sym_ctx->symbols[j].addr = offset_to_vaddr (bin, offset); if (inSymtab (sym_ctx->hash, name, sym_ctx->symbols[j].addr)) { return; } sym_ctx->symbols[j].size = 0; sym_ctx->symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT; sym_ctx->symbols[j].name = strdup (name); sym_ctx->j++; } } const struct symbol_t *MACH0_(get_symbols)(struct MACH0_(obj_t) *bin) { struct symbol_t *symbols; int j, s, stridx, symbols_size, 
symbols_count; ut32 to, from, i; if (bin->symbols) { return bin->symbols; } HtPP *hash = ht_pp_new0 (); if (!hash) { return NULL; } r_return_val_if_fail (bin, NULL); int n_exports = walk_exports (bin, NULL, NULL); symbols_count = n_exports; j = 0; // symbol_idx int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); if (bin->symtab && bin->symstr) { /* parse dynamic symbol table */ symbols_count = (bin->dysymtab.nextdefsym + \ bin->dysymtab.nlocalsym + \ bin->dysymtab.nundefsym ); symbols_count += bin->nsymtab; if (symbols_count < 0 || ((st64)symbols_count * 2) > ST32_MAX) { eprintf ("Symbols count overflow\n"); ht_pp_free (hash); return NULL; } symbols_size = (symbols_count + 1) * 2 * sizeof (struct symbol_t); if (symbols_size < 1) { ht_pp_free (hash); return NULL; } if (!(symbols = calloc (1, symbols_size))) { ht_pp_free (hash); return NULL; } bin->main_addr = 0; for (s = 0; s < 2; s++) { switch (s) { case 0: from = bin->dysymtab.iextdefsym; to = from + bin->dysymtab.nextdefsym; break; case 1: from = bin->dysymtab.ilocalsym; to = from + bin->dysymtab.nlocalsym; break; #if NOT_USED case 2: from = bin->dysymtab.iundefsym; to = from + bin->dysymtab.nundefsym; break; #endif } if (from == to) { continue; } from = R_MIN (from, symbols_size / sizeof (struct symbol_t)); to = R_MIN (R_MIN (to, bin->nsymtab), symbols_size / sizeof (struct symbol_t)); ut32 maxsymbols = symbols_size / sizeof (struct symbol_t); if (symbols_count >= maxsymbols) { symbols_count = maxsymbols - 1; eprintf ("macho warning: Symbol table truncated\n"); } for (i = from; i < to && j < symbols_count; i++, j++) { symbols[j].offset = addr_to_offset (bin, bin->symtab[i].n_value); symbols[j].addr = bin->symtab[i].n_value; symbols[j].size = 0; /* TODO: Is it anywhere? */ symbols[j].bits = bin->symtab[i].n_desc & N_ARM_THUMB_DEF ? 16 : bits; symbols[j].is_imported = false; symbols[j].type = (bin->symtab[i].n_type & N_EXT) ? 
R_BIN_MACH0_SYMBOL_TYPE_EXT : R_BIN_MACH0_SYMBOL_TYPE_LOCAL; stridx = bin->symtab[i].n_strx; symbols[j].name = get_name (bin, stridx, false); symbols[j].last = false; const char *name = symbols[j].name; if (bin->main_addr == 0 && name) { if (!strcmp (name, "__Dmain")) { bin->main_addr = symbols[j].addr; } else if (strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "_main")) { bin->main_addr = symbols[j].addr; } else if (!strcmp (name, "main")) { bin->main_addr = symbols[j].addr; } } if (inSymtab (hash, symbols[j].name, symbols[j].addr)) { free (symbols[j].name); symbols[j].name = NULL; j--; } } } to = R_MIN ((ut32)bin->nsymtab, bin->dysymtab.iundefsym + bin->dysymtab.nundefsym); for (i = bin->dysymtab.iundefsym; i < to; i++) { if (j > symbols_count) { bprintf ("mach0-get-symbols: error\n"); break; } if (parse_import_stub (bin, &symbols[j], i)) { symbols[j++].last = false; } } for (i = 0; i < bin->nsymtab; i++) { struct MACH0_(nlist) *st = &bin->symtab[i]; if (st->n_type & N_STAB) { continue; } // 0 is for imports // 1 is for symbols // 2 is for func.eh (exception handlers?) int section = st->n_sect; if (section == 1 && j < symbols_count) { // check if symbol exists already /* is symbol */ symbols[j].addr = st->n_value; symbols[j].offset = addr_to_offset (bin, symbols[j].addr); symbols[j].size = 0; /* find next symbol and crop */ symbols[j].type = (st->n_type & N_EXT) ? 
R_BIN_MACH0_SYMBOL_TYPE_EXT : R_BIN_MACH0_SYMBOL_TYPE_LOCAL; char *sym_name = get_name (bin, st->n_strx, false); if (sym_name) { symbols[j].name = sym_name; } else { symbols[j].name = r_str_newf ("entry%d", i); } symbols[j].last = 0; if (inSymtab (hash, symbols[j].name, symbols[j].addr)) { R_FREE (symbols[j].name); } else { j++; } const char *name = symbols[i].name; if (bin->main_addr == 0 && name) { if (name && !strcmp (name, "__Dmain")) { bin->main_addr = symbols[i].addr; } else if (name && strstr (name, "4main") && !strstr (name, "STATIC")) { bin->main_addr = symbols[i].addr; } else if (symbols[i].name && !strcmp (symbols[i].name, "_main")) { bin->main_addr = symbols[i].addr; } } } } } else if (!n_exports) { ht_pp_free (hash); return NULL; } else { symbols_size = (symbols_count + 1) * sizeof (struct symbol_t); if (symbols_size < 1) { ht_pp_free (hash); return NULL; } if (!(symbols = calloc (1, symbols_size))) { ht_pp_free (hash); return NULL; } } if (n_exports && (symbols_count - j) >= n_exports) { RSymCtx sym_ctx; sym_ctx.symbols = symbols; sym_ctx.j = j; sym_ctx.symbols_count = symbols_count; sym_ctx.hash = hash; walk_exports (bin, assign_export_symbol_t, &sym_ctx); j = sym_ctx.j; } ht_pp_free (hash); symbols[j].last = true; bin->symbols = symbols; return symbols; } static int parse_import_ptr(struct MACH0_(obj_t) *bin, struct reloc_t *reloc, int idx) { int i, j, sym; size_t wordsize; ut32 stype; wordsize = get_word_size (bin); if (idx < 0 || idx >= bin->nsymtab) { return 0; } if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) { stype = S_LAZY_SYMBOL_POINTERS; } else { stype = S_NON_LAZY_SYMBOL_POINTERS; } reloc->offset = 0; reloc->addr = 0; reloc->addend = 0; #define CASE(T) case ((T) / 8): reloc->type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return false; } #undef CASE for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == stype) { for (j = 0, sym = 
-1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) { int indidx = bin->sects[i].reserved1 + j; if (indidx < 0 || indidx >= bin->nindirectsyms) { break; } if (idx == bin->indirectsyms[indidx]) { sym = j; break; } } reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize; reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize; return true; } } return false; } struct import_t *MACH0_(get_imports)(struct MACH0_(obj_t) *bin) { r_return_val_if_fail (bin, NULL); int i, j, idx, stridx; if (!bin->sects || !bin->symtab || !bin->symstr || !bin->indirectsyms) { return NULL; } if (bin->dysymtab.nundefsym < 1 || bin->dysymtab.nundefsym > 0xfffff) { return NULL; } struct import_t *imports = calloc (bin->dysymtab.nundefsym + 1, sizeof (struct import_t)); if (!imports) { return NULL; } for (i = j = 0; i < bin->dysymtab.nundefsym; i++) { idx = bin->dysymtab.iundefsym + i; if (idx < 0 || idx >= bin->nsymtab) { bprintf ("WARNING: Imports index out of bounds. Ignoring relocs\n"); free (imports); return NULL; } stridx = bin->symtab[idx].n_strx; char *imp_name = get_name (bin, stridx, false); if (imp_name) { r_str_ncpy (imports[j].name, imp_name, R_BIN_MACH0_STRING_LENGTH - 1); free (imp_name); } else { //imports[j].name[0] = 0; continue; } imports[j].ord = i; imports[j++].last = 0; } imports[j].last = 1; if (!bin->imports_by_ord_size) { if (j > 0) { bin->imports_by_ord_size = j; bin->imports_by_ord = (RBinImport**)calloc (j, sizeof (RBinImport*)); } else { bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; } } return imports; } static int reloc_comparator(struct reloc_t *a, struct reloc_t *b) { return a->addr - b->addr; } static void parse_relocation_info(struct MACH0_(obj_t) *bin, RSkipList *relocs, ut32 offset, ut32 num) { if (!num || !offset || (st32)num < 0) { return; } ut64 total_size = num * sizeof (struct relocation_info); if (offset > bin->size) { return; } if (total_size > bin->size) { total_size = bin->size - offset; num = total_size /= 
sizeof (struct relocation_info); } struct relocation_info *info = calloc (num, sizeof (struct relocation_info)); if (!info) { return; } if (r_buf_read_at (bin->b, offset, (ut8 *) info, total_size) < total_size) { free (info); return; } size_t i; for (i = 0; i < num; i++) { struct relocation_info a_info = info[i]; ut32 sym_num = a_info.r_symbolnum; if (sym_num > bin->nsymtab) { continue; } ut32 stridx = bin->symtab[sym_num].n_strx; char *sym_name = get_name (bin, stridx, false); if (!sym_name) { continue; } struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { free (info); free (sym_name); return; } reloc->addr = offset_to_vaddr (bin, a_info.r_address); reloc->offset = a_info.r_address; reloc->ord = sym_num; reloc->type = a_info.r_type; // enum RelocationInfoType reloc->external = a_info.r_extern; reloc->pc_relative = a_info.r_pcrel; reloc->size = a_info.r_length; r_str_ncpy (reloc->name, sym_name, sizeof (reloc->name) - 1); r_skiplist_insert (relocs, reloc); free (sym_name); } free (info); } static bool walk_bind_chains_callback(void * context, RFixupEventDetails * event_details) { r_return_val_if_fail (event_details->type == R_FIXUP_EVENT_BIND || event_details->type == R_FIXUP_EVENT_BIND_AUTH, false); RWalkBindChainsContext *ctx = context; ut8 *imports = ctx->imports; struct MACH0_(obj_t) *bin = event_details->bin; ut32 imports_count = bin->fixups_header.imports_count; ut32 fixups_offset = bin->fixups_offset; ut32 fixups_size = bin->fixups_size; ut32 imports_format = bin->fixups_header.imports_format; ut32 import_index = ((RFixupBindEventDetails *) event_details)->ordinal; ut64 addend = 0; if (event_details->type != R_FIXUP_EVENT_BIND_AUTH) { addend = ((RFixupBindEventDetails *) event_details)->addend; } if (import_index < imports_count) { ut64 name_offset; switch (imports_format) { case DYLD_CHAINED_IMPORT: { struct dyld_chained_import * item = &((struct dyld_chained_import *) imports)[import_index]; name_offset = item->name_offset; break; } case 
DYLD_CHAINED_IMPORT_ADDEND: { struct dyld_chained_import_addend * item = &((struct dyld_chained_import_addend *) imports)[import_index]; name_offset = item->name_offset; addend += item->addend; break; } case DYLD_CHAINED_IMPORT_ADDEND64: { struct dyld_chained_import_addend64 * item = &((struct dyld_chained_import_addend64 *) imports)[import_index]; name_offset = item->name_offset; addend += item->addend; break; } default: bprintf ("Unsupported imports format\n"); return false; } ut64 symbols_offset = bin->fixups_header.symbols_offset + fixups_offset; if (symbols_offset + name_offset + 1 < fixups_offset + fixups_size) { char *name = r_buf_get_string (bin->b, symbols_offset + name_offset); if (name) { struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { free (name); return false; } reloc->addr = offset_to_vaddr (bin, event_details->offset); reloc->offset = event_details->offset; reloc->ord = import_index; reloc->type = R_BIN_RELOC_64; reloc->size = 8; reloc->addend = addend; r_str_ncpy (reloc->name, name, sizeof (reloc->name) - 1); r_skiplist_insert_autofree (ctx->relocs, reloc); free (name); } else if (bin->verbose) { eprintf ("Malformed chained bind: failed to read name\n"); } } else if (bin->verbose) { eprintf ("Malformed chained bind: name_offset out of bounds\n"); } } else if (bin->verbose) { eprintf ("Malformed chained bind: import out of length\n"); } return true; } static void walk_bind_chains(struct MACH0_(obj_t) *bin, RSkipList *relocs) { r_return_if_fail (bin && bin->fixups_offset); ut8 *imports = NULL; ut32 imports_count = bin->fixups_header.imports_count; ut32 fixups_offset = bin->fixups_offset; ut32 imports_offset = bin->fixups_header.imports_offset; if (!imports_count || !imports_offset) { return; } if (bin->fixups_header.symbols_format != 0) { eprintf ("Compressed fixups symbols not supported yet, please file a bug with a sample attached.\n"); return; } ut32 imports_format = bin->fixups_header.imports_format; ut64 imports_size; switch 
(imports_format) { case DYLD_CHAINED_IMPORT: imports_size = sizeof (struct dyld_chained_import) * imports_count; break; case DYLD_CHAINED_IMPORT_ADDEND: imports_size = sizeof (struct dyld_chained_import_addend) * imports_count; break; case DYLD_CHAINED_IMPORT_ADDEND64: imports_size = sizeof (struct dyld_chained_import_addend64) * imports_count; break; default: eprintf ("Unsupported chained imports format: %d\n", imports_format); goto beach; } imports = malloc (imports_size); if (!imports) { goto beach; } switch (imports_format) { case DYLD_CHAINED_IMPORT: if (r_buf_fread_at (bin->b, fixups_offset + imports_offset, imports, "i", imports_count) != imports_size) { goto beach; } break; case DYLD_CHAINED_IMPORT_ADDEND: if (r_buf_fread_at (bin->b, fixups_offset + imports_offset, imports, "ii", imports_count) != imports_size) { goto beach; } break; case DYLD_CHAINED_IMPORT_ADDEND64: if (r_buf_fread_at (bin->b, fixups_offset + imports_offset, imports, "il", imports_count) != imports_size) { goto beach; } break; } RWalkBindChainsContext ctx; ctx.imports = imports; ctx.relocs = relocs; MACH0_(iterate_chained_fixups) (bin, 0, UT64_MAX, R_FIXUP_EVENT_MASK_BIND_ALL, &walk_bind_chains_callback, &ctx); beach: free (imports); } static bool is_valid_ordinal_table_size(ut64 size) { return size > 0 && size <= UT16_MAX; } RSkipList *MACH0_(get_relocs)(struct MACH0_(obj_t) *bin) { RSkipList *relocs = NULL; RPVector *threaded_binds = NULL; size_t wordsize = get_word_size (bin); if (bin->dyld_info) { ut8 *opcodes, rel_type = 0; size_t bind_size, lazy_size, weak_size; #define CASE(T) case ((T) / 8): rel_type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return NULL; } #undef CASE bind_size = bin->dyld_info->bind_size; lazy_size = bin->dyld_info->lazy_bind_size; weak_size = bin->dyld_info->weak_bind_size; if (!bind_size && !lazy_size) { return NULL; } if ((bind_size + lazy_size)<1) { return NULL; } if (bin->dyld_info->bind_off > bin->size || 
bin->dyld_info->bind_off + bind_size > bin->size) { return NULL; } if (bin->dyld_info->lazy_bind_off > bin->size || \ bin->dyld_info->lazy_bind_off + lazy_size > bin->size) { return NULL; } if (bin->dyld_info->bind_off + bind_size + lazy_size > bin->size) { return NULL; } if (bin->dyld_info->weak_bind_off + weak_size > bin->size) { return NULL; } ut64 amount = bind_size + lazy_size + weak_size; if (amount == 0 || amount > UT32_MAX) { return NULL; } if (!bin->segs) { return NULL; } relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { return NULL; } opcodes = calloc (1, amount + 1); if (!opcodes) { r_skiplist_free (relocs); return NULL; } int len = r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size); len += r_buf_read_at (bin->b, bin->dyld_info->lazy_bind_off, opcodes + bind_size, lazy_size); len += r_buf_read_at (bin->b, bin->dyld_info->weak_bind_off, opcodes + bind_size + lazy_size, weak_size); if (len < amount) { bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n", (ut64)(size_t)bin->dyld_info->bind_off); R_FREE (opcodes); r_skiplist_free (relocs); return NULL; } size_t partition_sizes[] = {bind_size, lazy_size, weak_size}; size_t pidx; int opcodes_offset = 0; for (pidx = 0; pidx < R_ARRAY_SIZE (partition_sizes); pidx++) { size_t partition_size = partition_sizes[pidx]; ut8 type = 0; int lib_ord = 0, seg_idx = -1, sym_ord = -1; char *sym_name = NULL; size_t j, count, skip; st64 addend = 0; ut64 addr = bin->segs[0].vmaddr; ut64 segment_size = bin->segs[0].filesize; if (bin->segs[0].filesize != bin->segs[0].vmsize) { // is probably invalid and we should warn the user } if (segment_size > bin->size) { // is probably invalid and we should warn the user segment_size = bin->size; } ut64 segment_end_addr = addr + segment_size; ut8 *p = opcodes + opcodes_offset; ut8 *end = p + partition_size; bool done = false; while (!done && p < end) { ut8 imm = *p & BIND_IMMEDIATE_MASK; ut8 op = *p & BIND_OPCODE_MASK; 
p++; switch (op) { case BIND_OPCODE_DONE: { bool in_lazy_binds = pidx == 1; if (!in_lazy_binds) { done = true; } break; } case BIND_OPCODE_THREADED: { switch (imm) { case BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB: { ut64 table_size = read_uleb128 (&p, end); if (!is_valid_ordinal_table_size (table_size)) { bprintf ("Error: BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB size is wrong\n"); break; } if (threaded_binds) { r_pvector_free (threaded_binds); } threaded_binds = r_pvector_new_with_len ((RPVectorFree) &free, table_size); if (threaded_binds) { sym_ord = 0; } break; } case BIND_SUBOPCODE_THREADED_APPLY: if (threaded_binds) { int cur_seg_idx = (seg_idx != -1)? seg_idx: 0; size_t n_threaded_binds = r_pvector_len (threaded_binds); while (addr < segment_end_addr) { ut8 tmp[8]; ut64 paddr = addr - bin->segs[cur_seg_idx].vmaddr + bin->segs[cur_seg_idx].fileoff; bin->rebasing_buffer = true; if (r_buf_read_at (bin->b, paddr, tmp, 8) != 8) { break; } bin->rebasing_buffer = false; ut64 raw_ptr = r_read_le64 (tmp); bool is_auth = (raw_ptr & (1ULL << 63)) != 0; bool is_bind = (raw_ptr & (1ULL << 62)) != 0; int ordinal = -1; int addend = -1; ut64 delta; if (is_auth && is_bind) { struct dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; delta = p->next; ordinal = p->ordinal; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; delta = p->next; ordinal = p->ordinal; addend = p->addend; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; delta = p->next; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; delta = p->next; } if (ordinal != -1) { if (ordinal >= n_threaded_binds) { bprintf ("Error: Malformed bind chain\n"); break; } struct reloc_t *ref = r_pvector_at (threaded_binds, ordinal); if (!ref) { 
bprintf ("Error: Inconsistent bind opcodes\n"); break; } struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { break; } *reloc = *ref; reloc->addr = addr; reloc->offset = paddr; if (addend != -1) { reloc->addend = addend; } r_skiplist_insert (relocs, reloc); } addr += delta * wordsize; if (!delta) { break; } } } break; default: bprintf ("Error: Unexpected BIND_OPCODE_THREADED sub-opcode: 0x%x\n", imm); } break; } case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: lib_ord = imm; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: lib_ord = read_uleb128 (&p, end); break; case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: lib_ord = imm? (st8)(BIND_OPCODE_MASK | imm) : 0; break; case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: { sym_name = (char*)p; while (*p++ && p < end) { /* empty loop */ } if (threaded_binds) { break; } sym_ord = -1; if (bin->symtab && bin->dysymtab.nundefsym < UT16_MAX) { for (j = 0; j < bin->dysymtab.nundefsym; j++) { size_t stridx = 0; bool found = false; int iundefsym = bin->dysymtab.iundefsym; if (iundefsym >= 0 && iundefsym < bin->nsymtab) { int sidx = iundefsym + j; if (sidx < 0 || sidx >= bin->nsymtab) { continue; } stridx = bin->symtab[sidx].n_strx; if (stridx >= bin->symstrlen) { continue; } found = true; } if (found && !strcmp ((const char *)bin->symstr + stridx, sym_name)) { sym_ord = j; break; } } } break; } case BIND_OPCODE_SET_TYPE_IMM: type = imm; break; case BIND_OPCODE_SET_ADDEND_SLEB: addend = r_sleb128 ((const ut8 **)&p, end); break; case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: seg_idx = imm; if (seg_idx >= bin->nsegs) { bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB" " has unexistent segment %d\n", seg_idx); free (opcodes); r_skiplist_free (relocs); r_pvector_free (threaded_binds); return NULL; // early exit to avoid future mayhem } addr = bin->segs[seg_idx].vmaddr + read_uleb128 (&p, end); segment_end_addr = bin->segs[seg_idx].vmaddr \ + bin->segs[seg_idx].vmsize; break; case BIND_OPCODE_ADD_ADDR_ULEB: addr += read_uleb128 (&p, end); 
break; #define DO_BIND() do {\ if (sym_ord < 0 && !sym_name) break;\ if (!threaded_binds) {\ if (seg_idx < 0 ) break;\ if (!addr) break;\ }\ struct reloc_t *reloc = R_NEW0 (struct reloc_t);\ reloc->addr = addr;\ if (seg_idx >= 0) {\ reloc->offset = addr - bin->segs[seg_idx].vmaddr + bin->segs[seg_idx].fileoff;\ if (type == BIND_TYPE_TEXT_PCREL32)\ reloc->addend = addend - (bin->baddr + addr);\ else\ reloc->addend = addend;\ } else {\ reloc->addend = addend;\ }\ /* library ordinal ??? */ \ reloc->ord = lib_ord;\ reloc->ord = sym_ord;\ reloc->type = rel_type;\ if (sym_name)\ r_str_ncpy (reloc->name, sym_name, 256);\ if (threaded_binds)\ r_pvector_set (threaded_binds, sym_ord, reloc);\ else\ r_skiplist_insert (relocs, reloc);\ } while (0) case BIND_OPCODE_DO_BIND: if (!threaded_binds && addr >= segment_end_addr) { bprintf ("Error: Malformed DO bind opcode 0x%"PFMT64x"\n", addr); goto beach; } DO_BIND (); if (!threaded_binds) { addr += wordsize; } else { sym_ord++; } break; case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: if (addr >= segment_end_addr) { bprintf ("Error: Malformed ADDR ULEB bind opcode\n"); goto beach; } DO_BIND (); addr += read_uleb128 (&p, end) + wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: if (addr >= segment_end_addr) { bprintf ("Error: Malformed IMM SCALED bind opcode\n"); goto beach; } DO_BIND (); addr += (ut64)imm * (ut64)wordsize + wordsize; break; case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: count = read_uleb128 (&p, end); skip = read_uleb128 (&p, end); for (j = 0; j < count; j++) { if (addr >= segment_end_addr) { bprintf ("Error: Malformed ULEB TIMES bind opcode\n"); goto beach; } DO_BIND (); addr += skip + wordsize; } break; #undef DO_BIND default: bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *p); R_FREE (opcodes); r_pvector_free (threaded_binds); return relocs; } } opcodes_offset += partition_size; } R_FREE (opcodes); r_pvector_free (threaded_binds); threaded_binds = NULL; } if (bin->symtab && bin->symstr && 
bin->sects && bin->indirectsyms) { int j; int amount = bin->dysymtab.nundefsym; if (amount < 0) { amount = 0; } if (!relocs) { relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { goto beach; } } for (j = 0; j < amount; j++) { struct reloc_t *reloc = R_NEW0 (struct reloc_t); if (!reloc) { break; } if (parse_import_ptr (bin, reloc, bin->dysymtab.iundefsym + j)) { reloc->ord = j; r_skiplist_insert_autofree (relocs, reloc); } else { R_FREE (reloc); } } } if (bin->symtab && bin->dysymtab.extreloff && bin->dysymtab.nextrel) { if (!relocs) { relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { goto beach; } } parse_relocation_info (bin, relocs, bin->dysymtab.extreloff, bin->dysymtab.nextrel); } if (!bin->dyld_info && bin->chained_starts && bin->nsegs && bin->fixups_offset) { if (!relocs) { relocs = r_skiplist_new ((RListFree) &free, (RListComparator) &reloc_comparator); if (!relocs) { goto beach; } } walk_bind_chains (bin, relocs); } beach: r_pvector_free (threaded_binds); return relocs; } struct addr_t *MACH0_(get_entrypoint)(struct MACH0_(obj_t) *bin) { r_return_val_if_fail (bin, NULL); ut64 ea = entry_to_vaddr (bin); if (ea == 0 || ea == UT64_MAX) { return NULL; } struct addr_t *entry = R_NEW0 (struct addr_t); if (!entry) { return NULL; } entry->addr = ea; entry->offset = addr_to_offset (bin, entry->addr); entry->haddr = sdb_num_get (bin->kv, "mach0.entry.offset", 0); sdb_num_set (bin->kv, "mach0.entry.vaddr", entry->addr, 0); sdb_num_set (bin->kv, "mach0.entry.paddr", bin->entry, 0); if (entry->offset == 0 && !bin->sects) { int i; for (i = 0; i < bin->nsects; i++) { // XXX: section name shoudnt matter .. 
just check for exec flags if (!strncmp (bin->sects[i].sectname, "__text", 6)) { entry->offset = (ut64)bin->sects[i].offset; sdb_num_set (bin->kv, "mach0.entry", entry->offset, 0); entry->addr = (ut64)bin->sects[i].addr; if (!entry->addr) { // workaround for object files eprintf ("entrypoint is 0...\n"); // XXX(lowlyw) there's technically not really entrypoints // for .o files, so ignore this... // entry->addr = entry->offset; } break; } } bin->entry = entry->addr; } return entry; } void MACH0_(kv_loadlibs)(struct MACH0_(obj_t) *bin) { int i; char lib_flagname[128]; for (i = 0; i < bin->nlibs; i++) { snprintf (lib_flagname, sizeof (lib_flagname), "libs.%d.name", i); sdb_set (bin->kv, lib_flagname, bin->libs[i], 0); } } struct lib_t *MACH0_(get_libs)(struct MACH0_(obj_t) *bin) { struct lib_t *libs; int i; char lib_flagname[128]; if (!bin->nlibs) { return NULL; } if (!(libs = calloc ((bin->nlibs + 1), sizeof (struct lib_t)))) { return NULL; } for (i = 0; i < bin->nlibs; i++) { snprintf (lib_flagname, sizeof (lib_flagname), "libs.%d.name", i); sdb_set (bin->kv, lib_flagname, bin->libs[i], 0); r_str_ncpy (libs[i].name, bin->libs[i], R_BIN_MACH0_STRING_LENGTH - 1); libs[i].last = 0; } libs[i].last = 1; return libs; } ut64 MACH0_(get_baddr)(struct MACH0_(obj_t) *bin) { int i; if (bin->hdr.filetype != MH_EXECUTE && bin->hdr.filetype != MH_DYLINKER && bin->hdr.filetype != MH_FILESET) { return 0; } for (i = 0; i < bin->nsegs; i++) { if (bin->segs[i].fileoff == 0 && bin->segs[i].filesize != 0) { return bin->segs[i].vmaddr; } } return 0; } char *MACH0_(get_class)(struct MACH0_(obj_t) *bin) { #if R_BIN_MACH064 return r_str_new ("MACH064"); #else return r_str_new ("MACH0"); #endif } //XXX we are mixing up bits from cpu and opcodes //since thumb use 16 bits opcode but run in 32 bits //cpus so here we should only return 32 or 64 int MACH0_(get_bits)(struct MACH0_(obj_t) *bin) { if (bin) { int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); if (bin->hdr.cputype == CPU_TYPE_ARM && 
bin->entry & 1) {
			/* odd entrypoint on ARM means Thumb mode */
			return 16;
		}
		return bits;
	}
	return 32;
}

/* Derive the word size (or Thumb-ness) from a raw mach_header:
 * 64 for MH_MAGIC_64/CIGAM_64 and arm64_32, 16 for armv7k, else 32. */
int MACH0_(get_bits_from_hdr)(struct MACH0_(mach_header) *hdr) {
	if (hdr->magic == MH_MAGIC_64 || hdr->magic == MH_CIGAM_64) {
		return 64;
	}
	if (hdr->cputype == CPU_TYPE_ARM64_32) { // new apple watch aka arm64_32
		return 64;
	}
	if ((hdr->cpusubtype & CPU_SUBTYPE_MASK) == (CPU_SUBTYPE_ARM_V7K << 24)) {
		return 16;
	}
	return 32;
}

/* Only the PowerPC variants are big-endian among the supported cputypes. */
bool MACH0_(is_big_endian)(struct MACH0_(obj_t) *bin) {
	if (bin) {
		const int cpu = bin->hdr.cputype;
		return cpu == CPU_TYPE_POWERPC || cpu == CPU_TYPE_POWERPC64;
	}
	return false;
}

/* Dynamic-linker path recorded at load time (may be NULL). */
const char *MACH0_(get_intrp)(struct MACH0_(obj_t) *bin) {
	return bin? bin->intrp: NULL;
}

/* Map the numeric bin->os tag to a platform name; "darwin" when unknown. */
const char *MACH0_(get_os)(struct MACH0_(obj_t) *bin) {
	if (bin) {
		switch (bin->os) {
		case 1: return "macos";
		case 2: return "ios";
		case 3: return "watchos";
		case 4: return "tvos";
		}
	}
	return "darwin";
}

/* Map the mach_header cputype to radare2's architecture name.
 * Returns a static string; "unknown" (with a warning) for unmapped values. */
const char *MACH0_(get_cputype_from_hdr)(struct MACH0_(mach_header) *hdr) {
	const char *archstr = "unknown";
	switch (hdr->cputype) {
	case CPU_TYPE_VAX:
		archstr = "vax";
		break;
	case CPU_TYPE_MC680x0:
		archstr = "mc680x0";
		break;
	case CPU_TYPE_I386:
	case CPU_TYPE_X86_64:
		archstr = "x86";
		break;
	case CPU_TYPE_MC88000:
		archstr = "mc88000";
		break;
	case CPU_TYPE_MC98000:
		archstr = "mc98000";
		break;
	case CPU_TYPE_HPPA:
		archstr = "hppa";
		break;
	case CPU_TYPE_ARM:
	case CPU_TYPE_ARM64:
	case CPU_TYPE_ARM64_32:
		archstr = "arm";
		break;
	case CPU_TYPE_SPARC:
		archstr = "sparc";
		break;
	case CPU_TYPE_MIPS:
		archstr = "mips";
		break;
	case CPU_TYPE_I860:
		archstr = "i860";
		break;
	case CPU_TYPE_POWERPC:
	case CPU_TYPE_POWERPC64:
		archstr = "ppc";
		break;
	default:
		eprintf ("Unknown arch %d\n", hdr->cputype);
		break;
	}
	return archstr;
}

const char *MACH0_(get_cputype)(struct MACH0_(obj_t) *bin) {
	return bin?
MACH0_(get_cputype_from_hdr) (&bin->hdr): "unknown"; } static const char *cpusubtype_tostring(ut32 cputype, ut32 cpusubtype) { switch (cputype) { case CPU_TYPE_VAX: switch (cpusubtype) { case CPU_SUBTYPE_VAX_ALL: return "all"; case CPU_SUBTYPE_VAX780: return "vax780"; case CPU_SUBTYPE_VAX785: return "vax785"; case CPU_SUBTYPE_VAX750: return "vax750"; case CPU_SUBTYPE_VAX730: return "vax730"; case CPU_SUBTYPE_UVAXI: return "uvaxI"; case CPU_SUBTYPE_UVAXII: return "uvaxII"; case CPU_SUBTYPE_VAX8200: return "vax8200"; case CPU_SUBTYPE_VAX8500: return "vax8500"; case CPU_SUBTYPE_VAX8600: return "vax8600"; case CPU_SUBTYPE_VAX8650: return "vax8650"; case CPU_SUBTYPE_VAX8800: return "vax8800"; case CPU_SUBTYPE_UVAXIII: return "uvaxIII"; default: return "Unknown vax subtype"; } case CPU_TYPE_MC680x0: switch (cpusubtype) { case CPU_SUBTYPE_MC68030: return "mc68030"; case CPU_SUBTYPE_MC68040: return "mc68040"; case CPU_SUBTYPE_MC68030_ONLY: return "mc68030 only"; default: return "Unknown mc680x0 subtype"; } case CPU_TYPE_I386: switch (cpusubtype) { case CPU_SUBTYPE_386: return "386"; case CPU_SUBTYPE_486: return "486"; case CPU_SUBTYPE_486SX: return "486sx"; case CPU_SUBTYPE_PENT: return "Pentium"; case CPU_SUBTYPE_PENTPRO: return "Pentium Pro"; case CPU_SUBTYPE_PENTII_M3: return "Pentium 3 M3"; case CPU_SUBTYPE_PENTII_M5: return "Pentium 3 M5"; case CPU_SUBTYPE_CELERON: return "Celeron"; case CPU_SUBTYPE_CELERON_MOBILE: return "Celeron Mobile"; case CPU_SUBTYPE_PENTIUM_3: return "Pentium 3"; case CPU_SUBTYPE_PENTIUM_3_M: return "Pentium 3 M"; case CPU_SUBTYPE_PENTIUM_3_XEON: return "Pentium 3 Xeon"; case CPU_SUBTYPE_PENTIUM_M: return "Pentium Mobile"; case CPU_SUBTYPE_PENTIUM_4: return "Pentium 4"; case CPU_SUBTYPE_PENTIUM_4_M: return "Pentium 4 M"; case CPU_SUBTYPE_ITANIUM: return "Itanium"; case CPU_SUBTYPE_ITANIUM_2: return "Itanium 2"; case CPU_SUBTYPE_XEON: return "Xeon"; case CPU_SUBTYPE_XEON_MP: return "Xeon MP"; default: return "Unknown i386 subtype"; } case 
CPU_TYPE_X86_64: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_X86_64_ALL: return "x86 64 all"; case CPU_SUBTYPE_X86_ARCH1: return "x86 arch 1"; default: return "Unknown x86 subtype"; } case CPU_TYPE_MC88000: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_MC88000_ALL: return "all"; case CPU_SUBTYPE_MC88100: return "mc88100"; case CPU_SUBTYPE_MC88110: return "mc88110"; default: return "Unknown mc88000 subtype"; } case CPU_TYPE_MC98000: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_MC98000_ALL: return "all"; case CPU_SUBTYPE_MC98601: return "mc98601"; default: return "Unknown mc98000 subtype"; } case CPU_TYPE_HPPA: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_HPPA_7100: return "hppa7100"; case CPU_SUBTYPE_HPPA_7100LC: return "hppa7100LC"; default: return "Unknown hppa subtype"; } case CPU_TYPE_ARM64: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_ARM64_ALL: return "all"; case CPU_SUBTYPE_ARM64_V8: return "arm64v8"; case CPU_SUBTYPE_ARM64E: return "arm64e"; default: return "Unknown arm64 subtype"; } case CPU_TYPE_ARM: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_ARM_ALL: return "all"; case CPU_SUBTYPE_ARM_V4T: return "v4t"; case CPU_SUBTYPE_ARM_V5: return "v5"; case CPU_SUBTYPE_ARM_V6: return "v6"; case CPU_SUBTYPE_ARM_XSCALE: return "xscale"; case CPU_SUBTYPE_ARM_V7: return "v7"; case CPU_SUBTYPE_ARM_V7F: return "v7f"; case CPU_SUBTYPE_ARM_V7S: return "v7s"; case CPU_SUBTYPE_ARM_V7K: return "v7k"; case CPU_SUBTYPE_ARM_V7M: return "v7m"; case CPU_SUBTYPE_ARM_V7EM: return "v7em"; default: eprintf ("Unknown arm subtype %d\n", cpusubtype & 0xff); return "unknown arm subtype"; } case CPU_TYPE_SPARC: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_SPARC_ALL: return "all"; default: return "Unknown sparc subtype"; } case CPU_TYPE_MIPS: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_MIPS_ALL: return "all"; case CPU_SUBTYPE_MIPS_R2300: return "r2300"; case CPU_SUBTYPE_MIPS_R2600: return "r2600"; case CPU_SUBTYPE_MIPS_R2800: return "r2800"; case CPU_SUBTYPE_MIPS_R2000a: return 
"r2000a"; case CPU_SUBTYPE_MIPS_R2000: return "r2000"; case CPU_SUBTYPE_MIPS_R3000a: return "r3000a"; case CPU_SUBTYPE_MIPS_R3000: return "r3000"; default: return "Unknown mips subtype"; } case CPU_TYPE_I860: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_I860_ALL: return "all"; case CPU_SUBTYPE_I860_860: return "860"; default: return "Unknown i860 subtype"; } case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: switch (cpusubtype & 0xff) { case CPU_SUBTYPE_POWERPC_ALL: return "all"; case CPU_SUBTYPE_POWERPC_601: return "601"; case CPU_SUBTYPE_POWERPC_602: return "602"; case CPU_SUBTYPE_POWERPC_603: return "603"; case CPU_SUBTYPE_POWERPC_603e: return "603e"; case CPU_SUBTYPE_POWERPC_603ev: return "603ev"; case CPU_SUBTYPE_POWERPC_604: return "604"; case CPU_SUBTYPE_POWERPC_604e: return "604e"; case CPU_SUBTYPE_POWERPC_620: return "620"; case CPU_SUBTYPE_POWERPC_750: return "750"; case CPU_SUBTYPE_POWERPC_7400: return "7400"; case CPU_SUBTYPE_POWERPC_7450: return "7450"; case CPU_SUBTYPE_POWERPC_970: return "970"; default: return "Unknown ppc subtype"; } } return "Unknown cputype"; } char *MACH0_(get_cpusubtype_from_hdr)(struct MACH0_(mach_header) *hdr) { r_return_val_if_fail (hdr, NULL); return strdup (cpusubtype_tostring (hdr->cputype, hdr->cpusubtype)); } char *MACH0_(get_cpusubtype)(struct MACH0_(obj_t) *bin) { return bin? 
MACH0_(get_cpusubtype_from_hdr) (&bin->hdr): strdup ("Unknown");
}

/* True for executables built position-independent (MH_PIE flag set). */
bool MACH0_(is_pie)(struct MACH0_(obj_t) *bin) {
	return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_PIE);
}

/* True for executables with heap execution disabled (MH_NO_HEAP_EXECUTION). */
bool MACH0_(has_nx)(struct MACH0_(obj_t) *bin) {
	return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_NO_HEAP_EXECUTION);
}

/* Human-readable description of the Mach-O filetype field.
 * Returns a heap-allocated string the caller must free. */
char *MACH0_(get_filetype_from_hdr)(struct MACH0_(mach_header) *hdr) {
	const char *mhtype = "Unknown";
	switch (hdr->filetype) {
	case MH_OBJECT:      mhtype = "Relocatable object"; break;
	case MH_EXECUTE:     mhtype = "Executable file"; break;
	case MH_FVMLIB:      mhtype = "Fixed VM shared library"; break;
	case MH_CORE:        mhtype = "Core file"; break;
	case MH_PRELOAD:     mhtype = "Preloaded executable file"; break;
	case MH_DYLIB:       mhtype = "Dynamically bound shared library"; break;
	case MH_DYLINKER:    mhtype = "Dynamic link editor"; break;
	case MH_BUNDLE:      mhtype = "Dynamically bound bundle file"; break;
	case MH_DYLIB_STUB:  mhtype = "Shared library stub for static linking (no sections)"; break;
	case MH_DSYM:        mhtype = "Companion file with only debug sections"; break;
	case MH_KEXT_BUNDLE: mhtype = "Kernel extension bundle file"; break;
	case MH_FILESET:     mhtype = "Kernel cache file"; break;
	}
	return strdup (mhtype);
}

char *MACH0_(get_filetype)(struct MACH0_(obj_t) *bin) {
	return bin?
MACH0_(get_filetype_from_hdr) (&bin->hdr): strdup ("Unknown"); } ut64 MACH0_(get_main)(struct MACH0_(obj_t) *bin) { ut64 addr = UT64_MAX; int i; // 0 = sscanned but no main found // -1 = not scanned, so no main // other = valid main addr if (bin->main_addr == UT64_MAX) { #if FEATURE_SYMLIST (void)MACH0_(get_symbols_list) (bin); #else (void)MACH0_(get_symbols) (bin); #endif } if (bin->main_addr != 0 && bin->main_addr != UT64_MAX) { return bin->main_addr; } // dummy call to initialize things free (MACH0_(get_entrypoint)(bin)); bin->main_addr = 0; if (addr == UT64_MAX && bin->main_cmd.cmd == LC_MAIN) { addr = bin->entry + bin->baddr; } if (!addr) { ut8 b[128]; ut64 entry = addr_to_offset (bin, bin->entry); // XXX: X86 only and hacky! if (entry > bin->size || entry + sizeof (b) > bin->size) { return UT64_MAX; } i = r_buf_read_at (bin->b, entry, b, sizeof (b)); if (i < 80) { return UT64_MAX; } for (i = 0; i < 64; i++) { if (b[i] == 0xe8 && !b[i + 3] && !b[i + 4]) { int delta = b[i + 1] | (b[i + 2] << 8) | (b[i + 3] << 16) | (b[i + 4] << 24); addr = bin->entry + i + 5 + delta; break; } } if (!addr) { addr = entry; } } return bin->main_addr = addr; } void MACH0_(mach_headerfields)(RBinFile *bf) { PrintfCallback cb_printf = bf->rbin->cb_printf; if (!cb_printf) { cb_printf = printf; } RBuffer *buf = bf->buf; ut64 length = r_buf_size (buf); int n = 0; struct MACH0_(mach_header) *mh = MACH0_(get_hdr)(buf); if (!mh) { return; } ut64 pvaddr = pa2va (bf, 0); cb_printf ("pf.mach0_header @ 0x%08"PFMT64x"\n", pvaddr); cb_printf ("0x%08"PFMT64x" Magic 0x%x\n", pvaddr, mh->magic); pvaddr += 4; cb_printf ("0x%08"PFMT64x" CpuType 0x%x\n", pvaddr, mh->cputype); pvaddr += 4; cb_printf ("0x%08"PFMT64x" CpuSubType 0x%x\n", pvaddr, mh->cpusubtype); pvaddr += 4; cb_printf ("0x%08"PFMT64x" FileType 0x%x\n", pvaddr, mh->filetype); pvaddr += 4; cb_printf ("0x%08"PFMT64x" nCmds %d\n", pvaddr, mh->ncmds); pvaddr += 4; cb_printf ("0x%08"PFMT64x" sizeOfCmds %d\n", pvaddr, mh->sizeofcmds); pvaddr += 
4; cb_printf ("0x%08"PFMT64x" Flags 0x%x\n", pvaddr, mh->flags); pvaddr += 4; bool is64 = mh->cputype >> 16; ut64 addr = 0x20 - 4; ut32 word = 0; ut8 wordbuf[sizeof (word)]; bool isBe = false; switch (mh->cputype) { case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: isBe = true; break; } #define READWORD() \ if (r_buf_read_at (buf, addr, (ut8*)wordbuf, 4) != 4) { \ eprintf ("Invalid address in buffer."); \ break; \ } \ addr += 4; \ pvaddr += 4;\ word = isBe? r_read_be32 (wordbuf): r_read_le32 (wordbuf); if (is64) { addr += 4; pvaddr += 4; } for (n = 0; n < mh->ncmds && addr < length; n++) { READWORD (); ut32 lcType = word; const char *pf_definition = cmd_to_pf_definition (lcType); if (pf_definition) { cb_printf ("pf.%s @ 0x%08"PFMT64x"\n", pf_definition, pvaddr - 4); } cb_printf ("0x%08"PFMT64x" cmd %7d 0x%x %s\n", pvaddr - 4, n, lcType, cmd_to_string (lcType)); READWORD (); if (addr > length) { break; } int lcSize = word; word &= 0xFFFFFF; cb_printf ("0x%08"PFMT64x" cmdsize %d\n", pvaddr - 4, word); if (lcSize < 1) { eprintf ("Invalid size for a load command\n"); break; } switch (lcType) { case LC_BUILD_VERSION: { cb_printf ("0x%08"PFMT64x" platform %s\n", pvaddr, build_version_platform_to_string (r_buf_read_le32_at (buf, addr))); cb_printf ("0x%08"PFMT64x" minos %d.%d.%d\n", pvaddr + 4, r_buf_read_le16_at (buf, addr + 6), r_buf_read8_at (buf, addr + 5), r_buf_read8_at (buf, addr + 4)); cb_printf ("0x%08"PFMT64x" sdk %d.%d.%d\n", pvaddr + 8, r_buf_read_le16_at (buf, addr + 10), r_buf_read8_at (buf, addr + 9), r_buf_read8_at (buf, addr + 8)); ut32 ntools = r_buf_read_le32_at (buf, addr + 12); cb_printf ("0x%08"PFMT64x" ntools %d\n", pvaddr + 12, ntools); ut64 off = 16; while (off < (lcSize - 8) && ntools--) { cb_printf ("pf.mach0_build_version_tool @ 0x%08"PFMT64x"\n", pvaddr + off); cb_printf ("0x%08"PFMT64x" tool %s\n", pvaddr + off, build_version_tool_to_string (r_buf_read_le32_at (buf, addr + off))); off += 4; if (off >= (lcSize - 8)) { break; } cb_printf 
("0x%08"PFMT64x" version %d.%d.%d\n", pvaddr + off, r_buf_read_le16_at (buf, addr + off + 2), r_buf_read8_at (buf, addr + off + 1), r_buf_read8_at (buf, addr + off)); off += 4; } break; } case LC_MAIN: { ut8 data[64] = {0}; r_buf_read_at (buf, addr, data, sizeof (data)); #if R_BIN_MACH064 ut64 ep = r_read_ble64 (&data, false); // bin->big_endian); cb_printf ("0x%08"PFMT64x" entry0 0x%" PFMT64x "\n", pvaddr, ep); ut64 ss = r_read_ble64 (&data[8], false); // bin->big_endian); cb_printf ("0x%08"PFMT64x" stacksize 0x%" PFMT64x "\n", pvaddr + 8, ss); #else ut32 ep = r_read_ble32 (&data, false); // bin->big_endian); cb_printf ("0x%08"PFMT32x" entry0 0x%" PFMT32x "\n", (ut32)pvaddr, ep); ut32 ss = r_read_ble32 (&data[4], false); // bin->big_endian); cb_printf ("0x%08"PFMT32x" stacksize 0x%" PFMT32x "\n", (ut32)pvaddr + 4, ss); #endif } break; case LC_SYMTAB: #if 0 { char *id = r_buf_get_string (buf, addr + 20); cb_printf ("0x%08"PFMT64x" id 0x%x\n", addr + 20, r_str_get (id)); cb_printf ("0x%08"PFMT64x" symooff 0x%x\n", addr + 20, r_str_get (id)); cb_printf ("0x%08"PFMT64x" nsyms %d\n", addr + 20, r_str_get (id)); cb_printf ("0x%08"PFMT64x" stroff 0x%x\n", addr + 20, r_str_get (id)); cb_printf ("0x%08"PFMT64x" strsize 0x%x\n", addr + 20, r_str_get (id)); free (id); } #endif break; case LC_ID_DYLIB: { // install_name_tool ut32 str_off = r_buf_read_ble32_at (buf, addr, isBe); char *id = r_buf_get_string (buf, addr + str_off - 8); cb_printf ("0x%08"PFMT64x" current %d.%d.%d\n", pvaddr + 8, r_buf_read_le16_at (buf, addr + 10), r_buf_read8_at (buf, addr + 9), r_buf_read8_at (buf, addr + 8)); cb_printf ("0x%08"PFMT64x" compat %d.%d.%d\n", pvaddr + 12, r_buf_read_le16_at (buf, addr + 14), r_buf_read8_at (buf, addr + 13), r_buf_read8_at (buf, addr + 12)); cb_printf ("0x%08"PFMT64x" id %s\n", pvaddr + str_off - 8, r_str_get (id)); if (id) { free (id); } break; } case LC_UUID: { ut8 i, uuid[16]; r_buf_read_at (buf, addr, uuid, sizeof (uuid)); cb_printf ("0x%08"PFMT64x" uuid ", 
pvaddr); for (i = 0; i < sizeof (uuid); i++) { cb_printf ("%02x", uuid[i]); } cb_printf ("\n"); } break; case LC_SEGMENT: case LC_SEGMENT_64: { ut8 name[17] = {0}; r_buf_read_at (buf, addr, name, sizeof (name) - 1); cb_printf ("0x%08"PFMT64x" name %s\n", pvaddr, name); ut32 nsects = r_buf_read_le32_at (buf, addr - 8 + (is64 ? 64 : 48)); ut64 off = is64 ? 72 : 56; while (off < lcSize && nsects--) { if (is64) { cb_printf ("pf.mach0_section64 @ 0x%08"PFMT64x"\n", pvaddr - 8 + off); off += 80; } else { cb_printf ("pf.mach0_section @ 0x%08"PFMT64x"\n", pvaddr - 8 + off); off += 68; } } } break; case LC_LOAD_DYLIB: case LC_LOAD_WEAK_DYLIB: { ut32 str_off = r_buf_read_ble32_at (buf, addr, isBe); char *load_dylib = r_buf_get_string (buf, addr + str_off - 8); cb_printf ("0x%08"PFMT64x" current %d.%d.%d\n", pvaddr + 8, r_buf_read_le16_at (buf, addr + 10), r_buf_read8_at (buf, addr + 9), r_buf_read8_at (buf, addr + 8)); cb_printf ("0x%08"PFMT64x" compat %d.%d.%d\n", pvaddr + 12, r_buf_read_le16_at (buf, addr + 14), r_buf_read8_at (buf, addr + 13), r_buf_read8_at (buf, addr + 12)); cb_printf ("0x%08"PFMT64x" load_dylib %s\n", pvaddr + str_off - 8, r_str_get (load_dylib)); if (load_dylib) { free (load_dylib); } break; } case LC_RPATH: { char *rpath = r_buf_get_string (buf, addr + 4); cb_printf ("0x%08" PFMT64x " rpath %s\n", pvaddr + 4, r_str_get (rpath)); if (rpath) { free (rpath); } break; } case LC_ENCRYPTION_INFO: case LC_ENCRYPTION_INFO_64: { ut32 word = r_buf_read_le32_at (buf, addr); cb_printf ("0x%08"PFMT64x" cryptoff 0x%08x\n", pvaddr, word); word = r_buf_read_le32_at (buf, addr + 4); cb_printf ("0x%08"PFMT64x" cryptsize %d\n", pvaddr + 4, word); word = r_buf_read_le32_at (buf, addr + 8); cb_printf ("0x%08"PFMT64x" cryptid %d\n", pvaddr + 8, word); break; } case LC_CODE_SIGNATURE: { ut32 words[2]; r_buf_read_at (buf, addr, (ut8 *)words, sizeof (words)); cb_printf ("0x%08"PFMT64x" dataoff 0x%08x\n", pvaddr, words[0]); cb_printf ("0x%08"PFMT64x" datasize %d\n", pvaddr + 
4, words[1]); cb_printf ("# wtf mach0.sign %d @ 0x%x\n", words[1], words[0]); break; } } addr += word - 8; pvaddr += word - 8; } free (mh); } RList *MACH0_(mach_fields)(RBinFile *bf) { RBuffer *buf = bf->buf; ut64 length = r_buf_size (buf); struct MACH0_(mach_header) *mh = MACH0_(get_hdr) (buf); if (!mh) { return NULL; } RList *ret = r_list_new (); if (!ret) { free (mh); return NULL; } ret->free = free; ut64 addr = pa2va (bf, 0); ut64 paddr = 0; r_list_append (ret, r_bin_field_new (addr, addr, 1, "header", "mach0_header", "mach0_header", true)); addr += 0x20 - 4; paddr += 0x20 - 4; bool is64 = mh->cputype >> 16; if (is64) { addr += 4; paddr += 4; } bool isBe = false; switch (mh->cputype) { case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: isBe = true; break; } int n; char load_command_flagname[128]; for (n = 0; n < mh->ncmds && paddr < length; n++) { ut32 lcType = r_buf_read_ble32_at (buf, paddr, isBe); ut32 word = r_buf_read_ble32_at (buf, paddr + 4, isBe); if (paddr + 8 > length) { break; } ut32 lcSize = word; word &= 0xFFFFFF; if (lcSize < 1) { eprintf ("Invalid size for a load command\n"); break; } if (word == 0) { break; } const char *pf_definition = cmd_to_pf_definition (lcType); if (pf_definition) { snprintf (load_command_flagname, sizeof (load_command_flagname), "load_command_%d_%s", n, cmd_to_string (lcType)); r_list_append (ret, r_bin_field_new (addr, addr, 1, load_command_flagname, pf_definition, pf_definition, true)); } switch (lcType) { case LC_BUILD_VERSION: { ut32 ntools = r_buf_read_le32_at (buf, paddr + 20); ut64 off = 24; int j = 0; char tool_flagname[32]; while (off < lcSize && ntools--) { snprintf (tool_flagname, sizeof (tool_flagname), "tool_%d", j++); r_list_append (ret, r_bin_field_new (addr + off, addr + off, 1, tool_flagname, "mach0_build_version_tool", "mach0_build_version_tool", true)); off += 8; } break; } case LC_SEGMENT: case LC_SEGMENT_64: { ut32 nsects = r_buf_read_le32_at (buf, addr + (is64 ? 64 : 48)); ut64 off = is64 ? 
72 : 56; size_t i, j = 0; char section_flagname[128]; for (i = 0; i < nsects && (addr + off) < length && off < lcSize; i++) { const char *sname = is64? "mach0_section64": "mach0_section"; snprintf (section_flagname, sizeof (section_flagname), "section_%u", (ut32)j++); RBinField *f = r_bin_field_new (addr + off, addr + off, 1, section_flagname, sname, sname, true); r_list_append (ret, f); off += is64? 80: 68; } break; default: // TODO break; } } addr += word; paddr += word; } free (mh); return ret; } struct MACH0_(mach_header) *MACH0_(get_hdr)(RBuffer *buf) { ut8 magicbytes[sizeof (ut32)] = {0}; ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0}; int len; struct MACH0_(mach_header) *macho_hdr = R_NEW0 (struct MACH0_(mach_header)); bool big_endian = false; if (!macho_hdr) { return NULL; } if (r_buf_read_at (buf, 0, magicbytes, 4) < 1) { free (macho_hdr); return false; } if (r_read_le32 (magicbytes) == 0xfeedface) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedface) { big_endian = true; } else if (r_read_le32 (magicbytes) == FAT_MAGIC) { big_endian = false; } else if (r_read_be32 (magicbytes) == FAT_MAGIC) { big_endian = true; } else if (r_read_le32 (magicbytes) == 0xfeedfacf) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedfacf) { big_endian = true; } else { /* also extract non-mach0s */ #if 0 free (macho_hdr); return NULL; #endif } len = r_buf_read_at (buf, 0, machohdrbytes, sizeof (machohdrbytes)); if (len != sizeof (struct MACH0_(mach_header))) { free (macho_hdr); return NULL; } macho_hdr->magic = r_read_ble (&machohdrbytes[0], big_endian, 32); macho_hdr->cputype = r_read_ble (&machohdrbytes[4], big_endian, 32); macho_hdr->cpusubtype = r_read_ble (&machohdrbytes[8], big_endian, 32); macho_hdr->filetype = r_read_ble (&machohdrbytes[12], big_endian, 32); macho_hdr->ncmds = r_read_ble (&machohdrbytes[16], big_endian, 32); macho_hdr->sizeofcmds = r_read_ble (&machohdrbytes[20], big_endian, 32); macho_hdr->flags = 
r_read_ble (&machohdrbytes[24], big_endian, 32); #if R_BIN_MACH064 macho_hdr->reserved = r_read_ble (&machohdrbytes[28], big_endian, 32); #endif return macho_hdr; } void MACH0_(iterate_chained_fixups)(struct MACH0_(obj_t) *bin, ut64 limit_start, ut64 limit_end, ut32 event_mask, RFixupCallback callback, void * context) { int i = 0; for (; i < bin->nsegs && i < bin->segs_count; i++) { if (!bin->chained_starts[i]) { continue; } int page_size = bin->chained_starts[i]->page_size; if (page_size < 1) { page_size = 4096; } ut64 start = bin->segs[i].fileoff; ut64 end = start + bin->segs[i].filesize; if (end >= limit_start && start <= limit_end) { ut64 page_idx = (R_MAX (start, limit_start) - start) / page_size; ut64 page_end_idx = (R_MIN (limit_end, end) - start) / page_size; for (; page_idx <= page_end_idx; page_idx++) { if (page_idx >= bin->chained_starts[i]->page_count) { break; } ut16 page_start = bin->chained_starts[i]->page_start[page_idx]; if (page_start == DYLD_CHAINED_PTR_START_NONE) { continue; } ut64 cursor = start + page_idx * page_size + page_start; while (cursor < limit_end && cursor < end) { ut8 tmp[8]; bool previous_rebasing = bin->rebasing_buffer; bin->rebasing_buffer = true; if (r_buf_read_at (bin->b, cursor, tmp, 8) != 8) { bin->rebasing_buffer = previous_rebasing; break; } bin->rebasing_buffer = previous_rebasing; ut64 raw_ptr = r_read_le64 (tmp); ut64 ptr_value = raw_ptr; ut64 delta, stride, addend; ut16 pointer_format = bin->chained_starts[i]->pointer_format; RFixupEvent event = R_FIXUP_EVENT_NONE; ut8 key = 0, addr_div = 0; ut16 diversity = 0; ut32 ordinal = UT32_MAX; if (pointer_format == DYLD_CHAINED_PTR_ARM64E) { stride = 8; bool is_auth = IS_PTR_AUTH (raw_ptr); bool is_bind = IS_PTR_BIND (raw_ptr); if (is_auth && is_bind) { struct dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; delta = p->next; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = 
p->diversity; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND; delta = p->next; ordinal = p->ordinal; addend = p->addend; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; } } else if (pointer_format == DYLD_CHAINED_PTR_ARM64E_USERLAND24) { stride = 8; struct dyld_chained_ptr_arm64e_bind24 *bind = (struct dyld_chained_ptr_arm64e_bind24 *) &raw_ptr; if (bind->bind) { delta = bind->next; if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_bind24 *p = (struct dyld_chained_ptr_arm64e_auth_bind24 *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { event = R_FIXUP_EVENT_BIND; ordinal = bind->ordinal; addend = bind->addend; } } else { if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } } else if (pointer_format == DYLD_CHAINED_PTR_64_OFFSET) { stride = 4; struct dyld_chained_ptr_64_bind *bind = (struct dyld_chained_ptr_64_bind *) &raw_ptr; if (bind->bind) { event = R_FIXUP_EVENT_BIND; delta = bind->next; ordinal = 
bind->ordinal; addend = bind->addend; } else { struct dyld_chained_ptr_64_rebase *p = (struct dyld_chained_ptr_64_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } else { eprintf ("Unsupported chained pointer format %d\n", pointer_format); return; } if (cursor >= limit_start && cursor <= limit_end - 8 && (event & event_mask) != 0) { bool carry_on; switch (event) { case R_FIXUP_EVENT_BIND: { RFixupBindEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.addend = addend; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_BIND_AUTH: { RFixupBindAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE: { RFixupRebaseEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE_AUTH: { RFixupRebaseAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } default: eprintf ("Unexpected event while iterating chained fixups\n"); carry_on = false; } if (!carry_on) { 
return; } } cursor += delta * stride; if (!delta) { break; } } } } } }
void MACH0_(iterate_chained_fixups)(struct MACH0_(obj_t) *bin, ut64 limit_start, ut64 limit_end, ut32 event_mask, RFixupCallback callback, void * context) { int i = 0; for (; i < bin->nsegs; i++) { if (!bin->chained_starts[i]) { continue; } int page_size = bin->chained_starts[i]->page_size; if (page_size < 1) { page_size = 4096; } ut64 start = bin->segs[i].fileoff; ut64 end = start + bin->segs[i].filesize; if (end >= limit_start && start <= limit_end) { ut64 page_idx = (R_MAX (start, limit_start) - start) / page_size; ut64 page_end_idx = (R_MIN (limit_end, end) - start) / page_size; for (; page_idx <= page_end_idx; page_idx++) { if (page_idx >= bin->chained_starts[i]->page_count) { break; } ut16 page_start = bin->chained_starts[i]->page_start[page_idx]; if (page_start == DYLD_CHAINED_PTR_START_NONE) { continue; } ut64 cursor = start + page_idx * page_size + page_start; while (cursor < limit_end && cursor < end) { ut8 tmp[8]; bool previous_rebasing = bin->rebasing_buffer; bin->rebasing_buffer = true; if (r_buf_read_at (bin->b, cursor, tmp, 8) != 8) { bin->rebasing_buffer = previous_rebasing; break; } bin->rebasing_buffer = previous_rebasing; ut64 raw_ptr = r_read_le64 (tmp); ut64 ptr_value = raw_ptr; ut64 delta, stride, addend; ut16 pointer_format = bin->chained_starts[i]->pointer_format; RFixupEvent event = R_FIXUP_EVENT_NONE; ut8 key = 0, addr_div = 0; ut16 diversity = 0; ut32 ordinal = UT32_MAX; if (pointer_format == DYLD_CHAINED_PTR_ARM64E) { stride = 8; bool is_auth = IS_PTR_AUTH (raw_ptr); bool is_bind = IS_PTR_BIND (raw_ptr); if (is_auth && is_bind) { struct dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; delta = p->next; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND; delta = p->next; ordinal = 
p->ordinal; addend = p->addend; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; } } else if (pointer_format == DYLD_CHAINED_PTR_ARM64E_USERLAND24) { stride = 8; struct dyld_chained_ptr_arm64e_bind24 *bind = (struct dyld_chained_ptr_arm64e_bind24 *) &raw_ptr; if (bind->bind) { delta = bind->next; if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_bind24 *p = (struct dyld_chained_ptr_arm64e_auth_bind24 *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { event = R_FIXUP_EVENT_BIND; ordinal = bind->ordinal; addend = bind->addend; } } else { if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } } else if (pointer_format == DYLD_CHAINED_PTR_64_OFFSET) { stride = 4; struct dyld_chained_ptr_64_bind *bind = (struct dyld_chained_ptr_64_bind *) &raw_ptr; if (bind->bind) { event = R_FIXUP_EVENT_BIND; delta = bind->next; ordinal = bind->ordinal; addend = bind->addend; } else { struct dyld_chained_ptr_64_rebase *p = (struct dyld_chained_ptr_64_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = 
bin->baddr + (((ut64)p->high8 << 56) | p->target); } } else { eprintf ("Unsupported chained pointer format %d\n", pointer_format); return; } if (cursor >= limit_start && cursor <= limit_end - 8 && (event & event_mask) != 0) { bool carry_on; switch (event) { case R_FIXUP_EVENT_BIND: { RFixupBindEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.addend = addend; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_BIND_AUTH: { RFixupBindAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE: { RFixupRebaseEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE_AUTH: { RFixupRebaseAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } default: eprintf ("Unexpected event while iterating chained fixups\n"); carry_on = false; } if (!carry_on) { return; } } cursor += delta * stride; if (!delta) { break; } } } } } }
void MACH0_(iterate_chained_fixups)(struct MACH0_(obj_t) *bin, ut64 limit_start, ut64 limit_end, ut32 event_mask, RFixupCallback callback, void * context) { int i = 0; for (; i < bin->nsegs && i < bin->segs_count; i++) { if (!bin->chained_starts[i]) { continue; } int page_size = bin->chained_starts[i]->page_size; if (page_size < 1) { page_size = 4096; } ut64 start = bin->segs[i].fileoff; ut64 end = start + bin->segs[i].filesize; if (end >= limit_start && start <= limit_end) { ut64 page_idx = (R_MAX (start, limit_start) - start) / page_size; ut64 page_end_idx = (R_MIN (limit_end, end) - start) / page_size; for (; page_idx <= page_end_idx; page_idx++) { if (page_idx >= bin->chained_starts[i]->page_count) { break; } ut16 page_start = bin->chained_starts[i]->page_start[page_idx]; if (page_start == DYLD_CHAINED_PTR_START_NONE) { continue; } ut64 cursor = start + page_idx * page_size + page_start; while (cursor < limit_end && cursor < end) { ut8 tmp[8]; bool previous_rebasing = bin->rebasing_buffer; bin->rebasing_buffer = true; if (r_buf_read_at (bin->b, cursor, tmp, 8) != 8) { bin->rebasing_buffer = previous_rebasing; break; } bin->rebasing_buffer = previous_rebasing; ut64 raw_ptr = r_read_le64 (tmp); ut64 ptr_value = raw_ptr; ut64 delta, stride, addend; ut16 pointer_format = bin->chained_starts[i]->pointer_format; RFixupEvent event = R_FIXUP_EVENT_NONE; ut8 key = 0, addr_div = 0; ut16 diversity = 0; ut32 ordinal = UT32_MAX; if (pointer_format == DYLD_CHAINED_PTR_ARM64E) { stride = 8; bool is_auth = IS_PTR_AUTH (raw_ptr); bool is_bind = IS_PTR_BIND (raw_ptr); if (is_auth && is_bind) { struct dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; delta = p->next; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; event = R_FIXUP_EVENT_BIND; 
delta = p->next; ordinal = p->ordinal; addend = p->addend; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; } } else if (pointer_format == DYLD_CHAINED_PTR_ARM64E_USERLAND24) { stride = 8; struct dyld_chained_ptr_arm64e_bind24 *bind = (struct dyld_chained_ptr_arm64e_bind24 *) &raw_ptr; if (bind->bind) { delta = bind->next; if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_bind24 *p = (struct dyld_chained_ptr_arm64e_auth_bind24 *) &raw_ptr; event = R_FIXUP_EVENT_BIND_AUTH; ordinal = p->ordinal; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { event = R_FIXUP_EVENT_BIND; ordinal = bind->ordinal; addend = bind->addend; } } else { if (bind->auth) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE_AUTH; delta = p->next; ptr_value = p->target + bin->baddr; key = p->key; addr_div = p->addrDiv; diversity = p->diversity; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } } else if (pointer_format == DYLD_CHAINED_PTR_64_OFFSET) { stride = 4; struct dyld_chained_ptr_64_bind *bind = (struct dyld_chained_ptr_64_bind *) &raw_ptr; if (bind->bind) { event = R_FIXUP_EVENT_BIND; delta = bind->next; ordinal = bind->ordinal; addend = bind->addend; } else { struct dyld_chained_ptr_64_rebase *p = (struct dyld_chained_ptr_64_rebase *) &raw_ptr; event = R_FIXUP_EVENT_REBASE; delta = 
p->next; ptr_value = bin->baddr + (((ut64)p->high8 << 56) | p->target); } } else { eprintf ("Unsupported chained pointer format %d\n", pointer_format); return; } if (cursor >= limit_start && cursor <= limit_end - 8 && (event & event_mask) != 0) { bool carry_on; switch (event) { case R_FIXUP_EVENT_BIND: { RFixupBindEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.addend = addend; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_BIND_AUTH: { RFixupBindAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ordinal = ordinal; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE: { RFixupRebaseEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } case R_FIXUP_EVENT_REBASE_AUTH: { RFixupRebaseAuthEventDetails event_details; event_details.type = event; event_details.bin = bin; event_details.offset = cursor; event_details.raw_ptr = raw_ptr; event_details.ptr_value = ptr_value; event_details.key = key; event_details.addr_div = addr_div; event_details.diversity = diversity; carry_on = callback (context, (RFixupEventDetails *) &event_details); break; } default: eprintf ("Unexpected event while iterating chained fixups\n"); carry_on = false; } if (!carry_on) { return; } } cursor += delta * stride; if (!delta) { break; } } } } } }
{'added': [(1513, '\tut32 segs_count = r_buf_read_le32_at (bin->b, starts_at);'), (1514, '\tif (segs_count == UT32_MAX || segs_count == 0) {'), (1517, '\tbin->segs_count = segs_count;'), (1703, '\tbin->segs_count = bin->nsegs;'), (2129, '\t\tfor (i = 0; i < mo->nsegs && i < mo->segs_count; i++) {'), (4563, '\tfor (; i < bin->nsegs && i < bin->segs_count; i++) {')], 'deleted': [(1513, '\tut32 segs_count;'), (1514, '\tif ((segs_count = r_buf_read_le32_at (bin->b, starts_at)) == UT32_MAX) {'), (2127, '\t\tfor (i = 0; i < mo->nsegs; i++) {'), (4561, '\tfor (; i < bin->nsegs; i++) {')]}
6
4
4,358
30,740
201
1,234
34
https://github.com/radareorg/radare2
CVE-2022-1052
CWE-125
2,307
url.c
C
Curl_close
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "curl_setup.h" #ifdef HAVE_NETINET_IN_H #include <netinet/in.h> #endif #ifdef HAVE_NETDB_H #include <netdb.h> #endif #ifdef HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #ifdef HAVE_NET_IF_H #include <net/if.h> #endif #ifdef HAVE_SYS_IOCTL_H #include <sys/ioctl.h> #endif #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef __VMS #include <in.h> #include <inet.h> #endif #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif #ifndef HAVE_SOCKET #error "We can't compile without socket() support!" 
#endif #include <limits.h> #ifdef USE_LIBIDN2 #include <idn2.h> #elif defined(USE_WIN32_IDN) /* prototype for curl_win32_idn_to_ascii() */ bool curl_win32_idn_to_ascii(const char *in, char **out); #endif /* USE_LIBIDN2 */ #include "urldata.h" #include "netrc.h" #include "formdata.h" #include "mime.h" #include "vtls/vtls.h" #include "hostip.h" #include "transfer.h" #include "sendf.h" #include "progress.h" #include "cookie.h" #include "strcase.h" #include "strerror.h" #include "escape.h" #include "strtok.h" #include "share.h" #include "content_encoding.h" #include "http_digest.h" #include "http_negotiate.h" #include "select.h" #include "multiif.h" #include "easyif.h" #include "speedcheck.h" #include "warnless.h" #include "non-ascii.h" #include "inet_pton.h" #include "getinfo.h" #include "urlapi-int.h" /* And now for the protocols */ #include "ftp.h" #include "dict.h" #include "telnet.h" #include "tftp.h" #include "http.h" #include "http2.h" #include "file.h" #include "curl_ldap.h" #include "ssh.h" #include "imap.h" #include "url.h" #include "connect.h" #include "inet_ntop.h" #include "http_ntlm.h" #include "curl_ntlm_wb.h" #include "socks.h" #include "curl_rtmp.h" #include "gopher.h" #include "http_proxy.h" #include "conncache.h" #include "multihandle.h" #include "pipeline.h" #include "dotdot.h" #include "strdup.h" #include "setopt.h" /* The last 3 #include files should be in this order */ #include "curl_printf.h" #include "curl_memory.h" #include "memdebug.h" static void conn_free(struct connectdata *conn); static void free_fixed_hostname(struct hostname *host); static unsigned int get_protocol_family(unsigned int protocol); /* Some parts of the code (e.g. chunked encoding) assume this buffer has at * more than just a few bytes to play with. Don't let it become too small or * bad things will happen. */ #if READBUFFER_SIZE < READBUFFER_MIN # error READBUFFER_SIZE is too small #endif /* * Protocol table. 
*/ static const struct Curl_handler * const protocols[] = { #ifndef CURL_DISABLE_HTTP &Curl_handler_http, #endif #if defined(USE_SSL) && !defined(CURL_DISABLE_HTTP) &Curl_handler_https, #endif #ifndef CURL_DISABLE_FTP &Curl_handler_ftp, #endif #if defined(USE_SSL) && !defined(CURL_DISABLE_FTP) &Curl_handler_ftps, #endif #ifndef CURL_DISABLE_TELNET &Curl_handler_telnet, #endif #ifndef CURL_DISABLE_DICT &Curl_handler_dict, #endif #ifndef CURL_DISABLE_LDAP &Curl_handler_ldap, #if !defined(CURL_DISABLE_LDAPS) && \ ((defined(USE_OPENLDAP) && defined(USE_SSL)) || \ (!defined(USE_OPENLDAP) && defined(HAVE_LDAP_SSL))) &Curl_handler_ldaps, #endif #endif #ifndef CURL_DISABLE_FILE &Curl_handler_file, #endif #ifndef CURL_DISABLE_TFTP &Curl_handler_tftp, #endif #if defined(USE_LIBSSH2) || defined(USE_LIBSSH) &Curl_handler_scp, #endif #if defined(USE_LIBSSH2) || defined(USE_LIBSSH) &Curl_handler_sftp, #endif #ifndef CURL_DISABLE_IMAP &Curl_handler_imap, #ifdef USE_SSL &Curl_handler_imaps, #endif #endif #ifndef CURL_DISABLE_POP3 &Curl_handler_pop3, #ifdef USE_SSL &Curl_handler_pop3s, #endif #endif #if !defined(CURL_DISABLE_SMB) && defined(USE_NTLM) && \ (CURL_SIZEOF_CURL_OFF_T > 4) && \ (!defined(USE_WINDOWS_SSPI) || defined(USE_WIN32_CRYPTO)) &Curl_handler_smb, #ifdef USE_SSL &Curl_handler_smbs, #endif #endif #ifndef CURL_DISABLE_SMTP &Curl_handler_smtp, #ifdef USE_SSL &Curl_handler_smtps, #endif #endif #ifndef CURL_DISABLE_RTSP &Curl_handler_rtsp, #endif #ifndef CURL_DISABLE_GOPHER &Curl_handler_gopher, #endif #ifdef USE_LIBRTMP &Curl_handler_rtmp, &Curl_handler_rtmpt, &Curl_handler_rtmpe, &Curl_handler_rtmpte, &Curl_handler_rtmps, &Curl_handler_rtmpts, #endif (struct Curl_handler *) NULL }; /* * Dummy handler for undefined protocol schemes. 
*/ static const struct Curl_handler Curl_handler_dummy = { "<no protocol>", /* scheme */ ZERO_NULL, /* setup_connection */ ZERO_NULL, /* do_it */ ZERO_NULL, /* done */ ZERO_NULL, /* do_more */ ZERO_NULL, /* connect_it */ ZERO_NULL, /* connecting */ ZERO_NULL, /* doing */ ZERO_NULL, /* proto_getsock */ ZERO_NULL, /* doing_getsock */ ZERO_NULL, /* domore_getsock */ ZERO_NULL, /* perform_getsock */ ZERO_NULL, /* disconnect */ ZERO_NULL, /* readwrite */ ZERO_NULL, /* connection_check */ 0, /* defport */ 0, /* protocol */ PROTOPT_NONE /* flags */ }; void Curl_freeset(struct Curl_easy *data) { /* Free all dynamic strings stored in the data->set substructure. */ enum dupstring i; for(i = (enum dupstring)0; i < STRING_LAST; i++) { Curl_safefree(data->set.str[i]); } if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; if(data->change.url_alloc) { Curl_safefree(data->change.url); data->change.url_alloc = FALSE; } data->change.url = NULL; Curl_mime_cleanpart(&data->set.mimepost); } /* free the URL pieces */ void Curl_up_free(struct Curl_easy *data) { struct urlpieces *up = &data->state.up; Curl_safefree(up->scheme); Curl_safefree(up->hostname); Curl_safefree(up->port); Curl_safefree(up->user); Curl_safefree(up->password); Curl_safefree(up->options); Curl_safefree(up->path); Curl_safefree(up->query); curl_url_cleanup(data->state.uh); data->state.uh = NULL; } /* * This is the internal function curl_easy_cleanup() calls. This should * cleanup and free all resources associated with this sessionhandle. * * NOTE: if we ever add something that attempts to write to a socket or * similar here, we must ignore SIGPIPE first. It is currently only done * when curl_easy_perform() is invoked. 
*/

/*
 * Curl_close() frees an easy handle and everything it owns. The order of
 * operations below is significant: the handle is detached from any multi
 * handle *before* the magic is cleared (the removal code validates the
 * magic), and the resolver channel is destroyed only after all users of it.
 */
CURLcode Curl_close(struct Curl_easy *data)
{
  struct Curl_multi *m;

  if(!data)
    return CURLE_OK;

  Curl_expire_clear(data); /* shut off timers */

  m = data->multi;
  if(m)
    /* This handle is still part of a multi handle, take care of this first
       and detach this handle from there. */
    curl_multi_remove_handle(data->multi, data);

  if(data->multi_easy)
    /* when curl_easy_perform() is used, it creates its own multi handle to
       use and this is the one */
    curl_multi_cleanup(data->multi_easy);

  /* Destroy the timeout list that is held in the easy handle. It is
     /normally/ done by curl_multi_remove_handle() but this is "just in
     case" */
  Curl_llist_destroy(&data->state.timeoutlist, NULL);

  data->magic = 0; /* force a clear AFTER the possibly enforced removal from
                      the multi handle, since that function uses the magic
                      field! */

  /* the range string is only owned (and thus freed) when it was allocated
     by libcurl itself, not when it aliases a user-set string */
  if(data->state.rangestringalloc)
    free(data->state.range);

  /* freed here just in case DONE wasn't called */
  Curl_free_request_state(data);

  /* Close down all open SSL info and sessions */
  Curl_ssl_close_all(data);
  Curl_safefree(data->state.first_host);
  Curl_safefree(data->state.scratch);
  Curl_ssl_free_certinfo(data);

  /* Cleanup possible redirect junk */
  free(data->req.newurl);
  data->req.newurl = NULL;

  if(data->change.referer_alloc) {
    Curl_safefree(data->change.referer);
    data->change.referer_alloc = FALSE;
  }
  data->change.referer = NULL;

  Curl_up_free(data);
  Curl_safefree(data->state.buffer);
  Curl_safefree(data->state.headerbuff);
  Curl_safefree(data->state.ulbuf);
  Curl_flush_cookies(data, 1);
  Curl_digest_cleanup(data);
  Curl_safefree(data->info.contenttype);
  Curl_safefree(data->info.wouldredirect);

  /* this destroys the channel and we cannot use it anymore after this */
  Curl_resolver_cleanup(data->state.resolver);

  Curl_http2_cleanup_dependencies(data);
  Curl_convert_close(data);

  /* No longer a dirty share, if it exists */
  if(data->share) {
    Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
    data->share->dirty--;
    Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
  }

  /* destruct wildcard structures if it is needed */
  Curl_wildcard_dtor(&data->wildcard);
  Curl_freeset(data);
  free(data);
  return CURLE_OK;
}

/*
 * Initialize the UserDefined fields within a Curl_easy.
 * This may be safely called on a new or existing Curl_easy.
 *
 * Sets every CURLOPT_* default. Only the CA file/path setup can fail
 * (string allocation), in which case the error code is returned early.
 */
CURLcode Curl_init_userdefined(struct Curl_easy *data)
{
  struct UserDefined *set = &data->set;
  CURLcode result = CURLE_OK;

  set->out = stdout; /* default output to stdout */
  set->in_set = stdin;  /* default input from stdin */
  set->err = stderr;  /* default stderr to stderr */

  /* use fwrite as default function to store output */
  set->fwrite_func = (curl_write_callback)fwrite;

  /* use fread as default function to read input */
  set->fread_func_set = (curl_read_callback)fread;
  set->is_fread_set = 0;
  set->is_fwrite_set = 0;

  set->seek_func = ZERO_NULL;
  set->seek_client = ZERO_NULL;

  /* conversion callbacks for non-ASCII hosts */
  set->convfromnetwork = ZERO_NULL;
  set->convtonetwork = ZERO_NULL;
  set->convfromutf8 = ZERO_NULL;

  set->filesize = -1;        /* we don't know the size */
  set->postfieldsize = -1;   /* unknown size */
  set->maxredirs = -1;       /* allow any amount by default */

  set->httpreq = HTTPREQ_GET; /* Default HTTP request */
  set->rtspreq = RTSPREQ_OPTIONS; /* Default RTSP request */
  set->ftp_use_epsv = TRUE;   /* FTP defaults to EPSV operations */
  set->ftp_use_eprt = TRUE;   /* FTP defaults to EPRT operations */
  set->ftp_use_pret = FALSE;  /* mainly useful for drftpd servers */
  set->ftp_filemethod = FTPFILE_MULTICWD;

  set->dns_cache_timeout = 60; /* Timeout every 60 seconds by default */

  /* Set the default size of the SSL session ID cache */
  set->general_ssl.max_ssl_sessions = 5;

  set->proxyport = 0;
  set->proxytype = CURLPROXY_HTTP; /* defaults to HTTP proxy */
  set->httpauth = CURLAUTH_BASIC;  /* defaults to basic */
  set->proxyauth = CURLAUTH_BASIC; /* defaults to basic */

  /* SOCKS5 proxy auth defaults to username/password + GSS-API */
  set->socks5auth = CURLAUTH_BASIC | CURLAUTH_GSSAPI;

  /* make libcurl quiet by default: */
  set->hide_progress = TRUE;  /* CURLOPT_NOPROGRESS changes these */

  Curl_mime_initpart(&set->mimepost, data);

  /*
   * libcurl 7.10 introduced SSL verification *by default*! This needs to be
   * switched off unless wanted.
   */
  set->ssl.primary.verifypeer = TRUE;
  set->ssl.primary.verifyhost = TRUE;
#ifdef USE_TLS_SRP
  set->ssl.authtype = CURL_TLSAUTH_NONE;
#endif
  set->ssh_auth_types = CURLSSH_AUTH_DEFAULT; /* defaults to any auth
                                                 type */
  set->ssl.primary.sessionid = TRUE; /* session ID caching enabled by
                                        default */
  /* proxy SSL settings start out as a copy of the peer SSL settings */
  set->proxy_ssl = set->ssl;

  set->new_file_perms = 0644;    /* Default permissions */
  set->new_directory_perms = 0755; /* Default permissions */

  /* for the *protocols fields we don't use the CURLPROTO_ALL convenience
     define since we internally only use the lower 16 bits for the passed
     in bitmask to not conflict with the private bits */
  set->allowed_protocols = CURLPROTO_ALL;
  set->redir_protocols = CURLPROTO_ALL &  /* All except FILE, SCP and SMB */
    ~(CURLPROTO_FILE | CURLPROTO_SCP | CURLPROTO_SMB | CURLPROTO_SMBS);

#if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI)
  /*
   * disallow unprotected protection negotiation NEC reference implementation
   * seem not to follow rfc1961 section 4.3/4.4
   */
  set->socks5_gssapi_nec = FALSE;
#endif

  /* Set the default CA cert bundle/path detected/specified at build time.
   *
   * If Schannel (WinSSL) is the selected SSL backend then these locations
   * are ignored. We allow setting CA location for schannel only when
   * explicitly specified by the user via CURLOPT_CAINFO / --cacert.
   */
  if(Curl_ssl_backend() != CURLSSLBACKEND_SCHANNEL) {
#if defined(CURL_CA_BUNDLE)
    result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_ORIG], CURL_CA_BUNDLE);
    if(result)
      return result;

    result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_PROXY],
                            CURL_CA_BUNDLE);
    if(result)
      return result;
#endif
#if defined(CURL_CA_PATH)
    result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_ORIG], CURL_CA_PATH);
    if(result)
      return result;

    result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_PROXY], CURL_CA_PATH);
    if(result)
      return result;
#endif
  }

  set->wildcard_enabled = FALSE;
  set->chunk_bgn = ZERO_NULL;
  set->chunk_end = ZERO_NULL;
  set->tcp_keepalive = FALSE;
  set->tcp_keepintvl = 60;
  set->tcp_keepidle = 60;
  set->tcp_fastopen = FALSE;
  set->tcp_nodelay = TRUE;
  set->ssl_enable_npn = TRUE;
  set->ssl_enable_alpn = TRUE;
  set->expect_100_timeout = 1000L; /* Wait for a second by default. */
  set->sep_headers = TRUE; /* separated header lists by default */
  set->buffer_size = READBUFFER_SIZE;
  set->upload_buffer_size = UPLOADBUFFER_DEFAULT;
  set->happy_eyeballs_timeout = CURL_HET_DEFAULT;
  set->fnmatch = ZERO_NULL;
  set->upkeep_interval_ms = CURL_UPKEEP_INTERVAL_DEFAULT;
  set->maxconnects = DEFAULT_CONNCACHE_SIZE; /* for easy handles */
  /* default HTTP version depends on whether HTTP/2 support was built in */
  set->httpversion =
#ifdef USE_NGHTTP2
    CURL_HTTP_VERSION_2TLS
#else
    CURL_HTTP_VERSION_1_1
#endif
    ;
  Curl_http2_init_userset(set);
  return result;
}

/**
 * Curl_open()
 *
 * @param curl is a pointer to a sessionhandle pointer that gets set by this
 * function.
 * @return CURLcode
 */
CURLcode Curl_open(struct Curl_easy **curl)
{
  CURLcode result;
  struct Curl_easy *data;

  /* Very simple start-up: alloc the struct, init it with zeroes and return */
  data = calloc(1, sizeof(struct Curl_easy));
  if(!data) {
    /* this is a very serious error */
    DEBUGF(fprintf(stderr, "Error: calloc of Curl_easy failed\n"));
    return CURLE_OUT_OF_MEMORY;
  }

  data->magic = CURLEASY_MAGIC_NUMBER;

  result = Curl_resolver_init(&data->state.resolver);
  if(result) {
    DEBUGF(fprintf(stderr, "Error: resolver_init failed\n"));
    free(data);
    return result;
  }

  /* We do some initial setup here, all those fields that can't be just 0 */
  /* +1 leaves room for a terminating zero byte */
  data->state.buffer = malloc(READBUFFER_SIZE + 1);
  if(!data->state.buffer) {
    DEBUGF(fprintf(stderr, "Error: malloc of buffer failed\n"));
    result = CURLE_OUT_OF_MEMORY;
  }
  else {
    data->state.headerbuff = malloc(HEADERSIZE);
    if(!data->state.headerbuff) {
      DEBUGF(fprintf(stderr, "Error: malloc of headerbuff failed\n"));
      result = CURLE_OUT_OF_MEMORY;
    }
    else {
      result = Curl_init_userdefined(data);

      data->state.headersize = HEADERSIZE;
      Curl_convert_init(data);
      Curl_initinfo(data);

      /* most recent connection is not yet defined */
      data->state.lastconnect = NULL;

      data->progress.flags |= PGRS_HIDE;
      data->state.current_speed = -1; /* init to negative == impossible */

      Curl_http2_init_state(&data->state);
    }
  }

  if(result) {
    /* roll back everything allocated above; free(NULL) is harmless for the
       members that were never allocated */
    Curl_resolver_cleanup(data->state.resolver);
    free(data->state.buffer);
    free(data->state.headerbuff);
    Curl_freeset(data);
    free(data);
    data = NULL;
  }
  else
    *curl = data;

  return result;
}

#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
/* Free and reset one of the two postponed-data slots of a connection. The
   DEBUGASSERTs document the invariants of a populated vs empty slot. */
static void conn_reset_postponed_data(struct connectdata *conn, int num)
{
  struct postponed_data * const psnd = &(conn->postponed[num]);
  if(psnd->buffer) {
    DEBUGASSERT(psnd->allocated_size > 0);
    DEBUGASSERT(psnd->recv_size <= psnd->allocated_size);
    DEBUGASSERT(psnd->recv_size ?
                (psnd->recv_processed < psnd->recv_size) :
                (psnd->recv_processed == 0));
    DEBUGASSERT(psnd->bindsock != CURL_SOCKET_BAD);
    free(psnd->buffer);
    psnd->buffer = NULL;
    psnd->allocated_size = 0;
    psnd->recv_size = 0;
    psnd->recv_processed = 0;
#ifdef DEBUGBUILD
    psnd->bindsock = CURL_SOCKET_BAD; /* used only for DEBUGASSERT */
#endif /* DEBUGBUILD */
  }
  else {
    /* slot is empty: all counters must already be zeroed */
    DEBUGASSERT(psnd->allocated_size == 0);
    DEBUGASSERT(psnd->recv_size == 0);
    DEBUGASSERT(psnd->recv_processed == 0);
    DEBUGASSERT(psnd->bindsock == CURL_SOCKET_BAD);
  }
}

/* Reset both postponed-data slots of the connection */
static void conn_reset_all_postponed_data(struct connectdata *conn)
{
  conn_reset_postponed_data(conn, 0);
  conn_reset_postponed_data(conn, 1);
}

#else  /* ! USE_RECV_BEFORE_SEND_WORKAROUND */
/* Use "do-nothing" macro instead of function when workaround not used */
#define conn_reset_all_postponed_data(c) do {} WHILE_FALSE
#endif /* ! USE_RECV_BEFORE_SEND_WORKAROUND */

/*
 * conn_free() releases everything a connectdata owns and finally the struct
 * itself. SSL is shut down before the sockets are closed since the SSL
 * layer may still write to them.
 */
static void conn_free(struct connectdata *conn)
{
  if(!conn)
    return;

  /* possible left-overs from the async name resolvers */
  Curl_resolver_cancel(conn);

  /* close the SSL stuff before we close any sockets since they will/may
     write to the sockets */
  Curl_ssl_close(conn, FIRSTSOCKET);
  Curl_ssl_close(conn, SECONDARYSOCKET);

  /* close possibly still open sockets */
  if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET])
    Curl_closesocket(conn, conn->sock[SECONDARYSOCKET]);
  if(CURL_SOCKET_BAD != conn->sock[FIRSTSOCKET])
    Curl_closesocket(conn, conn->sock[FIRSTSOCKET]);
  if(CURL_SOCKET_BAD != conn->tempsock[0])
    Curl_closesocket(conn, conn->tempsock[0]);
  if(CURL_SOCKET_BAD != conn->tempsock[1])
    Curl_closesocket(conn, conn->tempsock[1]);

#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \
    defined(NTLM_WB_ENABLED)
  Curl_ntlm_wb_cleanup(conn);
#endif

  Curl_safefree(conn->user);
  Curl_safefree(conn->passwd);
  Curl_safefree(conn->oauth_bearer);
  Curl_safefree(conn->options);
  Curl_safefree(conn->http_proxy.user);
  Curl_safefree(conn->socks_proxy.user);
  Curl_safefree(conn->http_proxy.passwd);
  Curl_safefree(conn->socks_proxy.passwd);
  Curl_safefree(conn->allocptr.proxyuserpwd);
  Curl_safefree(conn->allocptr.uagent);
  Curl_safefree(conn->allocptr.userpwd);
  Curl_safefree(conn->allocptr.accept_encoding);
  Curl_safefree(conn->allocptr.te);
  Curl_safefree(conn->allocptr.rangeline);
  Curl_safefree(conn->allocptr.ref);
  Curl_safefree(conn->allocptr.host);
  Curl_safefree(conn->allocptr.cookiehost);
  Curl_safefree(conn->allocptr.rtsp_transport);
  Curl_safefree(conn->trailer);
  Curl_safefree(conn->host.rawalloc); /* host name buffer */
  Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */
  Curl_safefree(conn->secondaryhostname);
  Curl_safefree(conn->http_proxy.host.rawalloc); /* http proxy name buffer */
  Curl_safefree(conn->socks_proxy.host.rawalloc); /* socks proxy name buffer */
  Curl_safefree(conn->master_buffer);
  Curl_safefree(conn->connect_state);

  conn_reset_all_postponed_data(conn);

  Curl_llist_destroy(&conn->send_pipe, NULL);
  Curl_llist_destroy(&conn->recv_pipe, NULL);

  Curl_safefree(conn->localdev);
  Curl_free_primary_ssl_config(&conn->ssl_config);
  Curl_free_primary_ssl_config(&conn->proxy_ssl_config);

#ifdef USE_UNIX_SOCKETS
  Curl_safefree(conn->unix_domain_socket);
#endif

#ifdef USE_SSL
  Curl_safefree(conn->ssl_extra);
#endif
  free(conn); /* free all the connection oriented data */
}

/*
 * Disconnects the given connection. Note the connection may not be the
 * primary connection, like when freeing room in the connection cache or
 * killing of a dead old connection.
 *
 * A connection needs an easy handle when closing down. We support this passed
 * in separately since the connection to get closed here is often already
 * disassociated from an easy handle.
 *
 * This function MUST NOT reset state in the Curl_easy struct if that
 * isn't strictly bound to the life-time of *this* particular connection.
 *
 */
CURLcode Curl_disconnect(struct Curl_easy *data,
                         struct connectdata *conn, bool dead_connection)
{
  if(!conn)
    return CURLE_OK; /* this is closed and fine already */

  if(!data) {
    DEBUGF(infof(data, "DISCONNECT without easy handle, ignoring\n"));
    return CURLE_OK;
  }

  /*
   * If this connection isn't marked to force-close, leave it open if there
   * are other users of it
   */
  if(CONN_INUSE(conn) && !dead_connection) {
    DEBUGF(infof(data, "Curl_disconnect when inuse: %zu\n",
                 CONN_INUSE(conn)));
    return CURLE_OK;
  }

  conn->data = data;

  if(conn->dns_entry != NULL) {
    Curl_resolv_unlock(data, conn->dns_entry);
    conn->dns_entry = NULL;
  }

  Curl_hostcache_prune(data); /* kill old DNS cache entries */

#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM)
  /* Cleanup NTLM connection-related data */
  Curl_http_ntlm_cleanup(conn);
#endif

  if(conn->handler->disconnect)
    /* This is set if protocol-specific cleanups should be made */
    conn->handler->disconnect(conn, dead_connection);

  /* unlink ourselves! */
  infof(data, "Closing connection %ld\n", conn->connection_id);
  Curl_conncache_remove_conn(conn, TRUE);

  free_fixed_hostname(&conn->host);
  free_fixed_hostname(&conn->conn_to_host);
  free_fixed_hostname(&conn->http_proxy.host);
  free_fixed_hostname(&conn->socks_proxy.host);

  DEBUGASSERT(conn->data == data);
  /* this assumes that the pointer is still there after the connection was
     detected from the cache */
  Curl_ssl_close(conn, FIRSTSOCKET);

  conn_free(conn);
  return CURLE_OK;
}

/*
 * This function should return TRUE if the socket is to be assumed to
 * be dead. Most commonly this happens when the server has closed the
 * connection due to inactivity.
 */
static bool SocketIsDead(curl_socket_t sock)
{
  int sval;
  bool ret_val = TRUE;

  /* poll with zero timeout: readable/error means dead, timeout means alive */
  sval = SOCKET_READABLE(sock, 0);
  if(sval == 0)
    /* timeout */
    ret_val = FALSE;

  return ret_val;
}

/*
 * IsPipeliningPossible()
 *
 * Return a bitmask with the available pipelining and multiplexing options for
 * the given requested connection.
 */
static int IsPipeliningPossible(const struct Curl_easy *handle,
                                const struct connectdata *conn)
{
  int avail = 0;

  /* If a HTTP protocol and pipelining is enabled */
  if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
     (!conn->bits.protoconnstart || !conn->bits.close)) {

    if(Curl_pipeline_wanted(handle->multi, CURLPIPE_HTTP1) &&
       (handle->set.httpversion != CURL_HTTP_VERSION_1_0) &&
       (handle->set.httpreq == HTTPREQ_GET ||
        handle->set.httpreq == HTTPREQ_HEAD))
      /* didn't ask for HTTP/1.0 and a GET or HEAD */
      avail |= CURLPIPE_HTTP1;

    if(Curl_pipeline_wanted(handle->multi, CURLPIPE_MULTIPLEX) &&
       (handle->set.httpversion >= CURL_HTTP_VERSION_2))
      /* allows HTTP/2 */
      avail |= CURLPIPE_MULTIPLEX;
  }
  return avail;
}

/* Remove 'handle' from the given pipeline linked list, if present.
   Returns non-zero if a handle was removed */
int Curl_removeHandleFromPipeline(struct Curl_easy *handle,
                                  struct curl_llist *pipeline)
{
  if(pipeline) {
    struct curl_llist_element *curr;

    curr = pipeline->head;
    while(curr) {
      if(curr->ptr == handle) {
        Curl_llist_remove(pipeline, curr, NULL);
        return 1; /* we removed a handle */
      }
      curr = curr->next;
    }
  }
  return 0;
}

#if 0 /* this code is saved here as it is useful for debugging purposes */
static void Curl_printPipeline(struct curl_llist *pipeline)
{
  struct curl_llist_element *curr;

  curr = pipeline->head;
  while(curr) {
    struct Curl_easy *data = (struct Curl_easy *) curr->ptr;
    infof(data, "Handle in pipeline: %s\n", data->state.path);
    curr = curr->next;
  }
}
#endif

/* Return the easy handle at the head of the pipeline, or NULL if empty.
   In debug builds, also sanity-check every handle in the list. */
static struct Curl_easy* gethandleathead(struct curl_llist *pipeline)
{
  struct curl_llist_element *curr = pipeline->head;
#ifdef DEBUGBUILD
  {
    struct curl_llist_element *p = pipeline->head;
    while(p) {
      struct Curl_easy *e = p->ptr;
      DEBUGASSERT(GOOD_EASY_HANDLE(e));
      p = p->next;
    }
  }
#endif
  if(curr) {
    return (struct Curl_easy *) curr->ptr;
  }
  return NULL;
}

/* remove the specified connection from all (possible) pipelines and related
   queues */
void Curl_getoff_all_pipelines(struct Curl_easy *data,
                               struct connectdata *conn)
{
  if(!conn->bundle)
    return;
  if(conn->bundle->multiuse == BUNDLE_PIPELINING) {
    /* remember whether this handle was at the head of either pipe so the
       corresponding channel can be released after removal */
    bool recv_head = (conn->readchannel_inuse &&
                      Curl_recvpipe_head(data, conn));
    bool send_head = (conn->writechannel_inuse &&
                      Curl_sendpipe_head(data, conn));

    if(Curl_removeHandleFromPipeline(data, &conn->recv_pipe) && recv_head)
      Curl_pipeline_leave_read(conn);
    if(Curl_removeHandleFromPipeline(data, &conn->send_pipe) && send_head)
      Curl_pipeline_leave_write(conn);
  }
  else {
    (void)Curl_removeHandleFromPipeline(data, &conn->recv_pipe);
    (void)Curl_removeHandleFromPipeline(data, &conn->send_pipe);
  }
}

/* TRUE if the two proxy descriptions use the same type, port and
   (case-insensitive) host name */
static bool proxy_info_matches(const struct proxy_info* data,
                               const struct proxy_info* needle)
{
  if((data->proxytype == needle->proxytype) &&
     (data->port == needle->port) &&
     Curl_safe_strcasecompare(data->host.name, needle->host.name))
    return TRUE;

  return FALSE;
}

/*
 * This function checks if the given connection is dead and extracts it from
 * the connection cache if so.
 *
 * When this is called as a Curl_conncache_foreach() callback, the connection
 * cache lock is held!
 *
 * Returns TRUE if the connection was dead and extracted.
 */
static bool extract_if_dead(struct connectdata *conn,
                            struct Curl_easy *data)
{
  size_t pipeLen = conn->send_pipe.size + conn->recv_pipe.size;
  if(!pipeLen && !CONN_INUSE(conn)) {
    /* The check for a dead socket makes sense only if there are no
       handles in pipeline and the connection isn't already marked in
       use */
    bool dead;

    conn->data = data;
    if(conn->handler->connection_check) {
      /* The protocol has a special method for checking the state of the
         connection. Use it to check if the connection is dead. */
      unsigned int state;
      state = conn->handler->connection_check(conn, CONNCHECK_ISDEAD);
      dead = (state & CONNRESULT_DEAD);
    }
    else {
      /* Use the general method for determining the death of a connection */
      dead = SocketIsDead(conn->sock[FIRSTSOCKET]);
    }

    if(dead) {
      infof(data, "Connection %ld seems to be dead!\n", conn->connection_id);
      Curl_conncache_remove_conn(conn, FALSE);
      conn->data = NULL; /* detach */
      return TRUE;
    }
  }
  return FALSE;
}

/* carries the easy handle doing the pruning plus the connection (if any)
   that got extracted by the iteration callback */
struct prunedead {
  struct Curl_easy *data;
  struct connectdata *extracted;
};

/*
 * Wrapper to use extract_if_dead() function in Curl_conncache_foreach()
 *
 */
static int call_extract_if_dead(struct connectdata *conn, void *param)
{
  struct prunedead *p = (struct prunedead *)param;
  if(extract_if_dead(conn, p->data)) {
    /* stop the iteration here, pass back the connection that was extracted */
    p->extracted = conn;
    return 1;
  }
  return 0; /* continue iteration */
}

/*
 * This function scans the connection cache for half-open/dead connections,
 * closes and removes them.
 * The cleanup is done at most once per second.
 */
static void prune_dead_connections(struct Curl_easy *data)
{
  struct curltime now = Curl_now();
  /* NOTE(review): 'elapsed' is in milliseconds (Curl_timediff), despite the
     time_t type; the 1000L threshold is thus one second */
  time_t elapsed = Curl_timediff(now, data->state.conn_cache->last_cleanup);

  if(elapsed >= 1000L) {
    struct prunedead prune;
    prune.data = data;
    prune.extracted = NULL;
    /* repeat until the foreach finds no more dead connections */
    while(Curl_conncache_foreach(data, data->state.conn_cache, &prune,
                                 call_extract_if_dead)) {
      /* disconnect it */
      (void)Curl_disconnect(data, prune.extracted, /* dead_connection */TRUE);
    }
    data->state.conn_cache->last_cleanup = now;
  }
}

/* max pipeline length for this multi handle; 0 means no limit (or no multi) */
static size_t max_pipeline_length(struct Curl_multi *multi)
{
  return multi ? multi->max_pipeline_length : 0;
}

/*
 * Given one filled in connection struct (named needle), this function should
 * detect if there already is one that has all the significant details
 * exactly the same and thus should be used instead.
 *
 * If there is a match, this function returns TRUE - and has marked the
 * connection as 'in-use'.
 It must later be called with ConnectionDone() to
 * return back to 'idle' (unused) state.
 *
 * The force_reuse flag is set if the connection must be used, even if
 * the pipelining strategy wants to open a new connection instead of reusing.
 *
 * NOTE: Curl_conncache_find_bundle() locks the connection cache; every
 * return path below must unlock it (and does, via Curl_conncache_unlock()
 * or the early-return in the "wait" case).
 */
static bool
ConnectionExists(struct Curl_easy *data,
                 struct connectdata *needle,
                 struct connectdata **usethis,
                 bool *force_reuse,
                 bool *waitpipe)
{
  struct connectdata *check;
  struct connectdata *chosen = 0;
  bool foundPendingCandidate = FALSE;
  int canpipe = IsPipeliningPossible(data, needle);
  struct connectbundle *bundle;

#ifdef USE_NTLM
  bool wantNTLMhttp = ((data->state.authhost.want &
                      (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
                      (needle->handler->protocol & PROTO_FAMILY_HTTP));
  bool wantProxyNTLMhttp = (needle->bits.proxy_user_passwd &&
                           ((data->state.authproxy.want &
                           (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
                           (needle->handler->protocol & PROTO_FAMILY_HTTP)));
#endif

  *force_reuse = FALSE;
  *waitpipe = FALSE;

  /* We can't pipeline if the site is blacklisted */
  if((canpipe & CURLPIPE_HTTP1) &&
     Curl_pipeline_site_blacklisted(data, needle))
    canpipe &= ~ CURLPIPE_HTTP1;

  /* Look up the bundle with all the connections to this particular host.
     Locks the connection cache, beware of early returns! */
  bundle = Curl_conncache_find_bundle(needle, data->state.conn_cache);
  if(bundle) {
    /* Max pipe length is zero (unlimited) for multiplexed connections */
    size_t max_pipe_len = (bundle->multiuse != BUNDLE_MULTIPLEX)?
      max_pipeline_length(data->multi):0;
    size_t best_pipe_len = max_pipe_len;
    struct curl_llist_element *curr;

    infof(data, "Found bundle for host %s: %p [%s]\n",
          (needle->bits.conn_to_host ? needle->conn_to_host.name :
           needle->host.name), (void *)bundle,
          (bundle->multiuse == BUNDLE_PIPELINING ?
           "can pipeline" :
           (bundle->multiuse == BUNDLE_MULTIPLEX ?
            "can multiplex" : "serially")));

    /* We can't pipeline if we don't know anything about the server */
    if(canpipe) {
      if(bundle->multiuse <= BUNDLE_UNKNOWN) {
        if((bundle->multiuse == BUNDLE_UNKNOWN) && data->set.pipewait) {
          infof(data, "Server doesn't support multi-use yet, wait\n");
          *waitpipe = TRUE;
          Curl_conncache_unlock(needle);
          return FALSE; /* no re-use */
        }

        infof(data, "Server doesn't support multi-use (yet)\n");
        canpipe = 0;
      }
      if((bundle->multiuse == BUNDLE_PIPELINING) &&
         !Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1)) {
        /* not asked for, switch off */
        infof(data, "Could pipeline, but not asked to!\n");
        canpipe = 0;
      }
      else if((bundle->multiuse == BUNDLE_MULTIPLEX) &&
              !Curl_pipeline_wanted(data->multi, CURLPIPE_MULTIPLEX)) {
        infof(data, "Could multiplex, but not asked to!\n");
        canpipe = 0;
      }
    }

    /* walk every connection in the bundle, skipping ('continue') every one
       that differs from the needle in any significant aspect */
    curr = bundle->conn_list.head;
    while(curr) {
      bool match = FALSE;
      size_t pipeLen;

      /*
       * Note that if we use a HTTP proxy in normal mode (no tunneling), we
       * check connections to that proxy and not to the actual remote server.
       */
      check = curr->ptr;
      curr = curr->next;

      if(extract_if_dead(check, data)) {
        /* disconnect it */
        (void)Curl_disconnect(data, check, /* dead_connection */TRUE);
        continue;
      }

      pipeLen = check->send_pipe.size + check->recv_pipe.size;

      if(canpipe) {
        if(check->bits.protoconnstart && check->bits.close)
          continue;

        if(!check->bits.multiplex) {
          /* If not multiplexing, make sure the connection is fine for HTTP/1
             pipelining */
          struct Curl_easy* sh = gethandleathead(&check->send_pipe);
          struct Curl_easy* rh = gethandleathead(&check->recv_pipe);
          if(sh) {
            if(!(IsPipeliningPossible(sh, check) & CURLPIPE_HTTP1))
              continue;
          }
          else if(rh) {
            if(!(IsPipeliningPossible(rh, check) & CURLPIPE_HTTP1))
              continue;
          }
        }
      }
      else {
        if(pipeLen > 0) {
          /* can only happen within multi handles, and means that another easy
             handle is using this connection */
          continue;
        }

        if(Curl_resolver_asynch()) {
          /* ip_addr_str[0] is NUL only if the resolving of the name hasn't
             completed yet and until then we don't re-use this connection */
          if(!check->ip_addr_str[0]) {
            infof(data,
                  "Connection #%ld is still name resolving, can't reuse\n",
                  check->connection_id);
            continue;
          }
        }

        if((check->sock[FIRSTSOCKET] == CURL_SOCKET_BAD) ||
           check->bits.close) {
          if(!check->bits.close)
            foundPendingCandidate = TRUE;
          /* Don't pick a connection that hasn't connected yet or that is going
             to get closed. */
          infof(data, "Connection #%ld isn't open enough, can't reuse\n",
                check->connection_id);
#ifdef DEBUGBUILD
          if(check->recv_pipe.size > 0) {
            infof(data,
                  "BAD! Unconnected #%ld has a non-empty recv pipeline!\n",
                  check->connection_id);
          }
#endif
          continue;
        }
      }

#ifdef USE_UNIX_SOCKETS
      if(needle->unix_domain_socket) {
        if(!check->unix_domain_socket)
          continue;
        if(strcmp(needle->unix_domain_socket, check->unix_domain_socket))
          continue;
        if(needle->abstract_unix_socket != check->abstract_unix_socket)
          continue;
      }
      else if(check->unix_domain_socket)
        continue;
#endif

      if((needle->handler->flags&PROTOPT_SSL) !=
         (check->handler->flags&PROTOPT_SSL))
        /* don't do mixed SSL and non-SSL connections */
        if(get_protocol_family(check->handler->protocol) !=
           needle->handler->protocol || !check->tls_upgraded)
          /* except protocols that have been upgraded via TLS */
          continue;

      if(needle->bits.httpproxy != check->bits.httpproxy ||
         needle->bits.socksproxy != check->bits.socksproxy)
        continue;

      if(needle->bits.socksproxy && !proxy_info_matches(&needle->socks_proxy,
                                                        &check->socks_proxy))
        continue;

      if(needle->bits.conn_to_host != check->bits.conn_to_host)
        /* don't mix connections that use the "connect to host" feature and
         * connections that don't use this feature */
        continue;

      if(needle->bits.conn_to_port != check->bits.conn_to_port)
        /* don't mix connections that use the "connect to port" feature and
         * connections that don't use this feature */
        continue;

      if(needle->bits.httpproxy) {
        if(!proxy_info_matches(&needle->http_proxy, &check->http_proxy))
          continue;

        if(needle->bits.tunnel_proxy != check->bits.tunnel_proxy)
          continue;

        if(needle->http_proxy.proxytype == CURLPROXY_HTTPS) {
          /* use https proxy */
          if(needle->handler->flags&PROTOPT_SSL) {
            /* use double layer ssl */
            if(!Curl_ssl_config_matches(&needle->proxy_ssl_config,
                                        &check->proxy_ssl_config))
              continue;
            if(check->proxy_ssl[FIRSTSOCKET].state != ssl_connection_complete)
              continue;
          }
          else {
            if(!Curl_ssl_config_matches(&needle->ssl_config,
                                        &check->ssl_config))
              continue;
            if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete)
              continue;
          }
        }
      }

      if(!canpipe && CONN_INUSE(check))
        /* this request can't be pipelined but the checked connection is
           already in use so we skip it */
        continue;

      if(CONN_INUSE(check) && (check->data->multi != needle->data->multi))
        /* this could be subject for pipeline/multiplex use, but only if they
           belong to the same multi handle */
        continue;

      if(needle->localdev || needle->localport) {
        /* If we are bound to a specific local end (IP+port), we must not
           re-use a random other one, although if we didn't ask for a
           particular one we can reuse one that was bound.

           This comparison is a bit rough and too strict. Since the input
           parameters can be specified in numerous ways and still end up the
           same it would take a lot of processing to make it really accurate.
           Instead, this matching will assume that re-uses of bound connections
           will most likely also re-use the exact same binding parameters and
           missing out a few edge cases shouldn't hurt anyone very much.
        */
        if((check->localport != needle->localport) ||
           (check->localportrange != needle->localportrange) ||
           (needle->localdev &&
            (!check->localdev || strcmp(check->localdev, needle->localdev))))
          continue;
      }

      /* NOTE(review): the comment below says "requires credentials per
         connection" but the condition is negated (!PROTOPT_CREDSPERREQUEST),
         i.e. this branch runs for protocols whose credentials are bound to
         the connection rather than to each request */
      if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) {
        /* This protocol requires credentials per connection,
           so verify that we're using the same name and password as well */
        if(strcmp(needle->user, check->user) ||
           strcmp(needle->passwd, check->passwd)) {
          /* one of them was different */
          continue;
        }
      }

      if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) ||
         needle->bits.tunnel_proxy) {
        /* The requested connection does not use a HTTP proxy or it uses SSL or
           it is a non-SSL protocol tunneled or it is a non-SSL protocol which
           is allowed to be upgraded via TLS */

        if((strcasecompare(needle->handler->scheme, check->handler->scheme) ||
            (get_protocol_family(check->handler->protocol) ==
             needle->handler->protocol && check->tls_upgraded)) &&
           (!needle->bits.conn_to_host || strcasecompare(
            needle->conn_to_host.name, check->conn_to_host.name)) &&
           (!needle->bits.conn_to_port ||
             needle->conn_to_port == check->conn_to_port) &&
           strcasecompare(needle->host.name, check->host.name) &&
           needle->remote_port == check->remote_port) {
          /* The schemes match or the the protocol family is the same and the
             previous connection was TLS upgraded, and the hostname and host
             port match */
          if(needle->handler->flags & PROTOPT_SSL) {
            /* This is a SSL connection so verify that we're using the same
               SSL options as well */
            if(!Curl_ssl_config_matches(&needle->ssl_config,
                                        &check->ssl_config)) {
              DEBUGF(infof(data,
                           "Connection #%ld has different SSL parameters, "
                           "can't reuse\n",
                           check->connection_id));
              continue;
            }
            if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) {
              foundPendingCandidate = TRUE;
              DEBUGF(infof(data,
                           "Connection #%ld has not started SSL connect, "
                           "can't reuse\n",
                           check->connection_id));
              continue;
            }
          }
          match = TRUE;
        }
      }
      else {
        /* The requested connection is using the same HTTP proxy in normal
           mode (no tunneling) */
        match = TRUE;
      }

      if(match) {
#if defined(USE_NTLM)
        /* If we are looking for an HTTP+NTLM connection, check if this is
           already authenticating with the right credentials. If not, keep
           looking so that we can reuse NTLM connections if
           possible. (Especially we must not reuse the same connection if
           partway through a handshake!) */
        if(wantNTLMhttp) {
          if(strcmp(needle->user, check->user) ||
             strcmp(needle->passwd, check->passwd))
            continue;
        }
        else if(check->ntlm.state != NTLMSTATE_NONE) {
          /* Connection is using NTLM auth but we don't want NTLM */
          continue;
        }

        /* Same for Proxy NTLM authentication */
        if(wantProxyNTLMhttp) {
          /* Both check->http_proxy.user and check->http_proxy.passwd can be
           * NULL */
          if(!check->http_proxy.user || !check->http_proxy.passwd)
            continue;

          if(strcmp(needle->http_proxy.user, check->http_proxy.user) ||
             strcmp(needle->http_proxy.passwd, check->http_proxy.passwd))
            continue;
        }
        else if(check->proxyntlm.state != NTLMSTATE_NONE) {
          /* Proxy connection is using NTLM auth but we don't want NTLM */
          continue;
        }

        if(wantNTLMhttp || wantProxyNTLMhttp) {
          /* Credentials are already checked, we can use this connection */
          chosen = check;

          if((wantNTLMhttp &&
             (check->ntlm.state != NTLMSTATE_NONE)) ||
              (wantProxyNTLMhttp &&
               (check->proxyntlm.state != NTLMSTATE_NONE))) {
            /* We must use this connection, no other */
            *force_reuse = TRUE;
            break;
          }

          /* Continue look up for a better connection */
          continue;
        }
#endif
        if(canpipe) {
          /* We can pipeline if we want to. Let's continue looking for
             the optimal connection to use, i.e the shortest pipe that is not
             blacklisted. */

          if(pipeLen == 0) {
            /* We have the optimal connection. Let's stop looking. */
            chosen = check;
            break;
          }

          /* We can't use the connection if the pipe is full */
          if(max_pipe_len && (pipeLen >= max_pipe_len)) {
            infof(data, "Pipe is full, skip (%zu)\n", pipeLen);
            continue;
          }
#ifdef USE_NGHTTP2
          /* If multiplexed, make sure we don't go over concurrency limit */
          if(check->bits.multiplex) {
            /* Multiplexed connections can only be HTTP/2 for now */
            struct http_conn *httpc = &check->proto.httpc;
            if(pipeLen >= httpc->settings.max_concurrent_streams) {
              infof(data, "MAX_CONCURRENT_STREAMS reached, skip (%zu)\n",
                    pipeLen);
              continue;
            }
          }
#endif
          /* We can't use the connection if the pipe is penalized */
          if(Curl_pipeline_penalized(data, check)) {
            infof(data, "Penalized, skip\n");
            continue;
          }

          if(max_pipe_len) {
            if(pipeLen < best_pipe_len) {
              /* This connection has a shorter pipe so far. We'll pick this
                 and continue searching */
              chosen = check;
              best_pipe_len = pipeLen;
              continue;
            }
          }
          else {
            /* When not pipelining (== multiplexed), we have a match here! */
            chosen = check;
            infof(data, "Multiplexed connection found!\n");
            break;
          }
        }
        else {
          /* We have found a connection. Let's stop searching. */
          chosen = check;
          break;
        }
      }
    }
  }

  if(chosen) {
    /* mark it as used before releasing the lock */
    chosen->data = data; /* own it! */
    Curl_conncache_unlock(needle);
    *usethis = chosen;
    return TRUE; /* yes, we found one to use! */
  }
  Curl_conncache_unlock(needle);

  if(foundPendingCandidate && data->set.pipewait) {
    infof(data,
          "Found pending candidate for reuse and CURLOPT_PIPEWAIT is set\n");
    *waitpipe = TRUE;
  }

  return FALSE; /* no matching connecting exists */
}

/* after a TCP connection to the proxy has been verified, this function does
   the next magic step.
   Note: this function's sub-functions call failf()

*/
CURLcode Curl_connected_proxy(struct connectdata *conn, int sockindex)
{
  CURLcode result = CURLE_OK;

  /* only SOCKS proxies need the extra handshake done here; for other
     proxy types (or none) this is a no-op returning CURLE_OK */
  if(conn->bits.socksproxy) {
#ifndef CURL_DISABLE_PROXY
    /* for the secondary socket (FTP), use the "connect to host"
     * but ignore the "connect to port" (use the secondary port)
     */
    const char * const host = conn->bits.httpproxy ?
                              conn->http_proxy.host.name :
                              conn->bits.conn_to_host ?
                              conn->conn_to_host.name :
                              sockindex == SECONDARYSOCKET ?
                              conn->secondaryhostname : conn->host.name;
    const int port = conn->bits.httpproxy ? (int)conn->http_proxy.port :
                     sockindex == SECONDARYSOCKET ? conn->secondary_port :
                     conn->bits.conn_to_port ? conn->conn_to_port :
                     conn->remote_port;
    conn->bits.socksproxy_connecting = TRUE;
    switch(conn->socks_proxy.proxytype) {
    case CURLPROXY_SOCKS5:
    case CURLPROXY_SOCKS5_HOSTNAME:
      result = Curl_SOCKS5(conn->socks_proxy.user, conn->socks_proxy.passwd,
                           host, port, sockindex, conn);
      break;

    case CURLPROXY_SOCKS4:
    case CURLPROXY_SOCKS4A:
      result = Curl_SOCKS4(conn->socks_proxy.user, host, port, sockindex,
                           conn);
      break;

    default:
      failf(conn->data, "unknown proxytype option given");
      result = CURLE_COULDNT_CONNECT;
    } /* switch proxytype */
    conn->bits.socksproxy_connecting = FALSE;
#else
  (void)sockindex;
#endif /* CURL_DISABLE_PROXY */
  }

  return result;
}

/*
 * verboseconnect() displays verbose information after a connect
 */
#ifndef CURL_DISABLE_VERBOSE_STRINGS
void Curl_verboseconnect(struct connectdata *conn)
{
  if(conn->data->set.verbose)
    infof(conn->data, "Connected to %s (%s) port %ld (#%ld)\n",
          conn->bits.socksproxy ? conn->socks_proxy.host.dispname :
          conn->bits.httpproxy ? conn->http_proxy.host.dispname :
          conn->bits.conn_to_host ? conn->conn_to_host.dispname :
          conn->host.dispname,
          conn->ip_addr_str, conn->port, conn->connection_id);
}
#endif

/* Fill in the sockets to monitor for this connection; delegates to the
   protocol handler when it provides its own proto_getsock callback. */
int Curl_protocol_getsock(struct connectdata *conn,
                          curl_socket_t *socks,
                          int numsocks)
{
  if(conn->handler->proto_getsock)
    return conn->handler->proto_getsock(conn, socks, numsocks);
  /* Backup getsock logic. Since there is a live socket in use, we must wait
     for it or it will be removed from watching when the multi_socket API is
     used. */
  socks[0] = conn->sock[FIRSTSOCKET];
  return GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(0);
}

/* Same as above but for the DOING phase; no fallback socket here */
int Curl_doing_getsock(struct connectdata *conn,
                       curl_socket_t *socks,
                       int numsocks)
{
  if(conn && conn->handler->doing_getsock)
    return conn->handler->doing_getsock(conn, socks, numsocks);
  return GETSOCK_BLANK;
}

/*
 * We are doing protocol-specific connecting and this is being called over and
 * over from the multi interface until the connection phase is done on
 * protocol layer.
 */

CURLcode Curl_protocol_connecting(struct connectdata *conn,
                                  bool *done)
{
  CURLcode result = CURLE_OK;

  if(conn && conn->handler->connecting) {
    *done = FALSE;
    result = conn->handler->connecting(conn, done);
  }
  else
    /* no protocol-specific connecting step: immediately done */
    *done = TRUE;

  return result;
}

/*
 * We are DOING this is being called over and over from the multi interface
 * until the DOING phase is done on protocol layer.
 */

CURLcode Curl_protocol_doing(struct connectdata *conn, bool *done)
{
  CURLcode result = CURLE_OK;

  if(conn && conn->handler->doing) {
    *done = FALSE;
    result = conn->handler->doing(conn, done);
  }
  else
    /* no protocol-specific DOING step: immediately done */
    *done = TRUE;

  return result;
}

/*
 * We have discovered that the TCP connection has been successful, we can now
 * proceed with some action.
 *
 */
CURLcode Curl_protocol_connect(struct connectdata *conn,
                               bool *protocol_done)
{
  CURLcode result = CURLE_OK;

  *protocol_done = FALSE;

  if(conn->bits.tcpconnect[FIRSTSOCKET] && conn->bits.protoconnstart) {
    /* We already are connected, get back. This may happen when the connect
       worked fine in the first call, like when we connect to a local server
       or proxy. Note that we don't know if the protocol is actually done.

       Unless this protocol doesn't have any protocol-connect callback, as
       then we know we're done. */
    if(!conn->handler->connecting)
      *protocol_done = TRUE;

    return CURLE_OK;
  }

  if(!conn->bits.protoconnstart) {

    result = Curl_proxy_connect(conn, FIRSTSOCKET);
    if(result)
      return result;

    if(CONNECT_FIRSTSOCKET_PROXY_SSL())
      /* wait for HTTPS proxy SSL initialization to complete */
      return CURLE_OK;

    if(conn->bits.tunnel_proxy && conn->bits.httpproxy &&
       Curl_connect_ongoing(conn))
      /* when using an HTTP tunnel proxy, await complete tunnel establishment
         before proceeding further. Return CURLE_OK so we'll be called again */
      return CURLE_OK;

    if(conn->handler->connect_it) {
      /* is there a protocol-specific connect() procedure? */

      /* Call the protocol-specific connect function */
      result = conn->handler->connect_it(conn, protocol_done);
    }
    else
      *protocol_done = TRUE;

    /* it has started, possibly even completed but that knowledge isn't stored
       in this bit! */
    if(!result)
      conn->bits.protoconnstart = TRUE;
  }

  return result; /* pass back status */
}

/*
 * Helpers for IDNA conversions.
 */

/* Return TRUE if the hostname consists only of 7-bit ASCII bytes (no byte
   with the high bit set). */
static bool is_ASCII_name(const char *hostname)
{
  const unsigned char *ch = (const unsigned char *)hostname;

  while(*ch) {
    if(*ch++ & 0x80)
      return FALSE;
  }
  return TRUE;
}

/*
 * Perform any necessary IDN conversion of hostname
 */
static CURLcode fix_hostname(struct connectdata *conn, struct hostname *host)
{
  size_t len;
  struct Curl_easy *data = conn->data;

#ifndef USE_LIBIDN2
  (void)data;
  (void)conn;
#elif defined(CURL_DISABLE_VERBOSE_STRINGS)
  (void)conn;
#endif

  /* set the name we use to display the host name */
  host->dispname = host->name;

  len = strlen(host->name);
  if(len && (host->name[len-1] == '.'))
    /* strip off a single trailing dot if present, primarily for SNI but
       there's no use for it */
    host->name[len-1] = 0;

  /* Check name for non-ASCII and convert hostname to ACE form if we can */
  if(!is_ASCII_name(host->name)) {
#ifdef USE_LIBIDN2
    if(idn2_check_version(IDN2_VERSION)) {
      char *ace_hostname = NULL;
#if IDN2_VERSION_NUMBER >= 0x00140000
      /* IDN2_NFC_INPUT: Normalize input string using normalization form C.
         IDN2_NONTRANSITIONAL: Perform Unicode TR46 non-transitional
         processing. */
      int flags = IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL;
#else
      int flags = IDN2_NFC_INPUT;
#endif
      int rc = idn2_lookup_ul((const char *)host->name, &ace_hostname, flags);
      if(rc == IDN2_OK) {
        host->encalloc = (char *)ace_hostname;
        /* change the name pointer to point to the encoded hostname */
        host->name = host->encalloc;
      }
      else {
        failf(data, "Failed to convert %s to ACE; %s\n", host->name,
              idn2_strerror(rc));
        return CURLE_URL_MALFORMAT;
      }
    }
#elif defined(USE_WIN32_IDN)
    char *ace_hostname = NULL;

    if(curl_win32_idn_to_ascii(host->name, &ace_hostname)) {
      host->encalloc = ace_hostname;
      /* change the name pointer to point to the encoded hostname */
      host->name = host->encalloc;
    }
    else {
      failf(data, "Failed to convert %s to ACE;\n", host->name);
      return CURLE_URL_MALFORMAT;
    }
#else
    infof(data, "IDN support not present, can't parse Unicode domains\n");
#endif
  }
  {
    /* Reject host names with embedded control characters or spaces.
       NOTE(review): with a signed 'char', bytes >= 0x80 also compare <= 32
       and get rejected here; presumably acceptable since non-ASCII names
       were converted to ACE above when IDN support is built in — confirm
       the intended behavior for non-IDN builds. */
    char *hostp;
    for(hostp = host->name; *hostp; hostp++) {
      if(*hostp <= 32) {
        failf(data, "Host name '%s' contains bad letter", host->name);
        return CURLE_URL_MALFORMAT;
      }
    }
  }
  return CURLE_OK;
}

/*
 * Frees data allocated by fix_hostname()
 */
static void free_fixed_hostname(struct hostname *host)
{
#if defined(USE_LIBIDN2)
  if(host->encalloc) {
    idn2_free(host->encalloc); /* must be freed with idn2_free() since this
                                 was allocated by libidn */
    host->encalloc = NULL;
  }
#elif defined(USE_WIN32_IDN)
  free(host->encalloc); /* must be freed with free() since this was allocated
                           by curl_win32_idn_to_ascii */
  host->encalloc = NULL;
#else
  (void)host;
#endif
}

/* No-op destructor for the pipeline llists; elements are owned elsewhere. */
static void llist_dtor(void *user, void *element)
{
  (void)user;
  (void)element;
  /* Do nothing */
}

/*
 * Allocate and initialize a new connectdata object.
 * Returns NULL on allocation failure; on any partial-init failure the
 * 'error' path below releases everything acquired so far.
 */
static struct connectdata *allocate_conn(struct Curl_easy *data)
{
  struct connectdata *conn = calloc(1, sizeof(struct connectdata));
  if(!conn)
    return NULL;

#ifdef USE_SSL
  /* The SSL backend-specific data (ssl_backend_data) objects are allocated as
     a separate array to ensure suitable alignment.
     Note that these backend pointers can be swapped by vtls (eg ssl backend
     data becomes proxy backend data). */
  {
    size_t sslsize = Curl_ssl->sizeof_ssl_backend_data;
    char *ssl = calloc(4, sslsize);
    if(!ssl) {
      free(conn);
      return NULL;
    }
    conn->ssl_extra = ssl;
    conn->ssl[0].backend = (void *)ssl;
    conn->ssl[1].backend = (void *)(ssl + sslsize);
    conn->proxy_ssl[0].backend = (void *)(ssl + 2 * sslsize);
    conn->proxy_ssl[1].backend = (void *)(ssl + 3 * sslsize);
  }
#endif

  conn->handler = &Curl_handler_dummy;  /* Be sure we have a handler defined
                                           already from start to avoid NULL
                                           situations and checks */

  /* and we setup a few fields in case we end up actually using this struct */

  conn->sock[FIRSTSOCKET] = CURL_SOCKET_BAD;     /* no file descriptor */
  conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */
  conn->tempsock[0] = CURL_SOCKET_BAD; /* no file descriptor */
  conn->tempsock[1] = CURL_SOCKET_BAD; /* no file descriptor */
  conn->connection_id = -1;    /* no ID */
  conn->port = -1; /* unknown at this point */
  conn->remote_port = -1; /* unknown at this point */
#if defined(USE_RECV_BEFORE_SEND_WORKAROUND) && defined(DEBUGBUILD)
  conn->postponed[0].bindsock = CURL_SOCKET_BAD; /* no file descriptor */
  conn->postponed[1].bindsock = CURL_SOCKET_BAD; /* no file descriptor */
#endif /* USE_RECV_BEFORE_SEND_WORKAROUND && DEBUGBUILD */

  /* Default protocol-independent behavior doesn't support persistent
     connections, so we set this to force-close. Protocols that support
     this need to set this to FALSE in their "curl_do" functions. */
  connclose(conn, "Default to force-close");

  /* Store creation time to help future close decision making */
  conn->created = Curl_now();

  /* Store current time to give a baseline to keepalive connection times. */
  conn->keepalive = Curl_now();

  /* Store off the configured connection upkeep time. */
  conn->upkeep_interval_ms = data->set.upkeep_interval_ms;

  conn->data = data; /* Setup the association between this connection
                        and the Curl_easy */

  conn->http_proxy.proxytype = data->set.proxytype;
  conn->socks_proxy.proxytype = CURLPROXY_SOCKS4;

#ifdef CURL_DISABLE_PROXY

  conn->bits.proxy = FALSE;
  conn->bits.httpproxy = FALSE;
  conn->bits.socksproxy = FALSE;
  conn->bits.proxy_user_passwd = FALSE;
  conn->bits.tunnel_proxy = FALSE;

#else /* CURL_DISABLE_PROXY */

  /* note that these two proxy bits are now just on what looks to be
     requested, they may be altered down the road */
  conn->bits.proxy = (data->set.str[STRING_PROXY] &&
                      *data->set.str[STRING_PROXY]) ? TRUE : FALSE;
  conn->bits.httpproxy = (conn->bits.proxy &&
                          (conn->http_proxy.proxytype == CURLPROXY_HTTP ||
                           conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0 ||
                           conn->http_proxy.proxytype == CURLPROXY_HTTPS)) ?
                          TRUE : FALSE;
  conn->bits.socksproxy = (conn->bits.proxy &&
                           !conn->bits.httpproxy) ? TRUE : FALSE;

  if(data->set.str[STRING_PRE_PROXY] && *data->set.str[STRING_PRE_PROXY]) {
    conn->bits.proxy = TRUE;
    conn->bits.socksproxy = TRUE;
  }

  conn->bits.proxy_user_passwd =
    (data->set.str[STRING_PROXYUSERNAME]) ? TRUE : FALSE;
  conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy;

#endif /* CURL_DISABLE_PROXY */

  conn->bits.user_passwd = (data->set.str[STRING_USERNAME]) ? TRUE : FALSE;
  conn->bits.ftp_use_epsv = data->set.ftp_use_epsv;
  conn->bits.ftp_use_eprt = data->set.ftp_use_eprt;

  conn->ssl_config.verifystatus = data->set.ssl.primary.verifystatus;
  conn->ssl_config.verifypeer = data->set.ssl.primary.verifypeer;
  conn->ssl_config.verifyhost = data->set.ssl.primary.verifyhost;
  conn->proxy_ssl_config.verifystatus =
    data->set.proxy_ssl.primary.verifystatus;
  conn->proxy_ssl_config.verifypeer = data->set.proxy_ssl.primary.verifypeer;
  conn->proxy_ssl_config.verifyhost = data->set.proxy_ssl.primary.verifyhost;

  conn->ip_version = data->set.ipver;

#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \
    defined(NTLM_WB_ENABLED)
  conn->ntlm_auth_hlpr_socket = CURL_SOCKET_BAD;
  conn->ntlm_auth_hlpr_pid = 0;
  conn->challenge_header = NULL;
  conn->response_header = NULL;
#endif

  if(Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1) &&
     !conn->master_buffer) {
    /* Allocate master_buffer to be used for HTTP/1 pipelining */
    conn->master_buffer = calloc(MASTERBUF_SIZE, sizeof(char));
    if(!conn->master_buffer)
      goto error;
  }

  /* Initialize the pipeline lists */
  Curl_llist_init(&conn->send_pipe, (curl_llist_dtor) llist_dtor);
  Curl_llist_init(&conn->recv_pipe, (curl_llist_dtor) llist_dtor);

#ifdef HAVE_GSSAPI
  conn->data_prot = PROT_CLEAR;
#endif

  /* Store the local bind parameters that will be used for this connection */
  if(data->set.str[STRING_DEVICE]) {
    conn->localdev = strdup(data->set.str[STRING_DEVICE]);
    if(!conn->localdev)
      goto error;
  }
  conn->localportrange = data->set.localportrange;
  conn->localport = data->set.localport;

  /* the close socket stuff needs to be copied to the connection struct as
     it may live on without (this specific) Curl_easy */
  conn->fclosesocket = data->set.fclosesocket;
  conn->closesocket_client = data->set.closesocket_client;

  return conn;
  error:
  /* release everything acquired by this partially initialized conn */

  Curl_llist_destroy(&conn->send_pipe, NULL);
  Curl_llist_destroy(&conn->recv_pipe, NULL);

  free(conn->master_buffer);
  free(conn->localdev);
#ifdef USE_SSL
  free(conn->ssl_extra);
#endif
  free(conn);
  return NULL;
}

/* returns the handler if the given scheme is built-in */
const struct Curl_handler *Curl_builtin_scheme(const char *scheme)
{
  const struct Curl_handler * const *pp;
  const struct Curl_handler *p;
  /* Scan protocol handler table and match against 'scheme'. The handler may
     be changed later when the protocol specific setup function is called. */
  for(pp = protocols; (p = *pp) != NULL; pp++)
    if(strcasecompare(p->scheme, scheme))
      /* Protocol found in table. Check if allowed */
      return p;
  return NULL; /* not found */
}

/* Look up the handler for 'protostr' and install it on the connection when
   the protocol is allowed by the configured (and redirect) protocol masks. */
static CURLcode findprotocol(struct Curl_easy *data,
                             struct connectdata *conn,
                             const char *protostr)
{
  const struct Curl_handler *p = Curl_builtin_scheme(protostr);
  if(p && /* Protocol found in table. Check if allowed */
     (data->set.allowed_protocols & p->protocol)) {

    /* it is allowed for "normal" request, now do an extra check if this is
       the result of a redirect */
    if(data->state.this_is_a_follow &&
       !(data->set.redir_protocols & p->protocol))
      /* nope, get out */
      ;
    else {
      /* Perform setup complement if some. */
      conn->handler = conn->given = p;

      /* 'port' and 'remote_port' are set in setup_connection_internals() */
      return CURLE_OK;
    }
  }

  /* The protocol was not found in the table, but we don't have to assign it
     to anything since it is already assigned to a dummy-struct in the
     create_conn() function when the connectdata struct is allocated. */
  failf(data, "Protocol \"%s\" not supported or disabled in " LIBCURL_NAME,
        protostr);

  return CURLE_UNSUPPORTED_PROTOCOL;
}

/* Map a URL-API (CURLU) error code onto the closest CURLcode. */
CURLcode Curl_uc_to_curlcode(CURLUcode uc)
{
  switch(uc) {
  default:
    return CURLE_URL_MALFORMAT;
  case CURLUE_UNSUPPORTED_SCHEME:
    return CURLE_UNSUPPORTED_PROTOCOL;
  case CURLUE_OUT_OF_MEMORY:
    return CURLE_OUT_OF_MEMORY;
  case CURLUE_USER_NOT_ALLOWED:
    return CURLE_LOGIN_DENIED;
  }
}

/*
 * Parse URL and fill in the relevant members of the connection struct.
 */
static CURLcode parseurlandfillconn(struct Curl_easy *data,
                                    struct connectdata *conn)
{
  CURLcode result;
  CURLU *uh;
  CURLUcode uc;
  char *hostname;

  Curl_up_free(data); /* cleanup previous leftovers first */

  /* parse the URL */
  uh = data->state.uh = curl_url();
  if(!uh)
    return CURLE_OUT_OF_MEMORY;

  if(data->set.str[STRING_DEFAULT_PROTOCOL] &&
     !Curl_is_absolute_url(data->change.url, NULL, MAX_SCHEME_LEN)) {
    /* no scheme in the URL: prefix it with the configured default scheme */
    char *url;
    if(data->change.url_alloc)
      free(data->change.url);
    url = aprintf("%s://%s", data->set.str[STRING_DEFAULT_PROTOCOL],
                  data->change.url);
    if(!url)
      return CURLE_OUT_OF_MEMORY;
    data->change.url = url;
    data->change.url_alloc = TRUE;
  }

  uc = curl_url_set(uh, CURLUPART_URL, data->change.url,
                    CURLU_GUESS_SCHEME |
                    CURLU_NON_SUPPORT_SCHEME |
                    (data->set.disallow_username_in_url ?
                     CURLU_DISALLOW_USER : 0) |
                    (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
  if(uc)
    return Curl_uc_to_curlcode(uc);

  uc = curl_url_get(uh, CURLUPART_SCHEME, &data->state.up.scheme, 0);
  if(uc)
    return Curl_uc_to_curlcode(uc);

  result = findprotocol(data, conn, data->state.up.scheme);
  if(result)
    return result;

  uc = curl_url_get(uh, CURLUPART_USER, &data->state.up.user,
                    CURLU_URLDECODE);
  if(!uc) {
    conn->user = strdup(data->state.up.user);
    if(!conn->user)
      return CURLE_OUT_OF_MEMORY;
    conn->bits.user_passwd = TRUE;
  }
  else if(uc != CURLUE_NO_USER)
    return Curl_uc_to_curlcode(uc);

  uc = curl_url_get(uh, CURLUPART_PASSWORD, &data->state.up.password,
                    CURLU_URLDECODE);
  if(!uc) {
    conn->passwd = strdup(data->state.up.password);
    if(!conn->passwd)
      return CURLE_OUT_OF_MEMORY;
    conn->bits.user_passwd = TRUE;
  }
  else if(uc != CURLUE_NO_PASSWORD)
    return Curl_uc_to_curlcode(uc);

  uc = curl_url_get(uh, CURLUPART_OPTIONS, &data->state.up.options,
                    CURLU_URLDECODE);
  if(!uc) {
    conn->options = strdup(data->state.up.options);
    if(!conn->options)
      return CURLE_OUT_OF_MEMORY;
  }
  else if(uc != CURLUE_NO_OPTIONS)
    return Curl_uc_to_curlcode(uc);

  uc = curl_url_get(uh, CURLUPART_HOST, &data->state.up.hostname, 0);
  if(uc) {
    /* a missing host is only tolerated for file:// URLs.
       NOTE(review): CURLE_OUT_OF_MEMORY looks like an odd error code for a
       missing/invalid host part — confirm whether CURLE_URL_MALFORMAT was
       intended here. */
    if(!strcasecompare("file", data->state.up.scheme))
      return CURLE_OUT_OF_MEMORY;
  }

  uc = curl_url_get(uh, CURLUPART_PATH, &data->state.up.path, 0);
  if(uc)
    return Curl_uc_to_curlcode(uc);

  uc = curl_url_get(uh, CURLUPART_PORT, &data->state.up.port,
                    CURLU_DEFAULT_PORT);
  if(uc) {
    if(!strcasecompare("file", data->state.up.scheme))
      return CURLE_OUT_OF_MEMORY;
  }
  else {
    /* CURLU_DEFAULT_PORT guarantees a valid numeric port string here */
    unsigned long port = strtoul(data->state.up.port, NULL, 10);
    conn->remote_port = curlx_ultous(port);
  }

  (void)curl_url_get(uh, CURLUPART_QUERY, &data->state.up.query, 0);

  hostname = data->state.up.hostname;
  if(!hostname)
    /* this is for file:// transfers, get a dummy made */
    hostname = (char *)"";

  if(hostname[0] == '[') {
    /* This looks like an IPv6 address literal. See if there is an address
       scope. */
    char *percent = strchr(++hostname, '%');
    conn->bits.ipv6_ip = TRUE;
    if(percent) {
      unsigned int identifier_offset = 3;
      char *endp;
      unsigned long scope;
      if(strncmp("%25", percent, 3) != 0) {
        infof(data,
              "Please URL encode %% as %%25, see RFC 6874.\n");
        identifier_offset = 1;
      }
      scope = strtoul(percent + identifier_offset, &endp, 10);
      if(*endp == ']') {
        /* The address scope was well formed.  Knock it out of the
           hostname. */
        memmove(percent, endp, strlen(endp) + 1);
        conn->scope_id = (unsigned int)scope;
      }
      else {
        /* Zone identifier is not numeric */
#if defined(HAVE_NET_IF_H) && defined(IFNAMSIZ) && defined(HAVE_IF_NAMETOINDEX)
        char ifname[IFNAMSIZ + 2];
        char *square_bracket;
        unsigned int scopeidx = 0;
        /* strncpy may not NUL-terminate; the explicit write below does */
        strncpy(ifname, percent + identifier_offset, IFNAMSIZ + 2);
        /* Ensure nullbyte termination */
        ifname[IFNAMSIZ + 1] = '\0';
        square_bracket = strchr(ifname, ']');
        if(square_bracket) {
          /* Remove ']' */
          *square_bracket = '\0';
          scopeidx = if_nametoindex(ifname);
          if(scopeidx == 0) {
            infof(data, "Invalid network interface: %s; %s\n", ifname,
                  strerror(errno));
          }
        }
        if(scopeidx > 0) {
          char *p = percent + identifier_offset + strlen(ifname);

          /* Remove zone identifier from hostname */
          memmove(percent, p, strlen(p) + 1);
          conn->scope_id = scopeidx;
        }
        else
#endif /* HAVE_NET_IF_H && IFNAMSIZ */
          infof(data, "Invalid IPv6 address format\n");
      }
    }

    percent = strchr(hostname, ']');
    if(percent)
      /* terminate IPv6 numerical at end bracket */
      *percent = 0;
  }

  /* make sure the connect struct gets its own copy of the host name */
  conn->host.rawalloc = strdup(hostname);
  if(!conn->host.rawalloc)
    return CURLE_OUT_OF_MEMORY;

  conn->host.name = conn->host.rawalloc;

  if(data->set.scope_id)
    /* Override any scope that was set above.  */
    conn->scope_id = data->set.scope_id;

  return CURLE_OK;
}

/*
 * If we're doing a resumed transfer, we need to setup our stuff
 * properly.
 */
static CURLcode setup_range(struct Curl_easy *data)
{
  struct UrlState *s = &data->state;
  s->resume_from = data->set.set_resume_from;
  if(s->resume_from || data->set.str[STRING_SET_RANGE]) {
    /* rangestringalloc tracks whether s->range is heap-owned by us */
    if(s->rangestringalloc)
      free(s->range);

    if(s->resume_from)
      s->range = aprintf("%" CURL_FORMAT_CURL_OFF_T "-", s->resume_from);
    else
      s->range = strdup(data->set.str[STRING_SET_RANGE]);

    s->rangestringalloc = (s->range) ? TRUE : FALSE;
    if(!s->range)
      return CURLE_OUT_OF_MEMORY;

    /* tell ourselves to fetch this range */
    s->use_range = TRUE;        /* enable range download */
  }
  else
    s->use_range = FALSE; /* disable range download */

  return CURLE_OK;
}


/*
 * setup_connection_internals() -
 *
 * Setup connection internals specific to the requested protocol in the
 * Curl_easy. This is inited and setup before the connection is made but
 * is about the particular protocol that is to be used.
 *
 * This MUST get called after proxy magic has been figured out.
 */
static CURLcode setup_connection_internals(struct connectdata *conn)
{
  const struct Curl_handler * p;
  CURLcode result;

  conn->socktype = SOCK_STREAM; /* most of them are TCP streams */

  /* Perform setup complement if some. */
  p = conn->handler;

  if(p->setup_connection) {
    result = (*p->setup_connection)(conn);

    if(result)
      return result;

    p = conn->handler;              /* May have changed. */
  }

  if(conn->port < 0)
    /* we check for -1 here since if proxy was detected already, this
       was very likely already set to the proxy port */
    conn->port = p->defport;

  return CURLE_OK;
}

/*
 * Curl_free_request_state() should free temp data that was allocated in the
 * Curl_easy for this single request.
 */
void Curl_free_request_state(struct Curl_easy *data)
{
  Curl_safefree(data->req.protop);
  Curl_safefree(data->req.newurl);
}


#ifndef CURL_DISABLE_PROXY
/****************************************************************
* Checks if the host is in the noproxy list. returns true if it matches
* and therefore the proxy should NOT be used.
****************************************************************/
static bool check_noproxy(const char *name, const char *no_proxy)
{
  /* no_proxy=domain1.dom,host.domain2.dom
   *   (a comma-separated list of hosts which should
   *   not be proxied, or an asterisk to override
   *   all proxy variables)
   */
  if(no_proxy && no_proxy[0]) {
    size_t tok_start;
    size_t tok_end;
    const char *separator = ", ";
    size_t no_proxy_len;
    size_t namelen;
    char *endptr;
    if(strcasecompare("*", no_proxy)) {
      return TRUE;
    }

    /* NO_PROXY was specified and it wasn't just an asterisk */

    no_proxy_len = strlen(no_proxy);
    if(name[0] == '[') {
      /* IPv6 numerical address */
      endptr = strchr(name, ']');
      if(!endptr)
        return FALSE;
      name++;
      namelen = endptr - name;
    }
    else
      namelen = strlen(name);

    /* walk the comma/space separated tokens in no_proxy and suffix-match
       each against 'name' */
    for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) {
      while(tok_start < no_proxy_len &&
            strchr(separator, no_proxy[tok_start]) != NULL) {
        /* Look for the beginning of the token. */
        ++tok_start;
      }

      if(tok_start == no_proxy_len)
        break; /* It was all trailing separator chars, no more tokens. */

      for(tok_end = tok_start; tok_end < no_proxy_len &&
            strchr(separator, no_proxy[tok_end]) == NULL; ++tok_end)
        /* Look for the end of the token. */
        ;

      /* To match previous behaviour, where it was necessary to specify
       * ".local.com" to prevent matching "notlocal.com", we will leave
       * the '.' off.
       */
      if(no_proxy[tok_start] == '.')
        ++tok_start;

      if((tok_end - tok_start) <= namelen) {
        /* Match the last part of the name to the domain we are checking. */
        const char *checkn = name + namelen - (tok_end - tok_start);
        if(strncasecompare(no_proxy + tok_start, checkn,
                           tok_end - tok_start)) {
          if((tok_end - tok_start) == namelen || *(checkn - 1) == '.') {
            /* We either have an exact match, or the previous character is a .
             * so it is within the same domain, so no proxy for this host.
             */
            return TRUE;
          }
        }
      } /* if((tok_end - tok_start) <= namelen) */
    } /* for(tok_start = 0; tok_start < no_proxy_len;
         tok_start = tok_end + 1) */
  } /* NO_PROXY was specified and it wasn't just an asterisk */

  return FALSE;
}

#ifndef CURL_DISABLE_HTTP
/****************************************************************
* Detect what (if any) proxy to use. Remember that this selects a host
* name and is not limited to HTTP proxies only.
* The returned pointer must be freed by the caller (unless NULL)
****************************************************************/
static char *detect_proxy(struct connectdata *conn)
{
  char *proxy = NULL;

  /* If proxy was not specified, we check for default proxy environment
   * variables, to enable i.e Lynx compliance:
   *
   * http_proxy=http://some.server.dom:port/
   * https_proxy=http://some.server.dom:port/
   * ftp_proxy=http://some.server.dom:port/
   * no_proxy=domain1.dom,host.domain2.dom
   *   (a comma-separated list of hosts which should
   *   not be proxied, or an asterisk to override
   *   all proxy variables)
   * all_proxy=http://some.server.dom:port/
   *   (seems to exist for the CERN www lib. Probably
   *   the first to check for.)
   *
   * For compatibility, the all-uppercase versions of these variables are
   * checked if the lowercase versions don't exist.
   */
  char proxy_env[128];
  const char *protop = conn->handler->scheme;
  char *envp = proxy_env;
  char *prox;

  /* Now, build <protocol>_proxy and check for such a one to use */
  /* NOTE(review): this copy is unbounded, but scheme names come from the
     built-in handler table and are far shorter than proxy_env (128 bytes)
     minus the "_proxy" suffix — revisit if schemes could ever originate
     elsewhere. */
  while(*protop)
    *envp++ = (char)tolower((int)*protop++);

  /* append _proxy */
  strcpy(envp, "_proxy");

  /* read the protocol proxy: */
  prox = curl_getenv(proxy_env);

  /*
   * We don't try the uppercase version of HTTP_PROXY because of
   * security reasons:
   *
   * When curl is used in a webserver application
   * environment (cgi or php), this environment variable can
   * be controlled by the web server user by setting the
   * http header 'Proxy:' to some value.
   *
   * This can cause 'internal' http/ftp requests to be
   * arbitrarily redirected by any external attacker.
   */
  if(!prox && !strcasecompare("http_proxy", proxy_env)) {
    /* There was no lowercase variable, try the uppercase version: */
    Curl_strntoupper(proxy_env, proxy_env, sizeof(proxy_env));
    prox = curl_getenv(proxy_env);
  }

  envp = proxy_env;
  if(prox) {
    proxy = prox; /* use this */
  }
  else {
    envp = (char *)"all_proxy";
    proxy = curl_getenv(envp); /* default proxy to use */
    if(!proxy) {
      envp = (char *)"ALL_PROXY";
      proxy = curl_getenv(envp);
    }
  }
  if(proxy)
    infof(conn->data, "Uses proxy env variable %s == '%s'\n", envp, proxy);

  return proxy;
}
#endif /* CURL_DISABLE_HTTP */

/*
 * If this is supposed to use a proxy, we need to figure out the proxy
 * host name, so that we can re-use an existing connection
 * that may exist registered to the same proxy host.
 *
 * Parses [scheme://][user[:password]@]host[:port][/...] out of 'proxy'
 * (which is modified in place) and stores the result in either
 * conn->socks_proxy or conn->http_proxy depending on the scheme.
 */
static CURLcode parse_proxy(struct Curl_easy *data,
                            struct connectdata *conn, char *proxy,
                            curl_proxytype proxytype)
{
  char *prox_portno;
  char *endofprot;

  /* We use 'proxyptr' to point to the proxy name from now on... */
  char *proxyptr;
  char *portptr;
  char *atsign;
  long port = -1;
  char *proxyuser = NULL;
  char *proxypasswd = NULL;
  bool sockstype;

  /* We do the proxy host string parsing here. We want the host name and the
   * port name. Accept a protocol:// prefix
   */

  /* Parse the protocol part if present */
  endofprot = strstr(proxy, "://");
  if(endofprot) {
    proxyptr = endofprot + 3;
    if(checkprefix("https", proxy))
      proxytype = CURLPROXY_HTTPS;
    else if(checkprefix("socks5h", proxy))
      proxytype = CURLPROXY_SOCKS5_HOSTNAME;
    else if(checkprefix("socks5", proxy))
      proxytype = CURLPROXY_SOCKS5;
    else if(checkprefix("socks4a", proxy))
      proxytype = CURLPROXY_SOCKS4A;
    else if(checkprefix("socks4", proxy) || checkprefix("socks", proxy))
      proxytype = CURLPROXY_SOCKS4;
    else if(checkprefix("http:", proxy))
      ; /* leave it as HTTP or HTTP/1.0 */
    else {
      /* Any other xxx:// reject! */
      failf(data, "Unsupported proxy scheme for \'%s\'", proxy);
      return CURLE_COULDNT_CONNECT;
    }
  }
  else
    proxyptr = proxy; /* No xxx:// head: It's a HTTP proxy */

#ifdef USE_SSL
  if(!(Curl_ssl->supports & SSLSUPP_HTTPS_PROXY))
#endif
    if(proxytype == CURLPROXY_HTTPS) {
      failf(data, "Unsupported proxy \'%s\', libcurl is built without the "
                  "HTTPS-proxy support.", proxy);
      return CURLE_NOT_BUILT_IN;
    }

  sockstype = proxytype == CURLPROXY_SOCKS5_HOSTNAME ||
              proxytype == CURLPROXY_SOCKS5 ||
              proxytype == CURLPROXY_SOCKS4A ||
              proxytype == CURLPROXY_SOCKS4;

  /* Is there a username and password given in this proxy url? */
  atsign = strchr(proxyptr, '@');
  if(atsign) {
    CURLcode result =
      Curl_parse_login_details(proxyptr, atsign - proxyptr,
                               &proxyuser, &proxypasswd, NULL);
    if(result)
      return result;
    proxyptr = atsign + 1;
  }

  /* start scanning for port number at this point */
  portptr = proxyptr;

  /* detect and extract RFC6874-style IPv6-addresses */
  if(*proxyptr == '[') {
    char *ptr = ++proxyptr; /* advance beyond the initial bracket */
    while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.')))
      ptr++;
    if(*ptr == '%') {
      /* There might be a zone identifier */
      if(strncmp("%25", ptr, 3))
        infof(data, "Please URL encode %% as %%25, see RFC 6874.\n");
      ptr++;
      /* Allow unreserved characters as defined in RFC 3986 */
      while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') ||
                     (*ptr == '.') || (*ptr == '_') || (*ptr == '~')))
        ptr++;
    }
    if(*ptr == ']')
      /* yeps, it ended nicely with a bracket as well */
      *ptr++ = 0;
    else
      infof(data, "Invalid IPv6 address format\n");
    portptr = ptr;
    /* Note that if this didn't end with a bracket, we still advanced the
     * proxyptr first, but I can't see anything wrong with that as no host
     * name nor a numeric can legally start with a bracket.
     */
  }

  /* Get port number off proxy.server.com:1080 */
  prox_portno = strchr(portptr, ':');
  if(prox_portno) {
    char *endp = NULL;
    *prox_portno = 0x0; /* cut off number from host name */
    prox_portno ++;
    /* now set the local port number */
    port = strtol(prox_portno, &endp, 10);
    if((endp && *endp && (*endp != '/') && (*endp != ' ')) ||
       (port < 0) || (port > 65535)) {
      /* meant to detect for example invalid IPv6 numerical addresses without
         brackets: "2a00:fac0:a000::7:13". Accept a trailing slash only
         because we then allow "URL style" with the number followed by a
         slash, used in curl test cases already. Space is also an acceptable
         terminating symbol. */
      infof(data, "No valid port number in proxy string (%s)\n",
            prox_portno);
    }
    else
      conn->port = port;
  }
  else {
    if(proxyptr[0]=='/') {
      /* If the first character in the proxy string is a slash, fail
         immediately. The following code will otherwise clear the string which
         will lead to code running as if no proxy was set! */
      Curl_safefree(proxyuser);
      Curl_safefree(proxypasswd);
      return CURLE_COULDNT_RESOLVE_PROXY;
    }

    /* without a port number after the host name, some people seem to use
       a slash so we strip everything from the first slash */
    atsign = strchr(proxyptr, '/');
    if(atsign)
      *atsign = '\0'; /* cut off path part from host name */

    if(data->set.proxyport)
      /* None given in the proxy string, then get the default one if it is
         given */
      port = data->set.proxyport;
    else {
      if(proxytype == CURLPROXY_HTTPS)
        port = CURL_DEFAULT_HTTPS_PROXY_PORT;
      else
        port = CURL_DEFAULT_PROXY_PORT;
    }
  }

  if(*proxyptr) {
    struct proxy_info *proxyinfo =
      sockstype ? &conn->socks_proxy : &conn->http_proxy;
    proxyinfo->proxytype = proxytype;

    if(proxyuser) {
      /* found user and password, rip them out.  note that we are unescaping
         them, as there is otherwise no way to have a username or password
         with reserved characters like ':' in them. */
      Curl_safefree(proxyinfo->user);
      proxyinfo->user = curl_easy_unescape(data, proxyuser, 0, NULL);
      Curl_safefree(proxyuser);

      if(!proxyinfo->user) {
        Curl_safefree(proxypasswd);
        return CURLE_OUT_OF_MEMORY;
      }

      Curl_safefree(proxyinfo->passwd);
      if(proxypasswd && strlen(proxypasswd) < MAX_CURL_PASSWORD_LENGTH)
        proxyinfo->passwd = curl_easy_unescape(data, proxypasswd, 0, NULL);
      else
        proxyinfo->passwd = strdup("");
      Curl_safefree(proxypasswd);

      if(!proxyinfo->passwd)
        return CURLE_OUT_OF_MEMORY;

      conn->bits.proxy_user_passwd = TRUE; /* enable it */
    }

    if(port >= 0) {
      proxyinfo->port = port;
      if(conn->port < 0 || sockstype || !conn->socks_proxy.host.rawalloc)
        conn->port = port;
    }

    /* now, clone the cleaned proxy host name */
    Curl_safefree(proxyinfo->host.rawalloc);
    proxyinfo->host.rawalloc = strdup(proxyptr);
    proxyinfo->host.name = proxyinfo->host.rawalloc;

    if(!proxyinfo->host.rawalloc)
      return CURLE_OUT_OF_MEMORY;
  }

  /* Curl_safefree() NULLs the pointers, so these are no-ops on paths that
     already consumed the credentials */
  Curl_safefree(proxyuser);
  Curl_safefree(proxypasswd);

  return CURLE_OK;
}

/*
 * Extract the user and password from the authentication string
 */
static CURLcode parse_proxy_auth(struct Curl_easy *data,
                                 struct connectdata *conn)
{
  char proxyuser[MAX_CURL_USER_LENGTH]="";
  char proxypasswd[MAX_CURL_PASSWORD_LENGTH]="";
  CURLcode result;

  /* strncpy does not guarantee termination; the explicit NUL writes below
     make these buffers safe to pass onwards */
  if(data->set.str[STRING_PROXYUSERNAME] != NULL) {
    strncpy(proxyuser, data->set.str[STRING_PROXYUSERNAME],
            MAX_CURL_USER_LENGTH);
    proxyuser[MAX_CURL_USER_LENGTH-1] = '\0';   /*To be on safe side*/
  }
  if(data->set.str[STRING_PROXYPASSWORD] != NULL) {
    strncpy(proxypasswd, data->set.str[STRING_PROXYPASSWORD],
            MAX_CURL_PASSWORD_LENGTH);
    proxypasswd[MAX_CURL_PASSWORD_LENGTH-1] = '\0'; /*To be on safe side*/
  }

  result = Curl_urldecode(data, proxyuser, 0, &conn->http_proxy.user, NULL,
                          FALSE);
  if(!result)
    result = Curl_urldecode(data, proxypasswd, 0, &conn->http_proxy.passwd,
                            NULL, FALSE);
  return result;
}

/* create_conn helper to parse and init proxy values. to be called
 * after unix socket init but before any proxy vars are evaluated.
 */
static CURLcode create_conn_helper_init_proxy(struct connectdata *conn)
{
  char *proxy = NULL;
  char *socksproxy = NULL;
  char *no_proxy = NULL;
  CURLcode result = CURLE_OK;
  struct Curl_easy *data = conn->data;

  /*************************************************************
   * Extract the user and password from the authentication string
   *************************************************************/
  if(conn->bits.proxy_user_passwd) {
    result = parse_proxy_auth(data, conn);
    if(result)
      goto out;
  }

  /*************************************************************
   * Detect what (if any) proxy to use
   *************************************************************/
  if(data->set.str[STRING_PROXY]) {
    proxy = strdup(data->set.str[STRING_PROXY]);
    /* if global proxy is set, this is it */
    if(NULL == proxy) {
      failf(data, "memory shortage");
      result = CURLE_OUT_OF_MEMORY;
      goto out;
    }
  }

  if(data->set.str[STRING_PRE_PROXY]) {
    socksproxy = strdup(data->set.str[STRING_PRE_PROXY]);
    /* if global socks proxy is set, this is it */
    if(NULL == socksproxy) {
      failf(data, "memory shortage");
      result = CURLE_OUT_OF_MEMORY;
      goto out;
    }
  }

  /* an explicitly set CURLOPT_NOPROXY wins over the environment */
  if(!data->set.str[STRING_NOPROXY]) {
    const char *p = "no_proxy";
    no_proxy = curl_getenv(p);
    if(!no_proxy) {
      p = "NO_PROXY";
      no_proxy = curl_getenv(p);
    }
    if(no_proxy) {
      infof(conn->data, "Uses proxy env variable %s == '%s'\n", p, no_proxy);
    }
  }

  if(check_noproxy(conn->host.name, data->set.str[STRING_NOPROXY] ?
      data->set.str[STRING_NOPROXY] : no_proxy)) {
    Curl_safefree(proxy);
    Curl_safefree(socksproxy);
  }
#ifndef CURL_DISABLE_HTTP
  else if(!proxy && !socksproxy)
    /* if the host is not in the noproxy list, detect proxy. */
    proxy = detect_proxy(conn);
#endif /* CURL_DISABLE_HTTP */

  Curl_safefree(no_proxy);

#ifdef USE_UNIX_SOCKETS
  /* For the time being do not mix proxy and unix domain sockets. See #1274 */
  if(proxy && conn->unix_domain_socket) {
    free(proxy);
    proxy = NULL;
  }
#endif

  if(proxy && (!*proxy || (conn->handler->flags & PROTOPT_NONETWORK))) {
    free(proxy);  /* Don't bother with an empty proxy string or if the
                     protocol doesn't work with network */
    proxy = NULL;
  }
  if(socksproxy && (!*socksproxy ||
                    (conn->handler->flags & PROTOPT_NONETWORK))) {
    free(socksproxy);  /* Don't bother with an empty socks proxy string or if
                          the protocol doesn't work with network */
    socksproxy = NULL;
  }

  /***********************************************************************
   * If this is supposed to use a proxy, we need to figure out the proxy host
   * name, proxy type and port number, so that we can re-use an existing
   * connection that may exist registered to the same proxy host.
   ***********************************************************************/
  if(proxy || socksproxy) {
    if(proxy) {
      result = parse_proxy(data, conn, proxy, conn->http_proxy.proxytype);
      Curl_safefree(proxy); /* parse_proxy copies the proxy string */
      if(result)
        goto out;
    }

    if(socksproxy) {
      result = parse_proxy(data, conn, socksproxy,
                           conn->socks_proxy.proxytype);
      /* parse_proxy copies the socks proxy string */
      Curl_safefree(socksproxy);
      if(result)
        goto out;
    }

    if(conn->http_proxy.host.rawalloc) {
#ifdef CURL_DISABLE_HTTP
      /* asking for a HTTP proxy is a bit funny when HTTP is disabled... */
      result = CURLE_UNSUPPORTED_PROTOCOL;
      goto out;
#else
      /* force this connection's protocol to become HTTP if compatible */
      if(!(conn->handler->protocol & PROTO_FAMILY_HTTP)) {
        if((conn->handler->flags & PROTOPT_PROXY_AS_HTTP) &&
           !conn->bits.tunnel_proxy)
          conn->handler = &Curl_handler_http;
        else
          /* if not converting to HTTP over the proxy, enforce tunneling */
          conn->bits.tunnel_proxy = TRUE;
      }
      conn->bits.httpproxy = TRUE;
#endif
    }
    else {
      conn->bits.httpproxy = FALSE; /* not a HTTP proxy */
      conn->bits.tunnel_proxy = FALSE; /* no tunneling if not HTTP */
    }

    if(conn->socks_proxy.host.rawalloc) {
      if(!conn->http_proxy.host.rawalloc) {
        /* once a socks proxy */
        /* move credentials parsed into the http_proxy slot over to the
           socks proxy when no HTTP proxy is in play */
        if(!conn->socks_proxy.user) {
          conn->socks_proxy.user = conn->http_proxy.user;
          conn->http_proxy.user = NULL;
          Curl_safefree(conn->socks_proxy.passwd);
          conn->socks_proxy.passwd = conn->http_proxy.passwd;
          conn->http_proxy.passwd = NULL;
        }
      }
      conn->bits.socksproxy = TRUE;
    }
    else
      conn->bits.socksproxy = FALSE; /* not a socks proxy */
  }
  else {
    conn->bits.socksproxy = FALSE;
    conn->bits.httpproxy = FALSE;
  }
  conn->bits.proxy = conn->bits.httpproxy || conn->bits.socksproxy;

  if(!conn->bits.proxy) {
    /* we aren't using the proxy after all... */
    conn->bits.proxy = FALSE;
    conn->bits.httpproxy = FALSE;
    conn->bits.socksproxy = FALSE;
    conn->bits.proxy_user_passwd = FALSE;
    conn->bits.tunnel_proxy = FALSE;
  }

out:
  /* Curl_safefree() above NULLed these on paths that already released them,
     so free(NULL) no-ops make this unconditional cleanup safe */

  free(socksproxy);
  free(proxy);
  return result;
}
#endif /* CURL_DISABLE_PROXY */

/*
 * Curl_parse_login_details()
 *
 * This is used to parse a login string for user name, password and options in
 * the following formats:
 *
 *   user
 *   user:password
 *   user:password;options
 *   user;options
 *   user;options:password
 *   :password
 *   :password;options
 *   ;options
 *   ;options:password
 *
 * Parameters:
 *
 * login    [in]     - The login string.
 * len      [in]     - The length of the login string.
 * userp    [in/out] - The address where a pointer to newly allocated memory
 *                     holding the user will be stored upon completion.
* passwdp [in/out] - The address where a pointer to newly allocated memory * holding the password will be stored upon completion. * optionsp [in/out] - The address where a pointer to newly allocated memory * holding the options will be stored upon completion. * * Returns CURLE_OK on success. */ CURLcode Curl_parse_login_details(const char *login, const size_t len, char **userp, char **passwdp, char **optionsp) { CURLcode result = CURLE_OK; char *ubuf = NULL; char *pbuf = NULL; char *obuf = NULL; const char *psep = NULL; const char *osep = NULL; size_t ulen; size_t plen; size_t olen; /* Attempt to find the password separator */ if(passwdp) { psep = strchr(login, ':'); /* Within the constraint of the login string */ if(psep >= login + len) psep = NULL; } /* Attempt to find the options separator */ if(optionsp) { osep = strchr(login, ';'); /* Within the constraint of the login string */ if(osep >= login + len) osep = NULL; } /* Calculate the portion lengths */ ulen = (psep ? (size_t)(osep && psep > osep ? osep - login : psep - login) : (osep ? (size_t)(osep - login) : len)); plen = (psep ? (osep && osep > psep ? (size_t)(osep - psep) : (size_t)(login + len - psep)) - 1 : 0); olen = (osep ? (psep && psep > osep ? 
(size_t)(psep - osep) : (size_t)(login + len - osep)) - 1 : 0); /* Allocate the user portion buffer */ if(userp && ulen) { ubuf = malloc(ulen + 1); if(!ubuf) result = CURLE_OUT_OF_MEMORY; } /* Allocate the password portion buffer */ if(!result && passwdp && plen) { pbuf = malloc(plen + 1); if(!pbuf) { free(ubuf); result = CURLE_OUT_OF_MEMORY; } } /* Allocate the options portion buffer */ if(!result && optionsp && olen) { obuf = malloc(olen + 1); if(!obuf) { free(pbuf); free(ubuf); result = CURLE_OUT_OF_MEMORY; } } if(!result) { /* Store the user portion if necessary */ if(ubuf) { memcpy(ubuf, login, ulen); ubuf[ulen] = '\0'; Curl_safefree(*userp); *userp = ubuf; } /* Store the password portion if necessary */ if(pbuf) { memcpy(pbuf, psep + 1, plen); pbuf[plen] = '\0'; Curl_safefree(*passwdp); *passwdp = pbuf; } /* Store the options portion if necessary */ if(obuf) { memcpy(obuf, osep + 1, olen); obuf[olen] = '\0'; Curl_safefree(*optionsp); *optionsp = obuf; } } return result; } /************************************************************* * Figure out the remote port number and fix it in the URL * * No matter if we use a proxy or not, we have to figure out the remote * port number of various reasons. * * The port number embedded in the URL is replaced, if necessary. *************************************************************/ static CURLcode parse_remote_port(struct Curl_easy *data, struct connectdata *conn) { if(data->set.use_port && data->state.allow_port) { /* if set, we use this instead of the port possibly given in the URL */ char portbuf[16]; CURLUcode uc; conn->remote_port = (unsigned short)data->set.use_port; snprintf(portbuf, sizeof(portbuf), "%u", conn->remote_port); uc = curl_url_set(data->state.uh, CURLUPART_PORT, portbuf, 0); if(uc) return CURLE_OUT_OF_MEMORY; } return CURLE_OK; } /* * Override the login details from the URL with that in the CURLOPT_USERPWD * option or a .netrc file, if applicable. 
 */
static CURLcode override_login(struct Curl_easy *data,
                               struct connectdata *conn,
                               char **userp, char **passwdp, char **optionsp)
{
  bool user_changed = FALSE;
  bool passwd_changed = FALSE;
  CURLUcode uc;

  /* Explicitly set credentials (CURLOPT_USERNAME/PASSWORD/LOGIN_OPTIONS)
     replace whatever came out of the URL */
  if(data->set.str[STRING_USERNAME]) {
    free(*userp);
    *userp = strdup(data->set.str[STRING_USERNAME]);
    if(!*userp)
      return CURLE_OUT_OF_MEMORY;
    conn->bits.user_passwd = TRUE; /* enable user+password */
    user_changed = TRUE;
  }

  if(data->set.str[STRING_PASSWORD]) {
    free(*passwdp);
    *passwdp = strdup(data->set.str[STRING_PASSWORD]);
    if(!*passwdp)
      return CURLE_OUT_OF_MEMORY;
    conn->bits.user_passwd = TRUE; /* enable user+password */
    passwd_changed = TRUE;
  }

  if(data->set.str[STRING_OPTIONS]) {
    free(*optionsp);
    *optionsp = strdup(data->set.str[STRING_OPTIONS]);
    if(!*optionsp)
      return CURLE_OUT_OF_MEMORY;
  }

  conn->bits.netrc = FALSE;
  if(data->set.use_netrc != CURL_NETRC_IGNORED) {
    char *nuser = NULL;
    char *npasswd = NULL;
    int ret;

    if(data->set.use_netrc == CURL_NETRC_OPTIONAL)
      nuser = *userp; /* to separate otherwise identical machines */

    /* ret > 0: host not found in netrc, ret < 0: out of memory,
       ret == 0: a match was found and nuser/npasswd may be set */
    ret = Curl_parsenetrc(conn->host.name,
                          &nuser, &npasswd,
                          data->set.str[STRING_NETRC_FILE]);
    if(ret > 0) {
      infof(data, "Couldn't find host %s in the "
            DOT_CHAR "netrc file; using defaults\n",
            conn->host.name);
    }
    else if(ret < 0) {
      return CURLE_OUT_OF_MEMORY;
    }
    else {
      /* set bits.netrc TRUE to remember that we got the name from a .netrc
         file, so that it is safe to use even if we followed a Location: to a
         different host or similar. */
      conn->bits.netrc = TRUE;

      conn->bits.user_passwd = TRUE; /* enable user+password */

      if(data->set.use_netrc == CURL_NETRC_OPTIONAL) {
        /* prefer credentials outside netrc */
        if(nuser && !*userp) {
          free(*userp);
          *userp = nuser;
          user_changed = TRUE;
        }
        if(npasswd && !*passwdp) {
          free(*passwdp);
          *passwdp = npasswd;
          passwd_changed = TRUE;
        }
      }
      else {
        /* prefer netrc credentials */
        if(nuser) {
          free(*userp);
          *userp = nuser;
          user_changed = TRUE;
        }
        if(npasswd) {
          free(*passwdp);
          *passwdp = npasswd;
          passwd_changed = TRUE;
        }
      }
    }
  }

  /* for updated strings, we update them in the URL */
  if(user_changed) {
    uc = curl_url_set(data->state.uh, CURLUPART_USER, *userp, 0);
    if(uc)
      return Curl_uc_to_curlcode(uc);
  }
  if(passwd_changed) {
    uc = curl_url_set(data->state.uh, CURLUPART_PASSWORD, *passwdp, 0);
    if(uc)
      return Curl_uc_to_curlcode(uc);
  }
  return CURLE_OK;
}

/*
 * Set the login details so they're available in the connection
 */
static CURLcode set_login(struct connectdata *conn)
{
  CURLcode result = CURLE_OK;
  const char *setuser = CURL_DEFAULT_USER;
  const char *setpasswd = CURL_DEFAULT_PASSWORD;

  /* If our protocol needs a password and we have none, use the defaults */
  if((conn->handler->flags & PROTOPT_NEEDSPWD) && !conn->bits.user_passwd)
    ; /* keep the library default credentials chosen above */
  else {
    setuser = "";
    setpasswd = "";
  }
  /* Store the default user */
  if(!conn->user) {
    conn->user = strdup(setuser);
    if(!conn->user)
      return CURLE_OUT_OF_MEMORY;
  }

  /* Store the default password */
  if(!conn->passwd) {
    conn->passwd = strdup(setpasswd);
    if(!conn->passwd)
      result = CURLE_OUT_OF_MEMORY;
  }

  /* if there's a user without password, consider password blank */
  if(conn->user && !conn->passwd) {
    conn->passwd = strdup("");
    if(!conn->passwd)
      result = CURLE_OUT_OF_MEMORY;
  }

  return result;
}

/*
 * Parses a "host:port" string to connect to.
 * The hostname and the port may be empty; in this case, NULL is returned for
 * the hostname and -1 for the port.
 */
static CURLcode parse_connect_to_host_port(struct Curl_easy *data,
                                           const char *host,
                                           char **hostname_result,
                                           int *port_result)
{
  char *host_dup;
  char *hostptr;
  char *host_portno;
  char *portptr;
  int port = -1;

#if defined(CURL_DISABLE_VERBOSE_STRINGS)
  (void) data;
#endif

  *hostname_result = NULL;
  *port_result = -1;

  if(!host || !*host)
    return CURLE_OK;

  /* work on a writable copy since the parsing cuts it up in place */
  host_dup = strdup(host);
  if(!host_dup)
    return CURLE_OUT_OF_MEMORY;

  hostptr = host_dup;

  /* start scanning for port number at this point */
  portptr = hostptr;

  /* detect and extract RFC6874-style IPv6-addresses */
  if(*hostptr == '[') {
#ifdef ENABLE_IPV6
    char *ptr = ++hostptr; /* advance beyond the initial bracket */
    while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.')))
      ptr++;
    if(*ptr == '%') {
      /* There might be a zone identifier */
      if(strncmp("%25", ptr, 3))
        infof(data, "Please URL encode %% as %%25, see RFC 6874.\n");
      ptr++;
      /* Allow unreserved characters as defined in RFC 3986 */
      while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') ||
                     (*ptr == '.') || (*ptr == '_') || (*ptr == '~')))
        ptr++;
    }
    if(*ptr == ']')
      /* yeps, it ended nicely with a bracket as well */
      *ptr++ = '\0';
    else
      infof(data, "Invalid IPv6 address format\n");
    portptr = ptr;
    /* Note that if this didn't end with a bracket, we still advanced the
     * hostptr first, but I can't see anything wrong with that as no host
     * name nor a numeric can legally start with a bracket.
     */
#else
    failf(data, "Use of IPv6 in *_CONNECT_TO without IPv6 support built-in!");
    free(host_dup);
    return CURLE_NOT_BUILT_IN;
#endif
  }

  /* Get port number off server.com:1080 */
  host_portno = strchr(portptr, ':');
  if(host_portno) {
    char *endp = NULL;
    *host_portno = '\0'; /* cut off number from host name */
    host_portno++;
    if(*host_portno) {
      long portparse = strtol(host_portno, &endp, 10);
      if((endp && *endp) || (portparse < 0) || (portparse > 65535)) {
        infof(data, "No valid port number in connect to host string (%s)\n",
              host_portno);
        /* a bad port invalidates the whole entry: report no hostname and no
           port to the caller */
        hostptr = NULL;
        port = -1;
      }
      else
        port = (int)portparse; /* we know it will fit */
    }
  }

  /* now, clone the cleaned host name */
  if(hostptr) {
    *hostname_result = strdup(hostptr);
    if(!*hostname_result) {
      free(host_dup);
      return CURLE_OUT_OF_MEMORY;
    }
  }

  *port_result = port;

  free(host_dup);
  return CURLE_OK;
}

/*
 * Parses one "connect to" string in the form:
 * "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT".
 */
static CURLcode parse_connect_to_string(struct Curl_easy *data,
                                        struct connectdata *conn,
                                        const char *conn_to_host,
                                        char **host_result,
                                        int *port_result)
{
  CURLcode result = CURLE_OK;
  const char *ptr = conn_to_host;
  int host_match = FALSE;
  int port_match = FALSE;

  *host_result = NULL;
  *port_result = -1;

  if(*ptr == ':') {
    /* an empty hostname always matches */
    host_match = TRUE;
    ptr++;
  }
  else {
    /* check whether the URL's hostname matches; IPv6 literals are compared
       in their bracketed form */
    size_t hostname_to_match_len;
    char *hostname_to_match = aprintf("%s%s%s",
                                      conn->bits.ipv6_ip ? "[" : "",
                                      conn->host.name,
                                      conn->bits.ipv6_ip ? "]" : "");
    if(!hostname_to_match)
      return CURLE_OUT_OF_MEMORY;
    hostname_to_match_len = strlen(hostname_to_match);
    host_match = strncasecompare(ptr, hostname_to_match,
                                 hostname_to_match_len);
    free(hostname_to_match);
    /* NOTE(review): on a mismatch 'ptr' may be advanced past the end of the
       string here; it is never dereferenced in that case (&& below
       short-circuits), but the arithmetic itself is worth tightening */
    ptr += hostname_to_match_len;

    host_match = host_match && *ptr == ':';
    ptr++;
  }

  if(host_match) {
    if(*ptr == ':') {
      /* an empty port always matches */
      port_match = TRUE;
      ptr++;
    }
    else {
      /* check whether the URL's port matches */
      char *ptr_next = strchr(ptr, ':');
      if(ptr_next) {
        char *endp = NULL;
        long port_to_match = strtol(ptr, &endp, 10);
        if((endp == ptr_next) && (port_to_match == conn->remote_port)) {
          port_match = TRUE;
          ptr = ptr_next + 1;
        }
      }
    }
  }

  if(host_match && port_match) {
    /* parse the hostname and port to connect to */
    result = parse_connect_to_host_port(data, ptr, host_result, port_result);
  }

  return result;
}

/*
 * Processes all strings in the "connect to" slist, and uses the "connect
 * to host" and "connect to port" of the first string that matches.
 */
static CURLcode parse_connect_to_slist(struct Curl_easy *data,
                                       struct connectdata *conn,
                                       struct curl_slist *conn_to_host)
{
  CURLcode result = CURLE_OK;
  char *host = NULL;
  int port = -1;

  /* stop at the first entry that provided a host or a port */
  while(conn_to_host && !host && port == -1) {
    result = parse_connect_to_string(data, conn, conn_to_host->data,
                                     &host, &port);
    if(result)
      return result;

    if(host && *host) {
      /* ownership of 'host' moves into the connection struct here */
      conn->conn_to_host.rawalloc = host;
      conn->conn_to_host.name = host;
      conn->bits.conn_to_host = TRUE;

      infof(data, "Connecting to hostname: %s\n", host);
    }
    else {
      /* no "connect to host" */
      conn->bits.conn_to_host = FALSE;
      Curl_safefree(host);
    }

    if(port >= 0) {
      conn->conn_to_port = port;
      conn->bits.conn_to_port = TRUE;

      infof(data, "Connecting to port: %d\n", port);
    }
    else {
      /* no "connect to port" */
      conn->bits.conn_to_port = FALSE;
      port = -1;
    }

    conn_to_host = conn_to_host->next;
  }

  return result;
}

/*************************************************************
 * Resolve the address of the server or proxy
 *************************************************************/
static CURLcode resolve_server(struct Curl_easy *data,
                               struct connectdata *conn,
                               bool *async)
{
  CURLcode result = CURLE_OK;
  timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);

  /*************************************************************
   * Resolve the name of the server or proxy
   *************************************************************/
  if(conn->bits.reuse)
    /* We're reusing the connection - no need to resolve anything, and
       fix_hostname() was called already in create_conn() for the re-use
       case. */
    *async = FALSE;

  else {
    /* this is a fresh connect */
    int rc;
    struct Curl_dns_entry *hostaddr;

#ifdef USE_UNIX_SOCKETS
    if(conn->unix_domain_socket) {
      /* Unix domain sockets are local. The host gets ignored, just use the
       * specified domain socket address. Do not cache "DNS entries". There is
       * no DNS involved and we already have the filesystem path available */
      const char *path = conn->unix_domain_socket;

      hostaddr = calloc(1, sizeof(struct Curl_dns_entry));
      if(!hostaddr)
        result = CURLE_OUT_OF_MEMORY;
      else {
        bool longpath = FALSE;
        hostaddr->addr = Curl_unix2addr(path, &longpath,
                                        conn->abstract_unix_socket);
        if(hostaddr->addr)
          hostaddr->inuse++;
        else {
          /* Long paths are not supported for now */
          if(longpath) {
            failf(data, "Unix socket path too long: '%s'", path);
            result = CURLE_COULDNT_RESOLVE_HOST;
          }
          else
            result = CURLE_OUT_OF_MEMORY;
          free(hostaddr);
          hostaddr = NULL;
        }
      }
    }
    else
#endif
    if(!conn->bits.proxy) {
      /* resolve either the "connect to" override host or the URL host */
      struct hostname *connhost;
      if(conn->bits.conn_to_host)
        connhost = &conn->conn_to_host;
      else
        connhost = &conn->host;

      /* If not connecting via a proxy, extract the port from the URL, if it is
       * there, thus overriding any defaults that might have been set above.
       */
      if(conn->bits.conn_to_port)
        conn->port = conn->conn_to_port;
      else
        conn->port = conn->remote_port;

      /* Resolve target host right on */
      rc = Curl_resolv_timeout(conn, connhost->name, (int)conn->port,
                               &hostaddr, timeout_ms);
      if(rc == CURLRESOLV_PENDING)
        *async = TRUE;

      else if(rc == CURLRESOLV_TIMEDOUT)
        result = CURLE_OPERATION_TIMEDOUT;

      else if(!hostaddr) {
        failf(data, "Couldn't resolve host '%s'", connhost->dispname);
        result = CURLE_COULDNT_RESOLVE_HOST;
        /* don't return yet, we need to clean up the timeout first */
      }
    }
    else {
      /* This is a proxy that hasn't been resolved yet. */

      struct hostname * const host = conn->bits.socksproxy ?
        &conn->socks_proxy.host : &conn->http_proxy.host;

      /* resolve proxy */
      rc = Curl_resolv_timeout(conn, host->name, (int)conn->port,
                               &hostaddr, timeout_ms);

      if(rc == CURLRESOLV_PENDING)
        *async = TRUE;

      else if(rc == CURLRESOLV_TIMEDOUT)
        result = CURLE_OPERATION_TIMEDOUT;

      else if(!hostaddr) {
        failf(data, "Couldn't resolve proxy '%s'", host->dispname);
        result = CURLE_COULDNT_RESOLVE_PROXY;
        /* don't return yet, we need to clean up the timeout first */
      }
    }
    /* hand the resolved entry (possibly NULL on failure) over to the
       connection; it must not already hold one */
    DEBUGASSERT(conn->dns_entry == NULL);
    conn->dns_entry = hostaddr;
  }

  return result;
}

/*
 * Cleanup the connection just allocated before we can move along and use the
 * previously existing one.  All relevant data is copied over and old_conn is
 * ready for freeing once this function returns.
 */
static void reuse_conn(struct connectdata *old_conn,
                       struct connectdata *conn)
{
  /* strip old_conn of its allocated members; the struct itself is freed by
     the caller after this returns */
  free_fixed_hostname(&old_conn->http_proxy.host);
  free_fixed_hostname(&old_conn->socks_proxy.host);
  free(old_conn->http_proxy.host.rawalloc);
  free(old_conn->socks_proxy.host.rawalloc);

  /* free the SSL config struct from this connection struct as this was
     allocated in vain and is targeted for destruction */
  Curl_free_primary_ssl_config(&old_conn->ssl_config);
  Curl_free_primary_ssl_config(&old_conn->proxy_ssl_config);

  conn->data = old_conn->data;

  /* get the user+password information from the old_conn struct since it may
   * be new for this request even when we re-use an existing connection */
  conn->bits.user_passwd = old_conn->bits.user_passwd;
  if(conn->bits.user_passwd) {
    /* use the new user name and password though */
    Curl_safefree(conn->user);
    Curl_safefree(conn->passwd);
    conn->user = old_conn->user;
    conn->passwd = old_conn->passwd;
    old_conn->user = NULL;
    old_conn->passwd = NULL;
  }

  conn->bits.proxy_user_passwd = old_conn->bits.proxy_user_passwd;
  if(conn->bits.proxy_user_passwd) {
    /* use the new proxy user name and proxy password though */
    Curl_safefree(conn->http_proxy.user);
    Curl_safefree(conn->socks_proxy.user);
    Curl_safefree(conn->http_proxy.passwd);
    Curl_safefree(conn->socks_proxy.passwd);
    conn->http_proxy.user = old_conn->http_proxy.user;
    conn->socks_proxy.user = old_conn->socks_proxy.user;
    conn->http_proxy.passwd = old_conn->http_proxy.passwd;
    conn->socks_proxy.passwd = old_conn->socks_proxy.passwd;
    /* ownership moved over; clear the sources so they aren't freed twice */
    old_conn->http_proxy.user = NULL;
    old_conn->socks_proxy.user = NULL;
    old_conn->http_proxy.passwd = NULL;
    old_conn->socks_proxy.passwd = NULL;
  }

  /* host can change, when doing keepalive with a proxy or if the case is
     different this time etc */
  free_fixed_hostname(&conn->host);
  free_fixed_hostname(&conn->conn_to_host);
  Curl_safefree(conn->host.rawalloc);
  Curl_safefree(conn->conn_to_host.rawalloc);
  conn->host = old_conn->host;
  conn->conn_to_host = old_conn->conn_to_host;
  conn->conn_to_port = old_conn->conn_to_port;
  conn->remote_port = old_conn->remote_port;

  /* persist connection info in session handle */
  Curl_persistconninfo(conn);

  conn_reset_all_postponed_data(old_conn); /* free buffers */

  /* re-use init */
  conn->bits.reuse = TRUE; /* yes, we're re-using here */

  Curl_safefree(old_conn->user);
  Curl_safefree(old_conn->passwd);
  Curl_safefree(old_conn->options);
  Curl_safefree(old_conn->http_proxy.user);
  Curl_safefree(old_conn->socks_proxy.user);
  Curl_safefree(old_conn->http_proxy.passwd);
  Curl_safefree(old_conn->socks_proxy.passwd);
  Curl_safefree(old_conn->localdev);
  Curl_llist_destroy(&old_conn->send_pipe, NULL);
  Curl_llist_destroy(&old_conn->recv_pipe, NULL);

  Curl_safefree(old_conn->master_buffer);

#ifdef USE_UNIX_SOCKETS
  Curl_safefree(old_conn->unix_domain_socket);
#endif
}

/**
 * create_conn() sets up a new connectdata struct, or re-uses an already
 * existing one, and resolves host name.
 *
 * if this function returns CURLE_OK and *async is set to TRUE, the resolve
 * response will be coming asynchronously. If *async is FALSE, the name is
 * already resolved.
 *
 * @param data The sessionhandle pointer
 * @param in_connect is set to the next connection data pointer
 * @param async is set TRUE when an async DNS resolution is pending
 * @see Curl_setup_conn()
 *
 * *NOTE* this function assigns the conn->data pointer!
*/ static CURLcode create_conn(struct Curl_easy *data, struct connectdata **in_connect, bool *async) { CURLcode result = CURLE_OK; struct connectdata *conn; struct connectdata *conn_temp = NULL; bool reuse; bool connections_available = TRUE; bool force_reuse = FALSE; bool waitpipe = FALSE; size_t max_host_connections = Curl_multi_max_host_connections(data->multi); size_t max_total_connections = Curl_multi_max_total_connections(data->multi); *async = FALSE; /************************************************************* * Check input data *************************************************************/ if(!data->change.url) { result = CURLE_URL_MALFORMAT; goto out; } /* First, split up the current URL in parts so that we can use the parts for checking against the already present connections. In order to not have to modify everything at once, we allocate a temporary connection data struct and fill in for comparison purposes. */ conn = allocate_conn(data); if(!conn) { result = CURLE_OUT_OF_MEMORY; goto out; } /* We must set the return variable as soon as possible, so that our parent can cleanup any possible allocs we may have done before any failure */ *in_connect = conn; result = parseurlandfillconn(data, conn); if(result) goto out; if(data->set.str[STRING_BEARER]) { conn->oauth_bearer = strdup(data->set.str[STRING_BEARER]); if(!conn->oauth_bearer) { result = CURLE_OUT_OF_MEMORY; goto out; } } #ifdef USE_UNIX_SOCKETS if(data->set.str[STRING_UNIX_SOCKET_PATH]) { conn->unix_domain_socket = strdup(data->set.str[STRING_UNIX_SOCKET_PATH]); if(conn->unix_domain_socket == NULL) { result = CURLE_OUT_OF_MEMORY; goto out; } conn->abstract_unix_socket = data->set.abstract_unix_socket; } #endif /* After the unix socket init but before the proxy vars are used, parse and initialize the proxy vars */ #ifndef CURL_DISABLE_PROXY result = create_conn_helper_init_proxy(conn); if(result) goto out; #endif /************************************************************* * If the protocol is 
using SSL and HTTP proxy is used, we set * the tunnel_proxy bit. *************************************************************/ if((conn->given->flags&PROTOPT_SSL) && conn->bits.httpproxy) conn->bits.tunnel_proxy = TRUE; /************************************************************* * Figure out the remote port number and fix it in the URL *************************************************************/ result = parse_remote_port(data, conn); if(result) goto out; /* Check for overridden login details and set them accordingly so they they are known when protocol->setup_connection is called! */ result = override_login(data, conn, &conn->user, &conn->passwd, &conn->options); if(result) goto out; result = set_login(conn); /* default credentials */ if(result) goto out; /************************************************************* * Process the "connect to" linked list of hostname/port mappings. * Do this after the remote port number has been fixed in the URL. *************************************************************/ result = parse_connect_to_slist(data, conn, data->set.connect_to); if(result) goto out; /************************************************************* * IDN-fix the hostnames *************************************************************/ result = fix_hostname(conn, &conn->host); if(result) goto out; if(conn->bits.conn_to_host) { result = fix_hostname(conn, &conn->conn_to_host); if(result) goto out; } if(conn->bits.httpproxy) { result = fix_hostname(conn, &conn->http_proxy.host); if(result) goto out; } if(conn->bits.socksproxy) { result = fix_hostname(conn, &conn->socks_proxy.host); if(result) goto out; } /************************************************************* * Check whether the host and the "connect to host" are equal. * Do this after the hostnames have been IDN-fixed. 
*************************************************************/ if(conn->bits.conn_to_host && strcasecompare(conn->conn_to_host.name, conn->host.name)) { conn->bits.conn_to_host = FALSE; } /************************************************************* * Check whether the port and the "connect to port" are equal. * Do this after the remote port number has been fixed in the URL. *************************************************************/ if(conn->bits.conn_to_port && conn->conn_to_port == conn->remote_port) { conn->bits.conn_to_port = FALSE; } /************************************************************* * If the "connect to" feature is used with an HTTP proxy, * we set the tunnel_proxy bit. *************************************************************/ if((conn->bits.conn_to_host || conn->bits.conn_to_port) && conn->bits.httpproxy) conn->bits.tunnel_proxy = TRUE; /************************************************************* * Setup internals depending on protocol. Needs to be done after * we figured out what/if proxy to use. *************************************************************/ result = setup_connection_internals(conn); if(result) goto out; conn->recv[FIRSTSOCKET] = Curl_recv_plain; conn->send[FIRSTSOCKET] = Curl_send_plain; conn->recv[SECONDARYSOCKET] = Curl_recv_plain; conn->send[SECONDARYSOCKET] = Curl_send_plain; conn->bits.tcp_fastopen = data->set.tcp_fastopen; /*********************************************************************** * file: is a special case in that it doesn't need a network connection ***********************************************************************/ #ifndef CURL_DISABLE_FILE if(conn->handler->flags & PROTOPT_NONETWORK) { bool done; /* this is supposed to be the connect function so we better at least check that the file is present here! 
*/ DEBUGASSERT(conn->handler->connect_it); Curl_persistconninfo(conn); result = conn->handler->connect_it(conn, &done); /* Setup a "faked" transfer that'll do nothing */ if(!result) { conn->data = data; conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; /* we are "connected */ result = Curl_conncache_add_conn(data->state.conn_cache, conn); if(result) goto out; /* * Setup whatever necessary for a resumed transfer */ result = setup_range(data); if(result) { DEBUGASSERT(conn->handler->done); /* we ignore the return code for the protocol-specific DONE */ (void)conn->handler->done(conn, result, FALSE); goto out; } Curl_setup_transfer(conn, -1, -1, FALSE, NULL, /* no download */ -1, NULL); /* no upload */ } /* since we skip do_init() */ Curl_init_do(data, conn); goto out; } #endif /* Get a cloned copy of the SSL config situation stored in the connection struct. But to get this going nicely, we must first make sure that the strings in the master copy are pointing to the correct strings in the session handle strings array! Keep in mind that the pointers in the master copy are pointing to strings that will be freed as part of the Curl_easy struct, but all cloned copies will be separately allocated. 
*/ data->set.ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_ORIG]; data->set.proxy_ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_PROXY]; data->set.ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_ORIG]; data->set.proxy_ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_PROXY]; data->set.ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE]; data->set.proxy_ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE]; data->set.ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET]; data->set.proxy_ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET]; data->set.ssl.primary.cipher_list = data->set.str[STRING_SSL_CIPHER_LIST_ORIG]; data->set.proxy_ssl.primary.cipher_list = data->set.str[STRING_SSL_CIPHER_LIST_PROXY]; data->set.ssl.primary.cipher_list13 = data->set.str[STRING_SSL_CIPHER13_LIST_ORIG]; data->set.proxy_ssl.primary.cipher_list13 = data->set.str[STRING_SSL_CIPHER13_LIST_PROXY]; data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG]; data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY]; data->set.ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_ORIG]; data->set.proxy_ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_PROXY]; data->set.ssl.cert = data->set.str[STRING_CERT_ORIG]; data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY]; data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG]; data->set.proxy_ssl.cert_type = data->set.str[STRING_CERT_TYPE_PROXY]; data->set.ssl.key = data->set.str[STRING_KEY_ORIG]; data->set.proxy_ssl.key = data->set.str[STRING_KEY_PROXY]; data->set.ssl.key_type = data->set.str[STRING_KEY_TYPE_ORIG]; data->set.proxy_ssl.key_type = data->set.str[STRING_KEY_TYPE_PROXY]; data->set.ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_ORIG]; data->set.proxy_ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_PROXY]; data->set.ssl.primary.clientcert = data->set.str[STRING_CERT_ORIG]; data->set.proxy_ssl.primary.clientcert = data->set.str[STRING_CERT_PROXY]; 
#ifdef USE_TLS_SRP data->set.ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG]; data->set.proxy_ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_PROXY]; data->set.ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG]; data->set.proxy_ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_PROXY]; #endif if(!Curl_clone_primary_ssl_config(&data->set.ssl.primary, &conn->ssl_config)) { result = CURLE_OUT_OF_MEMORY; goto out; } if(!Curl_clone_primary_ssl_config(&data->set.proxy_ssl.primary, &conn->proxy_ssl_config)) { result = CURLE_OUT_OF_MEMORY; goto out; } prune_dead_connections(data); /************************************************************* * Check the current list of connections to see if we can * re-use an already existing one or if we have to create a * new one. *************************************************************/ DEBUGASSERT(conn->user); DEBUGASSERT(conn->passwd); /* reuse_fresh is TRUE if we are told to use a new connection by force, but we only acknowledge this option if this is not a re-used connection already (which happens due to follow-location or during a HTTP authentication phase). */ if(data->set.reuse_fresh && !data->state.this_is_a_follow) reuse = FALSE; else reuse = ConnectionExists(data, conn, &conn_temp, &force_reuse, &waitpipe); /* If we found a reusable connection that is now marked as in use, we may still want to open a new connection if we are pipelining. 
*/ if(reuse && !force_reuse && IsPipeliningPossible(data, conn_temp)) { size_t pipelen = conn_temp->send_pipe.size + conn_temp->recv_pipe.size; if(pipelen > 0) { infof(data, "Found connection %ld, with requests in the pipe (%zu)\n", conn_temp->connection_id, pipelen); if(Curl_conncache_bundle_size(conn_temp) < max_host_connections && Curl_conncache_size(data) < max_total_connections) { /* We want a new connection anyway */ reuse = FALSE; infof(data, "We can reuse, but we want a new connection anyway\n"); Curl_conncache_return_conn(conn_temp); } } } if(reuse) { /* * We already have a connection for this, we got the former connection * in the conn_temp variable and thus we need to cleanup the one we * just allocated before we can move along and use the previously * existing one. */ reuse_conn(conn, conn_temp); #ifdef USE_SSL free(conn->ssl_extra); #endif free(conn); /* we don't need this anymore */ conn = conn_temp; *in_connect = conn; infof(data, "Re-using existing connection! (#%ld) with %s %s\n", conn->connection_id, conn->bits.proxy?"proxy":"host", conn->socks_proxy.host.name ? conn->socks_proxy.host.dispname : conn->http_proxy.host.name ? conn->http_proxy.host.dispname : conn->host.dispname); } else { /* We have decided that we want a new connection. However, we may not be able to do that if we have reached the limit of how many connections we are allowed to open. 
*/ if(conn->handler->flags & PROTOPT_ALPN_NPN) { /* The protocol wants it, so set the bits if enabled in the easy handle (default) */ if(data->set.ssl_enable_alpn) conn->bits.tls_enable_alpn = TRUE; if(data->set.ssl_enable_npn) conn->bits.tls_enable_npn = TRUE; } if(waitpipe) /* There is a connection that *might* become usable for pipelining "soon", and we wait for that */ connections_available = FALSE; else { /* this gets a lock on the conncache */ struct connectbundle *bundle = Curl_conncache_find_bundle(conn, data->state.conn_cache); if(max_host_connections > 0 && bundle && (bundle->num_connections >= max_host_connections)) { struct connectdata *conn_candidate; /* The bundle is full. Extract the oldest connection. */ conn_candidate = Curl_conncache_extract_bundle(data, bundle); Curl_conncache_unlock(conn); if(conn_candidate) (void)Curl_disconnect(data, conn_candidate, /* dead_connection */ FALSE); else { infof(data, "No more connections allowed to host: %zu\n", max_host_connections); connections_available = FALSE; } } else Curl_conncache_unlock(conn); } if(connections_available && (max_total_connections > 0) && (Curl_conncache_size(data) >= max_total_connections)) { struct connectdata *conn_candidate; /* The cache is full. Let's see if we can kill a connection. */ conn_candidate = Curl_conncache_extract_oldest(data); if(conn_candidate) (void)Curl_disconnect(data, conn_candidate, /* dead_connection */ FALSE); else { infof(data, "No connections available in cache\n"); connections_available = FALSE; } } if(!connections_available) { infof(data, "No connections available.\n"); conn_free(conn); *in_connect = NULL; result = CURLE_NO_CONNECTION_AVAILABLE; goto out; } else { /* * This is a brand new connection, so let's store it in the connection * cache of ours! 
*/ result = Curl_conncache_add_conn(data->state.conn_cache, conn); if(result) goto out; } #if defined(USE_NTLM) /* If NTLM is requested in a part of this connection, make sure we don't assume the state is fine as this is a fresh connection and NTLM is connection based. */ if((data->state.authhost.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && data->state.authhost.done) { infof(data, "NTLM picked AND auth done set, clear picked!\n"); data->state.authhost.picked = CURLAUTH_NONE; data->state.authhost.done = FALSE; } if((data->state.authproxy.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && data->state.authproxy.done) { infof(data, "NTLM-proxy picked AND auth done set, clear picked!\n"); data->state.authproxy.picked = CURLAUTH_NONE; data->state.authproxy.done = FALSE; } #endif } /* Setup and init stuff before DO starts, in preparing for the transfer. */ Curl_init_do(data, conn); /* * Setup whatever necessary for a resumed transfer */ result = setup_range(data); if(result) goto out; /* Continue connectdata initialization here. */ /* * Inherit the proper values from the urldata struct AFTER we have arranged * the persistent connection stuff */ conn->seek_func = data->set.seek_func; conn->seek_client = data->set.seek_client; /************************************************************* * Resolve the address of the server or proxy *************************************************************/ result = resolve_server(data, conn, async); out: return result; } /* Curl_setup_conn() is called after the name resolve initiated in * create_conn() is all done. 
 *
 * Curl_setup_conn() also handles reused connections
 *
 * conn->data MUST already have been setup fine (in create_conn)
 */
CURLcode Curl_setup_conn(struct connectdata *conn,
                         bool *protocol_done)
{
  CURLcode result = CURLE_OK;
  struct Curl_easy *data = conn->data;

  Curl_pgrsTime(data, TIMER_NAMELOOKUP);

  if(conn->handler->flags & PROTOPT_NONETWORK) {
    /* nothing to setup when not using a network */
    *protocol_done = TRUE;
    return result;
  }
  *protocol_done = FALSE; /* default to not done */

  /* set proxy_connect_closed to false unconditionally already here since it
     is used strictly to provide extra information to a parent function in the
     case of proxy CONNECT failures and we must make sure we don't have it
     lingering set from a previous invoke */
  conn->bits.proxy_connect_closed = FALSE;

  /*
   * Set user-agent. Used for HTTP, but since we can attempt to tunnel
   * basically anything through a http proxy we can't limit this based on
   * protocol.
   */
  if(data->set.str[STRING_USERAGENT]) {
    Curl_safefree(conn->allocptr.uagent);
    conn->allocptr.uagent =
      aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
    if(!conn->allocptr.uagent)
      return CURLE_OUT_OF_MEMORY;
  }

  data->req.headerbytecount = 0;

#ifdef CURL_DO_LINEEND_CONV
  data->state.crlf_conversions = 0; /* reset CRLF conversion counter */
#endif /* CURL_DO_LINEEND_CONV */

  /* set start time here for timeout purposes in the connect procedure, it
     is later set again for the progress meter purpose */
  conn->now = Curl_now();

  if(CURL_SOCKET_BAD == conn->sock[FIRSTSOCKET]) {
    /* no socket yet for this connection: start the actual connect */
    conn->bits.tcpconnect[FIRSTSOCKET] = FALSE;
    result = Curl_connecthost(conn, conn->dns_entry);
    if(result)
      return result;
  }
  else {
    /* reused connection: the socket already exists and is connected */
    Curl_pgrsTime(data, TIMER_CONNECT);    /* we're connected already */
    Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
    conn->bits.tcpconnect[FIRSTSOCKET] = TRUE;
    *protocol_done = TRUE;
    Curl_updateconninfo(conn, conn->sock[FIRSTSOCKET]);
    Curl_verboseconnect(conn);
  }

  conn->now = Curl_now(); /* time this *after* the connect is done, we set
                             this here perhaps a second time */
  return result;
}

/* Curl_connect() prepares the connection for the transfer in 'data': it
   resets the single-transfer state, creates or reuses a connection via
   create_conn(), and - unless the resolve is still pending asynchronously
   (*asyncp set TRUE) - finishes setup with Curl_setup_conn(). On failure
   the partially built connectdata is torn down so nothing leaks. */
CURLcode Curl_connect(struct Curl_easy *data,
                      struct connectdata **in_connect,
                      bool *asyncp,
                      bool *protocol_done)
{
  CURLcode result;

  *asyncp = FALSE; /* assume synchronous resolves by default */

  /* init the single-transfer specific data */
  Curl_free_request_state(data);
  memset(&data->req, 0, sizeof(struct SingleRequest));
  data->req.maxdownload = -1;

  /* call the stuff that needs to be called */
  result = create_conn(data, in_connect, asyncp);

  if(!result) {
    if(CONN_INUSE(*in_connect))
      /* pipelining */
      *protocol_done = TRUE;
    else if(!*asyncp) {
      /* DNS resolution is done: that's either because this is a reused
         connection, in which case DNS was unnecessary, or because DNS
         really did finish already (synch resolver/fast async resolve) */
      result = Curl_setup_conn(*in_connect, protocol_done);
    }
  }

  if(result == CURLE_NO_CONNECTION_AVAILABLE) {
    /* the connection was never allocated/stored in this case */
    *in_connect = NULL;
    return result;
  }
  else if(result && *in_connect) {
    /* We're not allowed to return failure with memory left allocated
       in the connectdata struct, free those here */
    Curl_disconnect(data, *in_connect, TRUE);
    *in_connect = NULL; /* return a NULL */
  }

  return result;
}

/*
 * Curl_init_do() inits the readwrite session. This is inited each time (in
 * the DO function before the protocol-specific DO functions are invoked) for
 * a transfer, sometimes multiple times on the same Curl_easy. Make sure
 * nothing in here depends on stuff that are setup dynamically for the
 * transfer.
 *
 * Allow this function to get called with 'conn' set to NULL.
 */
CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn)
{
  struct SingleRequest *k = &data->req;

  if(conn) {
    conn->bits.do_more = FALSE; /* by default there's no curl_do_more() to
                                   use */
    /* if the protocol used doesn't support wildcards, switch it off */
    if(data->state.wildcardmatch &&
       !(conn->handler->flags & PROTOPT_WILDCARD))
      data->state.wildcardmatch = FALSE;
  }

  data->state.done = FALSE; /* *_done() is not called yet */
  data->state.expect100header = FALSE;

  if(data->set.opt_no_body)
    /* in HTTP lingo, no body means using the HEAD request... */
    data->set.httpreq = HTTPREQ_HEAD;
  else if(HTTPREQ_HEAD == data->set.httpreq)
    /* ... but if unset there really is no perfect method that is the
       "opposite" of HEAD but in reality most people probably think GET
       then. The important thing is that we can't let it remain HEAD if the
       opt_no_body is set FALSE since then we'll behave wrong when getting
       HTTP. */
    data->set.httpreq = HTTPREQ_GET;

  k->start = Curl_now(); /* start time */
  k->now = k->start;     /* current time is now */
  k->header = TRUE;      /* assume header until told otherwise */
  k->bytecount = 0;

  /* point the per-request buffers at the handle-owned storage */
  k->buf = data->state.buffer;
  k->hbufp = data->state.headerbuff;
  k->ignorebody = FALSE;

  Curl_speedinit(data);

  Curl_pgrsSetUploadCounter(data, 0);
  Curl_pgrsSetDownloadCounter(data, 0);

  return CURLE_OK;
}

/*
 * get_protocol_family()
 *
 * This is used to return the protocol family for a given protocol.
 *
 * Parameters:
 *
 * protocol  [in]  - A single bit protocol identifier such as HTTP or HTTPS.
 *
 * Returns the family as a single bit protocol identifier.
*/ static unsigned int get_protocol_family(unsigned int protocol) { unsigned int family; switch(protocol) { case CURLPROTO_HTTP: case CURLPROTO_HTTPS: family = CURLPROTO_HTTP; break; case CURLPROTO_FTP: case CURLPROTO_FTPS: family = CURLPROTO_FTP; break; case CURLPROTO_SCP: family = CURLPROTO_SCP; break; case CURLPROTO_SFTP: family = CURLPROTO_SFTP; break; case CURLPROTO_TELNET: family = CURLPROTO_TELNET; break; case CURLPROTO_LDAP: case CURLPROTO_LDAPS: family = CURLPROTO_LDAP; break; case CURLPROTO_DICT: family = CURLPROTO_DICT; break; case CURLPROTO_FILE: family = CURLPROTO_FILE; break; case CURLPROTO_TFTP: family = CURLPROTO_TFTP; break; case CURLPROTO_IMAP: case CURLPROTO_IMAPS: family = CURLPROTO_IMAP; break; case CURLPROTO_POP3: case CURLPROTO_POP3S: family = CURLPROTO_POP3; break; case CURLPROTO_SMTP: case CURLPROTO_SMTPS: family = CURLPROTO_SMTP; break; case CURLPROTO_RTSP: family = CURLPROTO_RTSP; break; case CURLPROTO_RTMP: case CURLPROTO_RTMPS: family = CURLPROTO_RTMP; break; case CURLPROTO_RTMPT: case CURLPROTO_RTMPTS: family = CURLPROTO_RTMPT; break; case CURLPROTO_RTMPE: family = CURLPROTO_RTMPE; break; case CURLPROTO_RTMPTE: family = CURLPROTO_RTMPTE; break; case CURLPROTO_GOPHER: family = CURLPROTO_GOPHER; break; case CURLPROTO_SMB: case CURLPROTO_SMBS: family = CURLPROTO_SMB; break; default: family = 0; break; } return family; } /* * Wrapper to call functions in Curl_conncache_foreach() * * Returns always 0. */ static int conn_upkeep(struct connectdata *conn, void *param) { /* Param is unused. */ (void)param; if(conn->handler->connection_check) { /* Do a protocol-specific keepalive check on the connection. */ conn->handler->connection_check(conn, CONNCHECK_KEEPALIVE); } return 0; /* continue iteration */ } CURLcode Curl_upkeep(struct conncache *conn_cache, void *data) { /* Loop over every connection and make connection alive. */ Curl_conncache_foreach(data, conn_cache, data, conn_upkeep); return CURLE_OK; }
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include "curl_setup.h" #ifdef HAVE_NETINET_IN_H #include <netinet/in.h> #endif #ifdef HAVE_NETDB_H #include <netdb.h> #endif #ifdef HAVE_ARPA_INET_H #include <arpa/inet.h> #endif #ifdef HAVE_NET_IF_H #include <net/if.h> #endif #ifdef HAVE_SYS_IOCTL_H #include <sys/ioctl.h> #endif #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef __VMS #include <in.h> #include <inet.h> #endif #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif #ifndef HAVE_SOCKET #error "We can't compile without socket() support!" 
#endif #include <limits.h> #ifdef USE_LIBIDN2 #include <idn2.h> #elif defined(USE_WIN32_IDN) /* prototype for curl_win32_idn_to_ascii() */ bool curl_win32_idn_to_ascii(const char *in, char **out); #endif /* USE_LIBIDN2 */ #include "urldata.h" #include "netrc.h" #include "formdata.h" #include "mime.h" #include "vtls/vtls.h" #include "hostip.h" #include "transfer.h" #include "sendf.h" #include "progress.h" #include "cookie.h" #include "strcase.h" #include "strerror.h" #include "escape.h" #include "strtok.h" #include "share.h" #include "content_encoding.h" #include "http_digest.h" #include "http_negotiate.h" #include "select.h" #include "multiif.h" #include "easyif.h" #include "speedcheck.h" #include "warnless.h" #include "non-ascii.h" #include "inet_pton.h" #include "getinfo.h" #include "urlapi-int.h" /* And now for the protocols */ #include "ftp.h" #include "dict.h" #include "telnet.h" #include "tftp.h" #include "http.h" #include "http2.h" #include "file.h" #include "curl_ldap.h" #include "ssh.h" #include "imap.h" #include "url.h" #include "connect.h" #include "inet_ntop.h" #include "http_ntlm.h" #include "curl_ntlm_wb.h" #include "socks.h" #include "curl_rtmp.h" #include "gopher.h" #include "http_proxy.h" #include "conncache.h" #include "multihandle.h" #include "pipeline.h" #include "dotdot.h" #include "strdup.h" #include "setopt.h" /* The last 3 #include files should be in this order */ #include "curl_printf.h" #include "curl_memory.h" #include "memdebug.h" static void conn_free(struct connectdata *conn); static void free_fixed_hostname(struct hostname *host); static unsigned int get_protocol_family(unsigned int protocol); /* Some parts of the code (e.g. chunked encoding) assume this buffer has at * more than just a few bytes to play with. Don't let it become too small or * bad things will happen. */ #if READBUFFER_SIZE < READBUFFER_MIN # error READBUFFER_SIZE is too small #endif /* * Protocol table. 
 */
static const struct Curl_handler * const protocols[] = {

#ifndef CURL_DISABLE_HTTP
  &Curl_handler_http,
#endif

#if defined(USE_SSL) && !defined(CURL_DISABLE_HTTP)
  &Curl_handler_https,
#endif

#ifndef CURL_DISABLE_FTP
  &Curl_handler_ftp,
#endif

#if defined(USE_SSL) && !defined(CURL_DISABLE_FTP)
  &Curl_handler_ftps,
#endif

#ifndef CURL_DISABLE_TELNET
  &Curl_handler_telnet,
#endif

#ifndef CURL_DISABLE_DICT
  &Curl_handler_dict,
#endif

#ifndef CURL_DISABLE_LDAP
  &Curl_handler_ldap,
  /* LDAPS needs an SSL-capable LDAP backend as well */
#if !defined(CURL_DISABLE_LDAPS) && \
    ((defined(USE_OPENLDAP) && defined(USE_SSL)) || \
     (!defined(USE_OPENLDAP) && defined(HAVE_LDAP_SSL)))
  &Curl_handler_ldaps,
#endif
#endif

#ifndef CURL_DISABLE_FILE
  &Curl_handler_file,
#endif

#ifndef CURL_DISABLE_TFTP
  &Curl_handler_tftp,
#endif

#if defined(USE_LIBSSH2) || defined(USE_LIBSSH)
  &Curl_handler_scp,
#endif

#if defined(USE_LIBSSH2) || defined(USE_LIBSSH)
  &Curl_handler_sftp,
#endif

#ifndef CURL_DISABLE_IMAP
  &Curl_handler_imap,
#ifdef USE_SSL
  &Curl_handler_imaps,
#endif
#endif

#ifndef CURL_DISABLE_POP3
  &Curl_handler_pop3,
#ifdef USE_SSL
  &Curl_handler_pop3s,
#endif
#endif

#if !defined(CURL_DISABLE_SMB) && defined(USE_NTLM) && \
   (CURL_SIZEOF_CURL_OFF_T > 4) && \
   (!defined(USE_WINDOWS_SSPI) || defined(USE_WIN32_CRYPTO))
  &Curl_handler_smb,
#ifdef USE_SSL
  &Curl_handler_smbs,
#endif
#endif

#ifndef CURL_DISABLE_SMTP
  &Curl_handler_smtp,
#ifdef USE_SSL
  &Curl_handler_smtps,
#endif
#endif

#ifndef CURL_DISABLE_RTSP
  &Curl_handler_rtsp,
#endif

#ifndef CURL_DISABLE_GOPHER
  &Curl_handler_gopher,
#endif

#ifdef USE_LIBRTMP
  &Curl_handler_rtmp,
  &Curl_handler_rtmpt,
  &Curl_handler_rtmpe,
  &Curl_handler_rtmpte,
  &Curl_handler_rtmps,
  &Curl_handler_rtmpts,
#endif

  /* terminating NULL sentinel -- keep last */
  (struct Curl_handler *) NULL
};

/*
 * Dummy handler for undefined protocol schemes.
 */
static const struct Curl_handler Curl_handler_dummy = {
  "<no protocol>",                      /* scheme */
  ZERO_NULL,                            /* setup_connection */
  ZERO_NULL,                            /* do_it */
  ZERO_NULL,                            /* done */
  ZERO_NULL,                            /* do_more */
  ZERO_NULL,                            /* connect_it */
  ZERO_NULL,                            /* connecting */
  ZERO_NULL,                            /* doing */
  ZERO_NULL,                            /* proto_getsock */
  ZERO_NULL,                            /* doing_getsock */
  ZERO_NULL,                            /* domore_getsock */
  ZERO_NULL,                            /* perform_getsock */
  ZERO_NULL,                            /* disconnect */
  ZERO_NULL,                            /* readwrite */
  ZERO_NULL,                            /* connection_check */
  0,                                    /* defport */
  0,                                    /* protocol */
  PROTOPT_NONE                          /* flags */
};

/* Release every dynamically allocated string and URL field stored in the
   data->set substructure of an easy handle. */
void Curl_freeset(struct Curl_easy *data)
{
  /* Free all dynamic strings stored in the data->set substructure. */
  enum dupstring i;
  for(i = (enum dupstring)0; i < STRING_LAST; i++) {
    Curl_safefree(data->set.str[i]);
  }

  if(data->change.referer_alloc) {
    Curl_safefree(data->change.referer);
    data->change.referer_alloc = FALSE;
  }
  data->change.referer = NULL;
  if(data->change.url_alloc) {
    Curl_safefree(data->change.url);
    data->change.url_alloc = FALSE;
  }
  data->change.url = NULL;

  Curl_mime_cleanpart(&data->set.mimepost);
}

/* free the URL pieces */
void Curl_up_free(struct Curl_easy *data)
{
  struct urlpieces *up = &data->state.up;
  Curl_safefree(up->scheme);
  Curl_safefree(up->hostname);
  Curl_safefree(up->port);
  Curl_safefree(up->user);
  Curl_safefree(up->password);
  Curl_safefree(up->options);
  Curl_safefree(up->path);
  Curl_safefree(up->query);
  curl_url_cleanup(data->state.uh);
  data->state.uh = NULL;
}

/*
 * This is the internal function curl_easy_cleanup() calls. This should
 * cleanup and free all resources associated with this sessionhandle.
 *
 * NOTE: if we ever add something that attempts to write to a socket or
 * similar here, we must ignore SIGPIPE first. It is currently only done
 * when curl_easy_perform() is invoked.
 */
CURLcode Curl_close(struct Curl_easy *data)
{
  struct Curl_multi *m;

  if(!data)
    return CURLE_OK;

  Curl_expire_clear(data); /* shut off timers */

  m = data->multi;
  if(m)
    /* This handle is still part of a multi handle, take care of this first
       and detach this handle from there. */
    curl_multi_remove_handle(data->multi, data);

  if(data->multi_easy) {
    /* when curl_easy_perform() is used, it creates its own multi handle to
       use and this is the one */
    curl_multi_cleanup(data->multi_easy);
    data->multi_easy = NULL;
  }

  /* Destroy the timeout list that is held in the easy handle. It is
     /normally/ done by curl_multi_remove_handle() but this is "just in
     case" */
  Curl_llist_destroy(&data->state.timeoutlist, NULL);

  data->magic = 0; /* force a clear AFTER the possibly enforced removal from
                      the multi handle, since that function uses the magic
                      field! */

  if(data->state.rangestringalloc)
    free(data->state.range);

  /* freed here just in case DONE wasn't called */
  Curl_free_request_state(data);

  /* Close down all open SSL info and sessions */
  Curl_ssl_close_all(data);
  Curl_safefree(data->state.first_host);
  Curl_safefree(data->state.scratch);
  Curl_ssl_free_certinfo(data);

  /* Cleanup possible redirect junk */
  free(data->req.newurl);
  data->req.newurl = NULL;

  if(data->change.referer_alloc) {
    Curl_safefree(data->change.referer);
    data->change.referer_alloc = FALSE;
  }
  data->change.referer = NULL;

  Curl_up_free(data);
  Curl_safefree(data->state.buffer);
  Curl_safefree(data->state.headerbuff);
  Curl_safefree(data->state.ulbuf);
  Curl_flush_cookies(data, 1);
  Curl_digest_cleanup(data);
  Curl_safefree(data->info.contenttype);
  Curl_safefree(data->info.wouldredirect);

  /* this destroys the channel and we cannot use it anymore after this */
  Curl_resolver_cleanup(data->state.resolver);

  Curl_http2_cleanup_dependencies(data);
  Curl_convert_close(data);

  /* No longer a dirty share, if it exists */
  if(data->share) {
    Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
    data->share->dirty--;
    Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
  }

  /* destruct wildcard structures if it is needed */
  Curl_wildcard_dtor(&data->wildcard);
  Curl_freeset(data);
  free(data);
  return CURLE_OK;
}

/*
 * Initialize the UserDefined fields within a Curl_easy.
 * This may be safely called on a new or existing Curl_easy.
 */
CURLcode Curl_init_userdefined(struct Curl_easy *data)
{
  struct UserDefined *set = &data->set;
  CURLcode result = CURLE_OK;

  set->out = stdout;   /* default output to stdout */
  set->in_set = stdin; /* default input from stdin */
  set->err = stderr;   /* default stderr to stderr */

  /* use fwrite as default function to store output */
  set->fwrite_func = (curl_write_callback)fwrite;

  /* use fread as default function to read input */
  set->fread_func_set = (curl_read_callback)fread;
  set->is_fread_set = 0;
  set->is_fwrite_set = 0;

  set->seek_func = ZERO_NULL;
  set->seek_client = ZERO_NULL;

  /* conversion callbacks for non-ASCII hosts */
  set->convfromnetwork = ZERO_NULL;
  set->convtonetwork = ZERO_NULL;
  set->convfromutf8 = ZERO_NULL;

  set->filesize = -1;      /* we don't know the size */
  set->postfieldsize = -1; /* unknown size */
  set->maxredirs = -1;     /* allow any amount by default */

  set->httpreq = HTTPREQ_GET;     /* Default HTTP request */
  set->rtspreq = RTSPREQ_OPTIONS; /* Default RTSP request */
  set->ftp_use_epsv = TRUE;  /* FTP defaults to EPSV operations */
  set->ftp_use_eprt = TRUE;  /* FTP defaults to EPRT operations */
  set->ftp_use_pret = FALSE; /* mainly useful for drftpd servers */
  set->ftp_filemethod = FTPFILE_MULTICWD;

  set->dns_cache_timeout = 60; /* Timeout every 60 seconds by default */

  /* Set the default size of the SSL session ID cache */
  set->general_ssl.max_ssl_sessions = 5;

  set->proxyport = 0;
  set->proxytype = CURLPROXY_HTTP; /* defaults to HTTP proxy */
  set->httpauth = CURLAUTH_BASIC;  /* defaults to basic */
  set->proxyauth = CURLAUTH_BASIC; /* defaults to basic */

  /* SOCKS5 proxy auth defaults to username/password + GSS-API */
  set->socks5auth = CURLAUTH_BASIC | CURLAUTH_GSSAPI;

  /* make libcurl quiet by default: */
  set->hide_progress = TRUE; /* CURLOPT_NOPROGRESS changes these */

  Curl_mime_initpart(&set->mimepost, data);

  /*
   * libcurl 7.10 introduced SSL verification *by default*! This needs to be
   * switched off unless wanted.
   */
  set->ssl.primary.verifypeer = TRUE;
  set->ssl.primary.verifyhost = TRUE;
#ifdef USE_TLS_SRP
  set->ssl.authtype = CURL_TLSAUTH_NONE;
#endif
  set->ssh_auth_types = CURLSSH_AUTH_DEFAULT; /* defaults to any auth
                                                 type */
  set->ssl.primary.sessionid = TRUE; /* session ID caching enabled by
                                        default */
  set->proxy_ssl = set->ssl;

  set->new_file_perms = 0644;      /* Default permissions */
  set->new_directory_perms = 0755; /* Default permissions */

  /* for the *protocols fields we don't use the CURLPROTO_ALL convenience
     define since we internally only use the lower 16 bits for the passed
     in bitmask to not conflict with the private bits */
  set->allowed_protocols = CURLPROTO_ALL;
  set->redir_protocols = CURLPROTO_ALL & /* All except FILE, SCP and SMB */
    ~(CURLPROTO_FILE | CURLPROTO_SCP | CURLPROTO_SMB | CURLPROTO_SMBS);

#if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI)
  /*
   * disallow unprotected protection negotiation NEC reference implementation
   * seem not to follow rfc1961 section 4.3/4.4
   */
  set->socks5_gssapi_nec = FALSE;
#endif

  /* Set the default CA cert bundle/path detected/specified at build time.
   *
   * If Schannel (WinSSL) is the selected SSL backend then these locations
   * are ignored. We allow setting CA location for schannel only when
   * explicitly specified by the user via CURLOPT_CAINFO / --cacert.
   */
  if(Curl_ssl_backend() != CURLSSLBACKEND_SCHANNEL) {
#if defined(CURL_CA_BUNDLE)
    result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_ORIG],
                            CURL_CA_BUNDLE);
    if(result)
      return result;

    result = Curl_setstropt(&set->str[STRING_SSL_CAFILE_PROXY],
                            CURL_CA_BUNDLE);
    if(result)
      return result;
#endif
#if defined(CURL_CA_PATH)
    result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_ORIG], CURL_CA_PATH);
    if(result)
      return result;

    result = Curl_setstropt(&set->str[STRING_SSL_CAPATH_PROXY], CURL_CA_PATH);
    if(result)
      return result;
#endif
  }

  set->wildcard_enabled = FALSE;
  set->chunk_bgn = ZERO_NULL;
  set->chunk_end = ZERO_NULL;

  set->tcp_keepalive = FALSE;
  set->tcp_keepintvl = 60;
  set->tcp_keepidle = 60;
  set->tcp_fastopen = FALSE;
  set->tcp_nodelay = TRUE;

  set->ssl_enable_npn = TRUE;
  set->ssl_enable_alpn = TRUE;

  set->expect_100_timeout = 1000L; /* Wait for a second by default. */
  set->sep_headers = TRUE; /* separated header lists by default */
  set->buffer_size = READBUFFER_SIZE;
  set->upload_buffer_size = UPLOADBUFFER_DEFAULT;
  set->happy_eyeballs_timeout = CURL_HET_DEFAULT;
  set->fnmatch = ZERO_NULL;
  set->upkeep_interval_ms = CURL_UPKEEP_INTERVAL_DEFAULT;
  set->maxconnects = DEFAULT_CONNCACHE_SIZE; /* for easy handles */

  set->httpversion =
#ifdef USE_NGHTTP2
    CURL_HTTP_VERSION_2TLS
#else
    CURL_HTTP_VERSION_1_1
#endif
    ;

  Curl_http2_init_userset(set);
  return result;
}

/**
 * Curl_open()
 *
 * @param curl is a pointer to a sessionhandle pointer that gets set by this
 * function.
 * @return CURLcode
 */
CURLcode Curl_open(struct Curl_easy **curl)
{
  CURLcode result;
  struct Curl_easy *data;

  /* Very simple start-up: alloc the struct, init it with zeroes and return */
  data = calloc(1, sizeof(struct Curl_easy));
  if(!data) {
    /* this is a very serious error */
    DEBUGF(fprintf(stderr, "Error: calloc of Curl_easy failed\n"));
    return CURLE_OUT_OF_MEMORY;
  }

  data->magic = CURLEASY_MAGIC_NUMBER;

  result = Curl_resolver_init(&data->state.resolver);
  if(result) {
    DEBUGF(fprintf(stderr, "Error: resolver_init failed\n"));
    free(data);
    return result;
  }

  /* We do some initial setup here, all those fields that can't be just 0 */
  data->state.buffer = malloc(READBUFFER_SIZE + 1);
  if(!data->state.buffer) {
    DEBUGF(fprintf(stderr, "Error: malloc of buffer failed\n"));
    result = CURLE_OUT_OF_MEMORY;
  }
  else {
    data->state.headerbuff = malloc(HEADERSIZE);
    if(!data->state.headerbuff) {
      DEBUGF(fprintf(stderr, "Error: malloc of headerbuff failed\n"));
      result = CURLE_OUT_OF_MEMORY;
    }
    else {
      result = Curl_init_userdefined(data);

      data->state.headersize = HEADERSIZE;
      Curl_convert_init(data);
      Curl_initinfo(data);

      /* most recent connection is not yet defined */
      data->state.lastconnect = NULL;

      data->progress.flags |= PGRS_HIDE;
      data->state.current_speed = -1; /* init to negative == impossible */

      Curl_http2_init_state(&data->state);
    }
  }

  if(result) {
    /* failure: undo every allocation made above before returning */
    Curl_resolver_cleanup(data->state.resolver);
    free(data->state.buffer);
    free(data->state.headerbuff);
    Curl_freeset(data);
    free(data);
    data = NULL;
  }
  else
    *curl = data;

  return result;
}

#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
/* Free and clear one of the two postponed-data buffers kept for the
   recv-before-send workaround. */
static void conn_reset_postponed_data(struct connectdata *conn, int num)
{
  struct postponed_data * const psnd = &(conn->postponed[num]);
  if(psnd->buffer) {
    DEBUGASSERT(psnd->allocated_size > 0);
    DEBUGASSERT(psnd->recv_size <= psnd->allocated_size);
    DEBUGASSERT(psnd->recv_size ?
                (psnd->recv_processed < psnd->recv_size) :
                (psnd->recv_processed == 0));
    DEBUGASSERT(psnd->bindsock != CURL_SOCKET_BAD);
    free(psnd->buffer);
    psnd->buffer = NULL;
    psnd->allocated_size = 0;
    psnd->recv_size = 0;
    psnd->recv_processed = 0;
#ifdef DEBUGBUILD
    psnd->bindsock = CURL_SOCKET_BAD; /* used only for DEBUGASSERT */
#endif /* DEBUGBUILD */
  }
  else {
    /* no buffer: every related counter must already be cleared */
    DEBUGASSERT(psnd->allocated_size == 0);
    DEBUGASSERT(psnd->recv_size == 0);
    DEBUGASSERT(psnd->recv_processed == 0);
    DEBUGASSERT(psnd->bindsock == CURL_SOCKET_BAD);
  }
}

static void conn_reset_all_postponed_data(struct connectdata *conn)
{
  conn_reset_postponed_data(conn, 0);
  conn_reset_postponed_data(conn, 1);
}
#else  /* ! USE_RECV_BEFORE_SEND_WORKAROUND */
/* Use "do-nothing" macro instead of function when workaround not used */
#define conn_reset_all_postponed_data(c) do {} WHILE_FALSE
#endif /* ! USE_RECV_BEFORE_SEND_WORKAROUND */

/* Free a connectdata struct and everything it owns: SSL state, sockets,
   credential/header strings, pipes and proxy data. */
static void conn_free(struct connectdata *conn)
{
  if(!conn)
    return;

  /* possible left-overs from the async name resolvers */
  Curl_resolver_cancel(conn);

  /* close the SSL stuff before we close any sockets since they will/may
     write to the sockets */
  Curl_ssl_close(conn, FIRSTSOCKET);
  Curl_ssl_close(conn, SECONDARYSOCKET);

  /* close possibly still open sockets */
  if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET])
    Curl_closesocket(conn, conn->sock[SECONDARYSOCKET]);
  if(CURL_SOCKET_BAD != conn->sock[FIRSTSOCKET])
    Curl_closesocket(conn, conn->sock[FIRSTSOCKET]);
  if(CURL_SOCKET_BAD != conn->tempsock[0])
    Curl_closesocket(conn, conn->tempsock[0]);
  if(CURL_SOCKET_BAD != conn->tempsock[1])
    Curl_closesocket(conn, conn->tempsock[1]);

#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \
    defined(NTLM_WB_ENABLED)
  Curl_ntlm_wb_cleanup(conn);
#endif

  Curl_safefree(conn->user);
  Curl_safefree(conn->passwd);
  Curl_safefree(conn->oauth_bearer);
  Curl_safefree(conn->options);
  Curl_safefree(conn->http_proxy.user);
  Curl_safefree(conn->socks_proxy.user);
  Curl_safefree(conn->http_proxy.passwd);
  Curl_safefree(conn->socks_proxy.passwd);
  Curl_safefree(conn->allocptr.proxyuserpwd);
  Curl_safefree(conn->allocptr.uagent);
  Curl_safefree(conn->allocptr.userpwd);
  Curl_safefree(conn->allocptr.accept_encoding);
  Curl_safefree(conn->allocptr.te);
  Curl_safefree(conn->allocptr.rangeline);
  Curl_safefree(conn->allocptr.ref);
  Curl_safefree(conn->allocptr.host);
  Curl_safefree(conn->allocptr.cookiehost);
  Curl_safefree(conn->allocptr.rtsp_transport);
  Curl_safefree(conn->trailer);
  Curl_safefree(conn->host.rawalloc); /* host name buffer */
  Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */
  Curl_safefree(conn->secondaryhostname);
  Curl_safefree(conn->http_proxy.host.rawalloc); /* http proxy name buffer */
  Curl_safefree(conn->socks_proxy.host.rawalloc); /* socks proxy name buffer */
  Curl_safefree(conn->master_buffer);
  Curl_safefree(conn->connect_state);

  conn_reset_all_postponed_data(conn);

  Curl_llist_destroy(&conn->send_pipe, NULL);
  Curl_llist_destroy(&conn->recv_pipe, NULL);

  Curl_safefree(conn->localdev);
  Curl_free_primary_ssl_config(&conn->ssl_config);
  Curl_free_primary_ssl_config(&conn->proxy_ssl_config);

#ifdef USE_UNIX_SOCKETS
  Curl_safefree(conn->unix_domain_socket);
#endif

#ifdef USE_SSL
  Curl_safefree(conn->ssl_extra);
#endif

  free(conn); /* free all the connection oriented data */
}

/*
 * Disconnects the given connection. Note the connection may not be the
 * primary connection, like when freeing room in the connection cache or
 * killing of a dead old connection.
 *
 * A connection needs an easy handle when closing down. We support this passed
 * in separately since the connection to get closed here is often already
 * disassociated from an easy handle.
 *
 * This function MUST NOT reset state in the Curl_easy struct if that
 * isn't strictly bound to the life-time of *this* particular connection.
* */ CURLcode Curl_disconnect(struct Curl_easy *data, struct connectdata *conn, bool dead_connection) { if(!conn) return CURLE_OK; /* this is closed and fine already */ if(!data) { DEBUGF(infof(data, "DISCONNECT without easy handle, ignoring\n")); return CURLE_OK; } /* * If this connection isn't marked to force-close, leave it open if there * are other users of it */ if(CONN_INUSE(conn) && !dead_connection) { DEBUGF(infof(data, "Curl_disconnect when inuse: %zu\n", CONN_INUSE(conn))); return CURLE_OK; } conn->data = data; if(conn->dns_entry != NULL) { Curl_resolv_unlock(data, conn->dns_entry); conn->dns_entry = NULL; } Curl_hostcache_prune(data); /* kill old DNS cache entries */ #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) /* Cleanup NTLM connection-related data */ Curl_http_ntlm_cleanup(conn); #endif if(conn->handler->disconnect) /* This is set if protocol-specific cleanups should be made */ conn->handler->disconnect(conn, dead_connection); /* unlink ourselves! */ infof(data, "Closing connection %ld\n", conn->connection_id); Curl_conncache_remove_conn(conn, TRUE); free_fixed_hostname(&conn->host); free_fixed_hostname(&conn->conn_to_host); free_fixed_hostname(&conn->http_proxy.host); free_fixed_hostname(&conn->socks_proxy.host); DEBUGASSERT(conn->data == data); /* this assumes that the pointer is still there after the connection was detected from the cache */ Curl_ssl_close(conn, FIRSTSOCKET); conn_free(conn); return CURLE_OK; } /* * This function should return TRUE if the socket is to be assumed to * be dead. Most commonly this happens when the server has closed the * connection due to inactivity. */ static bool SocketIsDead(curl_socket_t sock) { int sval; bool ret_val = TRUE; sval = SOCKET_READABLE(sock, 0); if(sval == 0) /* timeout */ ret_val = FALSE; return ret_val; } /* * IsPipeliningPossible() * * Return a bitmask with the available pipelining and multiplexing options for * the given requested connection. 
 */
static int IsPipeliningPossible(const struct Curl_easy *handle,
                                const struct connectdata *conn)
{
  int avail = 0;

  /* If a HTTP protocol and pipelining is enabled */
  if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
     (!conn->bits.protoconnstart || !conn->bits.close)) {

    if(Curl_pipeline_wanted(handle->multi, CURLPIPE_HTTP1) &&
       (handle->set.httpversion != CURL_HTTP_VERSION_1_0) &&
       (handle->set.httpreq == HTTPREQ_GET ||
        handle->set.httpreq == HTTPREQ_HEAD))
      /* didn't ask for HTTP/1.0 and a GET or HEAD */
      avail |= CURLPIPE_HTTP1;

    if(Curl_pipeline_wanted(handle->multi, CURLPIPE_MULTIPLEX) &&
       (handle->set.httpversion >= CURL_HTTP_VERSION_2))
      /* allows HTTP/2 */
      avail |= CURLPIPE_MULTIPLEX;
  }
  return avail;
}

/* Returns non-zero if a handle was removed */
int Curl_removeHandleFromPipeline(struct Curl_easy *handle,
                                  struct curl_llist *pipeline)
{
  if(pipeline) {
    struct curl_llist_element *curr;

    /* linear scan for the first node whose payload is this handle */
    curr = pipeline->head;
    while(curr) {
      if(curr->ptr == handle) {
        Curl_llist_remove(pipeline, curr, NULL);
        return 1; /* we removed a handle */
      }
      curr = curr->next;
    }
  }
  return 0;
}

#if 0 /* this code is saved here as it is useful for debugging purposes */
static void Curl_printPipeline(struct curl_llist *pipeline)
{
  struct curl_llist_element *curr;

  curr = pipeline->head;
  while(curr) {
    struct Curl_easy *data = (struct Curl_easy *) curr->ptr;
    infof(data, "Handle in pipeline: %s\n", data->state.path);
    curr = curr->next;
  }
}
#endif

/* Return the easy handle at the head of the pipeline, or NULL if the
   pipeline is empty. In debug builds, also sanity-check every node. */
static struct Curl_easy* gethandleathead(struct curl_llist *pipeline)
{
  struct curl_llist_element *curr = pipeline->head;
#ifdef DEBUGBUILD
  {
    struct curl_llist_element *p = pipeline->head;
    while(p) {
      struct Curl_easy *e = p->ptr;
      DEBUGASSERT(GOOD_EASY_HANDLE(e));
      p = p->next;
    }
  }
#endif
  if(curr) {
    return (struct Curl_easy *) curr->ptr;
  }

  return NULL;
}

/* remove the specified connection from all (possible) pipelines and related
   queues */
void Curl_getoff_all_pipelines(struct Curl_easy *data,
                               struct connectdata *conn)
{
  if(!conn->bundle)
    return;
  if(conn->bundle->multiuse == BUNDLE_PIPELINING) {
    /* remember whether this handle sat at the head of either pipe before
       removal, so we can hand the channel over to the next handle */
    bool recv_head = (conn->readchannel_inuse &&
                      Curl_recvpipe_head(data, conn));
    bool send_head = (conn->writechannel_inuse &&
                      Curl_sendpipe_head(data, conn));

    if(Curl_removeHandleFromPipeline(data, &conn->recv_pipe) && recv_head)
      Curl_pipeline_leave_read(conn);
    if(Curl_removeHandleFromPipeline(data, &conn->send_pipe) && send_head)
      Curl_pipeline_leave_write(conn);
  }
  else {
    (void)Curl_removeHandleFromPipeline(data, &conn->recv_pipe);
    (void)Curl_removeHandleFromPipeline(data, &conn->send_pipe);
  }
}

/* Return TRUE if the two proxy descriptions use the same type, port and
   (case-insensitively) the same host name. */
static bool proxy_info_matches(const struct proxy_info* data,
                               const struct proxy_info* needle)
{
  if((data->proxytype == needle->proxytype) &&
     (data->port == needle->port) &&
     Curl_safe_strcasecompare(data->host.name, needle->host.name))
    return TRUE;

  return FALSE;
}

/*
 * This function checks if the given connection is dead and extracts it from
 * the connection cache if so.
 *
 * When this is called as a Curl_conncache_foreach() callback, the connection
 * cache lock is held!
 *
 * Returns TRUE if the connection was dead and extracted.
 */
static bool extract_if_dead(struct connectdata *conn,
                            struct Curl_easy *data)
{
  size_t pipeLen = conn->send_pipe.size + conn->recv_pipe.size;
  if(!pipeLen && !CONN_INUSE(conn)) {
    /* The check for a dead socket makes sense only if there are no handles
       in pipeline and the connection isn't already marked in
       use */
    bool dead;
    conn->data = data;
    if(conn->handler->connection_check) {
      /* The protocol has a special method for checking the state of the
         connection. Use it to check if the connection is dead.
       */
      unsigned int state;

      state = conn->handler->connection_check(conn, CONNCHECK_ISDEAD);
      dead = (state & CONNRESULT_DEAD);
    }
    else {
      /* Use the general method for determining the death of a connection */
      dead = SocketIsDead(conn->sock[FIRSTSOCKET]);
    }

    if(dead) {
      infof(data, "Connection %ld seems to be dead!\n", conn->connection_id);
      /* take it out of the cache; FALSE = the cache lock is already held */
      Curl_conncache_remove_conn(conn, FALSE);
      conn->data = NULL; /* detach */
      return TRUE;
    }
  }
  return FALSE;
}

/* parameter block for call_extract_if_dead(): 'extracted' receives the
   connection that was found dead and removed, if any */
struct prunedead {
  struct Curl_easy *data;
  struct connectdata *extracted;
};

/*
 * Wrapper to use extract_if_dead() function in Curl_conncache_foreach()
 *
 */
static int call_extract_if_dead(struct connectdata *conn, void *param)
{
  struct prunedead *p = (struct prunedead *)param;
  if(extract_if_dead(conn, p->data)) {
    /* stop the iteration here, pass back the connection that was extracted */
    p->extracted = conn;
    return 1;
  }
  return 0; /* continue iteration */
}

/*
 * This function scans the connection cache for half-open/dead connections,
 * closes and removes them.
 * The cleanup is done at most once per second.
 */
static void prune_dead_connections(struct Curl_easy *data)
{
  struct curltime now = Curl_now();
  /* elapsed milliseconds since the last cache sweep */
  time_t elapsed = Curl_timediff(now, data->state.conn_cache->last_cleanup);

  if(elapsed >= 1000L) {
    struct prunedead prune;
    prune.data = data;
    prune.extracted = NULL;
    /* repeat until no more dead connections are found in the cache */
    while(Curl_conncache_foreach(data, data->state.conn_cache, &prune,
                                 call_extract_if_dead)) {
      /* disconnect it */
      (void)Curl_disconnect(data, prune.extracted,
                            /* dead_connection */TRUE);
    }
    data->state.conn_cache->last_cleanup = now;
  }
}

/* maximum pipeline length for this multi handle; 0 means unlimited/none */
static size_t max_pipeline_length(struct Curl_multi *multi)
{
  return multi ? multi->max_pipeline_length : 0;
}

/*
 * Given one filled in connection struct (named needle), this function should
 * detect if there already is one that has all the significant details
 * exactly the same and thus should be used instead.
 *
 * If there is a match, this function returns TRUE - and has marked the
 * connection as 'in-use'.
It must later be called with ConnectionDone() to * return back to 'idle' (unused) state. * * The force_reuse flag is set if the connection must be used, even if * the pipelining strategy wants to open a new connection instead of reusing. */ static bool ConnectionExists(struct Curl_easy *data, struct connectdata *needle, struct connectdata **usethis, bool *force_reuse, bool *waitpipe) { struct connectdata *check; struct connectdata *chosen = 0; bool foundPendingCandidate = FALSE; int canpipe = IsPipeliningPossible(data, needle); struct connectbundle *bundle; #ifdef USE_NTLM bool wantNTLMhttp = ((data->state.authhost.want & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && (needle->handler->protocol & PROTO_FAMILY_HTTP)); bool wantProxyNTLMhttp = (needle->bits.proxy_user_passwd && ((data->state.authproxy.want & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) && (needle->handler->protocol & PROTO_FAMILY_HTTP))); #endif *force_reuse = FALSE; *waitpipe = FALSE; /* We can't pipeline if the site is blacklisted */ if((canpipe & CURLPIPE_HTTP1) && Curl_pipeline_site_blacklisted(data, needle)) canpipe &= ~ CURLPIPE_HTTP1; /* Look up the bundle with all the connections to this particular host. Locks the connection cache, beware of early returns! */ bundle = Curl_conncache_find_bundle(needle, data->state.conn_cache); if(bundle) { /* Max pipe length is zero (unlimited) for multiplexed connections */ size_t max_pipe_len = (bundle->multiuse != BUNDLE_MULTIPLEX)? max_pipeline_length(data->multi):0; size_t best_pipe_len = max_pipe_len; struct curl_llist_element *curr; infof(data, "Found bundle for host %s: %p [%s]\n", (needle->bits.conn_to_host ? needle->conn_to_host.name : needle->host.name), (void *)bundle, (bundle->multiuse == BUNDLE_PIPELINING ? "can pipeline" : (bundle->multiuse == BUNDLE_MULTIPLEX ? 
"can multiplex" : "serially"))); /* We can't pipeline if we don't know anything about the server */ if(canpipe) { if(bundle->multiuse <= BUNDLE_UNKNOWN) { if((bundle->multiuse == BUNDLE_UNKNOWN) && data->set.pipewait) { infof(data, "Server doesn't support multi-use yet, wait\n"); *waitpipe = TRUE; Curl_conncache_unlock(needle); return FALSE; /* no re-use */ } infof(data, "Server doesn't support multi-use (yet)\n"); canpipe = 0; } if((bundle->multiuse == BUNDLE_PIPELINING) && !Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1)) { /* not asked for, switch off */ infof(data, "Could pipeline, but not asked to!\n"); canpipe = 0; } else if((bundle->multiuse == BUNDLE_MULTIPLEX) && !Curl_pipeline_wanted(data->multi, CURLPIPE_MULTIPLEX)) { infof(data, "Could multiplex, but not asked to!\n"); canpipe = 0; } } curr = bundle->conn_list.head; while(curr) { bool match = FALSE; size_t pipeLen; /* * Note that if we use a HTTP proxy in normal mode (no tunneling), we * check connections to that proxy and not to the actual remote server. 
*/ check = curr->ptr; curr = curr->next; if(extract_if_dead(check, data)) { /* disconnect it */ (void)Curl_disconnect(data, check, /* dead_connection */TRUE); continue; } pipeLen = check->send_pipe.size + check->recv_pipe.size; if(canpipe) { if(check->bits.protoconnstart && check->bits.close) continue; if(!check->bits.multiplex) { /* If not multiplexing, make sure the connection is fine for HTTP/1 pipelining */ struct Curl_easy* sh = gethandleathead(&check->send_pipe); struct Curl_easy* rh = gethandleathead(&check->recv_pipe); if(sh) { if(!(IsPipeliningPossible(sh, check) & CURLPIPE_HTTP1)) continue; } else if(rh) { if(!(IsPipeliningPossible(rh, check) & CURLPIPE_HTTP1)) continue; } } } else { if(pipeLen > 0) { /* can only happen within multi handles, and means that another easy handle is using this connection */ continue; } if(Curl_resolver_asynch()) { /* ip_addr_str[0] is NUL only if the resolving of the name hasn't completed yet and until then we don't re-use this connection */ if(!check->ip_addr_str[0]) { infof(data, "Connection #%ld is still name resolving, can't reuse\n", check->connection_id); continue; } } if((check->sock[FIRSTSOCKET] == CURL_SOCKET_BAD) || check->bits.close) { if(!check->bits.close) foundPendingCandidate = TRUE; /* Don't pick a connection that hasn't connected yet or that is going to get closed. */ infof(data, "Connection #%ld isn't open enough, can't reuse\n", check->connection_id); #ifdef DEBUGBUILD if(check->recv_pipe.size > 0) { infof(data, "BAD! 
Unconnected #%ld has a non-empty recv pipeline!\n", check->connection_id); } #endif continue; } } #ifdef USE_UNIX_SOCKETS if(needle->unix_domain_socket) { if(!check->unix_domain_socket) continue; if(strcmp(needle->unix_domain_socket, check->unix_domain_socket)) continue; if(needle->abstract_unix_socket != check->abstract_unix_socket) continue; } else if(check->unix_domain_socket) continue; #endif if((needle->handler->flags&PROTOPT_SSL) != (check->handler->flags&PROTOPT_SSL)) /* don't do mixed SSL and non-SSL connections */ if(get_protocol_family(check->handler->protocol) != needle->handler->protocol || !check->tls_upgraded) /* except protocols that have been upgraded via TLS */ continue; if(needle->bits.httpproxy != check->bits.httpproxy || needle->bits.socksproxy != check->bits.socksproxy) continue; if(needle->bits.socksproxy && !proxy_info_matches(&needle->socks_proxy, &check->socks_proxy)) continue; if(needle->bits.conn_to_host != check->bits.conn_to_host) /* don't mix connections that use the "connect to host" feature and * connections that don't use this feature */ continue; if(needle->bits.conn_to_port != check->bits.conn_to_port) /* don't mix connections that use the "connect to port" feature and * connections that don't use this feature */ continue; if(needle->bits.httpproxy) { if(!proxy_info_matches(&needle->http_proxy, &check->http_proxy)) continue; if(needle->bits.tunnel_proxy != check->bits.tunnel_proxy) continue; if(needle->http_proxy.proxytype == CURLPROXY_HTTPS) { /* use https proxy */ if(needle->handler->flags&PROTOPT_SSL) { /* use double layer ssl */ if(!Curl_ssl_config_matches(&needle->proxy_ssl_config, &check->proxy_ssl_config)) continue; if(check->proxy_ssl[FIRSTSOCKET].state != ssl_connection_complete) continue; } else { if(!Curl_ssl_config_matches(&needle->ssl_config, &check->ssl_config)) continue; if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) continue; } } } if(!canpipe && CONN_INUSE(check)) /* this request can't be pipelined 
but the checked connection is already in use so we skip it */ continue; if(CONN_INUSE(check) && (check->data->multi != needle->data->multi)) /* this could be subject for pipeline/multiplex use, but only if they belong to the same multi handle */ continue; if(needle->localdev || needle->localport) { /* If we are bound to a specific local end (IP+port), we must not re-use a random other one, although if we didn't ask for a particular one we can reuse one that was bound. This comparison is a bit rough and too strict. Since the input parameters can be specified in numerous ways and still end up the same it would take a lot of processing to make it really accurate. Instead, this matching will assume that re-uses of bound connections will most likely also re-use the exact same binding parameters and missing out a few edge cases shouldn't hurt anyone very much. */ if((check->localport != needle->localport) || (check->localportrange != needle->localportrange) || (needle->localdev && (!check->localdev || strcmp(check->localdev, needle->localdev)))) continue; } if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) { /* This protocol requires credentials per connection, so verify that we're using the same name and password as well */ if(strcmp(needle->user, check->user) || strcmp(needle->passwd, check->passwd)) { /* one of them was different */ continue; } } if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) || needle->bits.tunnel_proxy) { /* The requested connection does not use a HTTP proxy or it uses SSL or it is a non-SSL protocol tunneled or it is a non-SSL protocol which is allowed to be upgraded via TLS */ if((strcasecompare(needle->handler->scheme, check->handler->scheme) || (get_protocol_family(check->handler->protocol) == needle->handler->protocol && check->tls_upgraded)) && (!needle->bits.conn_to_host || strcasecompare( needle->conn_to_host.name, check->conn_to_host.name)) && (!needle->bits.conn_to_port || needle->conn_to_port == 
check->conn_to_port) && strcasecompare(needle->host.name, check->host.name) && needle->remote_port == check->remote_port) { /* The schemes match or the the protocol family is the same and the previous connection was TLS upgraded, and the hostname and host port match */ if(needle->handler->flags & PROTOPT_SSL) { /* This is a SSL connection so verify that we're using the same SSL options as well */ if(!Curl_ssl_config_matches(&needle->ssl_config, &check->ssl_config)) { DEBUGF(infof(data, "Connection #%ld has different SSL parameters, " "can't reuse\n", check->connection_id)); continue; } if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) { foundPendingCandidate = TRUE; DEBUGF(infof(data, "Connection #%ld has not started SSL connect, " "can't reuse\n", check->connection_id)); continue; } } match = TRUE; } } else { /* The requested connection is using the same HTTP proxy in normal mode (no tunneling) */ match = TRUE; } if(match) { #if defined(USE_NTLM) /* If we are looking for an HTTP+NTLM connection, check if this is already authenticating with the right credentials. If not, keep looking so that we can reuse NTLM connections if possible. (Especially we must not reuse the same connection if partway through a handshake!) 
*/ if(wantNTLMhttp) { if(strcmp(needle->user, check->user) || strcmp(needle->passwd, check->passwd)) continue; } else if(check->ntlm.state != NTLMSTATE_NONE) { /* Connection is using NTLM auth but we don't want NTLM */ continue; } /* Same for Proxy NTLM authentication */ if(wantProxyNTLMhttp) { /* Both check->http_proxy.user and check->http_proxy.passwd can be * NULL */ if(!check->http_proxy.user || !check->http_proxy.passwd) continue; if(strcmp(needle->http_proxy.user, check->http_proxy.user) || strcmp(needle->http_proxy.passwd, check->http_proxy.passwd)) continue; } else if(check->proxyntlm.state != NTLMSTATE_NONE) { /* Proxy connection is using NTLM auth but we don't want NTLM */ continue; } if(wantNTLMhttp || wantProxyNTLMhttp) { /* Credentials are already checked, we can use this connection */ chosen = check; if((wantNTLMhttp && (check->ntlm.state != NTLMSTATE_NONE)) || (wantProxyNTLMhttp && (check->proxyntlm.state != NTLMSTATE_NONE))) { /* We must use this connection, no other */ *force_reuse = TRUE; break; } /* Continue look up for a better connection */ continue; } #endif if(canpipe) { /* We can pipeline if we want to. Let's continue looking for the optimal connection to use, i.e the shortest pipe that is not blacklisted. */ if(pipeLen == 0) { /* We have the optimal connection. Let's stop looking. 
*/ chosen = check; break; } /* We can't use the connection if the pipe is full */ if(max_pipe_len && (pipeLen >= max_pipe_len)) { infof(data, "Pipe is full, skip (%zu)\n", pipeLen); continue; } #ifdef USE_NGHTTP2 /* If multiplexed, make sure we don't go over concurrency limit */ if(check->bits.multiplex) { /* Multiplexed connections can only be HTTP/2 for now */ struct http_conn *httpc = &check->proto.httpc; if(pipeLen >= httpc->settings.max_concurrent_streams) { infof(data, "MAX_CONCURRENT_STREAMS reached, skip (%zu)\n", pipeLen); continue; } } #endif /* We can't use the connection if the pipe is penalized */ if(Curl_pipeline_penalized(data, check)) { infof(data, "Penalized, skip\n"); continue; } if(max_pipe_len) { if(pipeLen < best_pipe_len) { /* This connection has a shorter pipe so far. We'll pick this and continue searching */ chosen = check; best_pipe_len = pipeLen; continue; } } else { /* When not pipelining (== multiplexed), we have a match here! */ chosen = check; infof(data, "Multiplexed connection found!\n"); break; } } else { /* We have found a connection. Let's stop searching. */ chosen = check; break; } } } } if(chosen) { /* mark it as used before releasing the lock */ chosen->data = data; /* own it! */ Curl_conncache_unlock(needle); *usethis = chosen; return TRUE; /* yes, we found one to use! */ } Curl_conncache_unlock(needle); if(foundPendingCandidate && data->set.pipewait) { infof(data, "Found pending candidate for reuse and CURLOPT_PIPEWAIT is set\n"); *waitpipe = TRUE; } return FALSE; /* no matching connecting exists */ } /* after a TCP connection to the proxy has been verified, this function does the next magic step. 
   Note: this function's sub-functions call failf()

*/
CURLcode Curl_connected_proxy(struct connectdata *conn, int sockindex)
{
  CURLcode result = CURLE_OK;

  if(conn->bits.socksproxy) {
#ifndef CURL_DISABLE_PROXY
    /* for the secondary socket (FTP), use the "connect to host"
     * but ignore the "connect to port" (use the secondary port)
     */
    const char * const host = conn->bits.httpproxy ?
                              conn->http_proxy.host.name :
                              conn->bits.conn_to_host ?
                              conn->conn_to_host.name :
                              sockindex == SECONDARYSOCKET ?
                              conn->secondaryhostname : conn->host.name;
    const int port = conn->bits.httpproxy ? (int)conn->http_proxy.port :
                     sockindex == SECONDARYSOCKET ? conn->secondary_port :
                     conn->bits.conn_to_port ? conn->conn_to_port :
                     conn->remote_port;

    /* flag that the SOCKS handshake is in progress on this connection */
    conn->bits.socksproxy_connecting = TRUE;
    switch(conn->socks_proxy.proxytype) {
    case CURLPROXY_SOCKS5:
    case CURLPROXY_SOCKS5_HOSTNAME:
      result = Curl_SOCKS5(conn->socks_proxy.user, conn->socks_proxy.passwd,
                           host, port, sockindex, conn);
      break;

    case CURLPROXY_SOCKS4:
    case CURLPROXY_SOCKS4A:
      result = Curl_SOCKS4(conn->socks_proxy.user, host, port, sockindex,
                           conn);
      break;

    default:
      failf(conn->data, "unknown proxytype option given");
      result = CURLE_COULDNT_CONNECT;
    } /* switch proxytype */
    conn->bits.socksproxy_connecting = FALSE;
#else
  (void)sockindex;
#endif /* CURL_DISABLE_PROXY */
  }

  return result;
}

/*
 * verboseconnect() displays verbose information after a connect
 */
#ifndef CURL_DISABLE_VERBOSE_STRINGS
void Curl_verboseconnect(struct connectdata *conn)
{
  if(conn->data->set.verbose)
    infof(conn->data, "Connected to %s (%s) port %ld (#%ld)\n",
          conn->bits.socksproxy ? conn->socks_proxy.host.dispname :
          conn->bits.httpproxy ? conn->http_proxy.host.dispname :
          conn->bits.conn_to_host ?
          conn->conn_to_host.dispname :
          conn->host.dispname,
          conn->ip_addr_str, conn->port, conn->connection_id);
}
#endif

/* Fill in the sockets to wait on for this connection; returns a GETSOCK_*
   bitmask describing read/write interest for each slot. */
int Curl_protocol_getsock(struct connectdata *conn,
                          curl_socket_t *socks,
                          int numsocks)
{
  if(conn->handler->proto_getsock)
    return conn->handler->proto_getsock(conn, socks, numsocks);
  /* Backup getsock logic. Since there is a live socket in use, we must wait
     for it or it will be removed from watching when the multi_socket API is
     used. */
  socks[0] = conn->sock[FIRSTSOCKET];
  return GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(0);
}

/* Like Curl_protocol_getsock() but for the DOING phase; protocols without a
   doing_getsock callback report no socket interest. */
int Curl_doing_getsock(struct connectdata *conn,
                       curl_socket_t *socks,
                       int numsocks)
{
  if(conn && conn->handler->doing_getsock)
    return conn->handler->doing_getsock(conn, socks, numsocks);
  return GETSOCK_BLANK;
}

/*
 * We are doing protocol-specific connecting and this is being called over and
 * over from the multi interface until the connection phase is done on
 * protocol layer.
 */
CURLcode Curl_protocol_connecting(struct connectdata *conn,
                                  bool *done)
{
  CURLcode result = CURLE_OK;

  if(conn && conn->handler->connecting) {
    *done = FALSE;
    result = conn->handler->connecting(conn, done);
  }
  else
    /* no protocol-specific connecting phase: report done immediately */
    *done = TRUE;

  return result;
}

/*
 * We are DOING this is being called over and over from the multi interface
 * until the DOING phase is done on protocol layer.
 */
CURLcode Curl_protocol_doing(struct connectdata *conn, bool *done)
{
  CURLcode result = CURLE_OK;

  if(conn && conn->handler->doing) {
    *done = FALSE;
    result = conn->handler->doing(conn, done);
  }
  else
    /* no protocol-specific DOING phase: report done immediately */
    *done = TRUE;

  return result;
}

/*
 * We have discovered that the TCP connection has been successful, we can now
 * proceed with some action.
 *
 */
CURLcode Curl_protocol_connect(struct connectdata *conn,
                               bool *protocol_done)
{
  CURLcode result = CURLE_OK;

  *protocol_done = FALSE;

  if(conn->bits.tcpconnect[FIRSTSOCKET] && conn->bits.protoconnstart) {
    /* We already are connected, get back. This may happen when the connect
       worked fine in the first call, like when we connect to a local server
       or proxy.
Note that we don't know if the protocol is actually done. Unless this protocol doesn't have any protocol-connect callback, as then we know we're done. */ if(!conn->handler->connecting) *protocol_done = TRUE; return CURLE_OK; } if(!conn->bits.protoconnstart) { result = Curl_proxy_connect(conn, FIRSTSOCKET); if(result) return result; if(CONNECT_FIRSTSOCKET_PROXY_SSL()) /* wait for HTTPS proxy SSL initialization to complete */ return CURLE_OK; if(conn->bits.tunnel_proxy && conn->bits.httpproxy && Curl_connect_ongoing(conn)) /* when using an HTTP tunnel proxy, await complete tunnel establishment before proceeding further. Return CURLE_OK so we'll be called again */ return CURLE_OK; if(conn->handler->connect_it) { /* is there a protocol-specific connect() procedure? */ /* Call the protocol-specific connect function */ result = conn->handler->connect_it(conn, protocol_done); } else *protocol_done = TRUE; /* it has started, possibly even completed but that knowledge isn't stored in this bit! */ if(!result) conn->bits.protoconnstart = TRUE; } return result; /* pass back status */ } /* * Helpers for IDNA conversions. 
 */

/* Return TRUE if the NUL-terminated host name contains only 7-bit ASCII
   bytes (no byte with the high bit set). */
static bool is_ASCII_name(const char *hostname)
{
  /* use an unsigned view of the bytes so the 0x80 test is well defined
     regardless of whether plain char is signed on this platform */
  const unsigned char *ch = (const unsigned char *)hostname;

  while(*ch) {
    if(*ch++ & 0x80)
      return FALSE;
  }
  return TRUE;
}

/*
 * Perform any necessary IDN conversion of hostname
 */
static CURLcode fix_hostname(struct connectdata *conn, struct hostname *host)
{
  size_t len;
  struct Curl_easy *data = conn->data;

#ifndef USE_LIBIDN2
  (void)data;
  (void)conn;
#elif defined(CURL_DISABLE_VERBOSE_STRINGS)
  (void)conn;
#endif

  /* set the name we use to display the host name */
  host->dispname = host->name;

  len = strlen(host->name);
  if(len && (host->name[len-1] == '.'))
    /* strip off a single trailing dot if present, primarily for SNI but
       there's no use for it */
    host->name[len-1] = 0;

  /* Check name for non-ASCII and convert hostname to ACE form if we can */
  if(!is_ASCII_name(host->name)) {
#ifdef USE_LIBIDN2
    if(idn2_check_version(IDN2_VERSION)) {
      char *ace_hostname = NULL;
#if IDN2_VERSION_NUMBER >= 0x00140000
      /* IDN2_NFC_INPUT: Normalize input string using normalization form C.
         IDN2_NONTRANSITIONAL: Perform Unicode TR46 non-transitional
         processing.
*/ int flags = IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL; #else int flags = IDN2_NFC_INPUT; #endif int rc = idn2_lookup_ul((const char *)host->name, &ace_hostname, flags); if(rc == IDN2_OK) { host->encalloc = (char *)ace_hostname; /* change the name pointer to point to the encoded hostname */ host->name = host->encalloc; } else { failf(data, "Failed to convert %s to ACE; %s\n", host->name, idn2_strerror(rc)); return CURLE_URL_MALFORMAT; } } #elif defined(USE_WIN32_IDN) char *ace_hostname = NULL; if(curl_win32_idn_to_ascii(host->name, &ace_hostname)) { host->encalloc = ace_hostname; /* change the name pointer to point to the encoded hostname */ host->name = host->encalloc; } else { failf(data, "Failed to convert %s to ACE;\n", host->name); return CURLE_URL_MALFORMAT; } #else infof(data, "IDN support not present, can't parse Unicode domains\n"); #endif } { char *hostp; for(hostp = host->name; *hostp; hostp++) { if(*hostp <= 32) { failf(data, "Host name '%s' contains bad letter", host->name); return CURLE_URL_MALFORMAT; } } } return CURLE_OK; } /* * Frees data allocated by fix_hostname() */ static void free_fixed_hostname(struct hostname *host) { #if defined(USE_LIBIDN2) if(host->encalloc) { idn2_free(host->encalloc); /* must be freed with idn2_free() since this was allocated by libidn */ host->encalloc = NULL; } #elif defined(USE_WIN32_IDN) free(host->encalloc); /* must be freed with free() since this was allocated by curl_win32_idn_to_ascii */ host->encalloc = NULL; #else (void)host; #endif } static void llist_dtor(void *user, void *element) { (void)user; (void)element; /* Do nothing */ } /* * Allocate and initialize a new connectdata object. */ static struct connectdata *allocate_conn(struct Curl_easy *data) { struct connectdata *conn = calloc(1, sizeof(struct connectdata)); if(!conn) return NULL; #ifdef USE_SSL /* The SSL backend-specific data (ssl_backend_data) objects are allocated as a separate array to ensure suitable alignment. 
Note that these backend pointers can be swapped by vtls (eg ssl backend data becomes proxy backend data). */ { size_t sslsize = Curl_ssl->sizeof_ssl_backend_data; char *ssl = calloc(4, sslsize); if(!ssl) { free(conn); return NULL; } conn->ssl_extra = ssl; conn->ssl[0].backend = (void *)ssl; conn->ssl[1].backend = (void *)(ssl + sslsize); conn->proxy_ssl[0].backend = (void *)(ssl + 2 * sslsize); conn->proxy_ssl[1].backend = (void *)(ssl + 3 * sslsize); } #endif conn->handler = &Curl_handler_dummy; /* Be sure we have a handler defined already from start to avoid NULL situations and checks */ /* and we setup a few fields in case we end up actually using this struct */ conn->sock[FIRSTSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */ conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */ conn->tempsock[0] = CURL_SOCKET_BAD; /* no file descriptor */ conn->tempsock[1] = CURL_SOCKET_BAD; /* no file descriptor */ conn->connection_id = -1; /* no ID */ conn->port = -1; /* unknown at this point */ conn->remote_port = -1; /* unknown at this point */ #if defined(USE_RECV_BEFORE_SEND_WORKAROUND) && defined(DEBUGBUILD) conn->postponed[0].bindsock = CURL_SOCKET_BAD; /* no file descriptor */ conn->postponed[1].bindsock = CURL_SOCKET_BAD; /* no file descriptor */ #endif /* USE_RECV_BEFORE_SEND_WORKAROUND && DEBUGBUILD */ /* Default protocol-independent behavior doesn't support persistent connections, so we set this to force-close. Protocols that support this need to set this to FALSE in their "curl_do" functions. */ connclose(conn, "Default to force-close"); /* Store creation time to help future close decision making */ conn->created = Curl_now(); /* Store current time to give a baseline to keepalive connection times. */ conn->keepalive = Curl_now(); /* Store off the configured connection upkeep time. 
*/ conn->upkeep_interval_ms = data->set.upkeep_interval_ms; conn->data = data; /* Setup the association between this connection and the Curl_easy */ conn->http_proxy.proxytype = data->set.proxytype; conn->socks_proxy.proxytype = CURLPROXY_SOCKS4; #ifdef CURL_DISABLE_PROXY conn->bits.proxy = FALSE; conn->bits.httpproxy = FALSE; conn->bits.socksproxy = FALSE; conn->bits.proxy_user_passwd = FALSE; conn->bits.tunnel_proxy = FALSE; #else /* CURL_DISABLE_PROXY */ /* note that these two proxy bits are now just on what looks to be requested, they may be altered down the road */ conn->bits.proxy = (data->set.str[STRING_PROXY] && *data->set.str[STRING_PROXY]) ? TRUE : FALSE; conn->bits.httpproxy = (conn->bits.proxy && (conn->http_proxy.proxytype == CURLPROXY_HTTP || conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0 || conn->http_proxy.proxytype == CURLPROXY_HTTPS)) ? TRUE : FALSE; conn->bits.socksproxy = (conn->bits.proxy && !conn->bits.httpproxy) ? TRUE : FALSE; if(data->set.str[STRING_PRE_PROXY] && *data->set.str[STRING_PRE_PROXY]) { conn->bits.proxy = TRUE; conn->bits.socksproxy = TRUE; } conn->bits.proxy_user_passwd = (data->set.str[STRING_PROXYUSERNAME]) ? TRUE : FALSE; conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy; #endif /* CURL_DISABLE_PROXY */ conn->bits.user_passwd = (data->set.str[STRING_USERNAME]) ? 
TRUE : FALSE; conn->bits.ftp_use_epsv = data->set.ftp_use_epsv; conn->bits.ftp_use_eprt = data->set.ftp_use_eprt; conn->ssl_config.verifystatus = data->set.ssl.primary.verifystatus; conn->ssl_config.verifypeer = data->set.ssl.primary.verifypeer; conn->ssl_config.verifyhost = data->set.ssl.primary.verifyhost; conn->proxy_ssl_config.verifystatus = data->set.proxy_ssl.primary.verifystatus; conn->proxy_ssl_config.verifypeer = data->set.proxy_ssl.primary.verifypeer; conn->proxy_ssl_config.verifyhost = data->set.proxy_ssl.primary.verifyhost; conn->ip_version = data->set.ipver; #if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \ defined(NTLM_WB_ENABLED) conn->ntlm_auth_hlpr_socket = CURL_SOCKET_BAD; conn->ntlm_auth_hlpr_pid = 0; conn->challenge_header = NULL; conn->response_header = NULL; #endif if(Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1) && !conn->master_buffer) { /* Allocate master_buffer to be used for HTTP/1 pipelining */ conn->master_buffer = calloc(MASTERBUF_SIZE, sizeof(char)); if(!conn->master_buffer) goto error; } /* Initialize the pipeline lists */ Curl_llist_init(&conn->send_pipe, (curl_llist_dtor) llist_dtor); Curl_llist_init(&conn->recv_pipe, (curl_llist_dtor) llist_dtor); #ifdef HAVE_GSSAPI conn->data_prot = PROT_CLEAR; #endif /* Store the local bind parameters that will be used for this connection */ if(data->set.str[STRING_DEVICE]) { conn->localdev = strdup(data->set.str[STRING_DEVICE]); if(!conn->localdev) goto error; } conn->localportrange = data->set.localportrange; conn->localport = data->set.localport; /* the close socket stuff needs to be copied to the connection struct as it may live on without (this specific) Curl_easy */ conn->fclosesocket = data->set.fclosesocket; conn->closesocket_client = data->set.closesocket_client; return conn; error: Curl_llist_destroy(&conn->send_pipe, NULL); Curl_llist_destroy(&conn->recv_pipe, NULL); free(conn->master_buffer); free(conn->localdev); #ifdef USE_SSL free(conn->ssl_extra); #endif free(conn); 
return NULL; } /* returns the handler if the given scheme is built-in */ const struct Curl_handler *Curl_builtin_scheme(const char *scheme) { const struct Curl_handler * const *pp; const struct Curl_handler *p; /* Scan protocol handler table and match against 'scheme'. The handler may be changed later when the protocol specific setup function is called. */ for(pp = protocols; (p = *pp) != NULL; pp++) if(strcasecompare(p->scheme, scheme)) /* Protocol found in table. Check if allowed */ return p; return NULL; /* not found */ } static CURLcode findprotocol(struct Curl_easy *data, struct connectdata *conn, const char *protostr) { const struct Curl_handler *p = Curl_builtin_scheme(protostr); if(p && /* Protocol found in table. Check if allowed */ (data->set.allowed_protocols & p->protocol)) { /* it is allowed for "normal" request, now do an extra check if this is the result of a redirect */ if(data->state.this_is_a_follow && !(data->set.redir_protocols & p->protocol)) /* nope, get out */ ; else { /* Perform setup complement if some. */ conn->handler = conn->given = p; /* 'port' and 'remote_port' are set in setup_connection_internals() */ return CURLE_OK; } } /* The protocol was not found in the table, but we don't have to assign it to anything since it is already assigned to a dummy-struct in the create_conn() function when the connectdata struct is allocated. */ failf(data, "Protocol \"%s\" not supported or disabled in " LIBCURL_NAME, protostr); return CURLE_UNSUPPORTED_PROTOCOL; } CURLcode Curl_uc_to_curlcode(CURLUcode uc) { switch(uc) { default: return CURLE_URL_MALFORMAT; case CURLUE_UNSUPPORTED_SCHEME: return CURLE_UNSUPPORTED_PROTOCOL; case CURLUE_OUT_OF_MEMORY: return CURLE_OUT_OF_MEMORY; case CURLUE_USER_NOT_ALLOWED: return CURLE_LOGIN_DENIED; } } /* * Parse URL and fill in the relevant members of the connection struct. 
*/ static CURLcode parseurlandfillconn(struct Curl_easy *data, struct connectdata *conn) { CURLcode result; CURLU *uh; CURLUcode uc; char *hostname; Curl_up_free(data); /* cleanup previous leftovers first */ /* parse the URL */ uh = data->state.uh = curl_url(); if(!uh) return CURLE_OUT_OF_MEMORY; if(data->set.str[STRING_DEFAULT_PROTOCOL] && !Curl_is_absolute_url(data->change.url, NULL, MAX_SCHEME_LEN)) { char *url; if(data->change.url_alloc) free(data->change.url); url = aprintf("%s://%s", data->set.str[STRING_DEFAULT_PROTOCOL], data->change.url); if(!url) return CURLE_OUT_OF_MEMORY; data->change.url = url; data->change.url_alloc = TRUE; } uc = curl_url_set(uh, CURLUPART_URL, data->change.url, CURLU_GUESS_SCHEME | CURLU_NON_SUPPORT_SCHEME | (data->set.disallow_username_in_url ? CURLU_DISALLOW_USER : 0) | (data->set.path_as_is ? CURLU_PATH_AS_IS : 0)); if(uc) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_SCHEME, &data->state.up.scheme, 0); if(uc) return Curl_uc_to_curlcode(uc); result = findprotocol(data, conn, data->state.up.scheme); if(result) return result; uc = curl_url_get(uh, CURLUPART_USER, &data->state.up.user, CURLU_URLDECODE); if(!uc) { conn->user = strdup(data->state.up.user); if(!conn->user) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; } else if(uc != CURLUE_NO_USER) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_PASSWORD, &data->state.up.password, CURLU_URLDECODE); if(!uc) { conn->passwd = strdup(data->state.up.password); if(!conn->passwd) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; } else if(uc != CURLUE_NO_PASSWORD) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_OPTIONS, &data->state.up.options, CURLU_URLDECODE); if(!uc) { conn->options = strdup(data->state.up.options); if(!conn->options) return CURLE_OUT_OF_MEMORY; } else if(uc != CURLUE_NO_OPTIONS) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_HOST, &data->state.up.hostname, 0); if(uc) { 
if(!strcasecompare("file", data->state.up.scheme)) return CURLE_OUT_OF_MEMORY; } uc = curl_url_get(uh, CURLUPART_PATH, &data->state.up.path, 0); if(uc) return Curl_uc_to_curlcode(uc); uc = curl_url_get(uh, CURLUPART_PORT, &data->state.up.port, CURLU_DEFAULT_PORT); if(uc) { if(!strcasecompare("file", data->state.up.scheme)) return CURLE_OUT_OF_MEMORY; } else { unsigned long port = strtoul(data->state.up.port, NULL, 10); conn->remote_port = curlx_ultous(port); } (void)curl_url_get(uh, CURLUPART_QUERY, &data->state.up.query, 0); hostname = data->state.up.hostname; if(!hostname) /* this is for file:// transfers, get a dummy made */ hostname = (char *)""; if(hostname[0] == '[') { /* This looks like an IPv6 address literal. See if there is an address scope. */ char *percent = strchr(++hostname, '%'); conn->bits.ipv6_ip = TRUE; if(percent) { unsigned int identifier_offset = 3; char *endp; unsigned long scope; if(strncmp("%25", percent, 3) != 0) { infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); identifier_offset = 1; } scope = strtoul(percent + identifier_offset, &endp, 10); if(*endp == ']') { /* The address scope was well formed. Knock it out of the hostname. 
*/ memmove(percent, endp, strlen(endp) + 1); conn->scope_id = (unsigned int)scope; } else { /* Zone identifier is not numeric */ #if defined(HAVE_NET_IF_H) && defined(IFNAMSIZ) && defined(HAVE_IF_NAMETOINDEX) char ifname[IFNAMSIZ + 2]; char *square_bracket; unsigned int scopeidx = 0; strncpy(ifname, percent + identifier_offset, IFNAMSIZ + 2); /* Ensure nullbyte termination */ ifname[IFNAMSIZ + 1] = '\0'; square_bracket = strchr(ifname, ']'); if(square_bracket) { /* Remove ']' */ *square_bracket = '\0'; scopeidx = if_nametoindex(ifname); if(scopeidx == 0) { infof(data, "Invalid network interface: %s; %s\n", ifname, strerror(errno)); } } if(scopeidx > 0) { char *p = percent + identifier_offset + strlen(ifname); /* Remove zone identifier from hostname */ memmove(percent, p, strlen(p) + 1); conn->scope_id = scopeidx; } else #endif /* HAVE_NET_IF_H && IFNAMSIZ */ infof(data, "Invalid IPv6 address format\n"); } } percent = strchr(hostname, ']'); if(percent) /* terminate IPv6 numerical at end bracket */ *percent = 0; } /* make sure the connect struct gets its own copy of the host name */ conn->host.rawalloc = strdup(hostname); if(!conn->host.rawalloc) return CURLE_OUT_OF_MEMORY; conn->host.name = conn->host.rawalloc; if(data->set.scope_id) /* Override any scope that was set above. */ conn->scope_id = data->set.scope_id; return CURLE_OK; } /* * If we're doing a resumed transfer, we need to setup our stuff * properly. */ static CURLcode setup_range(struct Curl_easy *data) { struct UrlState *s = &data->state; s->resume_from = data->set.set_resume_from; if(s->resume_from || data->set.str[STRING_SET_RANGE]) { if(s->rangestringalloc) free(s->range); if(s->resume_from) s->range = aprintf("%" CURL_FORMAT_CURL_OFF_T "-", s->resume_from); else s->range = strdup(data->set.str[STRING_SET_RANGE]); s->rangestringalloc = (s->range) ? 
TRUE : FALSE; if(!s->range) return CURLE_OUT_OF_MEMORY; /* tell ourselves to fetch this range */ s->use_range = TRUE; /* enable range download */ } else s->use_range = FALSE; /* disable range download */ return CURLE_OK; } /* * setup_connection_internals() - * * Setup connection internals specific to the requested protocol in the * Curl_easy. This is inited and setup before the connection is made but * is about the particular protocol that is to be used. * * This MUST get called after proxy magic has been figured out. */ static CURLcode setup_connection_internals(struct connectdata *conn) { const struct Curl_handler * p; CURLcode result; conn->socktype = SOCK_STREAM; /* most of them are TCP streams */ /* Perform setup complement if some. */ p = conn->handler; if(p->setup_connection) { result = (*p->setup_connection)(conn); if(result) return result; p = conn->handler; /* May have changed. */ } if(conn->port < 0) /* we check for -1 here since if proxy was detected already, this was very likely already set to the proxy port */ conn->port = p->defport; return CURLE_OK; } /* * Curl_free_request_state() should free temp data that was allocated in the * Curl_easy for this single request. */ void Curl_free_request_state(struct Curl_easy *data) { Curl_safefree(data->req.protop); Curl_safefree(data->req.newurl); } #ifndef CURL_DISABLE_PROXY /**************************************************************** * Checks if the host is in the noproxy list. returns true if it matches * and therefore the proxy should NOT be used. 
****************************************************************/ static bool check_noproxy(const char *name, const char *no_proxy) { /* no_proxy=domain1.dom,host.domain2.dom * (a comma-separated list of hosts which should * not be proxied, or an asterisk to override * all proxy variables) */ if(no_proxy && no_proxy[0]) { size_t tok_start; size_t tok_end; const char *separator = ", "; size_t no_proxy_len; size_t namelen; char *endptr; if(strcasecompare("*", no_proxy)) { return TRUE; } /* NO_PROXY was specified and it wasn't just an asterisk */ no_proxy_len = strlen(no_proxy); if(name[0] == '[') { /* IPv6 numerical address */ endptr = strchr(name, ']'); if(!endptr) return FALSE; name++; namelen = endptr - name; } else namelen = strlen(name); for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) { while(tok_start < no_proxy_len && strchr(separator, no_proxy[tok_start]) != NULL) { /* Look for the beginning of the token. */ ++tok_start; } if(tok_start == no_proxy_len) break; /* It was all trailing separator chars, no more tokens. */ for(tok_end = tok_start; tok_end < no_proxy_len && strchr(separator, no_proxy[tok_end]) == NULL; ++tok_end) /* Look for the end of the token. */ ; /* To match previous behaviour, where it was necessary to specify * ".local.com" to prevent matching "notlocal.com", we will leave * the '.' off. */ if(no_proxy[tok_start] == '.') ++tok_start; if((tok_end - tok_start) <= namelen) { /* Match the last part of the name to the domain we are checking. */ const char *checkn = name + namelen - (tok_end - tok_start); if(strncasecompare(no_proxy + tok_start, checkn, tok_end - tok_start)) { if((tok_end - tok_start) == namelen || *(checkn - 1) == '.') { /* We either have an exact match, or the previous character is a . * so it is within the same domain, so no proxy for this host. 
*/ return TRUE; } } } /* if((tok_end - tok_start) <= namelen) */ } /* for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) */ } /* NO_PROXY was specified and it wasn't just an asterisk */ return FALSE; } #ifndef CURL_DISABLE_HTTP /**************************************************************** * Detect what (if any) proxy to use. Remember that this selects a host * name and is not limited to HTTP proxies only. * The returned pointer must be freed by the caller (unless NULL) ****************************************************************/ static char *detect_proxy(struct connectdata *conn) { char *proxy = NULL; /* If proxy was not specified, we check for default proxy environment * variables, to enable i.e Lynx compliance: * * http_proxy=http://some.server.dom:port/ * https_proxy=http://some.server.dom:port/ * ftp_proxy=http://some.server.dom:port/ * no_proxy=domain1.dom,host.domain2.dom * (a comma-separated list of hosts which should * not be proxied, or an asterisk to override * all proxy variables) * all_proxy=http://some.server.dom:port/ * (seems to exist for the CERN www lib. Probably * the first to check for.) * * For compatibility, the all-uppercase versions of these variables are * checked if the lowercase versions don't exist. */ char proxy_env[128]; const char *protop = conn->handler->scheme; char *envp = proxy_env; char *prox; /* Now, build <protocol>_proxy and check for such a one to use */ while(*protop) *envp++ = (char)tolower((int)*protop++); /* append _proxy */ strcpy(envp, "_proxy"); /* read the protocol proxy: */ prox = curl_getenv(proxy_env); /* * We don't try the uppercase version of HTTP_PROXY because of * security reasons: * * When curl is used in a webserver application * environment (cgi or php), this environment variable can * be controlled by the web server user by setting the * http header 'Proxy:' to some value. * * This can cause 'internal' http/ftp requests to be * arbitrarily redirected by any external attacker. 
*/ if(!prox && !strcasecompare("http_proxy", proxy_env)) { /* There was no lowercase variable, try the uppercase version: */ Curl_strntoupper(proxy_env, proxy_env, sizeof(proxy_env)); prox = curl_getenv(proxy_env); } envp = proxy_env; if(prox) { proxy = prox; /* use this */ } else { envp = (char *)"all_proxy"; proxy = curl_getenv(envp); /* default proxy to use */ if(!proxy) { envp = (char *)"ALL_PROXY"; proxy = curl_getenv(envp); } } if(proxy) infof(conn->data, "Uses proxy env variable %s == '%s'\n", envp, proxy); return proxy; } #endif /* CURL_DISABLE_HTTP */ /* * If this is supposed to use a proxy, we need to figure out the proxy * host name, so that we can re-use an existing connection * that may exist registered to the same proxy host. */ static CURLcode parse_proxy(struct Curl_easy *data, struct connectdata *conn, char *proxy, curl_proxytype proxytype) { char *prox_portno; char *endofprot; /* We use 'proxyptr' to point to the proxy name from now on... */ char *proxyptr; char *portptr; char *atsign; long port = -1; char *proxyuser = NULL; char *proxypasswd = NULL; bool sockstype; /* We do the proxy host string parsing here. We want the host name and the * port name. Accept a protocol:// prefix */ /* Parse the protocol part if present */ endofprot = strstr(proxy, "://"); if(endofprot) { proxyptr = endofprot + 3; if(checkprefix("https", proxy)) proxytype = CURLPROXY_HTTPS; else if(checkprefix("socks5h", proxy)) proxytype = CURLPROXY_SOCKS5_HOSTNAME; else if(checkprefix("socks5", proxy)) proxytype = CURLPROXY_SOCKS5; else if(checkprefix("socks4a", proxy)) proxytype = CURLPROXY_SOCKS4A; else if(checkprefix("socks4", proxy) || checkprefix("socks", proxy)) proxytype = CURLPROXY_SOCKS4; else if(checkprefix("http:", proxy)) ; /* leave it as HTTP or HTTP/1.0 */ else { /* Any other xxx:// reject! 
*/ failf(data, "Unsupported proxy scheme for \'%s\'", proxy); return CURLE_COULDNT_CONNECT; } } else proxyptr = proxy; /* No xxx:// head: It's a HTTP proxy */ #ifdef USE_SSL if(!(Curl_ssl->supports & SSLSUPP_HTTPS_PROXY)) #endif if(proxytype == CURLPROXY_HTTPS) { failf(data, "Unsupported proxy \'%s\', libcurl is built without the " "HTTPS-proxy support.", proxy); return CURLE_NOT_BUILT_IN; } sockstype = proxytype == CURLPROXY_SOCKS5_HOSTNAME || proxytype == CURLPROXY_SOCKS5 || proxytype == CURLPROXY_SOCKS4A || proxytype == CURLPROXY_SOCKS4; /* Is there a username and password given in this proxy url? */ atsign = strchr(proxyptr, '@'); if(atsign) { CURLcode result = Curl_parse_login_details(proxyptr, atsign - proxyptr, &proxyuser, &proxypasswd, NULL); if(result) return result; proxyptr = atsign + 1; } /* start scanning for port number at this point */ portptr = proxyptr; /* detect and extract RFC6874-style IPv6-addresses */ if(*proxyptr == '[') { char *ptr = ++proxyptr; /* advance beyond the initial bracket */ while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.'))) ptr++; if(*ptr == '%') { /* There might be a zone identifier */ if(strncmp("%25", ptr, 3)) infof(data, "Please URL encode %% as %%25, see RFC 6874.\n"); ptr++; /* Allow unreserved characters as defined in RFC 3986 */ while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') || (*ptr == '.') || (*ptr == '_') || (*ptr == '~'))) ptr++; } if(*ptr == ']') /* yeps, it ended nicely with a bracket as well */ *ptr++ = 0; else infof(data, "Invalid IPv6 address format\n"); portptr = ptr; /* Note that if this didn't end with a bracket, we still advanced the * proxyptr first, but I can't see anything wrong with that as no host * name nor a numeric can legally start with a bracket. 
*/ } /* Get port number off proxy.server.com:1080 */ prox_portno = strchr(portptr, ':'); if(prox_portno) { char *endp = NULL; *prox_portno = 0x0; /* cut off number from host name */ prox_portno ++; /* now set the local port number */ port = strtol(prox_portno, &endp, 10); if((endp && *endp && (*endp != '/') && (*endp != ' ')) || (port < 0) || (port > 65535)) { /* meant to detect for example invalid IPv6 numerical addresses without brackets: "2a00:fac0:a000::7:13". Accept a trailing slash only because we then allow "URL style" with the number followed by a slash, used in curl test cases already. Space is also an acceptable terminating symbol. */ infof(data, "No valid port number in proxy string (%s)\n", prox_portno); } else conn->port = port; } else { if(proxyptr[0]=='/') { /* If the first character in the proxy string is a slash, fail immediately. The following code will otherwise clear the string which will lead to code running as if no proxy was set! */ Curl_safefree(proxyuser); Curl_safefree(proxypasswd); return CURLE_COULDNT_RESOLVE_PROXY; } /* without a port number after the host name, some people seem to use a slash so we strip everything from the first slash */ atsign = strchr(proxyptr, '/'); if(atsign) *atsign = '\0'; /* cut off path part from host name */ if(data->set.proxyport) /* None given in the proxy string, then get the default one if it is given */ port = data->set.proxyport; else { if(proxytype == CURLPROXY_HTTPS) port = CURL_DEFAULT_HTTPS_PROXY_PORT; else port = CURL_DEFAULT_PROXY_PORT; } } if(*proxyptr) { struct proxy_info *proxyinfo = sockstype ? &conn->socks_proxy : &conn->http_proxy; proxyinfo->proxytype = proxytype; if(proxyuser) { /* found user and password, rip them out. note that we are unescaping them, as there is otherwise no way to have a username or password with reserved characters like ':' in them. 
*/ Curl_safefree(proxyinfo->user); proxyinfo->user = curl_easy_unescape(data, proxyuser, 0, NULL); Curl_safefree(proxyuser); if(!proxyinfo->user) { Curl_safefree(proxypasswd); return CURLE_OUT_OF_MEMORY; } Curl_safefree(proxyinfo->passwd); if(proxypasswd && strlen(proxypasswd) < MAX_CURL_PASSWORD_LENGTH) proxyinfo->passwd = curl_easy_unescape(data, proxypasswd, 0, NULL); else proxyinfo->passwd = strdup(""); Curl_safefree(proxypasswd); if(!proxyinfo->passwd) return CURLE_OUT_OF_MEMORY; conn->bits.proxy_user_passwd = TRUE; /* enable it */ } if(port >= 0) { proxyinfo->port = port; if(conn->port < 0 || sockstype || !conn->socks_proxy.host.rawalloc) conn->port = port; } /* now, clone the cleaned proxy host name */ Curl_safefree(proxyinfo->host.rawalloc); proxyinfo->host.rawalloc = strdup(proxyptr); proxyinfo->host.name = proxyinfo->host.rawalloc; if(!proxyinfo->host.rawalloc) return CURLE_OUT_OF_MEMORY; } Curl_safefree(proxyuser); Curl_safefree(proxypasswd); return CURLE_OK; } /* * Extract the user and password from the authentication string */ static CURLcode parse_proxy_auth(struct Curl_easy *data, struct connectdata *conn) { char proxyuser[MAX_CURL_USER_LENGTH]=""; char proxypasswd[MAX_CURL_PASSWORD_LENGTH]=""; CURLcode result; if(data->set.str[STRING_PROXYUSERNAME] != NULL) { strncpy(proxyuser, data->set.str[STRING_PROXYUSERNAME], MAX_CURL_USER_LENGTH); proxyuser[MAX_CURL_USER_LENGTH-1] = '\0'; /*To be on safe side*/ } if(data->set.str[STRING_PROXYPASSWORD] != NULL) { strncpy(proxypasswd, data->set.str[STRING_PROXYPASSWORD], MAX_CURL_PASSWORD_LENGTH); proxypasswd[MAX_CURL_PASSWORD_LENGTH-1] = '\0'; /*To be on safe side*/ } result = Curl_urldecode(data, proxyuser, 0, &conn->http_proxy.user, NULL, FALSE); if(!result) result = Curl_urldecode(data, proxypasswd, 0, &conn->http_proxy.passwd, NULL, FALSE); return result; } /* create_conn helper to parse and init proxy values. to be called after unix socket init but before any proxy vars are evaluated. 
*/ static CURLcode create_conn_helper_init_proxy(struct connectdata *conn) { char *proxy = NULL; char *socksproxy = NULL; char *no_proxy = NULL; CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; /************************************************************* * Extract the user and password from the authentication string *************************************************************/ if(conn->bits.proxy_user_passwd) { result = parse_proxy_auth(data, conn); if(result) goto out; } /************************************************************* * Detect what (if any) proxy to use *************************************************************/ if(data->set.str[STRING_PROXY]) { proxy = strdup(data->set.str[STRING_PROXY]); /* if global proxy is set, this is it */ if(NULL == proxy) { failf(data, "memory shortage"); result = CURLE_OUT_OF_MEMORY; goto out; } } if(data->set.str[STRING_PRE_PROXY]) { socksproxy = strdup(data->set.str[STRING_PRE_PROXY]); /* if global socks proxy is set, this is it */ if(NULL == socksproxy) { failf(data, "memory shortage"); result = CURLE_OUT_OF_MEMORY; goto out; } } if(!data->set.str[STRING_NOPROXY]) { const char *p = "no_proxy"; no_proxy = curl_getenv(p); if(!no_proxy) { p = "NO_PROXY"; no_proxy = curl_getenv(p); } if(no_proxy) { infof(conn->data, "Uses proxy env variable %s == '%s'\n", p, no_proxy); } } if(check_noproxy(conn->host.name, data->set.str[STRING_NOPROXY] ? data->set.str[STRING_NOPROXY] : no_proxy)) { Curl_safefree(proxy); Curl_safefree(socksproxy); } #ifndef CURL_DISABLE_HTTP else if(!proxy && !socksproxy) /* if the host is not in the noproxy list, detect proxy. */ proxy = detect_proxy(conn); #endif /* CURL_DISABLE_HTTP */ Curl_safefree(no_proxy); #ifdef USE_UNIX_SOCKETS /* For the time being do not mix proxy and unix domain sockets. 
See #1274 */ if(proxy && conn->unix_domain_socket) { free(proxy); proxy = NULL; } #endif if(proxy && (!*proxy || (conn->handler->flags & PROTOPT_NONETWORK))) { free(proxy); /* Don't bother with an empty proxy string or if the protocol doesn't work with network */ proxy = NULL; } if(socksproxy && (!*socksproxy || (conn->handler->flags & PROTOPT_NONETWORK))) { free(socksproxy); /* Don't bother with an empty socks proxy string or if the protocol doesn't work with network */ socksproxy = NULL; } /*********************************************************************** * If this is supposed to use a proxy, we need to figure out the proxy host * name, proxy type and port number, so that we can re-use an existing * connection that may exist registered to the same proxy host. ***********************************************************************/ if(proxy || socksproxy) { if(proxy) { result = parse_proxy(data, conn, proxy, conn->http_proxy.proxytype); Curl_safefree(proxy); /* parse_proxy copies the proxy string */ if(result) goto out; } if(socksproxy) { result = parse_proxy(data, conn, socksproxy, conn->socks_proxy.proxytype); /* parse_proxy copies the socks proxy string */ Curl_safefree(socksproxy); if(result) goto out; } if(conn->http_proxy.host.rawalloc) { #ifdef CURL_DISABLE_HTTP /* asking for a HTTP proxy is a bit funny when HTTP is disabled... 
*/ result = CURLE_UNSUPPORTED_PROTOCOL; goto out; #else /* force this connection's protocol to become HTTP if compatible */ if(!(conn->handler->protocol & PROTO_FAMILY_HTTP)) { if((conn->handler->flags & PROTOPT_PROXY_AS_HTTP) && !conn->bits.tunnel_proxy) conn->handler = &Curl_handler_http; else /* if not converting to HTTP over the proxy, enforce tunneling */ conn->bits.tunnel_proxy = TRUE; } conn->bits.httpproxy = TRUE; #endif } else { conn->bits.httpproxy = FALSE; /* not a HTTP proxy */ conn->bits.tunnel_proxy = FALSE; /* no tunneling if not HTTP */ } if(conn->socks_proxy.host.rawalloc) { if(!conn->http_proxy.host.rawalloc) { /* once a socks proxy */ if(!conn->socks_proxy.user) { conn->socks_proxy.user = conn->http_proxy.user; conn->http_proxy.user = NULL; Curl_safefree(conn->socks_proxy.passwd); conn->socks_proxy.passwd = conn->http_proxy.passwd; conn->http_proxy.passwd = NULL; } } conn->bits.socksproxy = TRUE; } else conn->bits.socksproxy = FALSE; /* not a socks proxy */ } else { conn->bits.socksproxy = FALSE; conn->bits.httpproxy = FALSE; } conn->bits.proxy = conn->bits.httpproxy || conn->bits.socksproxy; if(!conn->bits.proxy) { /* we aren't using the proxy after all... */ conn->bits.proxy = FALSE; conn->bits.httpproxy = FALSE; conn->bits.socksproxy = FALSE; conn->bits.proxy_user_passwd = FALSE; conn->bits.tunnel_proxy = FALSE; } out: free(socksproxy); free(proxy); return result; } #endif /* CURL_DISABLE_PROXY */ /* * Curl_parse_login_details() * * This is used to parse a login string for user name, password and options in * the following formats: * * user * user:password * user:password;options * user;options * user;options:password * :password * :password;options * ;options * ;options:password * * Parameters: * * login [in] - The login string. * len [in] - The length of the login string. * userp [in/out] - The address where a pointer to newly allocated memory * holding the user will be stored upon completion. 
* passwdp [in/out] - The address where a pointer to newly allocated memory * holding the password will be stored upon completion. * optionsp [in/out] - The address where a pointer to newly allocated memory * holding the options will be stored upon completion. * * Returns CURLE_OK on success. */ CURLcode Curl_parse_login_details(const char *login, const size_t len, char **userp, char **passwdp, char **optionsp) { CURLcode result = CURLE_OK; char *ubuf = NULL; char *pbuf = NULL; char *obuf = NULL; const char *psep = NULL; const char *osep = NULL; size_t ulen; size_t plen; size_t olen; /* Attempt to find the password separator */ if(passwdp) { psep = strchr(login, ':'); /* Within the constraint of the login string */ if(psep >= login + len) psep = NULL; } /* Attempt to find the options separator */ if(optionsp) { osep = strchr(login, ';'); /* Within the constraint of the login string */ if(osep >= login + len) osep = NULL; } /* Calculate the portion lengths */ ulen = (psep ? (size_t)(osep && psep > osep ? osep - login : psep - login) : (osep ? (size_t)(osep - login) : len)); plen = (psep ? (osep && osep > psep ? (size_t)(osep - psep) : (size_t)(login + len - psep)) - 1 : 0); olen = (osep ? (psep && psep > osep ? 
(size_t)(psep - osep) : (size_t)(login + len - osep)) - 1 : 0); /* Allocate the user portion buffer */ if(userp && ulen) { ubuf = malloc(ulen + 1); if(!ubuf) result = CURLE_OUT_OF_MEMORY; } /* Allocate the password portion buffer */ if(!result && passwdp && plen) { pbuf = malloc(plen + 1); if(!pbuf) { free(ubuf); result = CURLE_OUT_OF_MEMORY; } } /* Allocate the options portion buffer */ if(!result && optionsp && olen) { obuf = malloc(olen + 1); if(!obuf) { free(pbuf); free(ubuf); result = CURLE_OUT_OF_MEMORY; } } if(!result) { /* Store the user portion if necessary */ if(ubuf) { memcpy(ubuf, login, ulen); ubuf[ulen] = '\0'; Curl_safefree(*userp); *userp = ubuf; } /* Store the password portion if necessary */ if(pbuf) { memcpy(pbuf, psep + 1, plen); pbuf[plen] = '\0'; Curl_safefree(*passwdp); *passwdp = pbuf; } /* Store the options portion if necessary */ if(obuf) { memcpy(obuf, osep + 1, olen); obuf[olen] = '\0'; Curl_safefree(*optionsp); *optionsp = obuf; } } return result; } /************************************************************* * Figure out the remote port number and fix it in the URL * * No matter if we use a proxy or not, we have to figure out the remote * port number of various reasons. * * The port number embedded in the URL is replaced, if necessary. *************************************************************/ static CURLcode parse_remote_port(struct Curl_easy *data, struct connectdata *conn) { if(data->set.use_port && data->state.allow_port) { /* if set, we use this instead of the port possibly given in the URL */ char portbuf[16]; CURLUcode uc; conn->remote_port = (unsigned short)data->set.use_port; snprintf(portbuf, sizeof(portbuf), "%u", conn->remote_port); uc = curl_url_set(data->state.uh, CURLUPART_PORT, portbuf, 0); if(uc) return CURLE_OUT_OF_MEMORY; } return CURLE_OK; } /* * Override the login details from the URL with that in the CURLOPT_USERPWD * option or a .netrc file, if applicable. 
*/ static CURLcode override_login(struct Curl_easy *data, struct connectdata *conn, char **userp, char **passwdp, char **optionsp) { bool user_changed = FALSE; bool passwd_changed = FALSE; CURLUcode uc; if(data->set.str[STRING_USERNAME]) { free(*userp); *userp = strdup(data->set.str[STRING_USERNAME]); if(!*userp) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; /* enable user+password */ user_changed = TRUE; } if(data->set.str[STRING_PASSWORD]) { free(*passwdp); *passwdp = strdup(data->set.str[STRING_PASSWORD]); if(!*passwdp) return CURLE_OUT_OF_MEMORY; conn->bits.user_passwd = TRUE; /* enable user+password */ passwd_changed = TRUE; } if(data->set.str[STRING_OPTIONS]) { free(*optionsp); *optionsp = strdup(data->set.str[STRING_OPTIONS]); if(!*optionsp) return CURLE_OUT_OF_MEMORY; } conn->bits.netrc = FALSE; if(data->set.use_netrc != CURL_NETRC_IGNORED) { char *nuser = NULL; char *npasswd = NULL; int ret; if(data->set.use_netrc == CURL_NETRC_OPTIONAL) nuser = *userp; /* to separate otherwise identical machines */ ret = Curl_parsenetrc(conn->host.name, &nuser, &npasswd, data->set.str[STRING_NETRC_FILE]); if(ret > 0) { infof(data, "Couldn't find host %s in the " DOT_CHAR "netrc file; using defaults\n", conn->host.name); } else if(ret < 0) { return CURLE_OUT_OF_MEMORY; } else { /* set bits.netrc TRUE to remember that we got the name from a .netrc file, so that it is safe to use even if we followed a Location: to a different host or similar. 
*/ conn->bits.netrc = TRUE; conn->bits.user_passwd = TRUE; /* enable user+password */ if(data->set.use_netrc == CURL_NETRC_OPTIONAL) { /* prefer credentials outside netrc */ if(nuser && !*userp) { free(*userp); *userp = nuser; user_changed = TRUE; } if(npasswd && !*passwdp) { free(*passwdp); *passwdp = npasswd; passwd_changed = TRUE; } } else { /* prefer netrc credentials */ if(nuser) { free(*userp); *userp = nuser; user_changed = TRUE; } if(npasswd) { free(*passwdp); *passwdp = npasswd; passwd_changed = TRUE; } } } } /* for updated strings, we update them in the URL */ if(user_changed) { uc = curl_url_set(data->state.uh, CURLUPART_USER, *userp, 0); if(uc) return Curl_uc_to_curlcode(uc); } if(passwd_changed) { uc = curl_url_set(data->state.uh, CURLUPART_PASSWORD, *passwdp, 0); if(uc) return Curl_uc_to_curlcode(uc); } return CURLE_OK; } /* * Set the login details so they're available in the connection */ static CURLcode set_login(struct connectdata *conn) { CURLcode result = CURLE_OK; const char *setuser = CURL_DEFAULT_USER; const char *setpasswd = CURL_DEFAULT_PASSWORD; /* If our protocol needs a password and we have none, use the defaults */ if((conn->handler->flags & PROTOPT_NEEDSPWD) && !conn->bits.user_passwd) ; else { setuser = ""; setpasswd = ""; } /* Store the default user */ if(!conn->user) { conn->user = strdup(setuser); if(!conn->user) return CURLE_OUT_OF_MEMORY; } /* Store the default password */ if(!conn->passwd) { conn->passwd = strdup(setpasswd); if(!conn->passwd) result = CURLE_OUT_OF_MEMORY; } /* if there's a user without password, consider password blank */ if(conn->user && !conn->passwd) { conn->passwd = strdup(""); if(!conn->passwd) result = CURLE_OUT_OF_MEMORY; } return result; } /* * Parses a "host:port" string to connect to. * The hostname and the port may be empty; in this case, NULL is returned for * the hostname and -1 for the port. 
 */
/*
 * parse_connect_to_host_port() parses a "host:port" spec (the tail of a
 * CURLOPT_CONNECT_TO entry). On success *hostname_result is a freshly
 * malloc'ed host name (caller owns/frees it) or NULL if the host part was
 * empty or the port was invalid; *port_result is the port or -1.
 */
static CURLcode parse_connect_to_host_port(struct Curl_easy *data,
                                           const char *host,
                                           char **hostname_result,
                                           int *port_result)
{
  char *host_dup;
  char *hostptr;
  char *host_portno;
  char *portptr;
  int port = -1;

#if defined(CURL_DISABLE_VERBOSE_STRINGS)
  (void) data;
#endif

  /* default outputs: no host, no port */
  *hostname_result = NULL;
  *port_result = -1;

  if(!host || !*host)
    return CURLE_OK;

  /* work on a private copy so we can cut it up with NUL bytes */
  host_dup = strdup(host);
  if(!host_dup)
    return CURLE_OUT_OF_MEMORY;

  hostptr = host_dup;

  /* start scanning for port number at this point */
  portptr = hostptr;

  /* detect and extract RFC6874-style IPv6-addresses */
  if(*hostptr == '[') {
#ifdef ENABLE_IPV6
    char *ptr = ++hostptr; /* advance beyond the initial bracket */
    while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.')))
      ptr++;
    if(*ptr == '%') {
      /* There might be a zone identifier */
      if(strncmp("%25", ptr, 3))
        infof(data, "Please URL encode %% as %%25, see RFC 6874.\n");
      ptr++;
      /* Allow unreserved characters as defined in RFC 3986 */
      while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') ||
                     (*ptr == '.') || (*ptr == '_') || (*ptr == '~')))
        ptr++;
    }
    if(*ptr == ']')
      /* yeps, it ended nicely with a bracket as well */
      *ptr++ = '\0'; /* terminate the address, port scan resumes after it */
    else
      infof(data, "Invalid IPv6 address format\n");
    portptr = ptr;
    /* Note that if this didn't end with a bracket, we still advanced the
     * hostptr first, but I can't see anything wrong with that as no host
     * name nor a numeric can legally start with a bracket.
     */
#else
    failf(data, "Use of IPv6 in *_CONNECT_TO without IPv6 support built-in!");
    free(host_dup);
    return CURLE_NOT_BUILT_IN;
#endif
  }

  /* Get port number off server.com:1080 */
  host_portno = strchr(portptr, ':');
  if(host_portno) {
    char *endp = NULL;
    *host_portno = '\0'; /* cut off number from host name */
    host_portno++;
    if(*host_portno) {
      long portparse = strtol(host_portno, &endp, 10);
      if((endp && *endp) || (portparse < 0) || (portparse > 65535)) {
        /* an invalid port voids the whole entry: host is dropped too */
        infof(data, "No valid port number in connect to host string (%s)\n",
              host_portno);
        hostptr = NULL;
        port = -1;
      }
      else
        port = (int)portparse; /* we know it will fit */
    }
  }

  /* now, clone the cleaned host name */
  if(hostptr) {
    *hostname_result = strdup(hostptr);
    if(!*hostname_result) {
      free(host_dup);
      return CURLE_OUT_OF_MEMORY;
    }
  }

  *port_result = port;

  free(host_dup);
  return CURLE_OK;
}

/*
 * Parses one "connect to" string in the form:
 * "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT".
 *
 * HOST and PORT are matched against this transfer's host/remote_port (an
 * empty field matches anything); only on a full match is the trailing
 * CONNECT-TO part parsed into *host_result/*port_result.
 */
static CURLcode parse_connect_to_string(struct Curl_easy *data,
                                        struct connectdata *conn,
                                        const char *conn_to_host,
                                        char **host_result,
                                        int *port_result)
{
  CURLcode result = CURLE_OK;
  const char *ptr = conn_to_host;
  int host_match = FALSE;
  int port_match = FALSE;

  *host_result = NULL;
  *port_result = -1;

  if(*ptr == ':') {
    /* an empty hostname always matches */
    host_match = TRUE;
    ptr++;
  }
  else {
    /* check whether the URL's hostname matches; IPv6 literals are compared
       with their brackets included */
    size_t hostname_to_match_len;
    char *hostname_to_match = aprintf("%s%s%s",
                                      conn->bits.ipv6_ip ? "[" : "",
                                      conn->host.name,
                                      conn->bits.ipv6_ip ? "]" : "");
    if(!hostname_to_match)
      return CURLE_OUT_OF_MEMORY;
    hostname_to_match_len = strlen(hostname_to_match);
    host_match = strncasecompare(ptr, hostname_to_match,
                                 hostname_to_match_len);
    free(hostname_to_match);
    ptr += hostname_to_match_len;

    /* only a real match if the compared prefix ends exactly at a colon;
       ptr is advanced past it either way but is only used when matched */
    host_match = host_match && *ptr == ':';
    ptr++;
  }

  if(host_match) {
    if(*ptr == ':') {
      /* an empty port always matches */
      port_match = TRUE;
      ptr++;
    }
    else {
      /* check whether the URL's port matches */
      char *ptr_next = strchr(ptr, ':');
      if(ptr_next) {
        char *endp = NULL;
        long port_to_match = strtol(ptr, &endp, 10);
        /* endp == ptr_next guarantees the whole field was numeric */
        if((endp == ptr_next) && (port_to_match == conn->remote_port)) {
          port_match = TRUE;
          ptr = ptr_next + 1;
        }
      }
    }
  }

  if(host_match && port_match) {
    /* parse the hostname and port to connect to */
    result = parse_connect_to_host_port(data, ptr, host_result, port_result);
  }

  return result;
}

/*
 * Processes all strings in the "connect to" slist, and uses the "connect
 * to host" and "connect to port" of the first string that matches.
 *
 * On a match, conn->conn_to_host/conn->conn_to_port and the corresponding
 * bits are set; conn_to_host.rawalloc takes ownership of the allocated name.
 */
static CURLcode parse_connect_to_slist(struct Curl_easy *data,
                                       struct connectdata *conn,
                                       struct curl_slist *conn_to_host)
{
  CURLcode result = CURLE_OK;
  char *host = NULL;
  int port = -1;

  /* stop at the first entry that produced a host or a port */
  while(conn_to_host && !host && port == -1) {
    result = parse_connect_to_string(data, conn, conn_to_host->data,
                                     &host, &port);
    if(result)
      return result;

    if(host && *host) {
      conn->conn_to_host.rawalloc = host; /* conn now owns this string */
      conn->conn_to_host.name = host;
      conn->bits.conn_to_host = TRUE;

      infof(data, "Connecting to hostname: %s\n", host);
    }
    else {
      /* no "connect to host" */
      conn->bits.conn_to_host = FALSE;
      Curl_safefree(host);
    }

    if(port >= 0) {
      conn->conn_to_port = port;
      conn->bits.conn_to_port = TRUE;

      infof(data, "Connecting to port: %d\n", port);
    }
    else {
      /* no "connect to port" */
      conn->bits.conn_to_port = FALSE;
      port = -1;
    }

    conn_to_host = conn_to_host->next;
  }

  return result;
}

/*************************************************************
 * Resolve the address of the server or proxy
 *************************************************************/
static CURLcode resolve_server(struct Curl_easy *data,
                               struct connectdata *conn,
                               bool *async)
{
  CURLcode result = CURLE_OK;
  timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);

  /*************************************************************
   * Resolve the name of the server or proxy
   *************************************************************/
  if(conn->bits.reuse)
    /* We're reusing the connection - no need to resolve anything, and
       fix_hostname() was called already in create_conn() for the re-use
       case. */
    *async = FALSE;

  else {
    /* this is a fresh connect */
    int rc;
    struct Curl_dns_entry *hostaddr;

#ifdef USE_UNIX_SOCKETS
    if(conn->unix_domain_socket) {
      /* Unix domain sockets are local. The host gets ignored, just use the
       * specified domain socket address. Do not cache "DNS entries". There is
       * no DNS involved and we already have the filesystem path available */
      const char *path = conn->unix_domain_socket;

      hostaddr = calloc(1, sizeof(struct Curl_dns_entry));
      if(!hostaddr)
        result = CURLE_OUT_OF_MEMORY;
      else {
        bool longpath = FALSE;
        hostaddr->addr = Curl_unix2addr(path, &longpath,
                                        conn->abstract_unix_socket);
        if(hostaddr->addr)
          hostaddr->inuse++;
        else {
          /* Long paths are not supported for now */
          if(longpath) {
            failf(data, "Unix socket path too long: '%s'", path);
            result = CURLE_COULDNT_RESOLVE_HOST;
          }
          else
            result = CURLE_OUT_OF_MEMORY;
          free(hostaddr);
          hostaddr = NULL;
        }
      }
    }
    else
#endif
    if(!conn->bits.proxy) {
      /* resolve the peer directly; honor a "connect to host" override */
      struct hostname *connhost;
      if(conn->bits.conn_to_host)
        connhost = &conn->conn_to_host;
      else
        connhost = &conn->host;

      /* If not connecting via a proxy, extract the port from the URL, if it is
       * there, thus overriding any defaults that might have been set above.
       */
      if(conn->bits.conn_to_port)
        conn->port = conn->conn_to_port;
      else
        conn->port = conn->remote_port;

      /* Resolve target host right on */
      rc = Curl_resolv_timeout(conn, connhost->name, (int)conn->port,
                               &hostaddr, timeout_ms);
      if(rc == CURLRESOLV_PENDING)
        *async = TRUE;

      else if(rc == CURLRESOLV_TIMEDOUT)
        result = CURLE_OPERATION_TIMEDOUT;

      else if(!hostaddr) {
        failf(data, "Couldn't resolve host '%s'", connhost->dispname);
        result = CURLE_COULDNT_RESOLVE_HOST;
        /* don't return yet, we need to clean up the timeout first */
      }
    }
    else {
      /* This is a proxy that hasn't been resolved yet. */

      struct hostname * const host = conn->bits.socksproxy ?
        &conn->socks_proxy.host : &conn->http_proxy.host;

      /* resolve proxy */
      rc = Curl_resolv_timeout(conn, host->name, (int)conn->port,
                               &hostaddr, timeout_ms);

      if(rc == CURLRESOLV_PENDING)
        *async = TRUE;

      else if(rc == CURLRESOLV_TIMEDOUT)
        result = CURLE_OPERATION_TIMEDOUT;

      else if(!hostaddr) {
        failf(data, "Couldn't resolve proxy '%s'", host->dispname);
        result = CURLE_COULDNT_RESOLVE_PROXY;
        /* don't return yet, we need to clean up the timeout first */
      }
    }
    /* stash the (possibly NULL) resolve result; the caller inspects it */
    DEBUGASSERT(conn->dns_entry == NULL);
    conn->dns_entry = hostaddr;
  }

  return result;
}

/*
 * Cleanup the connection just allocated before we can move along and use the
 * previously existing one. All relevant data is copied over and old_conn is
 * ready for freeing once this function returns.
 */
/*
 * reuse_conn() moves request-specific state from the freshly allocated
 * 'old_conn' (which will be freed by the caller) onto the reused 'conn'.
 * Ownership of several heap strings (user/passwd, host names) is transferred
 * by pointer-stealing: the source pointer is NULLed after each move so the
 * later frees on old_conn cannot double-free them.
 */
static void reuse_conn(struct connectdata *old_conn,
                       struct connectdata *conn)
{
  free_fixed_hostname(&old_conn->http_proxy.host);
  free_fixed_hostname(&old_conn->socks_proxy.host);
  free(old_conn->http_proxy.host.rawalloc);
  free(old_conn->socks_proxy.host.rawalloc);

  /* free the SSL config struct from this connection struct as this was
     allocated in vain and is targeted for destruction */
  Curl_free_primary_ssl_config(&old_conn->ssl_config);
  Curl_free_primary_ssl_config(&old_conn->proxy_ssl_config);

  conn->data = old_conn->data;

  /* get the user+password information from the old_conn struct since it may
   * be new for this request even when we re-use an existing connection */
  conn->bits.user_passwd = old_conn->bits.user_passwd;
  if(conn->bits.user_passwd) {
    /* use the new user name and password though */
    Curl_safefree(conn->user);
    Curl_safefree(conn->passwd);
    conn->user = old_conn->user;
    conn->passwd = old_conn->passwd;
    old_conn->user = NULL;
    old_conn->passwd = NULL;
  }

  conn->bits.proxy_user_passwd = old_conn->bits.proxy_user_passwd;
  if(conn->bits.proxy_user_passwd) {
    /* use the new proxy user name and proxy password though */
    Curl_safefree(conn->http_proxy.user);
    Curl_safefree(conn->socks_proxy.user);
    Curl_safefree(conn->http_proxy.passwd);
    Curl_safefree(conn->socks_proxy.passwd);
    conn->http_proxy.user = old_conn->http_proxy.user;
    conn->socks_proxy.user = old_conn->socks_proxy.user;
    conn->http_proxy.passwd = old_conn->http_proxy.passwd;
    conn->socks_proxy.passwd = old_conn->socks_proxy.passwd;
    old_conn->http_proxy.user = NULL;
    old_conn->socks_proxy.user = NULL;
    old_conn->http_proxy.passwd = NULL;
    old_conn->socks_proxy.passwd = NULL;
  }

  /* host can change, when doing keepalive with a proxy or if the case is
     different this time etc */
  free_fixed_hostname(&conn->host);
  free_fixed_hostname(&conn->conn_to_host);
  Curl_safefree(conn->host.rawalloc);
  Curl_safefree(conn->conn_to_host.rawalloc);
  conn->host = old_conn->host;
  conn->conn_to_host = old_conn->conn_to_host;
  conn->conn_to_port = old_conn->conn_to_port;
  conn->remote_port = old_conn->remote_port;

  /* persist connection info in session handle */
  Curl_persistconninfo(conn);

  conn_reset_all_postponed_data(old_conn); /* free buffers */

  /* re-use init */
  conn->bits.reuse = TRUE; /* yes, we're re-using here */

  /* release whatever remains owned by the discarded struct; the stolen
     pointers above were already NULLed so these are safe */
  Curl_safefree(old_conn->user);
  Curl_safefree(old_conn->passwd);
  Curl_safefree(old_conn->options);
  Curl_safefree(old_conn->http_proxy.user);
  Curl_safefree(old_conn->socks_proxy.user);
  Curl_safefree(old_conn->http_proxy.passwd);
  Curl_safefree(old_conn->socks_proxy.passwd);
  Curl_safefree(old_conn->localdev);
  Curl_llist_destroy(&old_conn->send_pipe, NULL);
  Curl_llist_destroy(&old_conn->recv_pipe, NULL);

  Curl_safefree(old_conn->master_buffer);

#ifdef USE_UNIX_SOCKETS
  Curl_safefree(old_conn->unix_domain_socket);
#endif
}

/**
 * create_conn() sets up a new connectdata struct, or re-uses an already
 * existing one, and resolves host name.
 *
 * if this function returns CURLE_OK and *async is set to TRUE, the resolve
 * response will be coming asynchronously. If *async is FALSE, the name is
 * already resolved.
 *
 * @param data The sessionhandle pointer
 * @param in_connect is set to the next connection data pointer
 * @param async is set TRUE when an async DNS resolution is pending
 * @see Curl_setup_conn()
 *
 * *NOTE* this function assigns the conn->data pointer!
 */
static CURLcode create_conn(struct Curl_easy *data,
                            struct connectdata **in_connect,
                            bool *async)
{
  CURLcode result = CURLE_OK;
  struct connectdata *conn;
  struct connectdata *conn_temp = NULL;
  bool reuse;
  bool connections_available = TRUE;
  bool force_reuse = FALSE;
  bool waitpipe = FALSE;
  size_t max_host_connections = Curl_multi_max_host_connections(data->multi);
  size_t max_total_connections = Curl_multi_max_total_connections(data->multi);

  *async = FALSE;

  /*************************************************************
   * Check input data
   *************************************************************/
  if(!data->change.url) {
    result = CURLE_URL_MALFORMAT;
    goto out;
  }

  /* First, split up the current URL in parts so that we can use the
     parts for checking against the already present connections. In order
     to not have to modify everything at once, we allocate a temporary
     connection data struct and fill in for comparison purposes. */
  conn = allocate_conn(data);

  if(!conn) {
    result = CURLE_OUT_OF_MEMORY;
    goto out;
  }

  /* We must set the return variable as soon as possible, so that our
     parent can cleanup any possible allocs we may have done before
     any failure */
  *in_connect = conn;

  result = parseurlandfillconn(data, conn);
  if(result)
    goto out;

  if(data->set.str[STRING_BEARER]) {
    conn->oauth_bearer = strdup(data->set.str[STRING_BEARER]);
    if(!conn->oauth_bearer) {
      result = CURLE_OUT_OF_MEMORY;
      goto out;
    }
  }

#ifdef USE_UNIX_SOCKETS
  if(data->set.str[STRING_UNIX_SOCKET_PATH]) {
    conn->unix_domain_socket = strdup(data->set.str[STRING_UNIX_SOCKET_PATH]);
    if(conn->unix_domain_socket == NULL) {
      result = CURLE_OUT_OF_MEMORY;
      goto out;
    }
    conn->abstract_unix_socket = data->set.abstract_unix_socket;
  }
#endif

  /* After the unix socket init but before the proxy vars are used, parse and
     initialize the proxy vars */
#ifndef CURL_DISABLE_PROXY
  result = create_conn_helper_init_proxy(conn);
  if(result)
    goto out;
#endif

  /*************************************************************
   * If the protocol is using SSL and HTTP proxy is used, we set
   * the tunnel_proxy bit.
   *************************************************************/
  if((conn->given->flags&PROTOPT_SSL) && conn->bits.httpproxy)
    conn->bits.tunnel_proxy = TRUE;

  /*************************************************************
   * Figure out the remote port number and fix it in the URL
   *************************************************************/
  result = parse_remote_port(data, conn);
  if(result)
    goto out;

  /* Check for overridden login details and set them accordingly so they
     they are known when protocol->setup_connection is called! */
  result = override_login(data, conn, &conn->user, &conn->passwd,
                          &conn->options);
  if(result)
    goto out;

  result = set_login(conn); /* default credentials */
  if(result)
    goto out;

  /*************************************************************
   * Process the "connect to" linked list of hostname/port mappings.
   * Do this after the remote port number has been fixed in the URL.
   *************************************************************/
  result = parse_connect_to_slist(data, conn, data->set.connect_to);
  if(result)
    goto out;

  /*************************************************************
   * IDN-fix the hostnames
   *************************************************************/
  result = fix_hostname(conn, &conn->host);
  if(result)
    goto out;
  if(conn->bits.conn_to_host) {
    result = fix_hostname(conn, &conn->conn_to_host);
    if(result)
      goto out;
  }
  if(conn->bits.httpproxy) {
    result = fix_hostname(conn, &conn->http_proxy.host);
    if(result)
      goto out;
  }
  if(conn->bits.socksproxy) {
    result = fix_hostname(conn, &conn->socks_proxy.host);
    if(result)
      goto out;
  }

  /*************************************************************
   * Check whether the host and the "connect to host" are equal.
   * Do this after the hostnames have been IDN-fixed.
   *************************************************************/
  if(conn->bits.conn_to_host &&
     strcasecompare(conn->conn_to_host.name, conn->host.name)) {
    conn->bits.conn_to_host = FALSE;
  }

  /*************************************************************
   * Check whether the port and the "connect to port" are equal.
   * Do this after the remote port number has been fixed in the URL.
   *************************************************************/
  if(conn->bits.conn_to_port && conn->conn_to_port == conn->remote_port) {
    conn->bits.conn_to_port = FALSE;
  }

  /*************************************************************
   * If the "connect to" feature is used with an HTTP proxy,
   * we set the tunnel_proxy bit.
   *************************************************************/
  if((conn->bits.conn_to_host || conn->bits.conn_to_port) &&
      conn->bits.httpproxy)
    conn->bits.tunnel_proxy = TRUE;

  /*************************************************************
   * Setup internals depending on protocol. Needs to be done after
   * we figured out what/if proxy to use.
   *************************************************************/
  result = setup_connection_internals(conn);
  if(result)
    goto out;

  /* default plain-socket send/recv; protocol setup may override later */
  conn->recv[FIRSTSOCKET] = Curl_recv_plain;
  conn->send[FIRSTSOCKET] = Curl_send_plain;
  conn->recv[SECONDARYSOCKET] = Curl_recv_plain;
  conn->send[SECONDARYSOCKET] = Curl_send_plain;

  conn->bits.tcp_fastopen = data->set.tcp_fastopen;

  /***********************************************************************
   * file: is a special case in that it doesn't need a network connection
   ***********************************************************************/
#ifndef CURL_DISABLE_FILE
  if(conn->handler->flags & PROTOPT_NONETWORK) {
    bool done;
    /* this is supposed to be the connect function so we better at least check
       that the file is present here!
    */
    DEBUGASSERT(conn->handler->connect_it);
    Curl_persistconninfo(conn);
    result = conn->handler->connect_it(conn, &done);

    /* Setup a "faked" transfer that'll do nothing */
    if(!result) {
      conn->data = data;
      conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; /* we are "connected */

      result = Curl_conncache_add_conn(data->state.conn_cache, conn);
      if(result)
        goto out;

      /*
       * Setup whatever necessary for a resumed transfer
       */
      result = setup_range(data);
      if(result) {
        DEBUGASSERT(conn->handler->done);
        /* we ignore the return code for the protocol-specific DONE */
        (void)conn->handler->done(conn, result, FALSE);
        goto out;
      }

      Curl_setup_transfer(conn, -1, -1, FALSE, NULL, /* no download */
                          -1, NULL); /* no upload */
    }

    /* since we skip do_init() */
    Curl_init_do(data, conn);

    goto out;
  }
#endif

  /* Get a cloned copy of the SSL config situation stored in the
     connection struct. But to get this going nicely, we must first make
     sure that the strings in the master copy are pointing to the correct
     strings in the session handle strings array!

     Keep in mind that the pointers in the master copy are pointing to strings
     that will be freed as part of the Curl_easy struct, but all cloned
     copies will be separately allocated.
  */
  data->set.ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_ORIG];
  data->set.proxy_ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_PROXY];
  data->set.ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_ORIG];
  data->set.proxy_ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_PROXY];
  data->set.ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE];
  data->set.proxy_ssl.primary.random_file =
    data->set.str[STRING_SSL_RANDOM_FILE];
  data->set.ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET];
  data->set.proxy_ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET];
  data->set.ssl.primary.cipher_list =
    data->set.str[STRING_SSL_CIPHER_LIST_ORIG];
  data->set.proxy_ssl.primary.cipher_list =
    data->set.str[STRING_SSL_CIPHER_LIST_PROXY];
  data->set.ssl.primary.cipher_list13 =
    data->set.str[STRING_SSL_CIPHER13_LIST_ORIG];
  data->set.proxy_ssl.primary.cipher_list13 =
    data->set.str[STRING_SSL_CIPHER13_LIST_PROXY];

  data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
  data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY];
  data->set.ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_ORIG];
  data->set.proxy_ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_PROXY];
  data->set.ssl.cert = data->set.str[STRING_CERT_ORIG];
  data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY];
  data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG];
  data->set.proxy_ssl.cert_type = data->set.str[STRING_CERT_TYPE_PROXY];
  data->set.ssl.key = data->set.str[STRING_KEY_ORIG];
  data->set.proxy_ssl.key = data->set.str[STRING_KEY_PROXY];
  data->set.ssl.key_type = data->set.str[STRING_KEY_TYPE_ORIG];
  data->set.proxy_ssl.key_type = data->set.str[STRING_KEY_TYPE_PROXY];
  data->set.ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_ORIG];
  data->set.proxy_ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_PROXY];
  data->set.ssl.primary.clientcert = data->set.str[STRING_CERT_ORIG];
  data->set.proxy_ssl.primary.clientcert = data->set.str[STRING_CERT_PROXY];
#ifdef USE_TLS_SRP
  data->set.ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG];
  data->set.proxy_ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_PROXY];
  data->set.ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG];
  data->set.proxy_ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_PROXY];
#endif

  if(!Curl_clone_primary_ssl_config(&data->set.ssl.primary,
     &conn->ssl_config)) {
    result = CURLE_OUT_OF_MEMORY;
    goto out;
  }

  if(!Curl_clone_primary_ssl_config(&data->set.proxy_ssl.primary,
                                    &conn->proxy_ssl_config)) {
    result = CURLE_OUT_OF_MEMORY;
    goto out;
  }

  prune_dead_connections(data);

  /*************************************************************
   * Check the current list of connections to see if we can
   * re-use an already existing one or if we have to create a
   * new one.
   *************************************************************/

  DEBUGASSERT(conn->user);
  DEBUGASSERT(conn->passwd);

  /* reuse_fresh is TRUE if we are told to use a new connection by force, but
     we only acknowledge this option if this is not a re-used connection
     already (which happens due to follow-location or during a HTTP
     authentication phase). */
  if(data->set.reuse_fresh && !data->state.this_is_a_follow)
    reuse = FALSE;
  else
    reuse = ConnectionExists(data, conn, &conn_temp, &force_reuse, &waitpipe);

  /* If we found a reusable connection that is now marked as in use, we may
     still want to open a new connection if we are pipelining.
  */
  if(reuse && !force_reuse && IsPipeliningPossible(data, conn_temp)) {
    size_t pipelen = conn_temp->send_pipe.size + conn_temp->recv_pipe.size;
    if(pipelen > 0) {
      infof(data,
            "Found connection %ld, with requests in the pipe (%zu)\n",
            conn_temp->connection_id, pipelen);

      if(Curl_conncache_bundle_size(conn_temp) < max_host_connections &&
         Curl_conncache_size(data) < max_total_connections) {
        /* We want a new connection anyway */
        reuse = FALSE;

        infof(data,
              "We can reuse, but we want a new connection anyway\n");

        /* hand conn_temp back since we will not use it after all */
        Curl_conncache_return_conn(conn_temp);
      }
    }
  }

  if(reuse) {
    /*
     * We already have a connection for this, we got the former connection
     * in the conn_temp variable and thus we need to cleanup the one we
     * just allocated before we can move along and use the previously
     * existing one.
     */
    reuse_conn(conn, conn_temp);
#ifdef USE_SSL
    free(conn->ssl_extra);
#endif
    free(conn);          /* we don't need this anymore */
    conn = conn_temp;
    *in_connect = conn;

    infof(data, "Re-using existing connection! (#%ld) with %s %s\n",
          conn->connection_id,
          conn->bits.proxy?"proxy":"host",
          conn->socks_proxy.host.name ? conn->socks_proxy.host.dispname :
          conn->http_proxy.host.name ? conn->http_proxy.host.dispname :
          conn->host.dispname);
  }
  else {
    /* We have decided that we want a new connection. However, we may not
       be able to do that if we have reached the limit of how many
       connections we are allowed to open.
    */

    if(conn->handler->flags & PROTOPT_ALPN_NPN) {
      /* The protocol wants it, so set the bits if enabled in the easy handle
         (default) */
      if(data->set.ssl_enable_alpn)
        conn->bits.tls_enable_alpn = TRUE;
      if(data->set.ssl_enable_npn)
        conn->bits.tls_enable_npn = TRUE;
    }

    if(waitpipe)
      /* There is a connection that *might* become usable for pipelining
         "soon", and we wait for that */
      connections_available = FALSE;
    else {
      /* this gets a lock on the conncache */
      struct connectbundle *bundle =
        Curl_conncache_find_bundle(conn, data->state.conn_cache);

      if(max_host_connections > 0 && bundle &&
         (bundle->num_connections >= max_host_connections)) {
        struct connectdata *conn_candidate;

        /* The bundle is full. Extract the oldest connection. */
        conn_candidate = Curl_conncache_extract_bundle(data, bundle);
        Curl_conncache_unlock(conn);

        if(conn_candidate)
          (void)Curl_disconnect(data, conn_candidate,
                                /* dead_connection */ FALSE);
        else {
          infof(data, "No more connections allowed to host: %zu\n",
                max_host_connections);
          connections_available = FALSE;
        }
      }
      else
        Curl_conncache_unlock(conn);

    }

    if(connections_available &&
       (max_total_connections > 0) &&
       (Curl_conncache_size(data) >= max_total_connections)) {
      struct connectdata *conn_candidate;

      /* The cache is full. Let's see if we can kill a connection. */
      conn_candidate = Curl_conncache_extract_oldest(data);
      if(conn_candidate)
        (void)Curl_disconnect(data, conn_candidate,
                              /* dead_connection */ FALSE);
      else {
        infof(data, "No connections available in cache\n");
        connections_available = FALSE;
      }
    }

    if(!connections_available) {
      infof(data, "No connections available.\n");

      conn_free(conn);
      *in_connect = NULL;

      result = CURLE_NO_CONNECTION_AVAILABLE;
      goto out;
    }
    else {
      /*
       * This is a brand new connection, so let's store it in the connection
       * cache of ours!
       */
      result = Curl_conncache_add_conn(data->state.conn_cache, conn);
      if(result)
        goto out;
    }

#if defined(USE_NTLM)
    /* If NTLM is requested in a part of this connection, make sure we don't
       assume the state is fine as this is a fresh connection and NTLM is
       connection based. */
    if((data->state.authhost.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
       data->state.authhost.done) {
      infof(data, "NTLM picked AND auth done set, clear picked!\n");
      data->state.authhost.picked = CURLAUTH_NONE;
      data->state.authhost.done = FALSE;
    }

    if((data->state.authproxy.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
       data->state.authproxy.done) {
      infof(data, "NTLM-proxy picked AND auth done set, clear picked!\n");
      data->state.authproxy.picked = CURLAUTH_NONE;
      data->state.authproxy.done = FALSE;
    }
#endif
  }

  /* Setup and init stuff before DO starts, in preparing for the transfer. */
  Curl_init_do(data, conn);

  /*
   * Setup whatever necessary for a resumed transfer
   */
  result = setup_range(data);
  if(result)
    goto out;

  /* Continue connectdata initialization here. */

  /*
   * Inherit the proper values from the urldata struct AFTER we have arranged
   * the persistent connection stuff
   */
  conn->seek_func = data->set.seek_func;
  conn->seek_client = data->set.seek_client;

  /*************************************************************
   * Resolve the address of the server or proxy
   *************************************************************/
  result = resolve_server(data, conn, async);

out:
  return result;
}

/* Curl_setup_conn() is called after the name resolve initiated in
 * create_conn() is all done.
 *
 * Curl_setup_conn() also handles reused connections
 *
 * conn->data MUST already have been setup fine (in create_conn)
 */
CURLcode Curl_setup_conn(struct connectdata *conn,
                         bool *protocol_done)
{
  CURLcode result = CURLE_OK;
  struct Curl_easy *data = conn->data;

  Curl_pgrsTime(data, TIMER_NAMELOOKUP);

  if(conn->handler->flags & PROTOPT_NONETWORK) {
    /* nothing to setup when not using a network */
    *protocol_done = TRUE;
    return result;
  }
  *protocol_done = FALSE; /* default to not done */

  /* set proxy_connect_closed to false unconditionally already here since it
     is used strictly to provide extra information to a parent function in the
     case of proxy CONNECT failures and we must make sure we don't have it
     lingering set from a previous invoke */
  conn->bits.proxy_connect_closed = FALSE;

  /*
   * Set user-agent. Used for HTTP, but since we can attempt to tunnel
   * basically anything through a http proxy we can't limit this based on
   * protocol.
   */
  if(data->set.str[STRING_USERAGENT]) {
    Curl_safefree(conn->allocptr.uagent);
    conn->allocptr.uagent =
      aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
    if(!conn->allocptr.uagent)
      return CURLE_OUT_OF_MEMORY;
  }

  data->req.headerbytecount = 0;

#ifdef CURL_DO_LINEEND_CONV
  data->state.crlf_conversions = 0; /* reset CRLF conversion counter */
#endif /* CURL_DO_LINEEND_CONV */

  /* set start time here for timeout purposes in the connect procedure, it
     is later set again for the progress meter purpose */
  conn->now = Curl_now();

  if(CURL_SOCKET_BAD == conn->sock[FIRSTSOCKET]) {
    /* no socket yet: this is a fresh connect */
    conn->bits.tcpconnect[FIRSTSOCKET] = FALSE;
    result = Curl_connecthost(conn, conn->dns_entry);
    if(result)
      return result;
  }
  else {
    /* reused connection: the socket already exists and is connected */
    Curl_pgrsTime(data, TIMER_CONNECT);    /* we're connected already */
    Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
    conn->bits.tcpconnect[FIRSTSOCKET] = TRUE;
    *protocol_done = TRUE;
    Curl_updateconninfo(conn, conn->sock[FIRSTSOCKET]);
    Curl_verboseconnect(conn);
  }

  conn->now = Curl_now(); /* time this *after* the connect is done, we set
                             this here perhaps a second time */
  return result;
}

/*
 * Curl_connect() drives create_conn() + Curl_setup_conn() for one transfer.
 * On CURLE_NO_CONNECTION_AVAILABLE *in_connect is NULLed (conn already
 * freed); on any other failure the partially built conn is disconnected
 * here so the caller never receives a half-initialized pointer.
 */
CURLcode Curl_connect(struct Curl_easy *data,
                      struct connectdata **in_connect,
                      bool *asyncp,
                      bool *protocol_done)
{
  CURLcode result;

  *asyncp = FALSE; /* assume synchronous resolves by default */

  /* init the single-transfer specific data */
  Curl_free_request_state(data);
  memset(&data->req, 0, sizeof(struct SingleRequest));
  data->req.maxdownload = -1;

  /* call the stuff that needs to be called */
  result = create_conn(data, in_connect, asyncp);

  if(!result) {
    if(CONN_INUSE(*in_connect))
      /* pipelining */
      *protocol_done = TRUE;
    else if(!*asyncp) {
      /* DNS resolution is done: that's either because this is a reused
         connection, in which case DNS was unnecessary, or because DNS
         really did finish already (synch resolver/fast async resolve) */
      result = Curl_setup_conn(*in_connect, protocol_done);
    }
  }

  if(result == CURLE_NO_CONNECTION_AVAILABLE) {
    *in_connect = NULL;
    return result;
  }
  else if(result && *in_connect) {
    /* We're not allowed to return failure with memory left allocated in the
       connectdata struct, free those here */
    Curl_disconnect(data, *in_connect, TRUE);
    *in_connect = NULL; /* return a NULL */
  }

  return result;
}

/*
 * Curl_init_do() inits the readwrite session. This is inited each time (in
 * the DO function before the protocol-specific DO functions are invoked) for
 * a transfer, sometimes multiple times on the same Curl_easy. Make sure
 * nothing in here depends on stuff that are setup dynamically for the
 * transfer.
 *
 * Allow this function to get called with 'conn' set to NULL.
 */
CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn)
{
  struct SingleRequest *k = &data->req;

  if(conn) {
    conn->bits.do_more = FALSE; /* by default there's no curl_do_more() to
                                   use */
    /* if the protocol used doesn't support wildcards, switch it off */
    if(data->state.wildcardmatch &&
       !(conn->handler->flags & PROTOPT_WILDCARD))
      data->state.wildcardmatch = FALSE;
  }

  data->state.done = FALSE; /* *_done() is not called yet */
  data->state.expect100header = FALSE;

  if(data->set.opt_no_body)
    /* in HTTP lingo, no body means using the HEAD request... */
    data->set.httpreq = HTTPREQ_HEAD;
  else if(HTTPREQ_HEAD == data->set.httpreq)
    /* ... but if unset there really is no perfect method that is the
       "opposite" of HEAD but in reality most people probably think GET
       then. The important thing is that we can't let it remain HEAD if the
       opt_no_body is set FALSE since then we'll behave wrong when getting
       HTTP. */
    data->set.httpreq = HTTPREQ_GET;

  k->start = Curl_now(); /* start time */
  k->now = k->start;   /* current time is now */
  k->header = TRUE; /* assume header */

  k->bytecount = 0;

  k->buf = data->state.buffer;
  k->hbufp = data->state.headerbuff;
  k->ignorebody = FALSE;

  Curl_speedinit(data);

  Curl_pgrsSetUploadCounter(data, 0);
  Curl_pgrsSetDownloadCounter(data, 0);

  return CURLE_OK;
}

/*
 * get_protocol_family()
 *
 * This is used to return the protocol family for a given protocol.
 *
 * Parameters:
 *
 * protocol  [in]  - A single bit protocol identifier such as HTTP or HTTPS.
 *
 * Returns the family as a single bit protocol identifier.
*/ static unsigned int get_protocol_family(unsigned int protocol) { unsigned int family; switch(protocol) { case CURLPROTO_HTTP: case CURLPROTO_HTTPS: family = CURLPROTO_HTTP; break; case CURLPROTO_FTP: case CURLPROTO_FTPS: family = CURLPROTO_FTP; break; case CURLPROTO_SCP: family = CURLPROTO_SCP; break; case CURLPROTO_SFTP: family = CURLPROTO_SFTP; break; case CURLPROTO_TELNET: family = CURLPROTO_TELNET; break; case CURLPROTO_LDAP: case CURLPROTO_LDAPS: family = CURLPROTO_LDAP; break; case CURLPROTO_DICT: family = CURLPROTO_DICT; break; case CURLPROTO_FILE: family = CURLPROTO_FILE; break; case CURLPROTO_TFTP: family = CURLPROTO_TFTP; break; case CURLPROTO_IMAP: case CURLPROTO_IMAPS: family = CURLPROTO_IMAP; break; case CURLPROTO_POP3: case CURLPROTO_POP3S: family = CURLPROTO_POP3; break; case CURLPROTO_SMTP: case CURLPROTO_SMTPS: family = CURLPROTO_SMTP; break; case CURLPROTO_RTSP: family = CURLPROTO_RTSP; break; case CURLPROTO_RTMP: case CURLPROTO_RTMPS: family = CURLPROTO_RTMP; break; case CURLPROTO_RTMPT: case CURLPROTO_RTMPTS: family = CURLPROTO_RTMPT; break; case CURLPROTO_RTMPE: family = CURLPROTO_RTMPE; break; case CURLPROTO_RTMPTE: family = CURLPROTO_RTMPTE; break; case CURLPROTO_GOPHER: family = CURLPROTO_GOPHER; break; case CURLPROTO_SMB: case CURLPROTO_SMBS: family = CURLPROTO_SMB; break; default: family = 0; break; } return family; } /* * Wrapper to call functions in Curl_conncache_foreach() * * Returns always 0. */ static int conn_upkeep(struct connectdata *conn, void *param) { /* Param is unused. */ (void)param; if(conn->handler->connection_check) { /* Do a protocol-specific keepalive check on the connection. */ conn->handler->connection_check(conn, CONNCHECK_KEEPALIVE); } return 0; /* continue iteration */ } CURLcode Curl_upkeep(struct conncache *conn_cache, void *data) { /* Loop over every connection and make connection alive. */ Curl_conncache_foreach(data, conn_cache, data, conn_upkeep); return CURLE_OK; }
CURLcode Curl_close(struct Curl_easy *data) { struct Curl_multi *m; if(!data) return CURLE_OK; Curl_expire_clear(data); /* shut off timers */ m = data->multi; if(m) /* This handle is still part of a multi handle, take care of this first and detach this handle from there. */ curl_multi_remove_handle(data->multi, data); if(data->multi_easy) /* when curl_easy_perform() is used, it creates its own multi handle to use and this is the one */ curl_multi_cleanup(data->multi_easy); /* Destroy the timeout list that is held in the easy handle. It is /normally/ done by curl_multi_remove_handle() but this is "just in case" */ Curl_llist_destroy(&data->state.timeoutlist, NULL); data->magic = 0; /* force a clear AFTER the possibly enforced removal from the multi handle, since that function uses the magic field! */ if(data->state.rangestringalloc) free(data->state.range); /* freed here just in case DONE wasn't called */ Curl_free_request_state(data); /* Close down all open SSL info and sessions */ Curl_ssl_close_all(data); Curl_safefree(data->state.first_host); Curl_safefree(data->state.scratch); Curl_ssl_free_certinfo(data); /* Cleanup possible redirect junk */ free(data->req.newurl); data->req.newurl = NULL; if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; Curl_up_free(data); Curl_safefree(data->state.buffer); Curl_safefree(data->state.headerbuff); Curl_safefree(data->state.ulbuf); Curl_flush_cookies(data, 1); Curl_digest_cleanup(data); Curl_safefree(data->info.contenttype); Curl_safefree(data->info.wouldredirect); /* this destroys the channel and we cannot use it anymore after this */ Curl_resolver_cleanup(data->state.resolver); Curl_http2_cleanup_dependencies(data); Curl_convert_close(data); /* No longer a dirty share, if it exists */ if(data->share) { Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE); data->share->dirty--; Curl_share_unlock(data, CURL_LOCK_DATA_SHARE); } 
/* destruct wildcard structures if it is needed */ Curl_wildcard_dtor(&data->wildcard); Curl_freeset(data); free(data); return CURLE_OK; }
CURLcode Curl_close(struct Curl_easy *data) { struct Curl_multi *m; if(!data) return CURLE_OK; Curl_expire_clear(data); /* shut off timers */ m = data->multi; if(m) /* This handle is still part of a multi handle, take care of this first and detach this handle from there. */ curl_multi_remove_handle(data->multi, data); if(data->multi_easy) { /* when curl_easy_perform() is used, it creates its own multi handle to use and this is the one */ curl_multi_cleanup(data->multi_easy); data->multi_easy = NULL; } /* Destroy the timeout list that is held in the easy handle. It is /normally/ done by curl_multi_remove_handle() but this is "just in case" */ Curl_llist_destroy(&data->state.timeoutlist, NULL); data->magic = 0; /* force a clear AFTER the possibly enforced removal from the multi handle, since that function uses the magic field! */ if(data->state.rangestringalloc) free(data->state.range); /* freed here just in case DONE wasn't called */ Curl_free_request_state(data); /* Close down all open SSL info and sessions */ Curl_ssl_close_all(data); Curl_safefree(data->state.first_host); Curl_safefree(data->state.scratch); Curl_ssl_free_certinfo(data); /* Cleanup possible redirect junk */ free(data->req.newurl); data->req.newurl = NULL; if(data->change.referer_alloc) { Curl_safefree(data->change.referer); data->change.referer_alloc = FALSE; } data->change.referer = NULL; Curl_up_free(data); Curl_safefree(data->state.buffer); Curl_safefree(data->state.headerbuff); Curl_safefree(data->state.ulbuf); Curl_flush_cookies(data, 1); Curl_digest_cleanup(data); Curl_safefree(data->info.contenttype); Curl_safefree(data->info.wouldredirect); /* this destroys the channel and we cannot use it anymore after this */ Curl_resolver_cleanup(data->state.resolver); Curl_http2_cleanup_dependencies(data); Curl_convert_close(data); /* No longer a dirty share, if it exists */ if(data->share) { Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE); data->share->dirty--; 
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE); } /* destruct wildcard structures if it is needed */ Curl_wildcard_dtor(&data->wildcard); Curl_freeset(data); free(data); return CURLE_OK; }
{'added': [(334, ' if(data->multi_easy) {'), (338, ' data->multi_easy = NULL;'), (339, ' }')], 'deleted': [(334, ' if(data->multi_easy)')]}
3
1
2,738
16,845
48
312
7
https://github.com/curl/curl
CVE-2018-16840
CWE-416
2,381
jas_seq.c
C++
jas_matrix_create
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * Sequence/Matrix Library * * $Id$ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdlib.h> #include <assert.h> #include <math.h> #include <inttypes.h> #include "jasper/jas_seq.h" #include "jasper/jas_malloc.h" #include "jasper/jas_math.h" /******************************************************************************\ * Constructors and destructors. 
\******************************************************************************/ jas_matrix_t *jas_seq2d_create(int xstart, int ystart, int xend, int yend) { jas_matrix_t *matrix; assert(xstart <= xend && ystart <= yend); if (!(matrix = jas_matrix_create(yend - ystart, xend - xstart))) { return 0; } matrix->xstart_ = xstart; matrix->ystart_ = ystart; matrix->xend_ = xend; matrix->yend_ = yend; return matrix; } jas_matrix_t *jas_matrix_create(int numrows, int numcols) { jas_matrix_t *matrix; int i; size_t size; matrix = 0; if (numrows < 0 || numcols < 0) { goto error; } if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) { goto error; } matrix->flags_ = 0; matrix->numrows_ = numrows; matrix->numcols_ = numcols; matrix->rows_ = 0; matrix->maxrows_ = numrows; matrix->data_ = 0; matrix->datasize_ = 0; // matrix->datasize_ = numrows * numcols; if (!jas_safe_size_mul(numrows, numcols, &size)) { goto error; } matrix->datasize_ = size; if (matrix->maxrows_ > 0) { if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_, sizeof(jas_seqent_t *)))) { goto error; } } if (matrix->datasize_ > 0) { if (!(matrix->data_ = jas_alloc2(matrix->datasize_, sizeof(jas_seqent_t)))) { goto error; } } for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[i * matrix->numcols_]; } for (i = 0; i < matrix->datasize_; ++i) { matrix->data_[i] = 0; } matrix->xstart_ = 0; matrix->ystart_ = 0; matrix->xend_ = matrix->numcols_; matrix->yend_ = matrix->numrows_; return matrix; error: if (matrix) { jas_matrix_destroy(matrix); } return 0; } void jas_matrix_destroy(jas_matrix_t *matrix) { if (matrix->data_) { assert(!(matrix->flags_ & JAS_MATRIX_REF)); jas_free(matrix->data_); matrix->data_ = 0; } if (matrix->rows_) { jas_free(matrix->rows_); matrix->rows_ = 0; } jas_free(matrix); } jas_seq2d_t *jas_seq2d_copy(jas_seq2d_t *x) { jas_matrix_t *y; int i; int j; y = jas_seq2d_create(jas_seq2d_xstart(x), jas_seq2d_ystart(x), jas_seq2d_xend(x), jas_seq2d_yend(x)); assert(y); for (i = 0; i < x->numrows_; 
++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; } jas_matrix_t *jas_matrix_copy(jas_matrix_t *x) { jas_matrix_t *y; int i; int j; y = jas_matrix_create(x->numrows_, x->numcols_); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; } /******************************************************************************\ * Bind operations. \******************************************************************************/ void jas_seq2d_bindsub(jas_matrix_t *s, jas_matrix_t *s1, int xstart, int ystart, int xend, int yend) { jas_matrix_bindsub(s, s1, ystart - s1->ystart_, xstart - s1->xstart_, yend - s1->ystart_ - 1, xend - s1->xstart_ - 1); } void jas_matrix_bindsub(jas_matrix_t *mat0, jas_matrix_t *mat1, int r0, int c0, int r1, int c1) { int i; if (mat0->data_) { if (!(mat0->flags_ & JAS_MATRIX_REF)) { jas_free(mat0->data_); } mat0->data_ = 0; mat0->datasize_ = 0; } if (mat0->rows_) { jas_free(mat0->rows_); mat0->rows_ = 0; } mat0->flags_ |= JAS_MATRIX_REF; mat0->numrows_ = r1 - r0 + 1; mat0->numcols_ = c1 - c0 + 1; mat0->maxrows_ = mat0->numrows_; if (!(mat0->rows_ = jas_alloc2(mat0->maxrows_, sizeof(jas_seqent_t *)))) { /* There is no way to indicate failure to the caller. So, we have no choice but to abort. Ideally, this function should have a non-void return type. In practice, a non-void return type probably would not help much anyways as the caller would just have to terminate anyways. */ abort(); } for (i = 0; i < mat0->numrows_; ++i) { mat0->rows_[i] = mat1->rows_[r0 + i] + c0; } mat0->xstart_ = mat1->xstart_ + c0; mat0->ystart_ = mat1->ystart_ + r0; mat0->xend_ = mat0->xstart_ + mat0->numcols_; mat0->yend_ = mat0->ystart_ + mat0->numrows_; } /******************************************************************************\ * Arithmetic operations. 
\******************************************************************************/ int jas_matrix_cmp(jas_matrix_t *mat0, jas_matrix_t *mat1) { int i; int j; if (mat0->numrows_ != mat1->numrows_ || mat0->numcols_ != mat1->numcols_) { return 1; } for (i = 0; i < mat0->numrows_; i++) { for (j = 0; j < mat0->numcols_; j++) { if (jas_matrix_get(mat0, i, j) != jas_matrix_get(mat1, i, j)) { return 1; } } } return 0; } void jas_matrix_divpow2(jas_matrix_t *matrix, int n) { int i; int j; jas_seqent_t *rowstart; int rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { *data = (*data >= 0) ? ((*data) >> n) : (-((-(*data)) >> n)); } } } } void jas_matrix_clip(jas_matrix_t *matrix, jas_seqent_t minval, jas_seqent_t maxval) { int i; int j; jas_seqent_t v; jas_seqent_t *rowstart; jas_seqent_t *data; int rowstep; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { data = rowstart; for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { v = *data; if (v < minval) { *data = minval; } else if (v > maxval) { *data = maxval; } } } } } void jas_matrix_asr(jas_matrix_t *matrix, int n) { int i; int j; jas_seqent_t *rowstart; int rowstep; jas_seqent_t *data; assert(n >= 0); if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { //*data >>= n; *data = jas_seqent_asr(*data, n); } } } } void jas_matrix_asl(jas_matrix_t 
*matrix, int n) { int i; int j; jas_seqent_t *rowstart; int rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { //*data <<= n; *data = jas_seqent_asl(*data, n); } } } } /******************************************************************************\ * Code. \******************************************************************************/ int jas_matrix_resize(jas_matrix_t *matrix, int numrows, int numcols) { int size; int i; size = numrows * numcols; if (size > matrix->datasize_ || numrows > matrix->maxrows_) { return -1; } matrix->numrows_ = numrows; matrix->numcols_ = numcols; for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[numcols * i]; } return 0; } void jas_matrix_setall(jas_matrix_t *matrix, jas_seqent_t val) { int i; int j; jas_seqent_t *rowstart; int rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { *data = val; } } } } jas_matrix_t *jas_seq2d_input(FILE *in) { jas_matrix_t *matrix; int i; int j; long x; int numrows; int numcols; int xoff; int yoff; if (fscanf(in, "%d %d", &xoff, &yoff) != 2) return 0; if (fscanf(in, "%d %d", &numcols, &numrows) != 2) return 0; if (!(matrix = jas_seq2d_create(xoff, yoff, xoff + numcols, yoff + numrows))) return 0; if (jas_matrix_numrows(matrix) != numrows || jas_matrix_numcols(matrix) != numcols) { abort(); } /* Get matrix data. 
*/ for (i = 0; i < jas_matrix_numrows(matrix); i++) { for (j = 0; j < jas_matrix_numcols(matrix); j++) { if (fscanf(in, "%ld", &x) != 1) { jas_matrix_destroy(matrix); return 0; } jas_matrix_set(matrix, i, j, JAS_CAST(jas_seqent_t, x)); } } return matrix; } int jas_seq2d_output(jas_matrix_t *matrix, FILE *out) { #define MAXLINELEN 80 int i; int j; jas_seqent_t x; char buf[MAXLINELEN + 1]; char sbuf[MAXLINELEN + 1]; int n; fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_seq2d_xstart(matrix), jas_seq2d_ystart(matrix)); fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_matrix_numcols(matrix), jas_matrix_numrows(matrix)); buf[0] = '\0'; for (i = 0; i < jas_matrix_numrows(matrix); ++i) { for (j = 0; j < jas_matrix_numcols(matrix); ++j) { x = jas_matrix_get(matrix, i, j); sprintf(sbuf, "%s%4ld", (strlen(buf) > 0) ? " " : "", JAS_CAST(long, x)); n = JAS_CAST(int, strlen(buf)); if (n + JAS_CAST(int, strlen(sbuf)) > MAXLINELEN) { fputs(buf, out); fputs("\n", out); buf[0] = '\0'; } strcat(buf, sbuf); if (j == jas_matrix_numcols(matrix) - 1) { fputs(buf, out); fputs("\n", out); buf[0] = '\0'; } } } fputs(buf, out); return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * Sequence/Matrix Library * * $Id$ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdlib.h> #include <assert.h> #include <math.h> #include <inttypes.h> #include "jasper/jas_seq.h" #include "jasper/jas_malloc.h" #include "jasper/jas_math.h" /******************************************************************************\ * Constructors and destructors. 
\******************************************************************************/ jas_matrix_t *jas_seq2d_create(jas_matind_t xstart, jas_matind_t ystart, jas_matind_t xend, jas_matind_t yend) { jas_matrix_t *matrix; assert(xstart <= xend && ystart <= yend); if (!(matrix = jas_matrix_create(yend - ystart, xend - xstart))) { return 0; } matrix->xstart_ = xstart; matrix->ystart_ = ystart; matrix->xend_ = xend; matrix->yend_ = yend; return matrix; } jas_matrix_t *jas_matrix_create(jas_matind_t numrows, jas_matind_t numcols) { jas_matrix_t *matrix; jas_matind_t i; size_t size; matrix = 0; if (numrows < 0 || numcols < 0) { goto error; } if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) { goto error; } matrix->flags_ = 0; matrix->numrows_ = numrows; matrix->numcols_ = numcols; matrix->rows_ = 0; matrix->maxrows_ = numrows; matrix->data_ = 0; matrix->datasize_ = 0; // matrix->datasize_ = numrows * numcols; if (!jas_safe_size_mul(numrows, numcols, &size)) { goto error; } matrix->datasize_ = size; if (matrix->maxrows_ > 0) { if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_, sizeof(jas_seqent_t *)))) { goto error; } } if (matrix->datasize_ > 0) { if (!(matrix->data_ = jas_alloc2(matrix->datasize_, sizeof(jas_seqent_t)))) { goto error; } } for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[i * matrix->numcols_]; } for (i = 0; i < matrix->datasize_; ++i) { matrix->data_[i] = 0; } matrix->xstart_ = 0; matrix->ystart_ = 0; matrix->xend_ = matrix->numcols_; matrix->yend_ = matrix->numrows_; return matrix; error: if (matrix) { jas_matrix_destroy(matrix); } return 0; } void jas_matrix_destroy(jas_matrix_t *matrix) { if (matrix->data_) { assert(!(matrix->flags_ & JAS_MATRIX_REF)); jas_free(matrix->data_); matrix->data_ = 0; } if (matrix->rows_) { jas_free(matrix->rows_); matrix->rows_ = 0; } jas_free(matrix); } jas_seq2d_t *jas_seq2d_copy(jas_seq2d_t *x) { jas_matrix_t *y; jas_matind_t i; jas_matind_t j; y = jas_seq2d_create(jas_seq2d_xstart(x), jas_seq2d_ystart(x), 
jas_seq2d_xend(x), jas_seq2d_yend(x)); assert(y); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; } jas_matrix_t *jas_matrix_copy(jas_matrix_t *x) { jas_matrix_t *y; jas_matind_t i; jas_matind_t j; y = jas_matrix_create(x->numrows_, x->numcols_); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; } /******************************************************************************\ * Bind operations. \******************************************************************************/ void jas_seq2d_bindsub(jas_matrix_t *s, jas_matrix_t *s1, jas_matind_t xstart, jas_matind_t ystart, jas_matind_t xend, jas_matind_t yend) { jas_matrix_bindsub(s, s1, ystart - s1->ystart_, xstart - s1->xstart_, yend - s1->ystart_ - 1, xend - s1->xstart_ - 1); } void jas_matrix_bindsub(jas_matrix_t *mat0, jas_matrix_t *mat1, jas_matind_t r0, jas_matind_t c0, jas_matind_t r1, jas_matind_t c1) { jas_matind_t i; if (mat0->data_) { if (!(mat0->flags_ & JAS_MATRIX_REF)) { jas_free(mat0->data_); } mat0->data_ = 0; mat0->datasize_ = 0; } if (mat0->rows_) { jas_free(mat0->rows_); mat0->rows_ = 0; } mat0->flags_ |= JAS_MATRIX_REF; mat0->numrows_ = r1 - r0 + 1; mat0->numcols_ = c1 - c0 + 1; mat0->maxrows_ = mat0->numrows_; if (!(mat0->rows_ = jas_alloc2(mat0->maxrows_, sizeof(jas_seqent_t *)))) { /* There is no way to indicate failure to the caller. So, we have no choice but to abort. Ideally, this function should have a non-void return type. In practice, a non-void return type probably would not help much anyways as the caller would just have to terminate anyways. 
*/ abort(); } for (i = 0; i < mat0->numrows_; ++i) { mat0->rows_[i] = mat1->rows_[r0 + i] + c0; } mat0->xstart_ = mat1->xstart_ + c0; mat0->ystart_ = mat1->ystart_ + r0; mat0->xend_ = mat0->xstart_ + mat0->numcols_; mat0->yend_ = mat0->ystart_ + mat0->numrows_; } /******************************************************************************\ * Arithmetic operations. \******************************************************************************/ int jas_matrix_cmp(jas_matrix_t *mat0, jas_matrix_t *mat1) { jas_matind_t i; jas_matind_t j; if (mat0->numrows_ != mat1->numrows_ || mat0->numcols_ != mat1->numcols_) { return 1; } for (i = 0; i < mat0->numrows_; i++) { for (j = 0; j < mat0->numcols_; j++) { if (jas_matrix_get(mat0, i, j) != jas_matrix_get(mat1, i, j)) { return 1; } } } return 0; } void jas_matrix_divpow2(jas_matrix_t *matrix, int n) { jas_matind_t i; jas_matind_t j; jas_seqent_t *rowstart; jas_matind_t rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { *data = (*data >= 0) ? 
((*data) >> n) : (-((-(*data)) >> n)); } } } } void jas_matrix_clip(jas_matrix_t *matrix, jas_seqent_t minval, jas_seqent_t maxval) { jas_matind_t i; jas_matind_t j; jas_seqent_t v; jas_seqent_t *rowstart; jas_seqent_t *data; jas_matind_t rowstep; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { data = rowstart; for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { v = *data; if (v < minval) { *data = minval; } else if (v > maxval) { *data = maxval; } } } } } void jas_matrix_asr(jas_matrix_t *matrix, int n) { jas_matind_t i; jas_matind_t j; jas_seqent_t *rowstart; jas_matind_t rowstep; jas_seqent_t *data; assert(n >= 0); if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { //*data >>= n; *data = jas_seqent_asr(*data, n); } } } } void jas_matrix_asl(jas_matrix_t *matrix, int n) { jas_matind_t i; jas_matind_t j; jas_seqent_t *rowstart; jas_matind_t rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { //*data <<= n; *data = jas_seqent_asl(*data, n); } } } } /******************************************************************************\ * Code. 
\******************************************************************************/ int jas_matrix_resize(jas_matrix_t *matrix, jas_matind_t numrows, jas_matind_t numcols) { jas_matind_t size; jas_matind_t i; size = numrows * numcols; if (size > matrix->datasize_ || numrows > matrix->maxrows_) { return -1; } matrix->numrows_ = numrows; matrix->numcols_ = numcols; for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[numcols * i]; } return 0; } void jas_matrix_setall(jas_matrix_t *matrix, jas_seqent_t val) { jas_matind_t i; jas_matind_t j; jas_seqent_t *rowstart; jas_matind_t rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { *data = val; } } } } jas_matrix_t *jas_seq2d_input(FILE *in) { jas_matrix_t *matrix; jas_matind_t i; jas_matind_t j; long x; jas_matind_t numrows; jas_matind_t numcols; jas_matind_t xoff; jas_matind_t yoff; long tmp_xoff; long tmp_yoff; long tmp_numrows; long tmp_numcols; if (fscanf(in, "%ld %ld", &tmp_xoff, &tmp_yoff) != 2) { return 0; } xoff = tmp_xoff; yoff = tmp_yoff; if (fscanf(in, "%ld %ld", &tmp_numcols, &tmp_numrows) != 2) { return 0; } numrows = tmp_numrows; numcols = tmp_numcols; if (!(matrix = jas_seq2d_create(xoff, yoff, xoff + numcols, yoff + numrows))) { return 0; } if (jas_matrix_numrows(matrix) != numrows || jas_matrix_numcols(matrix) != numcols) { abort(); } /* Get matrix data. 
*/ for (i = 0; i < jas_matrix_numrows(matrix); i++) { for (j = 0; j < jas_matrix_numcols(matrix); j++) { if (fscanf(in, "%ld", &x) != 1) { jas_matrix_destroy(matrix); return 0; } jas_matrix_set(matrix, i, j, JAS_CAST(jas_seqent_t, x)); } } return matrix; } int jas_seq2d_output(jas_matrix_t *matrix, FILE *out) { #define MAXLINELEN 80 jas_matind_t i; jas_matind_t j; jas_seqent_t x; char buf[MAXLINELEN + 1]; char sbuf[MAXLINELEN + 1]; int n; fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_seq2d_xstart(matrix), jas_seq2d_ystart(matrix)); fprintf(out, "%"PRIiFAST32" %"PRIiFAST32"\n", jas_matrix_numcols(matrix), jas_matrix_numrows(matrix)); buf[0] = '\0'; for (i = 0; i < jas_matrix_numrows(matrix); ++i) { for (j = 0; j < jas_matrix_numcols(matrix); ++j) { x = jas_matrix_get(matrix, i, j); sprintf(sbuf, "%s%4ld", (strlen(buf) > 0) ? " " : "", JAS_CAST(long, x)); n = JAS_CAST(int, strlen(buf)); if (n + JAS_CAST(int, strlen(sbuf)) > MAXLINELEN) { fputs(buf, out); fputs("\n", out); buf[0] = '\0'; } strcat(buf, sbuf); if (j == jas_matrix_numcols(matrix) - 1) { fputs(buf, out); fputs("\n", out); buf[0] = '\0'; } } } fputs(buf, out); return 0; }
jas_matrix_t *jas_matrix_create(int numrows, int numcols) { jas_matrix_t *matrix; int i; size_t size; matrix = 0; if (numrows < 0 || numcols < 0) { goto error; } if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) { goto error; } matrix->flags_ = 0; matrix->numrows_ = numrows; matrix->numcols_ = numcols; matrix->rows_ = 0; matrix->maxrows_ = numrows; matrix->data_ = 0; matrix->datasize_ = 0; // matrix->datasize_ = numrows * numcols; if (!jas_safe_size_mul(numrows, numcols, &size)) { goto error; } matrix->datasize_ = size; if (matrix->maxrows_ > 0) { if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_, sizeof(jas_seqent_t *)))) { goto error; } } if (matrix->datasize_ > 0) { if (!(matrix->data_ = jas_alloc2(matrix->datasize_, sizeof(jas_seqent_t)))) { goto error; } } for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[i * matrix->numcols_]; } for (i = 0; i < matrix->datasize_; ++i) { matrix->data_[i] = 0; } matrix->xstart_ = 0; matrix->ystart_ = 0; matrix->xend_ = matrix->numcols_; matrix->yend_ = matrix->numrows_; return matrix; error: if (matrix) { jas_matrix_destroy(matrix); } return 0; }
jas_matrix_t *jas_matrix_create(jas_matind_t numrows, jas_matind_t numcols) { jas_matrix_t *matrix; jas_matind_t i; size_t size; matrix = 0; if (numrows < 0 || numcols < 0) { goto error; } if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) { goto error; } matrix->flags_ = 0; matrix->numrows_ = numrows; matrix->numcols_ = numcols; matrix->rows_ = 0; matrix->maxrows_ = numrows; matrix->data_ = 0; matrix->datasize_ = 0; // matrix->datasize_ = numrows * numcols; if (!jas_safe_size_mul(numrows, numcols, &size)) { goto error; } matrix->datasize_ = size; if (matrix->maxrows_ > 0) { if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_, sizeof(jas_seqent_t *)))) { goto error; } } if (matrix->datasize_ > 0) { if (!(matrix->data_ = jas_alloc2(matrix->datasize_, sizeof(jas_seqent_t)))) { goto error; } } for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[i * matrix->numcols_]; } for (i = 0; i < matrix->datasize_; ++i) { matrix->data_[i] = 0; } matrix->xstart_ = 0; matrix->ystart_ = 0; matrix->xend_ = matrix->numcols_; matrix->yend_ = matrix->numrows_; return matrix; error: if (matrix) { jas_matrix_destroy(matrix); } return 0; }
{'added': [(87, 'jas_matrix_t *jas_seq2d_create(jas_matind_t xstart, jas_matind_t ystart,'), (88, ' jas_matind_t xend, jas_matind_t yend)'), (102, 'jas_matrix_t *jas_matrix_create(jas_matind_t numrows, jas_matind_t numcols)'), (105, '\tjas_matind_t i;'), (184, '\tjas_matind_t i;'), (185, '\tjas_matind_t j;'), (200, '\tjas_matind_t i;'), (201, '\tjas_matind_t j;'), (215, 'void jas_seq2d_bindsub(jas_matrix_t *s, jas_matrix_t *s1, jas_matind_t xstart,'), (216, ' jas_matind_t ystart, jas_matind_t xend, jas_matind_t yend)'), (222, 'void jas_matrix_bindsub(jas_matrix_t *mat0, jas_matrix_t *mat1,'), (223, ' jas_matind_t r0, jas_matind_t c0, jas_matind_t r1, jas_matind_t c1)'), (225, '\tjas_matind_t i;'), (269, '\tjas_matind_t i;'), (270, '\tjas_matind_t j;'), (288, '\tjas_matind_t i;'), (289, '\tjas_matind_t j;'), (291, '\tjas_matind_t rowstep;'), (311, '\tjas_matind_t i;'), (312, '\tjas_matind_t j;'), (316, '\tjas_matind_t rowstep;'), (339, '\tjas_matind_t i;'), (340, '\tjas_matind_t j;'), (342, '\tjas_matind_t rowstep;'), (362, '\tjas_matind_t i;'), (363, '\tjas_matind_t j;'), (365, '\tjas_matind_t rowstep;'), (386, 'int jas_matrix_resize(jas_matrix_t *matrix, jas_matind_t numrows,'), (387, ' jas_matind_t numcols)'), (389, '\tjas_matind_t size;'), (390, '\tjas_matind_t i;'), (409, '\tjas_matind_t i;'), (410, '\tjas_matind_t j;'), (412, '\tjas_matind_t rowstep;'), (431, '\tjas_matind_t i;'), (432, '\tjas_matind_t j;'), (434, '\tjas_matind_t numrows;'), (435, '\tjas_matind_t numcols;'), (436, '\tjas_matind_t xoff;'), (437, '\tjas_matind_t yoff;'), (438, '\tlong tmp_xoff;'), (439, '\tlong tmp_yoff;'), (440, '\tlong tmp_numrows;'), (441, '\tlong tmp_numcols;'), (442, ''), (443, '\tif (fscanf(in, "%ld %ld", &tmp_xoff, &tmp_yoff) != 2) {'), (445, '\t}'), (446, '\txoff = tmp_xoff;'), (447, '\tyoff = tmp_yoff;'), (448, '\tif (fscanf(in, "%ld %ld", &tmp_numcols, &tmp_numrows) != 2) {'), (450, '\t}'), (451, '\tnumrows = tmp_numrows;'), (452, '\tnumcols = tmp_numcols;'), (453, 
'\tif (!(matrix = jas_seq2d_create(xoff, yoff, xoff + numcols,'), (454, '\t yoff + numrows))) {'), (456, '\t}'), (480, '\tjas_matind_t i;'), (481, '\tjas_matind_t j;')], 'deleted': [(87, 'jas_matrix_t *jas_seq2d_create(int xstart, int ystart, int xend, int yend)'), (101, 'jas_matrix_t *jas_matrix_create(int numrows, int numcols)'), (104, '\tint i;'), (183, '\tint i;'), (184, '\tint j;'), (199, '\tint i;'), (200, '\tint j;'), (214, 'void jas_seq2d_bindsub(jas_matrix_t *s, jas_matrix_t *s1, int xstart,'), (215, ' int ystart, int xend, int yend)'), (221, 'void jas_matrix_bindsub(jas_matrix_t *mat0, jas_matrix_t *mat1, int r0,'), (222, ' int c0, int r1, int c1)'), (224, '\tint i;'), (268, '\tint i;'), (269, '\tint j;'), (287, '\tint i;'), (288, '\tint j;'), (290, '\tint rowstep;'), (310, '\tint i;'), (311, '\tint j;'), (315, '\tint rowstep;'), (338, '\tint i;'), (339, '\tint j;'), (341, '\tint rowstep;'), (361, '\tint i;'), (362, '\tint j;'), (364, '\tint rowstep;'), (385, 'int jas_matrix_resize(jas_matrix_t *matrix, int numrows, int numcols)'), (387, '\tint size;'), (388, '\tint i;'), (407, '\tint i;'), (408, '\tint j;'), (410, '\tint rowstep;'), (429, '\tint i;'), (430, '\tint j;'), (432, '\tint numrows;'), (433, '\tint numcols;'), (434, '\tint xoff;'), (435, '\tint yoff;'), (436, ''), (437, '\tif (fscanf(in, "%d %d", &xoff, &yoff) != 2)'), (439, '\tif (fscanf(in, "%d %d", &numcols, &numrows) != 2)'), (441, '\tif (!(matrix = jas_seq2d_create(xoff, yoff, xoff + numcols, yoff + numrows)))'), (466, '\tint i;'), (467, '\tint j;')]}
58
44
366
2,343
52
305
12
https://github.com/mdadams/jasper
CVE-2016-9395
CWE-20
368
pack-bitmap-write.c
C
show_object
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "list-objects.h" #include "progress.h" #include "pack-revindex.h" #include "pack.h" #include "pack-bitmap.h" #include "sha1-lookup.h" #include "pack-objects.h" struct bitmapped_commit { struct commit *commit; struct ewah_bitmap *bitmap; struct ewah_bitmap *write_as; int flags; int xor_offset; uint32_t commit_pos; }; struct bitmap_writer { struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; khash_sha1 *bitmaps; khash_sha1 *reused; struct packing_data *to_pack; struct bitmapped_commit *selected; unsigned int selected_nr, selected_alloc; struct progress *progress; int show_progress; unsigned char pack_checksum[20]; }; static struct bitmap_writer writer; void bitmap_writer_show_progress(int show) { writer.show_progress = show; } /** * Build the initial type index for the packfile */ void bitmap_writer_build_type_index(struct pack_idx_entry **index, uint32_t index_nr) { uint32_t i; writer.commits = ewah_new(); writer.trees = ewah_new(); writer.blobs = ewah_new(); writer.tags = ewah_new(); for (i = 0; i < index_nr; ++i) { struct object_entry *entry = (struct object_entry *)index[i]; enum object_type real_type; entry->in_pack_pos = i; switch (entry->type) { case OBJ_COMMIT: case OBJ_TREE: case OBJ_BLOB: case OBJ_TAG: real_type = entry->type; break; default: real_type = sha1_object_info(entry->idx.sha1, NULL); break; } switch (real_type) { case OBJ_COMMIT: ewah_set(writer.commits, i); break; case OBJ_TREE: ewah_set(writer.trees, i); break; case OBJ_BLOB: ewah_set(writer.blobs, i); break; case OBJ_TAG: ewah_set(writer.tags, i); break; default: die("Missing type information for %s (%d/%d)", sha1_to_hex(entry->idx.sha1), real_type, entry->type); } } } /** * Compute the actual bitmaps */ static struct object **seen_objects; static unsigned int seen_objects_nr, seen_objects_alloc; static inline void 
push_bitmapped_commit(struct commit *commit, struct ewah_bitmap *reused) { if (writer.selected_nr >= writer.selected_alloc) { writer.selected_alloc = (writer.selected_alloc + 32) * 2; REALLOC_ARRAY(writer.selected, writer.selected_alloc); } writer.selected[writer.selected_nr].commit = commit; writer.selected[writer.selected_nr].bitmap = reused; writer.selected[writer.selected_nr].flags = 0; writer.selected_nr++; } static inline void mark_as_seen(struct object *object) { ALLOC_GROW(seen_objects, seen_objects_nr + 1, seen_objects_alloc); seen_objects[seen_objects_nr++] = object; } static inline void reset_all_seen(void) { unsigned int i; for (i = 0; i < seen_objects_nr; ++i) { seen_objects[i]->flags &= ~(SEEN | ADDED | SHOWN); } seen_objects_nr = 0; } static uint32_t find_object_pos(const unsigned char *sha1) { struct object_entry *entry = packlist_find(writer.to_pack, sha1, NULL); if (!entry) { die("Failed to write bitmap index. Packfile doesn't have full closure " "(object %s is missing)", sha1_to_hex(sha1)); } return entry->in_pack_pos; } static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; bitmap_set(base, find_object_pos(object->oid.hash)); mark_as_seen(object); } static void show_commit(struct commit *commit, void *data) { mark_as_seen((struct object *)commit); } static int add_to_include_set(struct bitmap *base, struct commit *commit) { khiter_t hash_pos; uint32_t bitmap_pos = find_object_pos(commit->object.oid.hash); if (bitmap_get(base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(writer.bitmaps, commit->object.oid.hash); if (hash_pos < kh_end(writer.bitmaps)) { struct bitmapped_commit *bc = kh_value(writer.bitmaps, hash_pos); bitmap_or_ewah(base, bc->bitmap); return 0; } bitmap_set(base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct bitmap *base = _data; if (!add_to_include_set(base, commit)) { struct commit_list *parent = 
commit->parents; mark_as_seen((struct object *)commit); while (parent) { parent->item->object.flags |= SEEN; mark_as_seen((struct object *)parent->item); parent = parent->next; } return 0; } return 1; } static void compute_xor_offsets(void) { static const int MAX_XOR_OFFSET_SEARCH = 10; int i, next = 0; while (next < writer.selected_nr) { struct bitmapped_commit *stored = &writer.selected[next]; int best_offset = 0; struct ewah_bitmap *best_bitmap = stored->bitmap; struct ewah_bitmap *test_xor; for (i = 1; i <= MAX_XOR_OFFSET_SEARCH; ++i) { int curr = next - i; if (curr < 0) break; test_xor = ewah_pool_new(); ewah_xor(writer.selected[curr].bitmap, stored->bitmap, test_xor); if (test_xor->buffer_size < best_bitmap->buffer_size) { if (best_bitmap != stored->bitmap) ewah_pool_free(best_bitmap); best_bitmap = test_xor; best_offset = i; } else { ewah_pool_free(test_xor); } } stored->xor_offset = best_offset; stored->write_as = best_bitmap; next++; } } void bitmap_writer_build(struct packing_data *to_pack) { static const double REUSE_BITMAP_THRESHOLD = 0.2; int i, reuse_after, need_reset; struct bitmap *base = bitmap_new(); struct rev_info revs; writer.bitmaps = kh_init_sha1(); writer.to_pack = to_pack; if (writer.show_progress) writer.progress = start_progress("Building bitmaps", writer.selected_nr); init_revisions(&revs, NULL); revs.tag_objects = 1; revs.tree_objects = 1; revs.blob_objects = 1; revs.no_walk = 0; revs.include_check = should_include; reset_revision_walk(); reuse_after = writer.selected_nr * REUSE_BITMAP_THRESHOLD; need_reset = 0; for (i = writer.selected_nr - 1; i >= 0; --i) { struct bitmapped_commit *stored; struct object *object; khiter_t hash_pos; int hash_ret; stored = &writer.selected[i]; object = (struct object *)stored->commit; if (stored->bitmap == NULL) { if (i < writer.selected_nr - 1 && (need_reset || !in_merge_bases(writer.selected[i + 1].commit, stored->commit))) { bitmap_reset(base); reset_all_seen(); } add_pending_object(&revs, object, 
""); revs.include_check_data = base; if (prepare_revision_walk(&revs)) die("revision walk setup failed"); traverse_commit_list(&revs, show_commit, show_object, base); revs.pending.nr = 0; revs.pending.alloc = 0; revs.pending.objects = NULL; stored->bitmap = bitmap_to_ewah(base); need_reset = 0; } else need_reset = 1; if (i >= reuse_after) stored->flags |= BITMAP_FLAG_REUSE; hash_pos = kh_put_sha1(writer.bitmaps, object->oid.hash, &hash_ret); if (hash_ret == 0) die("Duplicate entry when writing index: %s", oid_to_hex(&object->oid)); kh_value(writer.bitmaps, hash_pos) = stored; display_progress(writer.progress, writer.selected_nr - i); } bitmap_free(base); stop_progress(&writer.progress); compute_xor_offsets(); } /** * Select the commits that will be bitmapped */ static inline unsigned int next_commit_index(unsigned int idx) { static const unsigned int MIN_COMMITS = 100; static const unsigned int MAX_COMMITS = 5000; static const unsigned int MUST_REGION = 100; static const unsigned int MIN_REGION = 20000; unsigned int offset, next; if (idx <= MUST_REGION) return 0; if (idx <= MIN_REGION) { offset = idx - MUST_REGION; return (offset < MIN_COMMITS) ? offset : MIN_COMMITS; } offset = idx - MIN_REGION; next = (offset < MAX_COMMITS) ? offset : MAX_COMMITS; return (next > MIN_COMMITS) ? 
next : MIN_COMMITS; } static int date_compare(const void *_a, const void *_b) { struct commit *a = *(struct commit **)_a; struct commit *b = *(struct commit **)_b; return (long)b->date - (long)a->date; } void bitmap_writer_reuse_bitmaps(struct packing_data *to_pack) { if (prepare_bitmap_git() < 0) return; writer.reused = kh_init_sha1(); rebuild_existing_bitmaps(to_pack, writer.reused, writer.show_progress); } static struct ewah_bitmap *find_reused_bitmap(const unsigned char *sha1) { khiter_t hash_pos; if (!writer.reused) return NULL; hash_pos = kh_get_sha1(writer.reused, sha1); if (hash_pos >= kh_end(writer.reused)) return NULL; return kh_value(writer.reused, hash_pos); } void bitmap_writer_select_commits(struct commit **indexed_commits, unsigned int indexed_commits_nr, int max_bitmaps) { unsigned int i = 0, j, next; qsort(indexed_commits, indexed_commits_nr, sizeof(indexed_commits[0]), date_compare); if (writer.show_progress) writer.progress = start_progress("Selecting bitmap commits", 0); if (indexed_commits_nr < 100) { for (i = 0; i < indexed_commits_nr; ++i) push_bitmapped_commit(indexed_commits[i], NULL); return; } for (;;) { struct ewah_bitmap *reused_bitmap = NULL; struct commit *chosen = NULL; next = next_commit_index(i); if (i + next >= indexed_commits_nr) break; if (max_bitmaps > 0 && writer.selected_nr >= max_bitmaps) { writer.selected_nr = max_bitmaps; break; } if (next == 0) { chosen = indexed_commits[i]; reused_bitmap = find_reused_bitmap(chosen->object.oid.hash); } else { chosen = indexed_commits[i + next]; for (j = 0; j <= next; ++j) { struct commit *cm = indexed_commits[i + j]; reused_bitmap = find_reused_bitmap(cm->object.oid.hash); if (reused_bitmap || (cm->object.flags & NEEDS_BITMAP) != 0) { chosen = cm; break; } if (cm->parents && cm->parents->next) chosen = cm; } } push_bitmapped_commit(chosen, reused_bitmap); i += next + 1; display_progress(writer.progress, i); } stop_progress(&writer.progress); } static int sha1write_ewah_helper(void *f, 
const void *buf, size_t len) { /* sha1write will die on error */ sha1write(f, buf, len); return len; } /** * Write the bitmap index to disk */ static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap) { if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0) die("Failed to write bitmap index"); } static const unsigned char *sha1_access(size_t pos, void *table) { struct pack_idx_entry **index = table; return index[pos]->sha1; } static void write_selected_commits_v1(struct sha1file *f, struct pack_idx_entry **index, uint32_t index_nr) { int i; for (i = 0; i < writer.selected_nr; ++i) { struct bitmapped_commit *stored = &writer.selected[i]; int commit_pos = sha1_pos(stored->commit->object.oid.hash, index, index_nr, sha1_access); if (commit_pos < 0) die("BUG: trying to write commit not in index"); sha1write_be32(f, commit_pos); sha1write_u8(f, stored->xor_offset); sha1write_u8(f, stored->flags); dump_bitmap(f, stored->write_as); } } static void write_hash_cache(struct sha1file *f, struct pack_idx_entry **index, uint32_t index_nr) { uint32_t i; for (i = 0; i < index_nr; ++i) { struct object_entry *entry = (struct object_entry *)index[i]; uint32_t hash_value = htonl(entry->hash); sha1write(f, &hash_value, sizeof(hash_value)); } } void bitmap_writer_set_checksum(unsigned char *sha1) { hashcpy(writer.pack_checksum, sha1); } void bitmap_writer_finish(struct pack_idx_entry **index, uint32_t index_nr, const char *filename, uint16_t options) { static char tmp_file[PATH_MAX]; static uint16_t default_version = 1; static uint16_t flags = BITMAP_OPT_FULL_DAG; struct sha1file *f; struct bitmap_disk_header header; int fd = odb_mkstemp(tmp_file, sizeof(tmp_file), "pack/tmp_bitmap_XXXXXX"); if (fd < 0) die_errno("unable to create '%s'", tmp_file); f = sha1fd(fd, tmp_file); memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)); header.version = htons(default_version); header.options = htons(flags | options); header.entry_count = 
htonl(writer.selected_nr); hashcpy(header.checksum, writer.pack_checksum); sha1write(f, &header, sizeof(header)); dump_bitmap(f, writer.commits); dump_bitmap(f, writer.trees); dump_bitmap(f, writer.blobs); dump_bitmap(f, writer.tags); write_selected_commits_v1(f, index, index_nr); if (options & BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); sha1close(f, NULL, CSUM_FSYNC); if (adjust_shared_perm(tmp_file)) die_errno("unable to make temporary bitmap file readable"); if (rename(tmp_file, filename)) die_errno("unable to rename temporary bitmap file to '%s'", filename); }
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "list-objects.h" #include "progress.h" #include "pack-revindex.h" #include "pack.h" #include "pack-bitmap.h" #include "sha1-lookup.h" #include "pack-objects.h" struct bitmapped_commit { struct commit *commit; struct ewah_bitmap *bitmap; struct ewah_bitmap *write_as; int flags; int xor_offset; uint32_t commit_pos; }; struct bitmap_writer { struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; khash_sha1 *bitmaps; khash_sha1 *reused; struct packing_data *to_pack; struct bitmapped_commit *selected; unsigned int selected_nr, selected_alloc; struct progress *progress; int show_progress; unsigned char pack_checksum[20]; }; static struct bitmap_writer writer; void bitmap_writer_show_progress(int show) { writer.show_progress = show; } /** * Build the initial type index for the packfile */ void bitmap_writer_build_type_index(struct pack_idx_entry **index, uint32_t index_nr) { uint32_t i; writer.commits = ewah_new(); writer.trees = ewah_new(); writer.blobs = ewah_new(); writer.tags = ewah_new(); for (i = 0; i < index_nr; ++i) { struct object_entry *entry = (struct object_entry *)index[i]; enum object_type real_type; entry->in_pack_pos = i; switch (entry->type) { case OBJ_COMMIT: case OBJ_TREE: case OBJ_BLOB: case OBJ_TAG: real_type = entry->type; break; default: real_type = sha1_object_info(entry->idx.sha1, NULL); break; } switch (real_type) { case OBJ_COMMIT: ewah_set(writer.commits, i); break; case OBJ_TREE: ewah_set(writer.trees, i); break; case OBJ_BLOB: ewah_set(writer.blobs, i); break; case OBJ_TAG: ewah_set(writer.tags, i); break; default: die("Missing type information for %s (%d/%d)", sha1_to_hex(entry->idx.sha1), real_type, entry->type); } } } /** * Compute the actual bitmaps */ static struct object **seen_objects; static unsigned int seen_objects_nr, seen_objects_alloc; static inline void 
push_bitmapped_commit(struct commit *commit, struct ewah_bitmap *reused) { if (writer.selected_nr >= writer.selected_alloc) { writer.selected_alloc = (writer.selected_alloc + 32) * 2; REALLOC_ARRAY(writer.selected, writer.selected_alloc); } writer.selected[writer.selected_nr].commit = commit; writer.selected[writer.selected_nr].bitmap = reused; writer.selected[writer.selected_nr].flags = 0; writer.selected_nr++; } static inline void mark_as_seen(struct object *object) { ALLOC_GROW(seen_objects, seen_objects_nr + 1, seen_objects_alloc); seen_objects[seen_objects_nr++] = object; } static inline void reset_all_seen(void) { unsigned int i; for (i = 0; i < seen_objects_nr; ++i) { seen_objects[i]->flags &= ~(SEEN | ADDED | SHOWN); } seen_objects_nr = 0; } static uint32_t find_object_pos(const unsigned char *sha1) { struct object_entry *entry = packlist_find(writer.to_pack, sha1, NULL); if (!entry) { die("Failed to write bitmap index. Packfile doesn't have full closure " "(object %s is missing)", sha1_to_hex(sha1)); } return entry->in_pack_pos; } static void show_object(struct object *object, const char *name, void *data) { struct bitmap *base = data; bitmap_set(base, find_object_pos(object->oid.hash)); mark_as_seen(object); } static void show_commit(struct commit *commit, void *data) { mark_as_seen((struct object *)commit); } static int add_to_include_set(struct bitmap *base, struct commit *commit) { khiter_t hash_pos; uint32_t bitmap_pos = find_object_pos(commit->object.oid.hash); if (bitmap_get(base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(writer.bitmaps, commit->object.oid.hash); if (hash_pos < kh_end(writer.bitmaps)) { struct bitmapped_commit *bc = kh_value(writer.bitmaps, hash_pos); bitmap_or_ewah(base, bc->bitmap); return 0; } bitmap_set(base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct bitmap *base = _data; if (!add_to_include_set(base, commit)) { struct commit_list *parent = commit->parents; 
mark_as_seen((struct object *)commit); while (parent) { parent->item->object.flags |= SEEN; mark_as_seen((struct object *)parent->item); parent = parent->next; } return 0; } return 1; } static void compute_xor_offsets(void) { static const int MAX_XOR_OFFSET_SEARCH = 10; int i, next = 0; while (next < writer.selected_nr) { struct bitmapped_commit *stored = &writer.selected[next]; int best_offset = 0; struct ewah_bitmap *best_bitmap = stored->bitmap; struct ewah_bitmap *test_xor; for (i = 1; i <= MAX_XOR_OFFSET_SEARCH; ++i) { int curr = next - i; if (curr < 0) break; test_xor = ewah_pool_new(); ewah_xor(writer.selected[curr].bitmap, stored->bitmap, test_xor); if (test_xor->buffer_size < best_bitmap->buffer_size) { if (best_bitmap != stored->bitmap) ewah_pool_free(best_bitmap); best_bitmap = test_xor; best_offset = i; } else { ewah_pool_free(test_xor); } } stored->xor_offset = best_offset; stored->write_as = best_bitmap; next++; } } void bitmap_writer_build(struct packing_data *to_pack) { static const double REUSE_BITMAP_THRESHOLD = 0.2; int i, reuse_after, need_reset; struct bitmap *base = bitmap_new(); struct rev_info revs; writer.bitmaps = kh_init_sha1(); writer.to_pack = to_pack; if (writer.show_progress) writer.progress = start_progress("Building bitmaps", writer.selected_nr); init_revisions(&revs, NULL); revs.tag_objects = 1; revs.tree_objects = 1; revs.blob_objects = 1; revs.no_walk = 0; revs.include_check = should_include; reset_revision_walk(); reuse_after = writer.selected_nr * REUSE_BITMAP_THRESHOLD; need_reset = 0; for (i = writer.selected_nr - 1; i >= 0; --i) { struct bitmapped_commit *stored; struct object *object; khiter_t hash_pos; int hash_ret; stored = &writer.selected[i]; object = (struct object *)stored->commit; if (stored->bitmap == NULL) { if (i < writer.selected_nr - 1 && (need_reset || !in_merge_bases(writer.selected[i + 1].commit, stored->commit))) { bitmap_reset(base); reset_all_seen(); } add_pending_object(&revs, object, ""); 
revs.include_check_data = base; if (prepare_revision_walk(&revs)) die("revision walk setup failed"); traverse_commit_list(&revs, show_commit, show_object, base); revs.pending.nr = 0; revs.pending.alloc = 0; revs.pending.objects = NULL; stored->bitmap = bitmap_to_ewah(base); need_reset = 0; } else need_reset = 1; if (i >= reuse_after) stored->flags |= BITMAP_FLAG_REUSE; hash_pos = kh_put_sha1(writer.bitmaps, object->oid.hash, &hash_ret); if (hash_ret == 0) die("Duplicate entry when writing index: %s", oid_to_hex(&object->oid)); kh_value(writer.bitmaps, hash_pos) = stored; display_progress(writer.progress, writer.selected_nr - i); } bitmap_free(base); stop_progress(&writer.progress); compute_xor_offsets(); } /** * Select the commits that will be bitmapped */ static inline unsigned int next_commit_index(unsigned int idx) { static const unsigned int MIN_COMMITS = 100; static const unsigned int MAX_COMMITS = 5000; static const unsigned int MUST_REGION = 100; static const unsigned int MIN_REGION = 20000; unsigned int offset, next; if (idx <= MUST_REGION) return 0; if (idx <= MIN_REGION) { offset = idx - MUST_REGION; return (offset < MIN_COMMITS) ? offset : MIN_COMMITS; } offset = idx - MIN_REGION; next = (offset < MAX_COMMITS) ? offset : MAX_COMMITS; return (next > MIN_COMMITS) ? 
next : MIN_COMMITS; } static int date_compare(const void *_a, const void *_b) { struct commit *a = *(struct commit **)_a; struct commit *b = *(struct commit **)_b; return (long)b->date - (long)a->date; } void bitmap_writer_reuse_bitmaps(struct packing_data *to_pack) { if (prepare_bitmap_git() < 0) return; writer.reused = kh_init_sha1(); rebuild_existing_bitmaps(to_pack, writer.reused, writer.show_progress); } static struct ewah_bitmap *find_reused_bitmap(const unsigned char *sha1) { khiter_t hash_pos; if (!writer.reused) return NULL; hash_pos = kh_get_sha1(writer.reused, sha1); if (hash_pos >= kh_end(writer.reused)) return NULL; return kh_value(writer.reused, hash_pos); } void bitmap_writer_select_commits(struct commit **indexed_commits, unsigned int indexed_commits_nr, int max_bitmaps) { unsigned int i = 0, j, next; qsort(indexed_commits, indexed_commits_nr, sizeof(indexed_commits[0]), date_compare); if (writer.show_progress) writer.progress = start_progress("Selecting bitmap commits", 0); if (indexed_commits_nr < 100) { for (i = 0; i < indexed_commits_nr; ++i) push_bitmapped_commit(indexed_commits[i], NULL); return; } for (;;) { struct ewah_bitmap *reused_bitmap = NULL; struct commit *chosen = NULL; next = next_commit_index(i); if (i + next >= indexed_commits_nr) break; if (max_bitmaps > 0 && writer.selected_nr >= max_bitmaps) { writer.selected_nr = max_bitmaps; break; } if (next == 0) { chosen = indexed_commits[i]; reused_bitmap = find_reused_bitmap(chosen->object.oid.hash); } else { chosen = indexed_commits[i + next]; for (j = 0; j <= next; ++j) { struct commit *cm = indexed_commits[i + j]; reused_bitmap = find_reused_bitmap(cm->object.oid.hash); if (reused_bitmap || (cm->object.flags & NEEDS_BITMAP) != 0) { chosen = cm; break; } if (cm->parents && cm->parents->next) chosen = cm; } } push_bitmapped_commit(chosen, reused_bitmap); i += next + 1; display_progress(writer.progress, i); } stop_progress(&writer.progress); } static int sha1write_ewah_helper(void *f, 
const void *buf, size_t len) { /* sha1write will die on error */ sha1write(f, buf, len); return len; } /** * Write the bitmap index to disk */ static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap) { if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0) die("Failed to write bitmap index"); } static const unsigned char *sha1_access(size_t pos, void *table) { struct pack_idx_entry **index = table; return index[pos]->sha1; } static void write_selected_commits_v1(struct sha1file *f, struct pack_idx_entry **index, uint32_t index_nr) { int i; for (i = 0; i < writer.selected_nr; ++i) { struct bitmapped_commit *stored = &writer.selected[i]; int commit_pos = sha1_pos(stored->commit->object.oid.hash, index, index_nr, sha1_access); if (commit_pos < 0) die("BUG: trying to write commit not in index"); sha1write_be32(f, commit_pos); sha1write_u8(f, stored->xor_offset); sha1write_u8(f, stored->flags); dump_bitmap(f, stored->write_as); } } static void write_hash_cache(struct sha1file *f, struct pack_idx_entry **index, uint32_t index_nr) { uint32_t i; for (i = 0; i < index_nr; ++i) { struct object_entry *entry = (struct object_entry *)index[i]; uint32_t hash_value = htonl(entry->hash); sha1write(f, &hash_value, sizeof(hash_value)); } } void bitmap_writer_set_checksum(unsigned char *sha1) { hashcpy(writer.pack_checksum, sha1); } void bitmap_writer_finish(struct pack_idx_entry **index, uint32_t index_nr, const char *filename, uint16_t options) { static char tmp_file[PATH_MAX]; static uint16_t default_version = 1; static uint16_t flags = BITMAP_OPT_FULL_DAG; struct sha1file *f; struct bitmap_disk_header header; int fd = odb_mkstemp(tmp_file, sizeof(tmp_file), "pack/tmp_bitmap_XXXXXX"); if (fd < 0) die_errno("unable to create '%s'", tmp_file); f = sha1fd(fd, tmp_file); memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)); header.version = htons(default_version); header.options = htons(flags | options); header.entry_count = 
htonl(writer.selected_nr); hashcpy(header.checksum, writer.pack_checksum); sha1write(f, &header, sizeof(header)); dump_bitmap(f, writer.commits); dump_bitmap(f, writer.trees); dump_bitmap(f, writer.blobs); dump_bitmap(f, writer.tags); write_selected_commits_v1(f, index, index_nr); if (options & BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); sha1close(f, NULL, CSUM_FSYNC); if (adjust_shared_perm(tmp_file)) die_errno("unable to make temporary bitmap file readable"); if (rename(tmp_file, filename)) die_errno("unable to rename temporary bitmap file to '%s'", filename); }
static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; bitmap_set(base, find_object_pos(object->oid.hash)); mark_as_seen(object); }
static void show_object(struct object *object, const char *name, void *data) { struct bitmap *base = data; bitmap_set(base, find_object_pos(object->oid.hash)); mark_as_seen(object); }
{'added': [(151, 'static void show_object(struct object *object, const char *name, void *data)')], 'deleted': [(151, 'static void show_object(struct object *object, struct strbuf *path,'), (152, '\t\t\tconst char *last, void *data)')]}
1
2
412
2,589
7
49
1
https://github.com/git/git
CVE-2016-2315
CWE-119
2,965
audio_spectrogram.cc
C++
tflite::ops::custom::audio_spectrogram::Eval
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <math.h> #include <stddef.h> #include <stdint.h> #include <vector> #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/spectrogram.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace audio_spectrogram { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; enum KernelType { kReference, }; typedef struct { int window_size; int stride; bool magnitude_squared; int output_height; internal::Spectrogram* spectrogram; } TfLiteAudioSpectrogramParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new TfLiteAudioSpectrogramParams; const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); data->window_size = m["window_size"].AsInt64(); data->stride = m["stride"].AsInt64(); data->magnitude_squared = m["magnitude_squared"].AsBool(); data->spectrogram = new internal::Spectrogram; return 
data; } void Free(TfLiteContext* context, void* buffer) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(buffer); delete params->spectrogram; delete params; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = 
params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; } } // namespace audio_spectrogram TfLiteRegistration* Register_AUDIO_SPECTROGRAM() { static TfLiteRegistration r = { audio_spectrogram::Init, audio_spectrogram::Free, audio_spectrogram::Prepare, audio_spectrogram::Eval<audio_spectrogram::kReference>}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <math.h> #include <stddef.h> #include <stdint.h> #include <vector> #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/spectrogram.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace audio_spectrogram { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; enum KernelType { kReference, }; typedef struct { int window_size; int stride; bool magnitude_squared; int output_height; internal::Spectrogram* spectrogram; } TfLiteAudioSpectrogramParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new TfLiteAudioSpectrogramParams; const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); data->window_size = m["window_size"].AsInt64(); data->stride = m["stride"].AsInt64(); data->magnitude_squared = m["magnitude_squared"].AsBool(); data->spectrogram = new internal::Spectrogram; return 
data; } void Free(TfLiteContext* context, void* buffer) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(buffer); delete params->spectrogram; delete params; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t 
sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; } } // namespace audio_spectrogram TfLiteRegistration* Register_AUDIO_SPECTROGRAM() { static TfLiteRegistration r = { audio_spectrogram::Init, audio_spectrogram::Free, audio_spectrogram::Prepare, audio_spectrogram::Eval<audio_spectrogram::kReference>}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; }
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; }
{'added': [(79, ' const TfLiteTensor* input;'), (80, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (81, ' TfLiteTensor* output;'), (82, ' TF_LITE_ENSURE_OK(context,'), (83, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (112, ' const TfLiteTensor* input;'), (113, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (114, ' TfLiteTensor* output;'), (115, ' TF_LITE_ENSURE_OK(context,'), (116, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(79, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (80, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (109, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (110, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
10
4
130
964
44
406
8
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,772
PipeSocketHandler.cpp
C++
et::PipeSocketHandler::listen
#include "PipeSocketHandler.hpp" namespace et { PipeSocketHandler::PipeSocketHandler() {} int PipeSocketHandler::connect(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> mutexGuard(globalMutex); string pipePath = endpoint.name(); sockaddr_un remote; int sockFd = ::socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(sockFd); initSocket(sockFd); remote.sun_family = AF_UNIX; strcpy(remote.sun_path, pipePath.c_str()); VLOG(3) << "Connecting to " << endpoint << " with fd " << sockFd; int result = ::connect(sockFd, (struct sockaddr*)&remote, sizeof(sockaddr_un)); auto localErrno = GetErrno(); if (result < 0 && localErrno != EINPROGRESS) { VLOG(3) << "Connection result: " << result << " (" << strerror(localErrno) << ")"; #ifdef WIN32 ::shutdown(sockFd, SD_BOTH); #else ::shutdown(sockFd, SHUT_RDWR); #endif #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; SetErrno(localErrno); return sockFd; } fd_set fdset; FD_ZERO(&fdset); FD_SET(sockFd, &fdset); timeval tv; tv.tv_sec = 3; /* 3 second timeout */ tv.tv_usec = 0; VLOG(4) << "Before selecting sockFd"; select(sockFd + 1, NULL, &fdset, NULL, &tv); if (FD_ISSET(sockFd, &fdset)) { VLOG(4) << "sockFd " << sockFd << " is selected"; int so_error; socklen_t len = sizeof so_error; FATAL_FAIL( ::getsockopt(sockFd, SOL_SOCKET, SO_ERROR, (char*)&so_error, &len)); if (so_error == 0) { LOG(INFO) << "Connected to endpoint " << endpoint; // Initialize the socket again once it's blocking to make sure timeouts // are set initSocket(sockFd); // if we get here, we must have connected successfully } else { LOG(INFO) << "Error connecting to " << endpoint << ": " << so_error << " " << strerror(so_error); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } } else { auto localErrno = GetErrno(); LOG(INFO) << "Error connecting to " << endpoint << ": " << localErrno << " " << strerror(localErrno); #ifdef _MSC_VER 
FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } LOG(INFO) << sockFd << " is a good socket"; if (sockFd >= 0) { addToActiveSockets(sockFd); } return sockFd; } set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) { throw runtime_error("Tried to listen twice on the same path"); } sockaddr_un local; int fd = socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(fd); initServerSocket(fd); local.sun_family = AF_UNIX; /* local is declared before socket() ^ */ strcpy(local.sun_path, pipePath.c_str()); unlink(local.sun_path); FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un))); ::listen(fd, 5); #ifndef WIN32 FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR)); #endif pipeServerSockets[pipePath] = set<int>({fd}); return pipeServerSockets[pipePath]; } set<int> PipeSocketHandler::getEndpointFds(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); if (pipeServerSockets.find(pipePath) == pipeServerSockets.end()) { STFATAL << "Tried to getPipeFd on a pipe without calling listen() first: " << pipePath; } return pipeServerSockets[pipePath]; } void PipeSocketHandler::stopListening(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); auto it = pipeServerSockets.find(pipePath); if (it == pipeServerSockets.end()) { STFATAL << "Tried to stop listening to a pipe that we weren't listening on:" << pipePath; } int sockFd = *(it->second.begin()); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif } } // namespace et
#include "PipeSocketHandler.hpp" namespace et { PipeSocketHandler::PipeSocketHandler() {} int PipeSocketHandler::connect(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> mutexGuard(globalMutex); string pipePath = endpoint.name(); sockaddr_un remote; int sockFd = ::socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(sockFd); initSocket(sockFd); remote.sun_family = AF_UNIX; strncpy(remote.sun_path, pipePath.c_str(), sizeof(remote.sun_path)); VLOG(3) << "Connecting to " << endpoint << " with fd " << sockFd; int result = ::connect(sockFd, (struct sockaddr*)&remote, sizeof(sockaddr_un)); auto localErrno = GetErrno(); if (result < 0 && localErrno != EINPROGRESS) { VLOG(3) << "Connection result: " << result << " (" << strerror(localErrno) << ")"; #ifdef WIN32 ::shutdown(sockFd, SD_BOTH); #else ::shutdown(sockFd, SHUT_RDWR); #endif #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; SetErrno(localErrno); return sockFd; } fd_set fdset; FD_ZERO(&fdset); FD_SET(sockFd, &fdset); timeval tv; tv.tv_sec = 3; /* 3 second timeout */ tv.tv_usec = 0; VLOG(4) << "Before selecting sockFd"; select(sockFd + 1, NULL, &fdset, NULL, &tv); if (FD_ISSET(sockFd, &fdset)) { VLOG(4) << "sockFd " << sockFd << " is selected"; int so_error; socklen_t len = sizeof so_error; FATAL_FAIL( ::getsockopt(sockFd, SOL_SOCKET, SO_ERROR, (char*)&so_error, &len)); if (so_error == 0) { LOG(INFO) << "Connected to endpoint " << endpoint; // Initialize the socket again once it's blocking to make sure timeouts // are set initSocket(sockFd); // if we get here, we must have connected successfully } else { LOG(INFO) << "Error connecting to " << endpoint << ": " << so_error << " " << strerror(so_error); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } } else { auto localErrno = GetErrno(); LOG(INFO) << "Error connecting to " << endpoint << ": " << localErrno << " " << strerror(localErrno); #ifdef 
_MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif sockFd = -1; } LOG(INFO) << sockFd << " is a good socket"; if (sockFd >= 0) { addToActiveSockets(sockFd); } return sockFd; } set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) { throw runtime_error("Tried to listen twice on the same path"); } sockaddr_un local; int fd = socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(fd); initServerSocket(fd); local.sun_family = AF_UNIX; /* local is declared before socket() ^ */ strncpy(local.sun_path, pipePath.c_str(), sizeof(local.sun_path)); unlink(local.sun_path); FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un))); ::listen(fd, 5); #ifndef WIN32 FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR)); #endif pipeServerSockets[pipePath] = set<int>({fd}); return pipeServerSockets[pipePath]; } set<int> PipeSocketHandler::getEndpointFds(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); if (pipeServerSockets.find(pipePath) == pipeServerSockets.end()) { STFATAL << "Tried to getPipeFd on a pipe without calling listen() first: " << pipePath; } return pipeServerSockets[pipePath]; } void PipeSocketHandler::stopListening(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); auto it = pipeServerSockets.find(pipePath); if (it == pipeServerSockets.end()) { STFATAL << "Tried to stop listening to a pipe that we weren't listening on:" << pipePath; } int sockFd = *(it->second.begin()); #ifdef _MSC_VER FATAL_FAIL(::closesocket(sockFd)); #else FATAL_FAIL(::close(sockFd)); #endif } } // namespace et
set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) { throw runtime_error("Tried to listen twice on the same path"); } sockaddr_un local; int fd = socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(fd); initServerSocket(fd); local.sun_family = AF_UNIX; /* local is declared before socket() ^ */ strcpy(local.sun_path, pipePath.c_str()); unlink(local.sun_path); FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un))); ::listen(fd, 5); #ifndef WIN32 FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR)); #endif pipeServerSockets[pipePath] = set<int>({fd}); return pipeServerSockets[pipePath]; }
set<int> PipeSocketHandler::listen(const SocketEndpoint& endpoint) { lock_guard<std::recursive_mutex> guard(globalMutex); string pipePath = endpoint.name(); if (pipeServerSockets.find(pipePath) != pipeServerSockets.end()) { throw runtime_error("Tried to listen twice on the same path"); } sockaddr_un local; int fd = socket(AF_UNIX, SOCK_STREAM, 0); FATAL_FAIL(fd); initServerSocket(fd); local.sun_family = AF_UNIX; /* local is declared before socket() ^ */ strncpy(local.sun_path, pipePath.c_str(), sizeof(local.sun_path)); unlink(local.sun_path); FATAL_FAIL(::bind(fd, (struct sockaddr*)&local, sizeof(sockaddr_un))); ::listen(fd, 5); #ifndef WIN32 FATAL_FAIL(::chmod(local.sun_path, S_IRUSR | S_IWUSR | S_IXUSR)); #endif pipeServerSockets[pipePath] = set<int>({fd}); return pipeServerSockets[pipePath]; }
{'added': [(16, ' strncpy(remote.sun_path, pipePath.c_str(), sizeof(remote.sun_path));'), (107, ' strncpy(local.sun_path, pipePath.c_str(), sizeof(local.sun_path));')], 'deleted': [(16, ' strcpy(remote.sun_path, pipePath.c_str());'), (107, ' strcpy(local.sun_path, pipePath.c_str());')]}
2
2
106
846
19
173
2
https://github.com/MisterTea/EternalTerminal
CVE-2022-24949
CWE-362
643
key.c
C
key_update
/* Basic authentication token and access key management * * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poison.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> #include "internal.h" struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); struct rb_root key_user_tree; /* tree of quota records indexed by UID */ DEFINE_SPINLOCK(key_user_lock); unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */ unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */ unsigned int key_quota_maxkeys = 200; /* general key count quota */ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); /* We serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { printk("__key_check: key %p {%08x} should be {%08x}\n", key, key->magic, KEY_DEBUG_MAGIC); BUG(); } #endif /* * Get the key quota record for a user, allocating a new record if one doesn't * already exist. 
*/ struct key_user *key_user_lookup(kuid_t uid) { struct key_user *candidate = NULL, *user; struct rb_node *parent, **p; try_again: parent = NULL; p = &key_user_tree.rb_node; spin_lock(&key_user_lock); /* search the tree for a user record with a matching UID */ while (*p) { parent = *p; user = rb_entry(parent, struct key_user, node); if (uid_lt(uid, user->uid)) p = &(*p)->rb_left; else if (uid_gt(uid, user->uid)) p = &(*p)->rb_right; else goto found; } /* if we get here, we failed to find a match in the tree */ if (!candidate) { /* allocate a candidate user record if we don't already have * one */ spin_unlock(&key_user_lock); user = NULL; candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); if (unlikely(!candidate)) goto out; /* the allocation may have scheduled, so we need to repeat the * search lest someone else added the record whilst we were * asleep */ goto try_again; } /* if we get here, then the user record still hadn't appeared on the * second pass - so we use the candidate record */ refcount_set(&candidate->usage, 1); atomic_set(&candidate->nkeys, 0); atomic_set(&candidate->nikeys, 0); candidate->uid = uid; candidate->qnkeys = 0; candidate->qnbytes = 0; spin_lock_init(&candidate->lock); mutex_init(&candidate->cons_lock); rb_link_node(&candidate->node, parent, p); rb_insert_color(&candidate->node, &key_user_tree); spin_unlock(&key_user_lock); user = candidate; goto out; /* okay - we found a user record for this UID */ found: refcount_inc(&user->usage); spin_unlock(&key_user_lock); kfree(candidate); out: return user; } /* * Dispose of a user structure */ void key_user_put(struct key_user *user) { if (refcount_dec_and_lock(&user->usage, &key_user_lock)) { rb_erase(&user->node, &key_user_tree); spin_unlock(&key_user_lock); kfree(user); } } /* * Allocate a serial number for a key. These are assigned randomly to avoid * security issues through covert channel problems. 
*/ static inline void key_alloc_serial(struct key *key) { struct rb_node *parent, **p; struct key *xkey; /* propose a random serial number and look for a hole for it in the * serial number tree */ do { get_random_bytes(&key->serial, sizeof(key->serial)); key->serial >>= 1; /* negative numbers are not permitted */ } while (key->serial < 3); spin_lock(&key_serial_lock); attempt_insertion: parent = NULL; p = &key_serial_tree.rb_node; while (*p) { parent = *p; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) p = &(*p)->rb_left; else if (key->serial > xkey->serial) p = &(*p)->rb_right; else goto serial_exists; } /* we've found a suitable hole - arrange for this key to occupy it */ rb_link_node(&key->serial_node, parent, p); rb_insert_color(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); return; /* we found a key with the proposed serial number - walk the tree from * that point looking for the next unused serial number */ serial_exists: for (;;) { key->serial++; if (key->serial < 3) { key->serial = 3; goto attempt_insertion; } parent = rb_next(parent); if (!parent) goto attempt_insertion; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) goto attempt_insertion; } } /** * key_alloc - Allocate a key of the specified type. * @type: The type of key to allocate. * @desc: The key description to allow the key to be searched out. * @uid: The owner of the new key. * @gid: The group ID for the new key's group permissions. * @cred: The credentials specifying UID namespace. * @perm: The permissions mask of the new key. * @flags: Flags specifying quota properties. * @restrict_link: Optional link restriction for new keyrings. * * Allocate a key of the specified type with the attributes given. The key is * returned in an uninstantiated state and the caller needs to instantiate the * key before returning. 
* * The restrict_link structure (if not NULL) will be freed when the * keyring is destroyed, so it must be dynamically allocated. * * The user's key count quota is updated to reflect the creation of the key and * the user's key data quota has the default for the key type reserved. The * instantiation function should amend this as necessary. If insufficient * quota is available, -EDQUOT will be returned. * * The LSM security modules can prevent a key being created, in which case * -EACCES will be returned. * * Returns a pointer to the new key if successful and an error code otherwise. * * Note that the caller needs to ensure the key type isn't uninstantiated. * Internally this can be done by locking key_types_sem. Externally, this can * be done by either never unregistering the key type, or making sure * key_alloc() calls don't race with module unloading. */ struct key *key_alloc(struct key_type *type, const char *desc, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key_restriction *restrict_link) { struct key_user *user = NULL; struct key *key; size_t desclen, quotalen; int ret; key = ERR_PTR(-EINVAL); if (!desc || !*desc) goto error; if (type->vet_description) { ret = type->vet_description(desc); if (ret < 0) { key = ERR_PTR(ret); goto error; } } desclen = strlen(desc); quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { if (user->qnkeys + 1 >= maxkeys || user->qnbytes + quotalen >= maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } user->qnkeys++; user->qnbytes += quotalen; spin_unlock(&user->lock); } /* allocate and initialise the key and its description */ key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; key->index_key.desc_len = desclen; key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->index_key.description) goto no_memory_3; refcount_set(&key->usage, 1); init_rwsem(&key->sem); lockdep_set_class(&key->sem, &type->lock_class); key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; key->restrict_link = restrict_link; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_BUILT_IN) key->flags |= 1 << KEY_FLAG_BUILTIN; if (flags & KEY_ALLOC_UID_KEYRING) key->flags |= 1 << KEY_FLAG_UID_KEYRING; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif /* let the security module know about the key */ ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; /* publish the key by giving it a serial number */ atomic_inc(&user->nkeys); key_alloc_serial(key); error: return key; security_error: kfree(key->description); kmem_cache_free(key_jar, key); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); key = ERR_PTR(ret); goto error; no_memory_3: kmem_cache_free(key_jar, key); no_memory_2: if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); no_memory_1: key = ERR_PTR(-ENOMEM); goto error; no_quota: spin_unlock(&user->lock); 
key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; } EXPORT_SYMBOL(key_alloc); /** * key_payload_reserve - Adjust data quota reservation for the key's payload * @key: The key to make the reservation for. * @datalen: The amount of data payload the caller now wants. * * Adjust the amount of the owning user's key data quota that a key reserves. * If the amount is increased, then -EDQUOT may be returned if there isn't * enough free quota available. * * If successful, 0 is returned. */ int key_payload_reserve(struct key *key, size_t datalen) { int delta = (int)datalen - key->datalen; int ret = 0; key_check(key); /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); if (delta > 0 && (key->user->qnbytes + delta >= maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } else { key->user->qnbytes += delta; key->quotalen += delta; } spin_unlock(&key->user->lock); } /* change the recorded data length if that didn't generate an error */ if (ret == 0) key->datalen = datalen; return ret; } EXPORT_SYMBOL(key_payload_reserve); /* * Instantiate a key and link it into the target keyring atomically. Must be * called with the target keyring's semaphore writelocked. The target key's * semaphore need not be locked as instantiation is serialised by * key_construction_mutex. 
 */
static int __key_instantiate_and_link(struct key *key,
				      struct key_preparsed_payload *prep,
				      struct key *keyring,
				      struct key *authkey,
				      struct assoc_array_edit **_edit)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* key_construction_mutex serialises all instantiation (see the
	 * comment above this function): only one thread can flip a key from
	 * uninstantiated to instantiated. */
	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice - a second caller drops through and
	 * returns the initial -EBUSY */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key: the type's op parses/stores the
		 * payload prepared in *prep */
		ret = key->type->instantiate(key, prep);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			/* someone may be sleeping in wait_on_bit() for the
			 * construction to finish - remember to wake them */
			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring, using the
			 * edit script the caller began with
			 * __key_link_begin() */
			if (keyring) {
				/* a KEEP keyring confers keep-ness on its
				 * new members */
				if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
					set_bit(KEY_FLAG_KEEP, &key->flags);

				__key_link(key, _edit);
			}

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);

			/* honour any expiry time set by ->preparse() */
			if (prep->expiry != TIME_T_MAX) {
				key->expiry = prep->expiry;
				key_schedule_gc(prep->expiry + key_gc_delay);
			}
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed - done after
	 * dropping the mutex so waiters don't immediately block on it */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the keyring.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided data
 * and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
*/ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { struct key_preparsed_payload prep; struct assoc_array_edit *edit; int ret; memset(&prep, 0, sizeof(prep)); prep.data = data; prep.datalen = datalen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } if (keyring) { ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error; if (keyring->restrict_link && keyring->restrict_link->check) { struct key_restriction *keyres = keyring->restrict_link; ret = keyres->check(keyring, key->type, &prep.payload, keyres->key); if (ret < 0) goto error_link_end; } } ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); error_link_end: if (keyring) __key_link_end(keyring, &key->index_key, edit); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_instantiate_and_link); /** * key_reject_and_link - Negatively instantiate a key and link it into the keyring. * @key: The key to instantiate. * @timeout: The timeout on the negative key. * @error: The error to return when the key is hit. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Negatively instantiate a key that's in the uninstantiated state and, if * successful, set its timeout and stored error and link it in to the * destination keyring if one is supplied. The key and any links to the key * will be automatically garbage collected after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the stored error code (typically ENOKEY) until the negative * key expires. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. 
If the key was already instantiated, * -EBUSY will be returned. */ int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey) { struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) { if (keyring->restrict_link) return -EPERM; link_ret = __key_link_begin(keyring, &key->index_key, &edit); } mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); key->reject_error = -error; smp_wmb(); set_bit(KEY_FLAG_NEGATIVE, &key->flags); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring && link_ret == 0) __key_link(key, &edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring && link_ret == 0) __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; } EXPORT_SYMBOL(key_reject_and_link); /** * key_put - Discard a reference to a key. * @key: The key to discard a reference from. * * Discard a reference to a key, and when all the references are gone, we * schedule the cleanup task to come and pull it out of the tree in process * context at some later time. */ void key_put(struct key *key) { if (key) { key_check(key); if (refcount_dec_and_test(&key->usage)) schedule_work(&key_gc_work); } } EXPORT_SYMBOL(key_put); /* * Find a key by its serial number. 
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key - serials are the sort key
	 * of key_serial_tree */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* A key is allowed to be looked up only if someone still owns a
	 * reference to it - otherwise it's awaiting the gc. */
	if (!refcount_inc_not_zero(&key->usage))
		goto not_found;

error:
	spin_unlock(&key_serial_lock);
	return key;
}

/*
 * Find and lock the specified key type against removal.
 *
 * We return with the sem read-locked if successful.  If the type wasn't
 * available -ENOKEY is returned instead.  The caller must balance with
 * key_type_put() on success.
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	/* not found: drop the lock before returning the error */
	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;
}

/*
 * Set (timeout > 0) or clear (timeout == 0) a key's expiry time and
 * reschedule the garbage collector accordingly.
 */
void key_set_timeout(struct key *key, unsigned timeout)
{
	struct timespec now;
	time_t expiry = 0;	/* 0 == no expiry */

	/* make the changes with the locks held to prevent races */
	down_write(&key->sem);

	if (timeout > 0) {
		now = current_kernel_time();
		expiry = now.tv_sec + timeout;
	}

	key->expiry = expiry;
	key_schedule_gc(key->expiry + key_gc_delay);

	up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);

/*
 * Unlock a key type locked by key_type_lookup().
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
}

/*
 * Attempt to update an existing key.
 *
 * The key is given to us with an incremented refcount that we need to discard
 * if we get an error.
*/ static inline key_ref_t __key_update(key_ref_t key_ref, struct key_preparsed_payload *prep) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; ret = -EEXIST; if (!key->type->update) goto error; down_write(&key->sem); ret = key->type->update(key, prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); if (ret < 0) goto error; out: return key_ref; error: key_put(key); key_ref = ERR_PTR(ret); goto out; } /** * key_create_or_update - Update or create and instantiate a key. * @keyring_ref: A pointer to the destination keyring with possession flag. * @type: The type of key. * @description: The searchable description for the key. * @payload: The data to use to instantiate or update the key. * @plen: The length of @payload. * @perm: The permissions mask for a new key. * @flags: The quota flags for a new key. * * Search the destination keyring for a key of the same description and if one * is found, update it, otherwise create and instantiate a new one and create a * link to it from that keyring. * * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be * concocted. * * Returns a pointer to the new key if successful, -ENODEV if the key type * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the * caller isn't permitted to modify the keyring or the LSM did not permit * creation of the key. * * On success, the possession flag from the keyring ref will be tacked on to * the key ref before it is returned. 
*/ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; struct key_restriction *restrict_link = NULL; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-EPERM); if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION)) restrict_link = keyring->restrict_link; key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (restrict_link && restrict_link->check) { ret = restrict_link->check(keyring, index_key.type, &prep.payload, restrict_link->key); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { 
key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags, NULL); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; } EXPORT_SYMBOL(key_create_or_update); /** * key_update - Update a key's contents. * @key_ref: The pointer (plus possession flag) to the key. * @payload: The data to be used to update the key. * @plen: The length of @payload. * * Attempt to update the contents of a key with the given payload data. The * caller must be granted Write permission on the key. 
Negative keys can be * instantiated by this method. * * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key * type does not support updating. The key type may return other errors. */ int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) return ret; /* attempt to update it if supported */ if (!key->type->update) return -EOPNOTSUPP; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_update); /** * key_revoke - Revoke a key. * @key: The key to be revoked. * * Mark a key as being revoked and ask the type to free up its resources. The * revocation timeout is set and the key and all its links will be * automatically garbage collected after key_gc_delay amount of time if they * are not manually dealt with first. 
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	/* only the first revoker runs the type's revoke op */
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);

/**
 * key_invalidate - Invalidate a key.
 * @key: The key to be invalidated.
 *
 * Mark a key as being invalidated and have it cleaned up immediately.  The key
 * is ignored by all searches and other operations from this point.
 */
void key_invalidate(struct key *key)
{
	kenter("%d", key_serial(key));

	key_check(key);

	/* unlocked pre-check avoids taking the sem for an already
	 * invalidated key; the bit is re-tested under the lock */
	if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
		down_write_nested(&key->sem, 1);
		if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
			key_schedule_gc_links();
		up_write(&key->sem);
	}
}
EXPORT_SYMBOL(key_invalidate);

/**
 * generic_key_instantiate - Simple instantiation of a key from preparsed data
 * @key: The key to be instantiated
 * @prep: The preparsed data to load.
 *
 * Instantiate a key from preparsed data.  We assume we can just copy the data
 * in directly and clear the old pointers.
 *
 * This can be pointed to directly by the key type instantiate op pointer.
 */
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	int ret;

	pr_devel("==>%s()\n", __func__);

	ret = key_payload_reserve(key, prep->quotalen);
	if (ret == 0) {
		/* take ownership of the preparsed payload: move the four
		 * data words across and NULL the source so free_preparse
		 * won't release them.  data[0] goes via
		 * rcu_assign_keypointer() so concurrent RCU readers see a
		 * fully-initialised payload. */
		rcu_assign_keypointer(key, prep->payload.data[0]);
		key->payload.data[1] = prep->payload.data[1];
		key->payload.data[2] = prep->payload.data[2];
		key->payload.data[3] = prep->payload.data[3];
		prep->payload.data[0] = NULL;
		prep->payload.data[1] = NULL;
		prep->payload.data[2] = NULL;
		prep->payload.data[3] = NULL;
	}
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);

/**
 * register_key_type - Register a type of key.
 * @ktype: The new key type.
 *
 * Register a new key type.
 *
 * Returns 0 on success or -EEXIST if a type of this name already exists.
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	/* reset the lockdep class key used for this type's key sems */
	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);

	pr_notice("Key type %s registered\n", ktype->name);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;
}
EXPORT_SYMBOL(register_key_type);

/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type and mark all the extant keys of this type as dead.
 * Those keys of this type are then destroyed to get rid of their payloads and
 * they and their links will be garbage collected as soon as possible.
 */
void unregister_key_type(struct key_type *ktype)
{
	down_write(&key_types_sem);
	list_del_init(&ktype->link);
	/* downgrade: block new registrations of this name until the gc pass
	 * completes, but allow concurrent readers back in */
	downgrade_write(&key_types_sem);
	key_gc_keytype(ktype);
	pr_notice("Key type %s unregistered\n", ktype->name);
	up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);

/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types - done directly, before any concurrency,
	 * rather than via register_key_type() */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);
	list_add_tail(&key_type_logon.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}
/* Basic authentication token and access key management * * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poison.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> #include "internal.h" struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); struct rb_root key_user_tree; /* tree of quota records indexed by UID */ DEFINE_SPINLOCK(key_user_lock); unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */ unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */ unsigned int key_quota_maxkeys = 200; /* general key count quota */ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); /* We serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { printk("__key_check: key %p {%08x} should be {%08x}\n", key, key->magic, KEY_DEBUG_MAGIC); BUG(); } #endif /* * Get the key quota record for a user, allocating a new record if one doesn't * already exist. 
 */
struct key_user *key_user_lookup(kuid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent, **p;

try_again:
	parent = NULL;
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid_lt(uid, user->uid))
			p = &(*p)->rb_left;
		else if (uid_gt(uid, user->uid))
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one - must drop the spinlock first as kmalloc may sleep */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	refcount_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	refcount_inc(&user->usage);
	spin_unlock(&key_user_lock);
	/* free the unused candidate from a lost race, if any (kfree(NULL)
	 * is a no-op) */
	kfree(candidate);
out:
	return user;
}

/*
 * Dispose of a user structure - unlinks it from the quota tree and frees it
 * when the last reference goes away.
 */
void key_user_put(struct key_user *user)
{
	if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}
}

/*
 * Allocate a serial number for a key.  These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);	/* serials below 3 are never handed out */

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {	/* wrapped: restart from the minimum */
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}

/**
 * key_alloc - Allocate a key of the specified type.
 * @type: The type of key to allocate.
 * @desc: The key description to allow the key to be searched out.
 * @uid: The owner of the new key.
 * @gid: The group ID for the new key's group permissions.
 * @cred: The credentials specifying UID namespace.
 * @perm: The permissions mask of the new key.
 * @flags: Flags specifying quota properties.
 * @restrict_link: Optional link restriction for new keyrings.
 *
 * Allocate a key of the specified type with the attributes given.  The key is
 * returned in an uninstantiated state and the caller needs to instantiate the
 * key before returning.
* * The restrict_link structure (if not NULL) will be freed when the * keyring is destroyed, so it must be dynamically allocated. * * The user's key count quota is updated to reflect the creation of the key and * the user's key data quota has the default for the key type reserved. The * instantiation function should amend this as necessary. If insufficient * quota is available, -EDQUOT will be returned. * * The LSM security modules can prevent a key being created, in which case * -EACCES will be returned. * * Returns a pointer to the new key if successful and an error code otherwise. * * Note that the caller needs to ensure the key type isn't uninstantiated. * Internally this can be done by locking key_types_sem. Externally, this can * be done by either never unregistering the key type, or making sure * key_alloc() calls don't race with module unloading. */ struct key *key_alloc(struct key_type *type, const char *desc, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key_restriction *restrict_link) { struct key_user *user = NULL; struct key *key; size_t desclen, quotalen; int ret; key = ERR_PTR(-EINVAL); if (!desc || !*desc) goto error; if (type->vet_description) { ret = type->vet_description(desc); if (ret < 0) { key = ERR_PTR(ret); goto error; } } desclen = strlen(desc); quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { if (user->qnkeys + 1 >= maxkeys || user->qnbytes + quotalen >= maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } user->qnkeys++; user->qnbytes += quotalen; spin_unlock(&user->lock); } /* allocate and initialise the key and its description */ key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; key->index_key.desc_len = desclen; key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->index_key.description) goto no_memory_3; refcount_set(&key->usage, 1); init_rwsem(&key->sem); lockdep_set_class(&key->sem, &type->lock_class); key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; key->restrict_link = restrict_link; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_BUILT_IN) key->flags |= 1 << KEY_FLAG_BUILTIN; if (flags & KEY_ALLOC_UID_KEYRING) key->flags |= 1 << KEY_FLAG_UID_KEYRING; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif /* let the security module know about the key */ ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; /* publish the key by giving it a serial number */ atomic_inc(&user->nkeys); key_alloc_serial(key); error: return key; security_error: kfree(key->description); kmem_cache_free(key_jar, key); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); key = ERR_PTR(ret); goto error; no_memory_3: kmem_cache_free(key_jar, key); no_memory_2: if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); no_memory_1: key = ERR_PTR(-ENOMEM); goto error; no_quota: spin_unlock(&user->lock); 
key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; } EXPORT_SYMBOL(key_alloc); /** * key_payload_reserve - Adjust data quota reservation for the key's payload * @key: The key to make the reservation for. * @datalen: The amount of data payload the caller now wants. * * Adjust the amount of the owning user's key data quota that a key reserves. * If the amount is increased, then -EDQUOT may be returned if there isn't * enough free quota available. * * If successful, 0 is returned. */ int key_payload_reserve(struct key *key, size_t datalen) { int delta = (int)datalen - key->datalen; int ret = 0; key_check(key); /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); if (delta > 0 && (key->user->qnbytes + delta >= maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } else { key->user->qnbytes += delta; key->quotalen += delta; } spin_unlock(&key->user->lock); } /* change the recorded data length if that didn't generate an error */ if (ret == 0) key->datalen = datalen; return ret; } EXPORT_SYMBOL(key_payload_reserve); /* * Change the key state to being instantiated. */ static void mark_key_instantiated(struct key *key, int reject_error) { /* Commit the payload before setting the state; barrier versus * key_read_state(). */ smp_store_release(&key->state, (reject_error < 0) ? reject_error : KEY_IS_POSITIVE); } /* * Instantiate a key and link it into the target keyring atomically. Must be * called with the target keyring's semaphore writelocked. The target key's * semaphore need not be locked as instantiation is serialised by * key_construction_mutex. 
 */
static int __key_instantiate_and_link(struct key *key,
				      struct key_preparsed_payload *prep,
				      struct key *keyring,
				      struct key *authkey,
				      struct assoc_array_edit **_edit)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* key_construction_mutex serialises all instantiation, so reading
	 * key->state here is race-free against other instantiators */
	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice - a second caller drops through and
	 * returns the initial -EBUSY */
	if (key->state == KEY_IS_UNINSTANTIATED) {
		/* instantiate the key: the type's op parses/stores the
		 * payload prepared in *prep */
		ret = key->type->instantiate(key, prep);

		if (ret == 0) {
			/* mark the key as being instantiated - this
			 * publishes key->state with a release barrier so
			 * lockless readers see the payload first */
			atomic_inc(&key->user->nikeys);
			mark_key_instantiated(key, 0);

			/* someone may be sleeping in wait_on_bit() for the
			 * construction to finish - remember to wake them */
			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring, using the
			 * edit script the caller began with
			 * __key_link_begin() */
			if (keyring) {
				/* a KEEP keyring confers keep-ness on its
				 * new members */
				if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
					set_bit(KEY_FLAG_KEEP, &key->flags);

				__key_link(key, _edit);
			}

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);

			/* honour any expiry time set by ->preparse() */
			if (prep->expiry != TIME_T_MAX) {
				key->expiry = prep->expiry;
				key_schedule_gc(prep->expiry + key_gc_delay);
			}
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed - done after
	 * dropping the mutex so waiters don't immediately block on it */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the keyring.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided data
 * and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
*/ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { struct key_preparsed_payload prep; struct assoc_array_edit *edit; int ret; memset(&prep, 0, sizeof(prep)); prep.data = data; prep.datalen = datalen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } if (keyring) { ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error; if (keyring->restrict_link && keyring->restrict_link->check) { struct key_restriction *keyres = keyring->restrict_link; ret = keyres->check(keyring, key->type, &prep.payload, keyres->key); if (ret < 0) goto error_link_end; } } ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); error_link_end: if (keyring) __key_link_end(keyring, &key->index_key, edit); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_instantiate_and_link); /** * key_reject_and_link - Negatively instantiate a key and link it into the keyring. * @key: The key to instantiate. * @timeout: The timeout on the negative key. * @error: The error to return when the key is hit. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Negatively instantiate a key that's in the uninstantiated state and, if * successful, set its timeout and stored error and link it in to the * destination keyring if one is supplied. The key and any links to the key * will be automatically garbage collected after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the stored error code (typically ENOKEY) until the negative * key expires. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. 
If the key was already instantiated, * -EBUSY will be returned. */ int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey) { struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) { if (keyring->restrict_link) return -EPERM; link_ret = __key_link_begin(keyring, &key->index_key, &edit); } mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (key->state == KEY_IS_UNINSTANTIATED) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); mark_key_instantiated(key, -error); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring && link_ret == 0) __key_link(key, &edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring && link_ret == 0) __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; } EXPORT_SYMBOL(key_reject_and_link); /** * key_put - Discard a reference to a key. * @key: The key to discard a reference from. * * Discard a reference to a key, and when all the references are gone, we * schedule the cleanup task to come and pull it out of the tree in process * context at some later time. */ void key_put(struct key *key) { if (key) { key_check(key); if (refcount_dec_and_test(&key->usage)) schedule_work(&key_gc_work); } } EXPORT_SYMBOL(key_put); /* * Find a key by its serial number. 
*/ struct key *key_lookup(key_serial_t id) { struct rb_node *n; struct key *key; spin_lock(&key_serial_lock); /* search the tree for the specified key */ n = key_serial_tree.rb_node; while (n) { key = rb_entry(n, struct key, serial_node); if (id < key->serial) n = n->rb_left; else if (id > key->serial) n = n->rb_right; else goto found; } not_found: key = ERR_PTR(-ENOKEY); goto error; found: /* A key is allowed to be looked up only if someone still owns a * reference to it - otherwise it's awaiting the gc. */ if (!refcount_inc_not_zero(&key->usage)) goto not_found; error: spin_unlock(&key_serial_lock); return key; } /* * Find and lock the specified key type against removal. * * We return with the sem read-locked if successful. If the type wasn't * available -ENOKEY is returned instead. */ struct key_type *key_type_lookup(const char *type) { struct key_type *ktype; down_read(&key_types_sem); /* look up the key type to see if it's one of the registered kernel * types */ list_for_each_entry(ktype, &key_types_list, link) { if (strcmp(ktype->name, type) == 0) goto found_kernel_type; } up_read(&key_types_sem); ktype = ERR_PTR(-ENOKEY); found_kernel_type: return ktype; } void key_set_timeout(struct key *key, unsigned timeout) { struct timespec now; time_t expiry = 0; /* make the changes with the locks held to prevent races */ down_write(&key->sem); if (timeout > 0) { now = current_kernel_time(); expiry = now.tv_sec + timeout; } key->expiry = expiry; key_schedule_gc(key->expiry + key_gc_delay); up_write(&key->sem); } EXPORT_SYMBOL_GPL(key_set_timeout); /* * Unlock a key type locked by key_type_lookup(). */ void key_type_put(struct key_type *ktype) { up_read(&key_types_sem); } /* * Attempt to update an existing key. * * The key is given to us with an incremented refcount that we need to discard * if we get an error. 
*/ static inline key_ref_t __key_update(key_ref_t key_ref, struct key_preparsed_payload *prep) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; ret = -EEXIST; if (!key->type->update) goto error; down_write(&key->sem); ret = key->type->update(key, prep); if (ret == 0) /* Updating a negative key positively instantiates it */ mark_key_instantiated(key, 0); up_write(&key->sem); if (ret < 0) goto error; out: return key_ref; error: key_put(key); key_ref = ERR_PTR(ret); goto out; } /** * key_create_or_update - Update or create and instantiate a key. * @keyring_ref: A pointer to the destination keyring with possession flag. * @type: The type of key. * @description: The searchable description for the key. * @payload: The data to use to instantiate or update the key. * @plen: The length of @payload. * @perm: The permissions mask for a new key. * @flags: The quota flags for a new key. * * Search the destination keyring for a key of the same description and if one * is found, update it, otherwise create and instantiate a new one and create a * link to it from that keyring. * * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be * concocted. * * Returns a pointer to the new key if successful, -ENODEV if the key type * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the * caller isn't permitted to modify the keyring or the LSM did not permit * creation of the key. * * On success, the possession flag from the keyring ref will be tacked on to * the key ref before it is returned. 
*/ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; struct key_restriction *restrict_link = NULL; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-EPERM); if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION)) restrict_link = keyring->restrict_link; key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (restrict_link && restrict_link->check) { ret = restrict_link->check(keyring, index_key.type, &prep.payload, restrict_link->key); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { 
key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags, NULL); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; } EXPORT_SYMBOL(key_create_or_update); /** * key_update - Update a key's contents. * @key_ref: The pointer (plus possession flag) to the key. * @payload: The data to be used to update the key. * @plen: The length of @payload. * * Attempt to update the contents of a key with the given payload data. The * caller must be granted Write permission on the key. 
Negative keys can be * instantiated by this method. * * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key * type does not support updating. The key type may return other errors. */ int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) return ret; /* attempt to update it if supported */ if (!key->type->update) return -EOPNOTSUPP; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* Updating a negative key positively instantiates it */ mark_key_instantiated(key, 0); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_update); /** * key_revoke - Revoke a key. * @key: The key to be revoked. * * Mark a key as being revoked and ask the type to free up its resources. The * revocation timeout is set and the key and all its links will be * automatically garbage collected after key_gc_delay amount of time if they * are not manually dealt with first. 
*/ void key_revoke(struct key *key) { struct timespec now; time_t time; key_check(key); /* make sure no one's trying to change or use the key when we mark it * - we tell lockdep that we might nest because we might be revoking an * authorisation key whilst holding the sem on a key we've just * instantiated */ down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) && key->type->revoke) key->type->revoke(key); /* set the death time to no more than the expiry time */ now = current_kernel_time(); time = now.tv_sec; if (key->revoked_at == 0 || key->revoked_at > time) { key->revoked_at = time; key_schedule_gc(key->revoked_at + key_gc_delay); } up_write(&key->sem); } EXPORT_SYMBOL(key_revoke); /** * key_invalidate - Invalidate a key. * @key: The key to be invalidated. * * Mark a key as being invalidated and have it cleaned up immediately. The key * is ignored by all searches and other operations from this point. */ void key_invalidate(struct key *key) { kenter("%d", key_serial(key)); key_check(key); if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) key_schedule_gc_links(); up_write(&key->sem); } } EXPORT_SYMBOL(key_invalidate); /** * generic_key_instantiate - Simple instantiation of a key from preparsed data * @key: The key to be instantiated * @prep: The preparsed data to load. * * Instantiate a key from preparsed data. We assume we can just copy the data * in directly and clear the old pointers. * * This can be pointed to directly by the key type instantiate op pointer. 
*/ int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { int ret; pr_devel("==>%s()\n", __func__); ret = key_payload_reserve(key, prep->quotalen); if (ret == 0) { rcu_assign_keypointer(key, prep->payload.data[0]); key->payload.data[1] = prep->payload.data[1]; key->payload.data[2] = prep->payload.data[2]; key->payload.data[3] = prep->payload.data[3]; prep->payload.data[0] = NULL; prep->payload.data[1] = NULL; prep->payload.data[2] = NULL; prep->payload.data[3] = NULL; } pr_devel("<==%s() = %d\n", __func__, ret); return ret; } EXPORT_SYMBOL(generic_key_instantiate); /** * register_key_type - Register a type of key. * @ktype: The new key type. * * Register a new key type. * * Returns 0 on success or -EEXIST if a type of this name already exists. */ int register_key_type(struct key_type *ktype) { struct key_type *p; int ret; memset(&ktype->lock_class, 0, sizeof(ktype->lock_class)); ret = -EEXIST; down_write(&key_types_sem); /* disallow key types with the same name */ list_for_each_entry(p, &key_types_list, link) { if (strcmp(p->name, ktype->name) == 0) goto out; } /* store the type */ list_add(&ktype->link, &key_types_list); pr_notice("Key type %s registered\n", ktype->name); ret = 0; out: up_write(&key_types_sem); return ret; } EXPORT_SYMBOL(register_key_type); /** * unregister_key_type - Unregister a type of key. * @ktype: The key type. * * Unregister a key type and mark all the extant keys of this type as dead. * Those keys of this type are then destroyed to get rid of their payloads and * they and their links will be garbage collected as soon as possible. */ void unregister_key_type(struct key_type *ktype) { down_write(&key_types_sem); list_del_init(&ktype->link); downgrade_write(&key_types_sem); key_gc_keytype(ktype); pr_notice("Key type %s unregistered\n", ktype->name); up_read(&key_types_sem); } EXPORT_SYMBOL(unregister_key_type); /* * Initialise the key management state. 
*/ void __init key_init(void) { /* allocate a slab in which we can store keys */ key_jar = kmem_cache_create("key_jar", sizeof(struct key), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); /* add the special key types */ list_add_tail(&key_type_keyring.link, &key_types_list); list_add_tail(&key_type_dead.link, &key_types_list); list_add_tail(&key_type_user.link, &key_types_list); list_add_tail(&key_type_logon.link, &key_types_list); /* record the root user tracking */ rb_link_node(&root_key_user.node, NULL, &key_user_tree.rb_node); rb_insert_color(&root_key_user.node, &key_user_tree); }
int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) return ret; /* attempt to update it if supported */ if (!key->type->update) return -EOPNOTSUPP; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; }
int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) return ret; /* attempt to update it if supported */ if (!key->type->update) return -EOPNOTSUPP; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* Updating a negative key positively instantiates it */ mark_key_instantiated(key, 0); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; }
{'added': [(404, '/*'), (405, ' * Change the key state to being instantiated.'), (406, ' */'), (407, 'static void mark_key_instantiated(struct key *key, int reject_error)'), (408, '{'), (409, '\t/* Commit the payload before setting the state; barrier versus'), (410, '\t * key_read_state().'), (411, '\t */'), (412, '\tsmp_store_release(&key->state,'), (413, '\t\t\t (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);'), (414, '}'), (415, ''), (439, '\tif (key->state == KEY_IS_UNINSTANTIATED) {'), (446, '\t\t\tmark_key_instantiated(key, 0);'), (592, '\tif (key->state == KEY_IS_UNINSTANTIATED) {'), (595, '\t\tmark_key_instantiated(key, -error);'), (764, '\t\t/* Updating a negative key positively instantiates it */'), (765, '\t\tmark_key_instantiated(key, 0);'), (998, '\t\t/* Updating a negative key positively instantiates it */'), (999, '\t\tmark_key_instantiated(key, 0);')], 'deleted': [(427, '\tif (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {'), (434, '\t\t\tset_bit(KEY_FLAG_INSTANTIATED, &key->flags);'), (580, '\tif (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {'), (583, '\t\tkey->reject_error = -error;'), (584, '\t\tsmp_wmb();'), (585, '\t\tset_bit(KEY_FLAG_NEGATIVE, &key->flags);'), (586, '\t\tset_bit(KEY_FLAG_INSTANTIATED, &key->flags);'), (755, '\t\t/* updating a negative key instantiates it */'), (756, '\t\tclear_bit(KEY_FLAG_NEGATIVE, &key->flags);'), (989, '\t\t/* updating a negative key instantiates it */'), (990, '\t\tclear_bit(KEY_FLAG_NEGATIVE, &key->flags);')]}
20
11
703
4,290
31
209
7
https://github.com/torvalds/linux
CVE-2017-15951
CWE-20
2,688
ndpi_main.c
C
ndpi_reset_packet_line_info
/* * ndpi_main.c * * Copyright (C) 2011-20 - ntop.org * * This file is part of nDPI, an open source deep packet inspection * library based on the OpenDPI and PACE technology by ipoque GmbH * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdlib.h> #include <errno.h> #include <sys/types.h> #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_UNKNOWN #include "ndpi_config.h" #include "ndpi_api.h" #include "ahocorasick.h" #include "libcache.h" #include <time.h> #ifndef WIN32 #include <unistd.h> #endif #if defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ #include <sys/endian.h> #endif #include "ndpi_content_match.c.inc" #include "third_party/include/ndpi_patricia.h" #include "third_party/include/ht_hash.h" #include "third_party/include/ndpi_md5.h" /* stun.c */ extern u_int32_t get_stun_lru_key(struct ndpi_flow_struct *flow, u_int8_t rev); static int _ndpi_debug_callbacks = 0; /* #define MATCH_DEBUG 1 */ /* ****************************************** */ static void *(*_ndpi_flow_malloc)(size_t size); static void (*_ndpi_flow_free)(void *ptr); static void *(*_ndpi_malloc)(size_t size); static void (*_ndpi_free)(void *ptr); /* ****************************************** */ /* Forward */ static void addDefaultPort(struct ndpi_detection_module_struct *ndpi_str, ndpi_port_range *range, ndpi_proto_defaults_t *def, u_int8_t customUserProto, ndpi_default_ports_tree_node_t **root, const char 
*_func, int _line); static int removeDefaultPort(ndpi_port_range *range, ndpi_proto_defaults_t *def, ndpi_default_ports_tree_node_t **root); /* ****************************************** */ static inline uint8_t flow_is_proto(struct ndpi_flow_struct *flow, u_int16_t p) { return((flow->detected_protocol_stack[0] == p) || (flow->detected_protocol_stack[1] == p)); } /* ****************************************** */ void *ndpi_malloc(size_t size) { return(_ndpi_malloc ? _ndpi_malloc(size) : malloc(size)); } void *ndpi_flow_malloc(size_t size) { return(_ndpi_flow_malloc ? _ndpi_flow_malloc(size) : ndpi_malloc(size)); } /* ****************************************** */ void *ndpi_calloc(unsigned long count, size_t size) { size_t len = count * size; void *p = ndpi_malloc(len); if(p) memset(p, 0, len); return(p); } /* ****************************************** */ void ndpi_free(void *ptr) { if(_ndpi_free) _ndpi_free(ptr); else free(ptr); } /* ****************************************** */ void ndpi_flow_free(void *ptr) { if(_ndpi_flow_free) _ndpi_flow_free(ptr); else ndpi_free_flow((struct ndpi_flow_struct *) ptr); } /* ****************************************** */ void *ndpi_realloc(void *ptr, size_t old_size, size_t new_size) { void *ret = ndpi_malloc(new_size); if(!ret) return(ret); else { memcpy(ret, ptr, old_size); ndpi_free(ptr); return(ret); } } /* ****************************************** */ char *ndpi_strdup(const char *s) { if(s == NULL ){ return NULL; } int len = strlen(s); char *m = ndpi_malloc(len + 1); if(m) { memcpy(m, s, len); m[len] = '\0'; } return(m); } /* *********************************************************************************** */ /* Opaque structure defined here */ struct ndpi_ptree { patricia_tree_t *v4; patricia_tree_t *v6; }; /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_struct(void) { return(sizeof(struct ndpi_flow_struct)); } /* 
*********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_id_struct(void) { return(sizeof(struct ndpi_id_struct)); } /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_tcp_struct(void) { return(sizeof(struct ndpi_flow_tcp_struct)); } /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_udp_struct(void) { return(sizeof(struct ndpi_flow_udp_struct)); } /* *********************************************************************************** */ char *ndpi_get_proto_by_id(struct ndpi_detection_module_struct *ndpi_str, u_int id) { return((id >= ndpi_str->ndpi_num_supported_protocols) ? NULL : ndpi_str->proto_defaults[id].protoName); } /* *********************************************************************************** */ u_int16_t ndpi_get_proto_by_name(struct ndpi_detection_module_struct *ndpi_str, const char *name) { u_int16_t i, num = ndpi_get_num_supported_protocols(ndpi_str); for (i = 0; i < num; i++) if(strcasecmp(ndpi_get_proto_by_id(ndpi_str, i), name) == 0) return(i); return(NDPI_PROTOCOL_UNKNOWN); } /* ************************************************************************************* */ #ifdef CODE_UNUSED ndpi_port_range *ndpi_build_default_ports_range(ndpi_port_range *ports, u_int16_t portA_low, u_int16_t portA_high, u_int16_t portB_low, u_int16_t portB_high, u_int16_t portC_low, u_int16_t portC_high, u_int16_t portD_low, u_int16_t portD_high, u_int16_t portE_low, u_int16_t portE_high) { int i = 0; ports[i].port_low = portA_low, ports[i].port_high = portA_high; i++; ports[i].port_low = portB_low, ports[i].port_high = portB_high; i++; ports[i].port_low = portC_low, ports[i].port_high = portC_high; i++; ports[i].port_low = portD_low, ports[i].port_high = portD_high; i++; ports[i].port_low = portE_low, ports[i].port_high = 
portE_high; return(ports); } #endif /* *********************************************************************************** */ ndpi_port_range *ndpi_build_default_ports(ndpi_port_range *ports, u_int16_t portA, u_int16_t portB, u_int16_t portC, u_int16_t portD, u_int16_t portE) { int i = 0; ports[i].port_low = portA, ports[i].port_high = portA; i++; ports[i].port_low = portB, ports[i].port_high = portB; i++; ports[i].port_low = portC, ports[i].port_high = portC; i++; ports[i].port_low = portD, ports[i].port_high = portD; i++; ports[i].port_low = portE, ports[i].port_high = portE; return(ports); } /* ********************************************************************************** */ void ndpi_set_proto_breed(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId, ndpi_protocol_breed_t breed) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return; else ndpi_str->proto_defaults[protoId].protoBreed = breed; } /* ********************************************************************************** */ void ndpi_set_proto_category(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId, ndpi_protocol_category_t protoCategory) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return; else ndpi_str->proto_defaults[protoId].protoCategory = protoCategory; } /* ********************************************************************************** */ /* There are some (master) protocols that are informative, meaning that it shows what is the subprotocol about, but also that the subprotocol isn't a real protocol. Example: - DNS is informative as if we see a DNS request for www.facebook.com, the returned protocol is DNS.Facebook, but Facebook isn't a real subprotocol but rather it indicates a query for Facebook and not Facebook traffic. - HTTP/SSL are NOT informative as SSL.Facebook (likely) means that this is SSL (HTTPS) traffic containg Facebook traffic. 
*/ u_int8_t ndpi_is_subprotocol_informative(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return(0); switch (protoId) { /* All dissectors that have calls to ndpi_match_host_subprotocol() */ case NDPI_PROTOCOL_DNS: return(1); break; default: return(0); } } /* ********************************************************************************** */ void ndpi_exclude_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t protocol_id, const char *_file, const char *_func, int _line) { if(protocol_id < NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES if(ndpi_str && ndpi_str->ndpi_log_level >= NDPI_LOG_DEBUG && ndpi_str->ndpi_debug_printf != NULL) { (*(ndpi_str->ndpi_debug_printf))(protocol_id, ndpi_str, NDPI_LOG_DEBUG, _file, _func, _line, "exclude %s\n", ndpi_get_proto_name(ndpi_str, protocol_id)); } #endif NDPI_ADD_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, protocol_id); } } /* ********************************************************************************** */ void ndpi_set_proto_defaults(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_breed_t breed, u_int16_t protoId, u_int8_t can_have_a_subprotocol, u_int16_t tcp_master_protoId[2], u_int16_t udp_master_protoId[2], char *protoName, ndpi_protocol_category_t protoCategory, ndpi_port_range *tcpDefPorts, ndpi_port_range *udpDefPorts) { char *name; int j; if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) { #ifdef DEBUG NDPI_LOG_ERR(ndpi_str, "[NDPI] %s/protoId=%d: INTERNAL ERROR\n", protoName, protoId); #endif return; } if(ndpi_str->proto_defaults[protoId].protoName != NULL) { #ifdef DEBUG NDPI_LOG_ERR(ndpi_str, "[NDPI] %s/protoId=%d: already initialized. 
Ignoring it\n", protoName, protoId); #endif return; } name = ndpi_strdup(protoName); if(ndpi_str->proto_defaults[protoId].protoName) ndpi_free(ndpi_str->proto_defaults[protoId].protoName); ndpi_str->proto_defaults[protoId].protoName = name, ndpi_str->proto_defaults[protoId].protoCategory = protoCategory, ndpi_str->proto_defaults[protoId].protoId = protoId, ndpi_str->proto_defaults[protoId].protoBreed = breed; ndpi_str->proto_defaults[protoId].can_have_a_subprotocol = can_have_a_subprotocol; memcpy(&ndpi_str->proto_defaults[protoId].master_tcp_protoId, tcp_master_protoId, 2 * sizeof(u_int16_t)); memcpy(&ndpi_str->proto_defaults[protoId].master_udp_protoId, udp_master_protoId, 2 * sizeof(u_int16_t)); for (j = 0; j < MAX_DEFAULT_PORTS; j++) { if(udpDefPorts[j].port_low != 0) addDefaultPort(ndpi_str, &udpDefPorts[j], &ndpi_str->proto_defaults[protoId], 0, &ndpi_str->udpRoot, __FUNCTION__, __LINE__); if(tcpDefPorts[j].port_low != 0) addDefaultPort(ndpi_str, &tcpDefPorts[j], &ndpi_str->proto_defaults[protoId], 0, &ndpi_str->tcpRoot, __FUNCTION__, __LINE__); /* No port range, just the lower port */ ndpi_str->proto_defaults[protoId].tcp_default_ports[j] = tcpDefPorts[j].port_low; ndpi_str->proto_defaults[protoId].udp_default_ports[j] = udpDefPorts[j].port_low; } } /* ******************************************************************** */ static int ndpi_default_ports_tree_node_t_cmp(const void *a, const void *b) { ndpi_default_ports_tree_node_t *fa = (ndpi_default_ports_tree_node_t *) a; ndpi_default_ports_tree_node_t *fb = (ndpi_default_ports_tree_node_t *) b; //printf("[NDPI] %s(%d, %d)\n", __FUNCTION__, fa->default_port, fb->default_port); return((fa->default_port == fb->default_port) ? 0 : ((fa->default_port < fb->default_port) ? 
-1 : 1)); } /* ******************************************************************** */ void ndpi_default_ports_tree_node_t_walker(const void *node, const ndpi_VISIT which, const int depth) { ndpi_default_ports_tree_node_t *f = *(ndpi_default_ports_tree_node_t **) node; printf("<%d>Walk on node %s (%u)\n", depth, which == ndpi_preorder ? "ndpi_preorder" : which == ndpi_postorder ? "ndpi_postorder" : which == ndpi_endorder ? "ndpi_endorder" : which == ndpi_leaf ? "ndpi_leaf" : "unknown", f->default_port); } /* ******************************************************************** */ static void addDefaultPort(struct ndpi_detection_module_struct *ndpi_str, ndpi_port_range *range, ndpi_proto_defaults_t *def, u_int8_t customUserProto, ndpi_default_ports_tree_node_t **root, const char *_func, int _line) { u_int16_t port; for (port = range->port_low; port <= range->port_high; port++) { ndpi_default_ports_tree_node_t *node = (ndpi_default_ports_tree_node_t *) ndpi_malloc(sizeof(ndpi_default_ports_tree_node_t)); ndpi_default_ports_tree_node_t *ret; if(!node) { NDPI_LOG_ERR(ndpi_str, "%s:%d not enough memory\n", _func, _line); break; } node->proto = def, node->default_port = port, node->customUserProto = customUserProto; ret = (ndpi_default_ports_tree_node_t *) ndpi_tsearch(node, (void *) root, ndpi_default_ports_tree_node_t_cmp); /* Add it to the tree */ if(ret != node) { NDPI_LOG_DBG(ndpi_str, "[NDPI] %s:%d found duplicate for port %u: overwriting it with new value\n", _func, _line, port); ret->proto = def; ndpi_free(node); } } } /* ****************************************************** */ /* NOTE This function must be called with a semaphore set, this in order to avoid changing the datastructures while using them */ static int removeDefaultPort(ndpi_port_range *range, ndpi_proto_defaults_t *def, ndpi_default_ports_tree_node_t **root) { ndpi_default_ports_tree_node_t node; u_int16_t port; for (port = range->port_low; port <= range->port_high; port++) { 
ndpi_default_ports_tree_node_t *ret; node.proto = def, node.default_port = port; ret = (ndpi_default_ports_tree_node_t *) ndpi_tdelete( &node, (void *) root, ndpi_default_ports_tree_node_t_cmp); /* Add it to the tree */ if(ret != NULL) { ndpi_free((ndpi_default_ports_tree_node_t *) ret); return(0); } } return(-1); } /* ****************************************************** */ static int ndpi_string_to_automa(struct ndpi_detection_module_struct *ndpi_str, ndpi_automa *automa, char *value, u_int16_t protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed, u_int8_t free_str_on_duplicate) { AC_PATTERN_t ac_pattern; AC_ERROR_t rc; if((value == NULL) || (protocol_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS))) { NDPI_LOG_ERR(ndpi_str, "[NDPI] protoId=%d: INTERNAL ERROR\n", protocol_id); return(-1); } if(automa->ac_automa == NULL) return(-2); ac_pattern.astring = value, ac_pattern.rep.number = protocol_id, ac_pattern.rep.category = (u_int16_t) category, ac_pattern.rep.breed = (u_int16_t) breed; #ifdef MATCH_DEBUG printf("Adding to automa [%s][protocol_id: %u][category: %u][breed: %u]\n", value, protocol_id, category, breed); #endif if(value == NULL) ac_pattern.length = 0; else ac_pattern.length = strlen(ac_pattern.astring); rc = ac_automata_add(((AC_AUTOMATA_t *) automa->ac_automa), &ac_pattern); if(rc != ACERR_DUPLICATE_PATTERN && rc != ACERR_SUCCESS) return(-2); if(rc == ACERR_DUPLICATE_PATTERN && free_str_on_duplicate) ndpi_free(value); return(0); } /* ****************************************************** */ static int ndpi_add_host_url_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *_value, int protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed) { int rv; char *value = ndpi_strdup(_value); if(!value) return(-1); #ifdef DEBUG NDPI_LOG_DBG2(ndpi_str, "[NDPI] Adding [%s][%d]\n", value, protocol_id); #endif rv = ndpi_string_to_automa(ndpi_str, &ndpi_str->host_automa, value, protocol_id, 
                             category, breed, 1);

  if(rv != 0)
    ndpi_free(value); /* Insertion failed: release our private copy */

  return(rv);
}

/* ****************************************************** */

#ifdef CODE_UNUSED
/* Register a pattern in the content automaton (currently compiled out;
   the caller keeps ownership of `value` since free_str_on_duplicate == 0). */
int ndpi_add_content_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value,
                                 int protocol_id, ndpi_protocol_category_t category,
                                 ndpi_protocol_breed_t breed) {
  return(ndpi_string_to_automa(ndpi_str, &ndpi_str->content_automa, value, protocol_id,
                               category, breed, 0));
}
#endif

/* ****************************************************** */

/* NOTE This function must be called with a semaphore set, this in order to avoid
   changing the datastructures while using them */
/* Placeholder: removal of host/URL subprotocols is not implemented yet;
   it always logs an error and returns -1. */
static int ndpi_remove_host_url_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value,
                                            int protocol_id) {
  NDPI_LOG_ERR(ndpi_str, "[NDPI] Missing implementation for proto %s/%d\n", value, protocol_id);
  return(-1);
}

/* ******************************************************************** */

/*
 * Register one entry of the built-in host match table: on the first sighting
 * of the protocol id, lazily fill in its defaults (name, id, category, breed,
 * no default ports, no subprotocol), then add the host string to the host
 * automaton via ndpi_add_host_url_subprotocol().
 */
void ndpi_init_protocol_match(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_match *match) {
  u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO};
  ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS];

  if(ndpi_str->proto_defaults[match->protocol_id].protoName == NULL) {
    /* First time we meet this protocol id: record its defaults */
    ndpi_str->proto_defaults[match->protocol_id].protoName = ndpi_strdup(match->proto_name);
    ndpi_str->proto_defaults[match->protocol_id].protoId = match->protocol_id;
    ndpi_str->proto_defaults[match->protocol_id].protoCategory = match->protocol_category;
    ndpi_str->proto_defaults[match->protocol_id].protoBreed = match->protocol_breed;

    ndpi_set_proto_defaults(ndpi_str,
                            ndpi_str->proto_defaults[match->protocol_id].protoBreed,
                            ndpi_str->proto_defaults[match->protocol_id].protoId,
                            0 /* can_have_a_subprotocol */, no_master, no_master,
                            ndpi_str->proto_defaults[match->protocol_id].protoName,
                            ndpi_str->proto_defaults[match->protocol_id].protoCategory,
                            ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */,
                            ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */);
  }

  ndpi_add_host_url_subprotocol(ndpi_str, match->string_to_match, match->protocol_id,
                                match->protocol_category, match->protocol_breed);
}

/* ******************************************************************** */

/* Self check function to be called only for testing purposes:
   exits the process if host_match[] contains the same string twice. */
void ndpi_self_check_host_match() {
  u_int32_t i, j;

  for (i = 0; host_match[i].string_to_match != NULL; i++) {
    for (j = 0; host_match[j].string_to_match != NULL; j++) {
      if((i != j) &&
         (strcmp(host_match[i].string_to_match, host_match[j].string_to_match) == 0)) {
        printf("[INTERNAL ERROR]: Duplicate string detected '%s' [id: %u, id %u]\n",
               host_match[i].string_to_match, i, j);
        printf("\nPlease fix host_match[] in ndpi_content_match.c.inc\n");
        exit(0);
      }
    }
  }
}

/* ******************************************************************** */

/*
 * Load all compile-time string tables: the host match table (which also fills
 * per-protocol defaults), the loaded custom categories, and the English
 * bigram / impossible-bigram automata (their consumers are not visible in
 * this chunk).
 */
static void init_string_based_protocols(struct ndpi_detection_module_struct *ndpi_str) {
  int i;

  for (i = 0; host_match[i].string_to_match != NULL; i++)
    ndpi_init_protocol_match(ndpi_str, &host_match[i]);

  ndpi_enable_loaded_categories(ndpi_str);

#ifdef MATCH_DEBUG
  // ac_automata_display(ndpi_str->host_automa.ac_automa, 'n');
#endif

#if 1
  for (i = 0; ndpi_en_bigrams[i] != NULL; i++)
    ndpi_string_to_automa(ndpi_str, &ndpi_str->bigrams_automa,
                          (char *) ndpi_en_bigrams[i], 1, 1, 1, 0);
#else
  for (i = 0; ndpi_en_popular_bigrams[i] != NULL; i++)
    ndpi_string_to_automa(ndpi_str, &ndpi_str->bigrams_automa,
                          (char *) ndpi_en_popular_bigrams[i], 1, 1, 1, 0);
#endif

  for (i = 0; ndpi_en_impossible_bigrams[i] != NULL; i++)
    ndpi_string_to_automa(ndpi_str, &ndpi_str->impossible_bigrams_automa,
                          (char *) ndpi_en_impossible_bigrams[i], 1, 1, 1, 0);
}

/* ******************************************************************** */

/*
 * Tune a runtime detection preference. Returns 0 on success, -1 for an
 * unknown preference. Only direction detection can be toggled here today.
 */
int ndpi_set_detection_preferences(struct ndpi_detection_module_struct *ndpi_str,
                                   ndpi_detection_preference pref, int value) {
  switch (pref) {
  case ndpi_pref_direction_detect_disable:
    /* Non-zero disables packet-direction detection */
    ndpi_str->direction_detect_disable = (u_int8_t) value;
    break;

  default:
    return(-1);
  }

  return(0);
}

/* ******************************************************************** */

/*
 * Sanity check run over the registered protocols: log (but do not abort on)
 * any slot missing a name or, except for NDPI_PROTOCOL_UNKNOWN, missing a
 * category.
 */
static void ndpi_validate_protocol_initialization(struct ndpi_detection_module_struct *ndpi_str) {
  int i;

  for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) {
    if(ndpi_str->proto_defaults[i].protoName == NULL) {
      NDPI_LOG_ERR(ndpi_str,
                   "[NDPI] INTERNAL ERROR missing protoName initialization for [protoId=%d]: recovering\n",
                   i);
    } else {
      if((i != NDPI_PROTOCOL_UNKNOWN) &&
         (ndpi_str->proto_defaults[i].protoCategory == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED)) {
        NDPI_LOG_ERR(ndpi_str,
                     "[NDPI] INTERNAL ERROR missing category [protoId=%d/%s] initialization: recovering\n",
                     i,
                     ndpi_str->proto_defaults[i].protoName ? ndpi_str->proto_defaults[i].protoName : "???");
      }
    }
  }
}

/* ******************************************************************** */

/* This function is used to map protocol name and default ports and it MUST
   be updated whenever a new protocol is added to NDPI.

   Do NOT add web services (NDPI_SERVICE_xxx) here.
*/ static void ndpi_init_protocol_defaults(struct ndpi_detection_module_struct *ndpi_str) { ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}, custom_master[2]; /* Reset all settings */ memset(ndpi_str->proto_defaults, 0, sizeof(ndpi_str->proto_defaults)); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNRATED, NDPI_PROTOCOL_UNKNOWN, 0 /* can_have_a_subprotocol */, no_master, no_master, "Unknown", NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_FTP_CONTROL, 0 /* can_have_a_subprotocol */, no_master, no_master, "FTP_CONTROL", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 21, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FTP_DATA, 0 /* can_have_a_subprotocol */, no_master, no_master, "FTP_DATA", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 20, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MAIL_POP, 0 /* can_have_a_subprotocol */, no_master, no_master, "POP3", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 110, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_POPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "POPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 995, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MAIL_SMTP, 0 /* can_have_a_subprotocol */, 
no_master, no_master, "SMTP", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 25, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_SMTPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMTPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 465, 587, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MAIL_IMAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMAP", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 143, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_IMAPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMAPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 993, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DNS, 1 /* can_have_a_subprotocol */, no_master, no_master, "DNS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 53, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 53, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IPP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IMO, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMO", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 80, 0 /* ntop */, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MDNS, 1 /* can_have_a_subprotocol */, no_master, no_master, "MDNS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5353, 5354, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "NTP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 123, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NETBIOS, 0 /* can_have_a_subprotocol */, no_master, no_master, "NetBIOS", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 139, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 137, 138, 139, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NFS, 0 /* can_have_a_subprotocol */, no_master, no_master, "NFS", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 2049, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2049, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SSDP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SSDP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BGP, 0 /* can_have_a_subprotocol */, no_master, no_master, "BGP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 179, 2605, 0, 
0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SNMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SNMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 161, 162, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_XDMCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "XDMCP", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 177, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 177, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_DANGEROUS, NDPI_PROTOCOL_SMBV1, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMBv1", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 445, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SYSLOG, 0 /* can_have_a_subprotocol */, no_master, no_master, "Syslog", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 514, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 514, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DHCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "DHCP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 67, 68, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_POSTGRES, 0 /* can_have_a_subprotocol */, no_master, no_master, "PostgreSQL", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 5432, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MYSQL, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "MySQL", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 3306, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_DIRECT_DOWNLOAD_LINK, 0 /* can_have_a_subprotocol */, no_master, no_master, "Direct_Download_Link", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_APPLEJUICE, 0 /* can_have_a_subprotocol */, no_master, no_master, "AppleJuice", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_DIRECTCONNECT, 0 /* can_have_a_subprotocol */, no_master, no_master, "DirectConnect", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NATS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Nats", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_NTOP, 0 /* can_have_a_subprotocol */, no_master, no_master, "ntop", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VMWARE, 0 /* can_have_a_subprotocol */, no_master, no_master, "VMware", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, 
ndpi_build_default_ports(ports_a, 903, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 902, 903, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_FBZERO, 0 /* can_have_a_subprotocol */, no_master, no_master, "FacebookZero", NDPI_PROTOCOL_CATEGORY_SOCIAL_NETWORK, ndpi_build_default_ports(ports_a, 443, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_KONTIKI, 0 /* can_have_a_subprotocol */, no_master, no_master, "Kontiki", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_OPENFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "OpenFT", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_FASTTRACK, 0 /* can_have_a_subprotocol */, no_master, no_master, "FastTrack", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_GNUTELLA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Gnutella", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_EDONKEY, 0 /* can_have_a_subprotocol */, no_master, no_master, "eDonkey", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_BITTORRENT, 0 /* can_have_a_subprotocol */, no_master, no_master, "BitTorrent", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 51413, 53646, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6771, 51413, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKYPE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Skype", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKYPE_CALL, 0 /* can_have_a_subprotocol */, no_master, no_master, "SkypeCall", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_TIKTOK, 0 /* can_have_a_subprotocol */, no_master, no_master, "TikTok", NDPI_PROTOCOL_CATEGORY_SOCIAL_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEREDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Teredo", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3544, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WECHAT, 0 /* can_have_a_subprotocol */, no_master, /* wechat.com */ no_master, "WeChat", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MEMCACHED, 0 /* can_have_a_subprotocol */, no_master, no_master, "Memcached", 
NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 11211, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 11211, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SMBV23, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMBv23", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 445, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MINING, 0 /* can_have_a_subprotocol */, no_master, no_master, "Mining", CUSTOM_CATEGORY_MINING, ndpi_build_default_ports(ports_a, 8333, 30303, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NEST_LOG_SINK, 0 /* can_have_a_subprotocol */, no_master, no_master, "NestLogSink", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 11095, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MODBUS, 1 /* no subprotocol */, no_master, no_master, "Modbus", NDPI_PROTOCOL_CATEGORY_NETWORK, /* Perhaps IoT in the future */ ndpi_build_default_ports(ports_a, 502, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP_CALL, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsAppCall", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DATASAVER, 0 /* can_have_a_subprotocol */, no_master, no_master, "DataSaver", NDPI_PROTOCOL_CATEGORY_WEB /* dummy */, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) 
/* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SIGNAL, 0 /* can_have_a_subprotocol */, no_master, /* https://signal.org */ no_master, "Signal", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DOH_DOT, 0 /* can_have_a_subprotocol */, no_master, no_master, "DoH_DoT", NDPI_PROTOCOL_CATEGORY_NETWORK /* dummy */, ndpi_build_default_ports(ports_a, 853, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_205, 0 /* can_have_a_subprotocol */, no_master, no_master, "FREE_205", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WIREGUARD, 0 /* can_have_a_subprotocol */, no_master, no_master, "WireGuard", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 51820, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPSTREAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPStream", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_XBOX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Xbox", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 3074, 3076, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3074, 3076, 500, 3544, 4500) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PLAYSTATION, 0 /* can_have_a_subprotocol */, no_master, no_master, "Playstation", 
NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 1935, 3478, 3479, 3480, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3478, 3479, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_QQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "QQ", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_RTSP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTSP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 554, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 554, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ICECAST, 0 /* can_have_a_subprotocol */, no_master, no_master, "IceCast", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPLIVE, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPLive", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPSTREAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPStream", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ZATTOO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Zattoo", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SHOUTCAST, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "ShoutCast", NDPI_PROTOCOL_CATEGORY_MUSIC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SOPCAST, 0 /* can_have_a_subprotocol */, no_master, no_master, "Sopcast", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_58, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free58", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_TVUPLAYER, 0 /* can_have_a_subprotocol */, no_master, no_master, "TVUplayer", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_DOWNLOAD, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Download", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_QQLIVE, 0 /* can_have_a_subprotocol */, no_master, no_master, "QQLive", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_THUNDER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Thunder", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SOULSEEK, 0 /* can_have_a_subprotocol */, no_master, no_master, "Soulseek", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_PS_VUE, 0 /* can_have_a_subprotocol */, no_master, no_master, "PS_VUE", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_IRC, 0 /* can_have_a_subprotocol */, no_master, no_master, "IRC", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 194, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 194, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AYIYA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Ayiya", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5072, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UNENCRYPTED_JABBER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Unencrypted_Jabber", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FREE_69, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free69", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_71, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free71", NDPI_PROTOCOL_CATEGORY_GAME, 
ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_VRRP, 0 /* can_have_a_subprotocol */, no_master, no_master, "VRRP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_STEAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Steam", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_HALFLIFE2, 0 /* can_have_a_subprotocol */, no_master, no_master, "HalfLife2", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WORLDOFWARCRAFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "WorldOfWarcraft", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_HOTSPOT_SHIELD, 0 /* can_have_a_subprotocol */, no_master, no_master, "HotspotShield", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_TELNET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Telnet", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 23, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); custom_master[0] = NDPI_PROTOCOL_SIP, custom_master[1] = 
NDPI_PROTOCOL_H323; ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_STUN, 0 /* can_have_a_subprotocol */, no_master, custom_master, "STUN", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3478, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_IP_IPSEC, 0 /* can_have_a_subprotocol */, no_master, no_master, "IPsec", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 500, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 500, 4500, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_GRE, 0 /* can_have_a_subprotocol */, no_master, no_master, "GRE", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_ICMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "ICMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_IGMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IGMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_EGP, 0 /* can_have_a_subprotocol */, no_master, no_master, "EGP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_SCTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SCTP", NDPI_PROTOCOL_CATEGORY_NETWORK, 
ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_OSPF, 0 /* can_have_a_subprotocol */, no_master, no_master, "OSPF", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 2604, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_IP_IN_IP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IP_in_IP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RDP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RDP", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 3389, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3389, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VNC, 0 /* can_have_a_subprotocol */, no_master, no_master, "VNC", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5900, 5901, 5800, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FREE90, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free90", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5900, 5901, 5800, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZOOM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Zoom", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP_FILES, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsAppFiles", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsApp", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_TLS, 1 /* can_have_a_subprotocol */, no_master, no_master, "TLS", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 443, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SSH, 0 /* can_have_a_subprotocol */, no_master, no_master, "SSH", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 22, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_USENET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Usenet", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MGCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "MGCP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, 
ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IAX, 0 /* can_have_a_subprotocol */, no_master, no_master, "IAX", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 4569, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 4569, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AFP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AFP", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 548, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 548, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_HULU, 0 /* can_have_a_subprotocol */, no_master, no_master, "Hulu", NDPI_PROTOCOL_CATEGORY_STREAMING, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CHECKMK, 0 /* can_have_a_subprotocol */, no_master, no_master, "CHECKMK", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 6556, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_STEALTHNET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Stealthnet", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_AIMINI, 0 /* can_have_a_subprotocol */, no_master, no_master, "Aimini", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SIP, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "SIP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 5060, 5061, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5060, 5061, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TRUPHONE, 0 /* can_have_a_subprotocol */, no_master, no_master, "TruPhone", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_ICMPV6, 0 /* can_have_a_subprotocol */, no_master, no_master, "ICMPV6", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DHCPV6, 0 /* can_have_a_subprotocol */, no_master, no_master, "DHCPV6", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ARMAGETRON, 0 /* can_have_a_subprotocol */, no_master, no_master, "Armagetron", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_CROSSFIRE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Crossfire", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DOFUS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Dofus", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FIESTA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Fiesta", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FLORENSIA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Florensia", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_GUILDWARS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Guildwars", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_ACTIVESYNC, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_ActiveSync", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_KERBEROS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Kerberos", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 88, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 88, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LDAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "LDAP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 389, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 389, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_MAPLESTORY, 0 /* can_have_a_subprotocol */, no_master, no_master, "MapleStory", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP 
*/, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MSSQL_TDS, 0 /* can_have_a_subprotocol */, no_master, no_master, "MsSQL-TDS", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 1433, 1434, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_PPTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPTP", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WARCRAFT3, 0 /* can_have_a_subprotocol */, no_master, no_master, "Warcraft3", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WORLD_OF_KUNG_FU, 0 /* can_have_a_subprotocol */, no_master, no_master, "WorldOfKungFu", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DCERPC, 0 /* can_have_a_subprotocol */, no_master, no_master, "DCE_RPC", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 135, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NETFLOW, 0 /* can_have_a_subprotocol */, no_master, no_master, "NetFlow", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2055, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SFLOW, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "sFlow", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6343, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_CONNECT, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Connect", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_PROXY, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Proxy", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 8080, 3128, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CITRIX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Citrix", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1494, 2598, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WEBEX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Webex", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RADIUS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Radius", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1812, 1813, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1812, 1813, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEAMVIEWER, 0 /* can_have_a_subprotocol */, no_master, no_master, "TeamViewer", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5938, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5938, 0, 
0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LOTUS_NOTES, 0 /* can_have_a_subprotocol */, no_master, no_master, "LotusNotes", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, ndpi_build_default_ports(ports_a, 1352, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SAP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 3201, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_GTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "GTP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2152, 2123, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UPNP, 0 /* can_have_a_subprotocol */, no_master, no_master, "UPnP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1780, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1900, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TELEGRAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Telegram", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_QUIC, 1 /* can_have_a_subprotocol */, no_master, no_master, "QUIC", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 443, 80, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DIAMETER, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "Diameter", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 3868, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_APPLE_PUSH, 0 /* can_have_a_subprotocol */, no_master, no_master, "ApplePush", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DROPBOX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Dropbox", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 17500, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SPOTIFY, 0 /* can_have_a_subprotocol */, no_master, no_master, "Spotify", NDPI_PROTOCOL_CATEGORY_MUSIC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MESSENGER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Messenger", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LISP, 0 /* can_have_a_subprotocol */, no_master, no_master, "LISP", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 4342, 4341, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_EAQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "EAQ", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6000, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_KAKAOTALK_VOICE, 0 /* can_have_a_subprotocol */, no_master, no_master, "KakaoTalk_Voice", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_MPEGTS, 0 /* can_have_a_subprotocol */, no_master, no_master, "MPEG_TS", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); /* http://en.wikipedia.org/wiki/Link-local_Multicast_Name_Resolution */ ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LLMNR, 0 /* can_have_a_subprotocol */, no_master, no_master, "LLMNR", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 5355, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5355, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */ ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_REMOTE_SCAN, 0 /* can_have_a_subprotocol */, no_master, no_master, "RemoteScan", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 6077, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6078, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_H323, 0 /* can_have_a_subprotocol */, no_master, no_master, "H323", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 1719, 1720, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1719, 1720, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_OPENVPN, 0 /* can_have_a_subprotocol */, no_master, no_master, "OpenVPN", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 1194, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1194, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NOE, 0 /* can_have_a_subprotocol */, no_master, no_master, "NOE", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CISCOVPN, 0 /* can_have_a_subprotocol */, no_master, no_master, "CiscoVPN", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 10000, 8008, 8009, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 10000, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEAMSPEAK, 0 /* can_have_a_subprotocol */, no_master, no_master, "TeamSpeak", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKINNY, 0 /* can_have_a_subprotocol */, no_master, no_master, "CiscoSkinny", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 2000, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RTCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTCP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RSYNC, 0 /* can_have_a_subprotocol */, no_master, no_master, "RSYNC", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 873, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ORACLE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Oracle", NDPI_PROTOCOL_CATEGORY_DATABASE, 
ndpi_build_default_ports(ports_a, 1521, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CORBA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Corba", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UBUNTUONE, 0 /* can_have_a_subprotocol */, no_master, no_master, "UbuntuONE", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHOIS_DAS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Whois-DAS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 43, 4343, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_COLLECTD, 0 /* can_have_a_subprotocol */, no_master, no_master, "Collectd", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 25826, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SOCKS, 0 /* can_have_a_subprotocol */, no_master, no_master, "SOCKS", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 1080, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 1080, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TFTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "TFTP", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 69, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, 
NDPI_PROTOCOL_RTMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTMP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 1935, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PANDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Pando_Media_Booster", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MEGACO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Megaco", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 2944, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_REDIS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Redis", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 6379, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZMQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "ZeroMQ", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_VHUA, 0 /* can_have_a_subprotocol */, no_master, no_master, "VHUA", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 58267, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_STARCRAFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "Starcraft", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 1119, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 1119, 0, 0, 
0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_UBNTAC2, 0 /* can_have_a_subprotocol */, no_master, no_master, "UBNTAC2", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 10001, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VIBER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Viber", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 7985, 5242, 5243, 4244, 0), /* TCP */ ndpi_build_default_ports(ports_b, 7985, 7987, 5242, 5243, 4244)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_COAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "COAP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 5683, 5684, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MQTT, 0 /* can_have_a_subprotocol */, no_master, no_master, "MQTT", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 1883, 8883, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SOMEIP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SOMEIP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 30491, 30501, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 30491, 30501, 30490, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RX, 0 /* can_have_a_subprotocol */, no_master, no_master, "RX", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_GIT, 0 /* can_have_a_subprotocol */, no_master, no_master, "Git", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, 
ndpi_build_default_ports(ports_a, 9418, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DRDA, 0 /* can_have_a_subprotocol */, no_master, no_master, "DRDA", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HANGOUT_DUO, 0 /* can_have_a_subprotocol */, no_master, no_master, "GoogleHangoutDuo", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BJNP, 0 /* can_have_a_subprotocol */, no_master, no_master, "BJNP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 8612, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SMPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMPP", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_OOKLA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Ookla", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AMQP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AMQP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_DNSCRYPT, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "DNScrypt", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TINC, 0 /* can_have_a_subprotocol */, no_master, no_master, "TINC", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 655, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 655, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_FIX, 0 /* can_have_a_subprotocol */, no_master, no_master, "FIX", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_NINTENDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Nintendo", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_CSGO, 0 /* can_have_a_subprotocol */, no_master, no_master, "CSGO", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AJP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AJP", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 8009, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TARGUS_GETDATA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Targus Dataspeed", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 5001, 5201, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5001, 5201, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AMAZON_VIDEO, 0 /* can_have_a_subprotocol */, no_master, no_master, "AmazonVideo", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DNP3, 1 /* no subprotocol */, no_master, no_master, "DNP3", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 20000, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IEC60870, 1 /* no subprotocol */, no_master, no_master, "IEC60870", NDPI_PROTOCOL_CATEGORY_NETWORK, /* Perhaps IoT in the future */ ndpi_build_default_ports(ports_a, 2404, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BLOOMBERG, 1 /* no subprotocol */, no_master, no_master, "Bloomberg", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CAPWAP, 1 /* no subprotocol */, no_master, no_master, "CAPWAP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5246, 5247, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZABBIX, 1 /* no subprotocol */, no_master, no_master, "Zabbix", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 10050, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_S7COMM, 1 /* no subprotocol */, no_master, no_master, "s7comm", NDPI_PROTOCOL_CATEGORY_NETWORK, 
ndpi_build_default_ports(ports_a, 102, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MSTEAMS, 1 /* no subprotocol */, no_master, no_master, "Teams", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WEBSOCKET, 1 /* can_have_a_subprotocol */, no_master, no_master, "WebSocket", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ANYDESK, 1 /* no subprotocol */, no_master, no_master, "AnyDesk", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_main.c" #endif /* calling function for host and content matched protocols */ init_string_based_protocols(ndpi_str); ndpi_validate_protocol_initialization(ndpi_str); } /* ****************************************************** */ #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_protocols.c" #endif /* ****************************************************** */ static int ac_match_handler(AC_MATCH_t *m, AC_TEXT_t *txt, AC_REP_t *match) { int min_len = (txt->length < m->patterns->length) ? txt->length : m->patterns->length; char buf[64] = {'\0'}, *whatfound; int min_buf_len = (txt->length > 63 /* sizeof(buf)-1 */) ? 
63 : txt->length; u_int buf_len = strlen(buf); strncpy(buf, txt->astring, min_buf_len); buf[min_buf_len] = '\0'; #ifdef MATCH_DEBUG printf("Searching [to search: %s/%u][pattern: %s/%u] [len: %d][match_num: %u][%s]\n", buf, (unigned int) txt->length, m->patterns->astring, (unigned int) m->patterns->length, min_len, m->match_num, m->patterns->astring); #endif whatfound = strstr(buf, m->patterns->astring); #ifdef MATCH_DEBUG printf("[NDPI] %s() [searching=%s][pattern=%s][%s][%c]\n", __FUNCTION__, buf, m->patterns->astring, whatfound ? whatfound : "<NULL>", whatfound[-1]); #endif if(whatfound) { /* The patch below allows in case of pattern ws.amazon.com to avoid matching aws.amazon.com whereas a.ws.amazon.com has to match */ if((whatfound != buf) && (m->patterns->astring[0] != '.') /* The searched pattern does not start with . */ && strchr(m->patterns->astring, '.') /* The matched pattern has a . (e.g. numeric or sym IPs) */) { int len = strlen(m->patterns->astring); if((whatfound[-1] != '.') || ((m->patterns->astring[len - 1] != '.') && (whatfound[len] != '\0') /* endsWith does not hold here */)) { return(0); } else { memcpy(match, &m->patterns[0].rep, sizeof(AC_REP_t)); /* Partial match? */ return(0); /* Keep searching as probably there is a better match */ } } } /* Return 1 for stopping to the first match. We might consider searching for the more specific match, paying more cpu cycles. 
*/ memcpy(match, &m->patterns[0].rep, sizeof(AC_REP_t)); if(((buf_len >= min_len) && (strncmp(&buf[buf_len - min_len], m->patterns->astring, min_len) == 0)) || (strncmp(buf, m->patterns->astring, min_len) == 0) /* begins with */ ) { #ifdef MATCH_DEBUG printf("Found match [%s][%s] [len: %d]" // "[proto_id: %u]" "\n", buf, m->patterns->astring, min_len /* , *matching_protocol_id */); #endif return(1); /* If the pattern found matches the string at the beginning we stop here */ } else { #ifdef MATCH_DEBUG printf("NO match found: continue\n"); #endif return(0); /* 0 to continue searching, !0 to stop */ } } /* ******************************************************************** */ static int fill_prefix_v4(prefix_t *p, const struct in_addr *a, int b, int mb) { if(b < 0 || b > mb) return(-1); memset(p, 0, sizeof(prefix_t)); memcpy(&p->add.sin, a, (mb + 7) / 8); p->family = AF_INET; p->bitlen = b; p->ref_count = 0; return(0); } /* ******************************************* */ static int fill_prefix_v6(prefix_t *prefix, const struct in6_addr *addr, int bits, int maxbits) { #ifdef PATRICIA_IPV6 if(bits < 0 || bits > maxbits) return -1; memcpy(&prefix->add.sin6, addr, (maxbits + 7) / 8); prefix->family = AF_INET6, prefix->bitlen = bits, prefix->ref_count = 0; return 0; #else return(-1); #endif } /* ******************************************* */ u_int16_t ndpi_network_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin /* network byte order */) { prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->protocols_ptree, &prefix); return(node ? 
node->value.uv.user_value : NDPI_PROTOCOL_UNKNOWN); } /* ******************************************* */ u_int16_t ndpi_network_port_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin /* network byte order */, u_int16_t port /* network byte order */) { prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->protocols_ptree, &prefix); if(node) { if((node->value.uv.additional_user_value == 0) || (node->value.uv.additional_user_value == port)) return(node->value.uv.user_value); } return(NDPI_PROTOCOL_UNKNOWN); } /* ******************************************* */ #if 0 static u_int8_t tor_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin) { return((ndpi_network_ptree_match(ndpi_str, pin) == NDPI_PROTOCOL_TOR) ? 1 : 0); } #endif /* ******************************************* */ u_int8_t ndpi_is_tor_flow(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; if(packet->tcp != NULL) { if(packet->iph) { if(flow->guessed_host_protocol_id == NDPI_PROTOCOL_TOR) return(1); } } return(0); } /* ******************************************* */ static patricia_node_t *add_to_ptree(patricia_tree_t *tree, int family, void *addr, int bits) { prefix_t prefix; patricia_node_t *node; fill_prefix_v4(&prefix, (struct in_addr *) addr, bits, tree->maxbits); node = ndpi_patricia_lookup(tree, &prefix); if(node) memset(&node->value, 0, sizeof(node->value)); return(node); } /* ******************************************* */ /* Load a file containing IPv4 addresses in CIDR format as 'protocol_id' Return: the number of entries loaded or -1 in case of error */ int ndpi_load_ipv4_ptree(struct ndpi_detection_module_struct *ndpi_str, const char *path, u_int16_t protocol_id) { char 
buffer[128], *line, *addr, *cidr, *saveptr; FILE *fd; int len; u_int num_loaded = 0; fd = fopen(path, "r"); if(fd == NULL) { NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); return(-1); } while (1) { line = fgets(buffer, sizeof(buffer), fd); if(line == NULL) break; len = strlen(line); if((len <= 1) || (line[0] == '#')) continue; line[len - 1] = '\0'; addr = strtok_r(line, "/", &saveptr); if(addr) { struct in_addr pin; patricia_node_t *node; cidr = strtok_r(NULL, "\n", &saveptr); pin.s_addr = inet_addr(addr); if((node = add_to_ptree(ndpi_str->protocols_ptree, AF_INET, &pin, cidr ? atoi(cidr) : 32 /* bits */)) != NULL) { node->value.uv.user_value = protocol_id, node->value.uv.additional_user_value = 0 /* port */; num_loaded++; } } } fclose(fd); return(num_loaded); } /* ******************************************* */ static void ndpi_init_ptree_ipv4(struct ndpi_detection_module_struct *ndpi_str, void *ptree, ndpi_network host_list[], u_int8_t skip_tor_hosts) { int i; for (i = 0; host_list[i].network != 0x0; i++) { struct in_addr pin; patricia_node_t *node; if(skip_tor_hosts && (host_list[i].value == NDPI_PROTOCOL_TOR)) continue; pin.s_addr = htonl(host_list[i].network); if((node = add_to_ptree(ptree, AF_INET, &pin, host_list[i].cidr /* bits */)) != NULL) { node->value.uv.user_value = host_list[i].value, node->value.uv.additional_user_value = 0; } } } /* ******************************************* */ static int ndpi_add_host_ip_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value, u_int16_t protocol_id) { patricia_node_t *node; struct in_addr pin; int bits = 32; char *ptr = strrchr(value, '/'); u_int16_t port = 0; /* Format ip:8.248.73.247:443 */ char *double_column; if(ptr) { ptr[0] = '\0'; ptr++; if((double_column = strrchr(ptr, ':')) != NULL) { double_column[0] = '\0'; port = atoi(&double_column[1]); } if(atoi(ptr) >= 0 && atoi(ptr) <= 32) bits = atoi(ptr); } else { /* Let's check if there is the port defined Example: 
ip:8.248.73.247:443@AmazonPrime */ double_column = strrchr(value, ':'); if(double_column) { double_column[0] = '\0'; port = atoi(&double_column[1]); } } inet_pton(AF_INET, value, &pin); if((node = add_to_ptree(ndpi_str->protocols_ptree, AF_INET, &pin, bits)) != NULL) { node->value.uv.user_value = protocol_id, node->value.uv.additional_user_value = htons(port); } return(0); } void set_ndpi_malloc(void *(*__ndpi_malloc)(size_t size)) { _ndpi_malloc = __ndpi_malloc; } void set_ndpi_flow_malloc(void *(*__ndpi_flow_malloc)(size_t size)) { _ndpi_flow_malloc = __ndpi_flow_malloc; } void set_ndpi_free(void (*__ndpi_free)(void *ptr)) { _ndpi_free = __ndpi_free; } void set_ndpi_flow_free(void (*__ndpi_flow_free)(void *ptr)) { _ndpi_flow_free = __ndpi_flow_free; } void ndpi_debug_printf(unsigned int proto, struct ndpi_detection_module_struct *ndpi_str, ndpi_log_level_t log_level, const char *file_name, const char *func_name, int line_number, const char *format, ...) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES va_list args; #define MAX_STR_LEN 250 char str[MAX_STR_LEN]; if(ndpi_str != NULL && log_level > NDPI_LOG_ERROR && proto > 0 && proto < NDPI_MAX_SUPPORTED_PROTOCOLS && !NDPI_ISSET(&ndpi_str->debug_bitmask, proto)) return; va_start(args, format); vsnprintf(str, sizeof(str) - 1, format, args); va_end(args); if(ndpi_str != NULL) { printf("%s:%s:%-3d - [%s]: %s", file_name, func_name, line_number, ndpi_get_proto_name(ndpi_str, proto), str); } else { printf("Proto: %u, %s", proto, str); } #endif } void set_ndpi_debug_function(struct ndpi_detection_module_struct *ndpi_str, ndpi_debug_function_ptr ndpi_debug_printf) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES ndpi_str->ndpi_debug_printf = ndpi_debug_printf; #endif } /* ****************************************** */ /* Keep it in order and in sync with ndpi_protocol_category_t in ndpi_typedefs.h */ static const char *categories[] = { "Unspecified", "Media", "VPN", "Email", "DataTransfer", "Web", "SocialNetwork", 
"Download-FileTransfer-FileSharing", "Game", "Chat", "VoIP", "Database", "RemoteAccess", "Cloud", "Network", "Collaborative", "RPC", "Streaming", "System", "SoftwareUpdate", "", "", "", "", "", "Music", "Video", "Shopping", "Productivity", "FileSharing", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "Mining", /* 99 */ "Malware", "Advertisement", "Banned_Site", "Site_Unavailable", "Allowed_Site", "Antimalware", }; /* ******************************************************************** */ struct ndpi_detection_module_struct *ndpi_init_detection_module(ndpi_init_prefs prefs) { struct ndpi_detection_module_struct *ndpi_str = ndpi_malloc(sizeof(struct ndpi_detection_module_struct)); int i; if(ndpi_str == NULL) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES NDPI_LOG_ERR(ndpi_str, "ndpi_init_detection_module initial malloc failed for ndpi_str\n"); #endif /* NDPI_ENABLE_DEBUG_MESSAGES */ return(NULL); } memset(ndpi_str, 0, sizeof(struct ndpi_detection_module_struct)); #ifdef NDPI_ENABLE_DEBUG_MESSAGES set_ndpi_debug_function(ndpi_str, (ndpi_debug_function_ptr) ndpi_debug_printf); #endif /* NDPI_ENABLE_DEBUG_MESSAGES */ if((ndpi_str->protocols_ptree = ndpi_New_Patricia(32 /* IPv4 */)) != NULL) ndpi_init_ptree_ipv4(ndpi_str, ndpi_str->protocols_ptree, host_protocol_list, prefs & ndpi_dont_load_tor_hosts); NDPI_BITMASK_RESET(ndpi_str->detection_bitmask); #ifdef NDPI_ENABLE_DEBUG_MESSAGES ndpi_str->user_data = NULL; #endif ndpi_str->ticks_per_second = 1000; /* ndpi_str->ticks_per_second */ ndpi_str->tcp_max_retransmission_window_size = NDPI_DEFAULT_MAX_TCP_RETRANSMISSION_WINDOW_SIZE; ndpi_str->directconnect_connection_ip_tick_timeout = NDPI_DIRECTCONNECT_CONNECTION_IP_TICK_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->rtsp_connection_timeout = 
    NDPI_RTSP_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second;
  /* Per-protocol idle/connection timeouts, all expressed in ticks */
  ndpi_str->irc_timeout = NDPI_IRC_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second;
  ndpi_str->gnutella_timeout = NDPI_GNUTELLA_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second;
  ndpi_str->thunder_timeout = NDPI_THUNDER_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second;
  ndpi_str->zattoo_connection_timeout = NDPI_ZATTOO_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second;
  ndpi_str->jabber_stun_timeout = NDPI_JABBER_STUN_TIMEOUT * ndpi_str->ticks_per_second;
  ndpi_str->jabber_file_transfer_timeout = NDPI_JABBER_FT_TIMEOUT * ndpi_str->ticks_per_second;
  ndpi_str->soulseek_connection_ip_tick_timeout = NDPI_SOULSEEK_CONNECTION_IP_TICK_TIMEOUT * ndpi_str->ticks_per_second;

  /* Custom protocols are appended after the built-in ones */
  ndpi_str->ndpi_num_supported_protocols = NDPI_MAX_SUPPORTED_PROTOCOLS;
  ndpi_str->ndpi_num_custom_protocols = 0;

  /* Aho-Corasick automata used for host/content/bigram string matching;
     all share ac_match_handler as the match callback */
  ndpi_str->host_automa.ac_automa = ac_automata_init(ac_match_handler);
  ndpi_str->content_automa.ac_automa = ac_automata_init(ac_match_handler);
  ndpi_str->bigrams_automa.ac_automa = ac_automata_init(ac_match_handler);
  ndpi_str->impossible_bigrams_automa.ac_automa = ac_automata_init(ac_match_handler);

  /* Sanity check: the categories[] string table must stay in sync with
     ndpi_protocol_category_t (see comment above the table) */
  if((sizeof(categories) / sizeof(char *)) != NDPI_PROTOCOL_NUM_CATEGORIES) {
    NDPI_LOG_ERR(ndpi_str, "[NDPI] invalid categories length: expected %u, got %u\n", NDPI_PROTOCOL_NUM_CATEGORIES,
		 (unsigned int) (sizeof(categories) / sizeof(char *)));
    /* NOTE(review): returning NULL here leaks ndpi_str and the four automata
       allocated above — confirm whether a cleanup path should be added */
    return(NULL);
  }

  /* Shadow copies allow category reloads without disturbing live lookups */
  ndpi_str->custom_categories.hostnames.ac_automa = ac_automata_init(ac_match_handler);
  ndpi_str->custom_categories.hostnames_shadow.ac_automa = ac_automata_init(ac_match_handler);
  ndpi_str->custom_categories.ipAddresses = ndpi_New_Patricia(32 /* IPv4 */);
  ndpi_str->custom_categories.ipAddresses_shadow = ndpi_New_Patricia(32 /* IPv4 */);

  if((ndpi_str->custom_categories.ipAddresses == NULL) || (ndpi_str->custom_categories.ipAddresses_shadow == NULL))
    /* NOTE(review): this error path also leaks everything allocated so far —
       verify against callers before adding teardown */
    return(NULL);

  /* Register the built-in protocol table (names, breeds, default ports) */
  ndpi_init_protocol_defaults(ndpi_str);

  /* Default placeholder labels for the user-definable custom categories */
  for (i = 0; i < NUM_CUSTOM_CATEGORIES; i++)
    snprintf(ndpi_str->custom_category_labels[i],
CUSTOM_CATEGORY_LABEL_LEN, "User custom category %u", (unsigned int) (i + 1)); return(ndpi_str); } /* *********************************************** */ void ndpi_finalize_initalization(struct ndpi_detection_module_struct *ndpi_str) { u_int i; for (i = 0; i < 4; i++) { ndpi_automa *automa; switch (i) { case 0: automa = &ndpi_str->host_automa; break; case 1: automa = &ndpi_str->content_automa; break; case 2: automa = &ndpi_str->bigrams_automa; break; case 3: automa = &ndpi_str->impossible_bigrams_automa; break; default: automa = NULL; break; } if(automa) { ac_automata_finalize((AC_AUTOMATA_t *) automa->ac_automa); automa->ac_automa_finalized = 1; } } } /* *********************************************** */ /* Wrappers */ void *ndpi_init_automa(void) { return(ac_automata_init(ac_match_handler)); } /* ****************************************************** */ int ndpi_add_string_value_to_automa(void *_automa, char *str, u_int32_t num) { AC_PATTERN_t ac_pattern; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_ERROR_t rc; if(automa == NULL) return(-1); memset(&ac_pattern, 0, sizeof(ac_pattern)); ac_pattern.astring = str; ac_pattern.rep.number = num; ac_pattern.length = strlen(ac_pattern.astring); rc = ac_automata_add(automa, &ac_pattern); return(rc == ACERR_SUCCESS || rc == ACERR_DUPLICATE_PATTERN ? 
	 0 : -1);
}

/* ****************************************************** */

/* Convenience wrapper around ndpi_add_string_value_to_automa() that stores
   the string with the default associated value 1.
   Returns 0 on success, -1 on failure. */
int ndpi_add_string_to_automa(void *_automa, char *str) {
  return(ndpi_add_string_value_to_automa(_automa, str, 1));
}

/* ****************************************************** */

/* Release an automa previously created with ndpi_init_automa().
   The second argument 0 means the pattern strings themselves are NOT freed
   (ownership stays with the caller). */
void ndpi_free_automa(void *_automa) {
  ac_automata_release((AC_AUTOMATA_t *) _automa, 0);
}

/* ****************************************************** */

/* Finalize an automa: after this call no more patterns can be added and the
   automa becomes usable for searches. */
void ndpi_finalize_automa(void *_automa) {
  ac_automata_finalize((AC_AUTOMATA_t *) _automa);
}

/* ****************************************************** */

/* Search string_to_match in the given automa.
   Returns the value associated with the matching pattern (match.number),
   0 when nothing matches, or -2 on invalid arguments (NULL automa or
   NULL/empty string). */
int ndpi_match_string(void *_automa, char *string_to_match) {
  AC_REP_t match = { NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED };
  AC_TEXT_t ac_input_text;
  AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa;
  int rc;

  if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0'))
    return(-2);

  ac_input_text.astring = string_to_match, ac_input_text.length = strlen(string_to_match);
  rc = ac_automata_search(automa, &ac_input_text, &match);

  /*
    As ac_automata_search can detect partial matches and continue the search process
    in case rc == 0 (i.e. no match), we need to check if there is a partial match
    and in this case return it
  */
  if((rc == 0) && (match.number != 0))
    rc = 1;

  return(rc ?
match.number : 0); } /* ****************************************************** */ int ndpi_match_string_protocol_id(void *_automa, char *string_to_match, u_int match_len, u_int16_t *protocol_id, ndpi_protocol_category_t *category, ndpi_protocol_breed_t *breed) { AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_REP_t match = { 0, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; int rc; *protocol_id = (u_int16_t)-1; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = match_len; rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; if(rc) *protocol_id = (u_int16_t)match.number, *category = match.category, *breed = match.breed; else *protocol_id = NDPI_PROTOCOL_UNKNOWN; return((*protocol_id != NDPI_PROTOCOL_UNKNOWN) ? 0 : -1); } /* ****************************************************** */ int ndpi_match_string_value(void *_automa, char *string_to_match, u_int match_len, u_int32_t *num) { AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_REP_t match = { 0, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; int rc; *num = (u_int32_t)-1; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = match_len; rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; if(rc) *num = match.number; else *num = 0; return(rc ? 
0 : -1); } /* *********************************************** */ int ndpi_match_custom_category(struct ndpi_detection_module_struct *ndpi_str, char *name, u_int name_len, ndpi_protocol_category_t *category) { ndpi_protocol_breed_t breed; u_int16_t id; int rc = ndpi_match_string_protocol_id(ndpi_str->custom_categories.hostnames.ac_automa, name, name_len, &id, category, &breed); return(rc); } /* *********************************************** */ int ndpi_get_custom_category_match(struct ndpi_detection_module_struct *ndpi_str, char *name_or_ip, u_int name_len, ndpi_protocol_category_t *id) { char ipbuf[64], *ptr; struct in_addr pin; u_int cp_len = ndpi_min(sizeof(ipbuf) - 1, name_len); if(!ndpi_str->custom_categories.categories_loaded) return(-1); if(cp_len > 0) { memcpy(ipbuf, name_or_ip, cp_len); ipbuf[cp_len] = '\0'; } else ipbuf[0] = '\0'; ptr = strrchr(ipbuf, '/'); if(ptr) ptr[0] = '\0'; if(inet_pton(AF_INET, ipbuf, &pin) == 1) { /* Search IP */ prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, &pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix); if(node) { *id = node->value.uv.user_value; return(0); } return(-1); } else { /* Search Host */ return(ndpi_match_custom_category(ndpi_str, name_or_ip, name_len, id)); } } /* *********************************************** */ static void free_ptree_data(void *data) { ; } /* ****************************************************** */ void ndpi_exit_detection_module(struct ndpi_detection_module_struct *ndpi_str) { if(ndpi_str != NULL) { int i; for (i = 0; i < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS); i++) { if(ndpi_str->proto_defaults[i].protoName) ndpi_free(ndpi_str->proto_defaults[i].protoName); } /* NDPI_PROTOCOL_TINC */ if(ndpi_str->tinc_cache) cache_free((cache_t)(ndpi_str->tinc_cache)); if(ndpi_str->ookla_cache) 
ndpi_lru_free_cache(ndpi_str->ookla_cache); if(ndpi_str->stun_cache) ndpi_lru_free_cache(ndpi_str->stun_cache); if(ndpi_str->msteams_cache) ndpi_lru_free_cache(ndpi_str->msteams_cache); if(ndpi_str->protocols_ptree) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->protocols_ptree, free_ptree_data); if(ndpi_str->udpRoot != NULL) ndpi_tdestroy(ndpi_str->udpRoot, ndpi_free); if(ndpi_str->tcpRoot != NULL) ndpi_tdestroy(ndpi_str->tcpRoot, ndpi_free); if(ndpi_str->host_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->host_automa.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->content_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->content_automa.ac_automa, 0); if(ndpi_str->bigrams_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->bigrams_automa.ac_automa, 0); if(ndpi_str->impossible_bigrams_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->impossible_bigrams_automa.ac_automa, 0); if(ndpi_str->custom_categories.hostnames.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->custom_categories.hostnames_shadow.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames_shadow.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->custom_categories.ipAddresses != NULL) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses, free_ptree_data); if(ndpi_str->custom_categories.ipAddresses_shadow != NULL) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses_shadow, free_ptree_data); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/ndpi_exit_detection_module.c" #endif ndpi_free(ndpi_str); } } /* ****************************************************** */ int ndpi_get_protocol_id_master_proto(struct ndpi_detection_module_struct *ndpi_str, u_int16_t 
protocol_id, u_int16_t **tcp_master_proto, u_int16_t **udp_master_proto) { if(protocol_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) { *tcp_master_proto = ndpi_str->proto_defaults[NDPI_PROTOCOL_UNKNOWN].master_tcp_protoId, *udp_master_proto = ndpi_str->proto_defaults[NDPI_PROTOCOL_UNKNOWN].master_udp_protoId; return(-1); } *tcp_master_proto = ndpi_str->proto_defaults[protocol_id].master_tcp_protoId, *udp_master_proto = ndpi_str->proto_defaults[protocol_id].master_udp_protoId; return(0); } /* ****************************************************** */ static ndpi_default_ports_tree_node_t *ndpi_get_guessed_protocol_id(struct ndpi_detection_module_struct *ndpi_str, u_int8_t proto, u_int16_t sport, u_int16_t dport) { ndpi_default_ports_tree_node_t node; if(sport && dport) { int low = ndpi_min(sport, dport); int high = ndpi_max(sport, dport); const void *ret; node.default_port = low; /* Check server port first */ ret = ndpi_tfind(&node, (proto == IPPROTO_TCP) ? (void *) &ndpi_str->tcpRoot : (void *) &ndpi_str->udpRoot, ndpi_default_ports_tree_node_t_cmp); if(ret == NULL) { node.default_port = high; ret = ndpi_tfind(&node, (proto == IPPROTO_TCP) ? 
(void *) &ndpi_str->tcpRoot : (void *) &ndpi_str->udpRoot, ndpi_default_ports_tree_node_t_cmp); } if(ret) return(*(ndpi_default_ports_tree_node_t **) ret); } return(NULL); } /* ****************************************************** */ /* These are UDP protocols that must fit a single packet and thus that if have NOT been detected they cannot be guessed as they have been excluded */ u_int8_t is_udp_guessable_protocol(u_int16_t l7_guessed_proto) { switch (l7_guessed_proto) { case NDPI_PROTOCOL_QUIC: case NDPI_PROTOCOL_SNMP: case NDPI_PROTOCOL_NETFLOW: /* TODO: add more protocols (if any missing) */ return(1); } return(0); } /* ****************************************************** */ u_int16_t ndpi_guess_protocol_id(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int8_t proto, u_int16_t sport, u_int16_t dport, u_int8_t *user_defined_proto) { *user_defined_proto = 0; /* Default */ if(sport && dport) { ndpi_default_ports_tree_node_t *found = ndpi_get_guessed_protocol_id(ndpi_str, proto, sport, dport); if(found != NULL) { u_int16_t guessed_proto = found->proto->protoId; /* We need to check if the guessed protocol isn't excluded by nDPI */ if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, guessed_proto) && is_udp_guessable_protocol(guessed_proto)) return(NDPI_PROTOCOL_UNKNOWN); else { *user_defined_proto = found->customUserProto; return(guessed_proto); } } } else { /* No TCP/UDP */ switch (proto) { case NDPI_IPSEC_PROTOCOL_ESP: case NDPI_IPSEC_PROTOCOL_AH: return(NDPI_PROTOCOL_IP_IPSEC); break; case NDPI_GRE_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_GRE); break; case NDPI_ICMP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_ICMP); break; case NDPI_IGMP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_IGMP); break; case NDPI_EGP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_EGP); break; case NDPI_SCTP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_SCTP); break; case NDPI_OSPF_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_OSPF); 
break; case NDPI_IPIP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_IP_IN_IP); break; case NDPI_ICMPV6_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_ICMPV6); break; case 112: return(NDPI_PROTOCOL_IP_VRRP); break; } } return(NDPI_PROTOCOL_UNKNOWN); } /* ******************************************************************** */ u_int ndpi_get_num_supported_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_supported_protocols); } /* ******************************************************************** */ #ifdef WIN32 char *strsep(char **sp, char *sep) { char *p, *s; if(sp == NULL || *sp == NULL || **sp == '\0') return(NULL); s = *sp; p = s + strcspn(s, sep); if(*p != '\0') *p++ = '\0'; *sp = p; return(s); } #endif /* ******************************************************************** */ int ndpi_handle_rule(struct ndpi_detection_module_struct *ndpi_str, char *rule, u_int8_t do_add) { char *at, *proto, *elem; ndpi_proto_defaults_t *def; u_int16_t subprotocol_id, i; at = strrchr(rule, '@'); if(at == NULL) { NDPI_LOG_ERR(ndpi_str, "Invalid rule '%s'\n", rule); return(-1); } else at[0] = 0, proto = &at[1]; for (i = 0; proto[i] != '\0'; i++) { switch (proto[i]) { case '/': case '&': case '^': case ':': case ';': case '\'': case '"': case ' ': proto[i] = '_'; break; } } for (i = 0, def = NULL; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) { if(ndpi_str->proto_defaults[i].protoName && strcasecmp(ndpi_str->proto_defaults[i].protoName, proto) == 0) { def = &ndpi_str->proto_defaults[i]; subprotocol_id = i; break; } } if(def == NULL) { if(!do_add) { /* We need to remove a rule */ NDPI_LOG_ERR(ndpi_str, "Unable to find protocol '%s': skipping rule '%s'\n", proto, rule); return(-3); } else { ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}; if(ndpi_str->ndpi_num_custom_protocols >= (NDPI_MAX_NUM_CUSTOM_PROTOCOLS - 1)) { NDPI_LOG_ERR(ndpi_str, 
"Too many protocols defined (%u): skipping protocol %s\n", ndpi_str->ndpi_num_custom_protocols, proto); return(-2); } ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, ndpi_str->ndpi_num_supported_protocols, 0 /* can_have_a_subprotocol */, no_master, no_master, proto, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, /* TODO add protocol category support in rules */ ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); def = &ndpi_str->proto_defaults[ndpi_str->ndpi_num_supported_protocols]; subprotocol_id = ndpi_str->ndpi_num_supported_protocols; ndpi_str->ndpi_num_supported_protocols++, ndpi_str->ndpi_num_custom_protocols++; } } while ((elem = strsep(&rule, ",")) != NULL) { char *attr = elem, *value = NULL; ndpi_port_range range; int is_tcp = 0, is_udp = 0, is_ip = 0; if(strncmp(attr, "tcp:", 4) == 0) is_tcp = 1, value = &attr[4]; else if(strncmp(attr, "udp:", 4) == 0) is_udp = 1, value = &attr[4]; else if(strncmp(attr, "ip:", 3) == 0) is_ip = 1, value = &attr[3]; else if(strncmp(attr, "host:", 5) == 0) { /* host:"<value>",host:"<value>",.....@<subproto> */ value = &attr[5]; if(value[0] == '"') value++; /* remove leading " */ if(value[strlen(value) - 1] == '"') value[strlen(value) - 1] = '\0'; /* remove trailing " */ } if(is_tcp || is_udp) { u_int p_low, p_high; if(sscanf(value, "%u-%u", &p_low, &p_high) == 2) range.port_low = p_low, range.port_high = p_high; else range.port_low = range.port_high = atoi(&elem[4]); if(do_add) addDefaultPort(ndpi_str, &range, def, 1 /* Custom user proto */, is_tcp ? &ndpi_str->tcpRoot : &ndpi_str->udpRoot, __FUNCTION__, __LINE__); else removeDefaultPort(&range, def, is_tcp ? 
&ndpi_str->tcpRoot : &ndpi_str->udpRoot); } else if(is_ip) { /* NDPI_PROTOCOL_TOR */ ndpi_add_host_ip_subprotocol(ndpi_str, value, subprotocol_id); } else { if(do_add) ndpi_add_host_url_subprotocol(ndpi_str, value, subprotocol_id, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_ACCEPTABLE); else ndpi_remove_host_url_subprotocol(ndpi_str, value, subprotocol_id); } } return(0); } /* ******************************************************************** */ /* * Format: * * <host|ip> <category_id> * * Notes: * - host and category are separated by a single TAB * - empty lines or lines starting with # are ignored */ int ndpi_load_categories_file(struct ndpi_detection_module_struct *ndpi_str, const char *path) { char buffer[512], *line, *name, *category, *saveptr; FILE *fd; int len, num = 0; fd = fopen(path, "r"); if(fd == NULL) { NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); return(-1); } while (1) { line = fgets(buffer, sizeof(buffer), fd); if(line == NULL) break; len = strlen(line); if((len <= 1) || (line[0] == '#')) continue; line[len - 1] = '\0'; name = strtok_r(line, "\t", &saveptr); if(name) { category = strtok_r(NULL, "\t", &saveptr); if(category) { int rc = ndpi_load_category(ndpi_str, name, (ndpi_protocol_category_t) atoi(category)); if(rc >= 0) num++; } } } fclose(fd); ndpi_enable_loaded_categories(ndpi_str); return(num); } /* ******************************************************************** */ /* Format: <tcp|udp>:<port>,<tcp|udp>:<port>,.....@<proto> Subprotocols Format: host:"<value>",host:"<value>",.....@<subproto> IP based Subprotocols Format (<value> is IP or CIDR): ip:<value>,ip:<value>,.....@<subproto> Example: tcp:80,tcp:3128@HTTP udp:139@NETBIOS */ int ndpi_load_protocols_file(struct ndpi_detection_module_struct *ndpi_str, const char *path) { FILE *fd; char *buffer, *old_buffer; int chunk_len = 512, buffer_len = chunk_len, old_buffer_len; int i, rc = -1; fd = fopen(path, "r"); if(fd == NULL) { 
NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); goto error; } buffer = ndpi_malloc(buffer_len); if(buffer == NULL) { NDPI_LOG_ERR(ndpi_str, "Memory allocation failure\n"); goto close_fd; } while (1) { char *line = buffer; int line_len = buffer_len; while ((line = fgets(line, line_len, fd)) != NULL && line[strlen(line) - 1] != '\n') { i = strlen(line); old_buffer = buffer; old_buffer_len = buffer_len; buffer_len += chunk_len; buffer = ndpi_realloc(old_buffer, old_buffer_len, buffer_len); if(buffer == NULL) { NDPI_LOG_ERR(ndpi_str, "Memory allocation failure\n"); ndpi_free(old_buffer); goto close_fd; } line = &buffer[i]; line_len = chunk_len; } if(!line) /* safety check */ break; i = strlen(buffer); if((i <= 1) || (buffer[0] == '#')) continue; else buffer[i - 1] = '\0'; ndpi_handle_rule(ndpi_str, buffer, 1); } rc = 0; ndpi_free(buffer); close_fd: fclose(fd); error: return(rc); } /* ******************************************************************** */ /* ntop */ void ndpi_set_bitmask_protocol_detection(char *label, struct ndpi_detection_module_struct *ndpi_str, const NDPI_PROTOCOL_BITMASK *detection_bitmask, const u_int32_t idx, u_int16_t ndpi_protocol_id, void (*func)(struct ndpi_detection_module_struct *, struct ndpi_flow_struct *flow), const NDPI_SELECTION_BITMASK_PROTOCOL_SIZE ndpi_selection_bitmask, u_int8_t b_save_bitmask_unknow, u_int8_t b_add_detection_bitmask) { /* Compare specify protocol bitmask with main detection bitmask */ if(NDPI_COMPARE_PROTOCOL_TO_BITMASK(*detection_bitmask, ndpi_protocol_id) != 0) { #ifdef DEBUG NDPI_LOG_DBG2(ndpi_str, "[NDPI] ndpi_set_bitmask_protocol_detection: %s : [callback_buffer] idx= %u, [proto_defaults] " "protocol_id=%u\n", label, idx, ndpi_protocol_id); #endif if(ndpi_str->proto_defaults[ndpi_protocol_id].protoIdx != 0) { NDPI_LOG_DBG2(ndpi_str, "[NDPI] Internal error: protocol %s/%u has been already registered\n", label, ndpi_protocol_id); #ifdef DEBUG } else { NDPI_LOG_DBG2(ndpi_str, 
"[NDPI] Adding %s with protocol id %d\n", label, ndpi_protocol_id); #endif } /* Set function and index protocol within proto_default structure for port protocol detection and callback_buffer function for DPI protocol detection */ ndpi_str->proto_defaults[ndpi_protocol_id].protoIdx = idx; ndpi_str->proto_defaults[ndpi_protocol_id].func = ndpi_str->callback_buffer[idx].func = func; /* Set ndpi_selection_bitmask for protocol */ ndpi_str->callback_buffer[idx].ndpi_selection_bitmask = ndpi_selection_bitmask; /* Reset protocol detection bitmask via NDPI_PROTOCOL_UNKNOWN and than add specify protocol bitmast to callback buffer. */ if(b_save_bitmask_unknow) NDPI_SAVE_AS_BITMASK(ndpi_str->callback_buffer[idx].detection_bitmask, NDPI_PROTOCOL_UNKNOWN); if(b_add_detection_bitmask) NDPI_ADD_PROTOCOL_TO_BITMASK(ndpi_str->callback_buffer[idx].detection_bitmask, ndpi_protocol_id); NDPI_SAVE_AS_BITMASK(ndpi_str->callback_buffer[idx].excluded_protocol_bitmask, ndpi_protocol_id); } } /* ******************************************************************** */ void ndpi_set_protocol_detection_bitmask2(struct ndpi_detection_module_struct *ndpi_str, const NDPI_PROTOCOL_BITMASK *dbm) { NDPI_PROTOCOL_BITMASK detection_bitmask_local; NDPI_PROTOCOL_BITMASK *detection_bitmask = &detection_bitmask_local; u_int32_t a = 0; NDPI_BITMASK_SET(detection_bitmask_local, *dbm); NDPI_BITMASK_SET(ndpi_str->detection_bitmask, *dbm); /* set this here to zero to be interrupt safe */ ndpi_str->callback_buffer_size = 0; /* HTTP */ init_http_dissector(ndpi_str, &a, detection_bitmask); /* STARCRAFT */ init_starcraft_dissector(ndpi_str, &a, detection_bitmask); /* TLS */ init_tls_dissector(ndpi_str, &a, detection_bitmask); /* STUN */ init_stun_dissector(ndpi_str, &a, detection_bitmask); /* RTP */ init_rtp_dissector(ndpi_str, &a, detection_bitmask); /* RTSP */ init_rtsp_dissector(ndpi_str, &a, detection_bitmask); /* RDP */ init_rdp_dissector(ndpi_str, &a, detection_bitmask); /* SIP */ init_sip_dissector(ndpi_str, 
&a, detection_bitmask); /* IMO */ init_imo_dissector(ndpi_str, &a, detection_bitmask); /* Teredo */ init_teredo_dissector(ndpi_str, &a, detection_bitmask); /* EDONKEY */ init_edonkey_dissector(ndpi_str, &a, detection_bitmask); /* FASTTRACK */ init_fasttrack_dissector(ndpi_str, &a, detection_bitmask); /* GNUTELLA */ init_gnutella_dissector(ndpi_str, &a, detection_bitmask); /* DIRECTCONNECT */ init_directconnect_dissector(ndpi_str, &a, detection_bitmask); /* NATS */ init_nats_dissector(ndpi_str, &a, detection_bitmask); /* APPLEJUICE */ init_applejuice_dissector(ndpi_str, &a, detection_bitmask); /* SOULSEEK */ init_soulseek_dissector(ndpi_str, &a, detection_bitmask); /* SOCKS */ init_socks_dissector(ndpi_str, &a, detection_bitmask); /* IRC */ init_irc_dissector(ndpi_str, &a, detection_bitmask); /* JABBER */ init_jabber_dissector(ndpi_str, &a, detection_bitmask); /* MAIL_POP */ init_mail_pop_dissector(ndpi_str, &a, detection_bitmask); /* MAIL_IMAP */ init_mail_imap_dissector(ndpi_str, &a, detection_bitmask); /* MAIL_SMTP */ init_mail_smtp_dissector(ndpi_str, &a, detection_bitmask); /* USENET */ init_usenet_dissector(ndpi_str, &a, detection_bitmask); /* DNS */ init_dns_dissector(ndpi_str, &a, detection_bitmask); /* FILETOPIA */ init_fbzero_dissector(ndpi_str, &a, detection_bitmask); /* VMWARE */ init_vmware_dissector(ndpi_str, &a, detection_bitmask); /* NON_TCP_UDP */ init_non_tcp_udp_dissector(ndpi_str, &a, detection_bitmask); /* SOPCAST */ init_sopcast_dissector(ndpi_str, &a, detection_bitmask); /* TVUPLAYER */ init_tvuplayer_dissector(ndpi_str, &a, detection_bitmask); /* PPSTREAM */ init_ppstream_dissector(ndpi_str, &a, detection_bitmask); /* PPLIVE */ init_pplive_dissector(ndpi_str, &a, detection_bitmask); /* IAX */ init_iax_dissector(ndpi_str, &a, detection_bitmask); /* MGPC */ init_mgpc_dissector(ndpi_str, &a, detection_bitmask); /* ZATTOO */ init_zattoo_dissector(ndpi_str, &a, detection_bitmask); /* QQ */ init_qq_dissector(ndpi_str, &a, detection_bitmask); /* SSH 
*/ init_ssh_dissector(ndpi_str, &a, detection_bitmask); /* AYIYA */ init_ayiya_dissector(ndpi_str, &a, detection_bitmask); /* THUNDER */ init_thunder_dissector(ndpi_str, &a, detection_bitmask); /* VNC */ init_vnc_dissector(ndpi_str, &a, detection_bitmask); /* TEAMVIEWER */ init_teamviewer_dissector(ndpi_str, &a, detection_bitmask); /* DHCP */ init_dhcp_dissector(ndpi_str, &a, detection_bitmask); /* STEAM */ init_steam_dissector(ndpi_str, &a, detection_bitmask); /* HALFLIFE2 */ init_halflife2_dissector(ndpi_str, &a, detection_bitmask); /* XBOX */ init_xbox_dissector(ndpi_str, &a, detection_bitmask); /* HTTP_APPLICATION_ACTIVESYNC */ init_http_activesync_dissector(ndpi_str, &a, detection_bitmask); /* SMB */ init_smb_dissector(ndpi_str, &a, detection_bitmask); /* MINING */ init_mining_dissector(ndpi_str, &a, detection_bitmask); /* TELNET */ init_telnet_dissector(ndpi_str, &a, detection_bitmask); /* NTP */ init_ntp_dissector(ndpi_str, &a, detection_bitmask); /* NFS */ init_nfs_dissector(ndpi_str, &a, detection_bitmask); /* SSDP */ init_ssdp_dissector(ndpi_str, &a, detection_bitmask); /* WORLD_OF_WARCRAFT */ init_world_of_warcraft_dissector(ndpi_str, &a, detection_bitmask); /* POSTGRES */ init_postgres_dissector(ndpi_str, &a, detection_bitmask); /* MYSQL */ init_mysql_dissector(ndpi_str, &a, detection_bitmask); /* BGP */ init_bgp_dissector(ndpi_str, &a, detection_bitmask); /* SNMP */ init_snmp_dissector(ndpi_str, &a, detection_bitmask); /* KONTIKI */ init_kontiki_dissector(ndpi_str, &a, detection_bitmask); /* ICECAST */ init_icecast_dissector(ndpi_str, &a, detection_bitmask); /* SHOUTCAST */ init_shoutcast_dissector(ndpi_str, &a, detection_bitmask); /* KERBEROS */ init_kerberos_dissector(ndpi_str, &a, detection_bitmask); /* OPENFT */ init_openft_dissector(ndpi_str, &a, detection_bitmask); /* SYSLOG */ init_syslog_dissector(ndpi_str, &a, detection_bitmask); /* DIRECT_DOWNLOAD_LINK */ init_directdownloadlink_dissector(ndpi_str, &a, detection_bitmask); /* NETBIOS */ 
init_netbios_dissector(ndpi_str, &a, detection_bitmask); /* MDNS */ init_mdns_dissector(ndpi_str, &a, detection_bitmask); /* IPP */ init_ipp_dissector(ndpi_str, &a, detection_bitmask); /* LDAP */ init_ldap_dissector(ndpi_str, &a, detection_bitmask); /* WARCRAFT3 */ init_warcraft3_dissector(ndpi_str, &a, detection_bitmask); /* XDMCP */ init_xdmcp_dissector(ndpi_str, &a, detection_bitmask); /* TFTP */ init_tftp_dissector(ndpi_str, &a, detection_bitmask); /* MSSQL_TDS */ init_mssql_tds_dissector(ndpi_str, &a, detection_bitmask); /* PPTP */ init_pptp_dissector(ndpi_str, &a, detection_bitmask); /* STEALTHNET */ init_stealthnet_dissector(ndpi_str, &a, detection_bitmask); /* DHCPV6 */ init_dhcpv6_dissector(ndpi_str, &a, detection_bitmask); /* AFP */ init_afp_dissector(ndpi_str, &a, detection_bitmask); /* check_mk */ init_checkmk_dissector(ndpi_str, &a, detection_bitmask); /* AIMINI */ init_aimini_dissector(ndpi_str, &a, detection_bitmask); /* FLORENSIA */ init_florensia_dissector(ndpi_str, &a, detection_bitmask); /* MAPLESTORY */ init_maplestory_dissector(ndpi_str, &a, detection_bitmask); /* DOFUS */ init_dofus_dissector(ndpi_str, &a, detection_bitmask); /* WORLD_OF_KUNG_FU */ init_world_of_kung_fu_dissector(ndpi_str, &a, detection_bitmask); /* FIESTA */ init_fiesta_dissector(ndpi_str, &a, detection_bitmask); /* CROSSIFIRE */ init_crossfire_dissector(ndpi_str, &a, detection_bitmask); /* GUILDWARS */ init_guildwars_dissector(ndpi_str, &a, detection_bitmask); /* ARMAGETRON */ init_armagetron_dissector(ndpi_str, &a, detection_bitmask); /* DROPBOX */ init_dropbox_dissector(ndpi_str, &a, detection_bitmask); /* SPOTIFY */ init_spotify_dissector(ndpi_str, &a, detection_bitmask); /* RADIUS */ init_radius_dissector(ndpi_str, &a, detection_bitmask); /* CITRIX */ init_citrix_dissector(ndpi_str, &a, detection_bitmask); /* LOTUS_NOTES */ init_lotus_notes_dissector(ndpi_str, &a, detection_bitmask); /* GTP */ init_gtp_dissector(ndpi_str, &a, detection_bitmask); /* DCERPC */ 
init_dcerpc_dissector(ndpi_str, &a, detection_bitmask); /* NETFLOW */ init_netflow_dissector(ndpi_str, &a, detection_bitmask); /* SFLOW */ init_sflow_dissector(ndpi_str, &a, detection_bitmask); /* H323 */ init_h323_dissector(ndpi_str, &a, detection_bitmask); /* OPENVPN */ init_openvpn_dissector(ndpi_str, &a, detection_bitmask); /* NOE */ init_noe_dissector(ndpi_str, &a, detection_bitmask); /* CISCOVPN */ init_ciscovpn_dissector(ndpi_str, &a, detection_bitmask); /* TEAMSPEAK */ init_teamspeak_dissector(ndpi_str, &a, detection_bitmask); /* TOR */ init_tor_dissector(ndpi_str, &a, detection_bitmask); /* SKINNY */ init_skinny_dissector(ndpi_str, &a, detection_bitmask); /* RTCP */ init_rtcp_dissector(ndpi_str, &a, detection_bitmask); /* RSYNC */ init_rsync_dissector(ndpi_str, &a, detection_bitmask); /* WHOIS_DAS */ init_whois_das_dissector(ndpi_str, &a, detection_bitmask); /* ORACLE */ init_oracle_dissector(ndpi_str, &a, detection_bitmask); /* CORBA */ init_corba_dissector(ndpi_str, &a, detection_bitmask); /* RTMP */ init_rtmp_dissector(ndpi_str, &a, detection_bitmask); /* FTP_CONTROL */ init_ftp_control_dissector(ndpi_str, &a, detection_bitmask); /* FTP_DATA */ init_ftp_data_dissector(ndpi_str, &a, detection_bitmask); /* PANDO */ init_pando_dissector(ndpi_str, &a, detection_bitmask); /* MEGACO */ init_megaco_dissector(ndpi_str, &a, detection_bitmask); /* REDIS */ init_redis_dissector(ndpi_str, &a, detection_bitmask); /* UPnP */ init_upnp_dissector(ndpi_str, &a, detection_bitmask); /* VHUA */ init_vhua_dissector(ndpi_str, &a, detection_bitmask); /* ZMQ */ init_zmq_dissector(ndpi_str, &a, detection_bitmask); /* TELEGRAM */ init_telegram_dissector(ndpi_str, &a, detection_bitmask); /* QUIC */ init_quic_dissector(ndpi_str, &a, detection_bitmask); /* DIAMETER */ init_diameter_dissector(ndpi_str, &a, detection_bitmask); /* APPLE_PUSH */ init_apple_push_dissector(ndpi_str, &a, detection_bitmask); /* EAQ */ init_eaq_dissector(ndpi_str, &a, detection_bitmask); /* KAKAOTALK_VOICE 
*/ init_kakaotalk_voice_dissector(ndpi_str, &a, detection_bitmask); /* MPEGTS */ init_mpegts_dissector(ndpi_str, &a, detection_bitmask); /* UBNTAC2 */ init_ubntac2_dissector(ndpi_str, &a, detection_bitmask); /* COAP */ init_coap_dissector(ndpi_str, &a, detection_bitmask); /* MQTT */ init_mqtt_dissector(ndpi_str, &a, detection_bitmask); /* SOME/IP */ init_someip_dissector(ndpi_str, &a, detection_bitmask); /* RX */ init_rx_dissector(ndpi_str, &a, detection_bitmask); /* GIT */ init_git_dissector(ndpi_str, &a, detection_bitmask); /* HANGOUT */ init_hangout_dissector(ndpi_str, &a, detection_bitmask); /* DRDA */ init_drda_dissector(ndpi_str, &a, detection_bitmask); /* BJNP */ init_bjnp_dissector(ndpi_str, &a, detection_bitmask); /* SMPP */ init_smpp_dissector(ndpi_str, &a, detection_bitmask); /* TINC */ init_tinc_dissector(ndpi_str, &a, detection_bitmask); /* FIX */ init_fix_dissector(ndpi_str, &a, detection_bitmask); /* NINTENDO */ init_nintendo_dissector(ndpi_str, &a, detection_bitmask); /* MODBUS */ init_modbus_dissector(ndpi_str, &a, detection_bitmask); /* CAPWAP */ init_capwap_dissector(ndpi_str, &a, detection_bitmask); /* ZABBIX */ init_zabbix_dissector(ndpi_str, &a, detection_bitmask); /*** Put false-positive sensitive protocols at the end ***/ /* VIBER */ init_viber_dissector(ndpi_str, &a, detection_bitmask); /* SKYPE */ init_skype_dissector(ndpi_str, &a, detection_bitmask); /* BITTORRENT */ init_bittorrent_dissector(ndpi_str, &a, detection_bitmask); /* WHATSAPP */ init_whatsapp_dissector(ndpi_str, &a, detection_bitmask); /* OOKLA */ init_ookla_dissector(ndpi_str, &a, detection_bitmask); /* AMQP */ init_amqp_dissector(ndpi_str, &a, detection_bitmask); /* CSGO */ init_csgo_dissector(ndpi_str, &a, detection_bitmask); /* LISP */ init_lisp_dissector(ndpi_str, &a, detection_bitmask); /* AJP */ init_ajp_dissector(ndpi_str, &a, detection_bitmask); /* Memcached */ init_memcached_dissector(ndpi_str, &a, detection_bitmask); /* Nest Log Sink */ 
init_nest_log_sink_dissector(ndpi_str, &a, detection_bitmask); /* WireGuard VPN */ init_wireguard_dissector(ndpi_str, &a, detection_bitmask); /* Amazon_Video */ init_amazon_video_dissector(ndpi_str, &a, detection_bitmask); /* Targus Getdata */ init_targus_getdata_dissector(ndpi_str, &a, detection_bitmask); /* S7 comm */ init_s7comm_dissector(ndpi_str, &a, detection_bitmask); /* IEC 60870-5-104 */ init_104_dissector(ndpi_str, &a, detection_bitmask); /* WEBSOCKET */ init_websocket_dissector(ndpi_str, &a, detection_bitmask); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_main_init.c" #endif /* ----------------------------------------------------------------- */ ndpi_str->callback_buffer_size = a; NDPI_LOG_DBG2(ndpi_str, "callback_buffer_size is %u\n", ndpi_str->callback_buffer_size); /* now build the specific buffer for tcp, udp and non_tcp_udp */ ndpi_str->callback_buffer_size_tcp_payload = 0; ndpi_str->callback_buffer_size_tcp_no_payload = 0; for (a = 0; a < ndpi_str->callback_buffer_size; a++) { if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & (NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC)) != 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2(ndpi_str, "callback_buffer_tcp_payload, adding buffer %u as entry %u\n", a, ndpi_str->callback_buffer_size_tcp_payload); memcpy(&ndpi_str->callback_buffer_tcp_payload[ndpi_str->callback_buffer_size_tcp_payload], &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_tcp_payload++; if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD) == 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2( ndpi_str, "\tcallback_buffer_tcp_no_payload, additional adding buffer %u to no_payload process\n", a); memcpy(&ndpi_str->callback_buffer_tcp_no_payload[ndpi_str->callback_buffer_size_tcp_no_payload], &ndpi_str->callback_buffer[a], 
sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_tcp_no_payload++; } } } ndpi_str->callback_buffer_size_udp = 0; for (a = 0; a < ndpi_str->callback_buffer_size; a++) { if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & (NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC)) != 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2(ndpi_str, "callback_buffer_size_udp: adding buffer : %u as entry %u\n", a, ndpi_str->callback_buffer_size_udp); memcpy(&ndpi_str->callback_buffer_udp[ndpi_str->callback_buffer_size_udp], &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_udp++; } } ndpi_str->callback_buffer_size_non_tcp_udp = 0; for (a = 0; a < ndpi_str->callback_buffer_size; a++) { if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & (NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP)) == 0 || (ndpi_str->callback_buffer[a].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC) != 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2(ndpi_str, "callback_buffer_non_tcp_udp: adding buffer : %u as entry %u\n", a, ndpi_str->callback_buffer_size_non_tcp_udp); memcpy(&ndpi_str->callback_buffer_non_tcp_udp[ndpi_str->callback_buffer_size_non_tcp_udp], &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_non_tcp_udp++; } } } #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* handle extension headers in IPv6 packets * arguments: * l4ptr: pointer to the byte following the initial IPv6 header * l4len: the length of the IPv6 packet excluding the IPv6 header * nxt_hdr: next header value from the IPv6 header * result: * l4ptr: pointer to the start of the actual packet payload * l4len: length of the actual payload * nxt_hdr: protocol of the actual payload * returns 0 upon success and 1 upon 
failure */
/*
 * Walk the IPv6 extension-header chain starting at *l4ptr, advancing
 * l4ptr/l4len past every recognized extension header and leaving
 * *nxt_hdr set to the protocol of the actual payload.
 * Returns 0 on success, 1 on failure (truncated chain or
 * no-next-header, i.e. no L4 payload to parse).
 */
int ndpi_handle_ipv6_extension_headers(struct ndpi_detection_module_struct *ndpi_str, const u_int8_t **l4ptr, u_int16_t *l4len, u_int8_t *nxt_hdr) {
  /* 0 = hop-by-hop, 43 = routing, 44 = fragment, 60 = destination options,
     135 = mobility, 59 = no next header */
  while ((*nxt_hdr == 0 || *nxt_hdr == 43 || *nxt_hdr == 44 || *nxt_hdr == 60 || *nxt_hdr == 135 || *nxt_hdr == 59)) {
    u_int16_t ehdr_len;

    // no next header
    if(*nxt_hdr == 59) {
      return(1);
    }

    // fragment extension header has fixed size of 8 bytes and the first byte is the next header type
    if(*nxt_hdr == 44) {
      if(*l4len < 8) {
	return(1);
      }
      *nxt_hdr = (*l4ptr)[0];
      *l4len -= 8;
      (*l4ptr) += 8;
      continue;
    }

    // the other extension headers have one byte for the next header type
    // and one byte for the extension header length in 8 byte steps minus the first 8 bytes
    if(*l4len < 2) {
      return(1);
    }
    /* ehdr_len max is 255*8+8 = 2048, so no u_int16_t overflow here */
    ehdr_len = (*l4ptr)[1];
    ehdr_len *= 8;
    ehdr_len += 8;

    /* bail out if the advertised header length exceeds the remaining data */
    if(*l4len < ehdr_len) {
      return(1);
    }
    *nxt_hdr = (*l4ptr)[0];
    *l4len -= ehdr_len;
    (*l4ptr) += ehdr_len;
  }
  return(0);
}
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */

/*
 * Sanity-check an IPv4 header against the captured length: the header
 * and the advertised total length must fit, and the packet must not be
 * a later fragment (13-bit fragment offset == 0).
 * Returns 1 if the header is usable, 0 otherwise.
 */
static u_int8_t ndpi_iph_is_valid_and_not_fragmented(const struct ndpi_iphdr *iph, const u_int16_t ipsize) {
  //#ifdef REQUIRE_FULL_PACKETS
  /* htons(0x1FFF) masks the fragment-offset bits of frag_off.
     NOTE(review): the MF flag (0x2000) is not tested, so a first
     fragment (offset 0, MF set) passes this check — confirm that is
     intended. */
  if(ipsize < iph->ihl * 4 || ipsize < ntohs(iph->tot_len) || ntohs(iph->tot_len) < iph->ihl * 4 || (iph->frag_off & htons(0x1FFF)) != 0) {
    return(0);
  }
  //#endif

  return(1);
}

/*
 * Locate the L4 header inside the raw L3 buffer l3/l3_len.
 * On success (return 0) fills the optional out-parameters with the L4
 * payload pointer, its length, and the L4 protocol number; returns 1
 * on any malformed/excluded packet. flags may restrict parsing to
 * IPv4 or IPv6 only.
 */
static u_int8_t ndpi_detection_get_l4_internal(struct ndpi_detection_module_struct *ndpi_str, const u_int8_t *l3, u_int16_t l3_len, const u_int8_t **l4_return, u_int16_t *l4_len_return, u_int8_t *l4_protocol_return, u_int32_t flags) {
  const struct ndpi_iphdr *iph = NULL;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  const struct ndpi_ipv6hdr *iph_v6 = NULL;
#endif
  u_int16_t l4len = 0;
  const u_int8_t *l4ptr = NULL;
  u_int8_t l4protocol = 0;

  /* buffer must at least hold an IPv4 header before we look at it */
  if(l3 == NULL || l3_len < sizeof(struct ndpi_iphdr))
    return(1);

  if((iph = (const struct ndpi_iphdr *) l3) == NULL)
    return(1);

  if(iph->version == IPVERSION && iph->ihl >= 5) {
    NDPI_LOG_DBG2(ndpi_str, "ipv4 header\n");
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if(iph->version == 6 && l3_len >=
sizeof(struct ndpi_ipv6hdr)) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header\n");
    /* reinterpret the buffer as IPv6 and drop the IPv4 view */
    iph_v6 = (const struct ndpi_ipv6hdr *) l3;
    iph = NULL;
  }
#endif
  else {
    return(1);
  }

  /* honor caller's IPv4/IPv6-only restriction flags */
  if((flags & NDPI_DETECTION_ONLY_IPV6) && iph != NULL) {
    NDPI_LOG_DBG2(ndpi_str, "ipv4 header found but excluded by flag\n");
    return(1);
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if((flags & NDPI_DETECTION_ONLY_IPV4) && iph_v6 != NULL) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header found but excluded by flag\n");
    return(1);
  }
#endif

  if(iph != NULL && ndpi_iph_is_valid_and_not_fragmented(iph, l3_len)) {
    u_int16_t len = ntohs(iph->tot_len);
    u_int16_t hlen = (iph->ihl * 4);

    l4ptr = (((const u_int8_t *) iph) + iph->ihl * 4);

    /* tot_len == 0: fall back to the captured length.
       NOTE(review): presumably this accommodates stacks that emit
       tot_len 0 (e.g. offloaded packets) — confirm. */
    if(len == 0)
      len = l3_len;

    /* guard against hlen > len so l4len cannot underflow */
    l4len = (len > hlen) ? (len - hlen) : 0;
    l4protocol = iph->protocol;
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  /* the earlier branch guaranteed l3_len >= sizeof(struct ndpi_ipv6hdr),
     so this subtraction cannot wrap */
  else if(iph_v6 != NULL && (l3_len - sizeof(struct ndpi_ipv6hdr)) >= ntohs(iph_v6->ip6_hdr.ip6_un1_plen)) {
    l4ptr = (((const u_int8_t *) iph_v6) + sizeof(struct ndpi_ipv6hdr));
    l4len = ntohs(iph_v6->ip6_hdr.ip6_un1_plen);
    l4protocol = iph_v6->ip6_hdr.ip6_un1_nxt;

    // we need to handle IPv6 extension headers if present
    if(ndpi_handle_ipv6_extension_headers(ndpi_str, &l4ptr, &l4len, &l4protocol) != 0) {
      return(1);
    }
  }
#endif
  else {
    return(1);
  }

  /* every out-parameter is optional */
  if(l4_return != NULL) {
    *l4_return = l4ptr;
  }

  if(l4_len_return != NULL) {
    *l4_len_return = l4len;
  }

  if(l4_protocol_return != NULL) {
    *l4_protocol_return = l4protocol;
  }

  return(0);
}

/* ************************************************ */

/*
 * Copy the flow's detected protocol stack (and its bookkeeping info)
 * into the per-packet structure so packet-level code sees the same
 * classification as the flow.
 */
void ndpi_apply_flow_protocol_to_packet(struct ndpi_flow_struct *flow, struct ndpi_packet_struct *packet) {
  memcpy(&packet->detected_protocol_stack, &flow->detected_protocol_stack, sizeof(packet->detected_protocol_stack));
  memcpy(&packet->protocol_stack_info, &flow->protocol_stack_info, sizeof(packet->protocol_stack_info));
}

/* ************************************************ */

/*
 * Parse the L3/L4 headers of the current packet into flow->packet,
 * resetting per-packet state and locating the TCP/UDP header and
 * payload. Returns 0 on success, 1 on malformed/unsupported packets.
 */
static int ndpi_init_packet_header(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, unsigned
short packetlen) { const struct ndpi_iphdr *decaps_iph = NULL; u_int16_t l3len; u_int16_t l4len; const u_int8_t *l4ptr; u_int8_t l4protocol; u_int8_t l4_result; if(!flow) return(1); /* reset payload_packet_len, will be set if ipv4 tcp or udp */ flow->packet.payload_packet_len = 0; flow->packet.l4_packet_len = 0; flow->packet.l3_packet_len = packetlen; flow->packet.tcp = NULL, flow->packet.udp = NULL; flow->packet.generic_l4_ptr = NULL; #ifdef NDPI_DETECTION_SUPPORT_IPV6 flow->packet.iphv6 = NULL; #endif /* NDPI_DETECTION_SUPPORT_IPV6 */ ndpi_apply_flow_protocol_to_packet(flow, &flow->packet); l3len = flow->packet.l3_packet_len; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(flow->packet.iph != NULL) { #endif /* NDPI_DETECTION_SUPPORT_IPV6 */ decaps_iph = flow->packet.iph; #ifdef NDPI_DETECTION_SUPPORT_IPV6 } #endif /* NDPI_DETECTION_SUPPORT_IPV6 */ if(decaps_iph && decaps_iph->version == IPVERSION && decaps_iph->ihl >= 5) { NDPI_LOG_DBG2(ndpi_str, "ipv4 header\n"); } #ifdef NDPI_DETECTION_SUPPORT_IPV6 else if(decaps_iph && decaps_iph->version == 6 && l3len >= sizeof(struct ndpi_ipv6hdr) && (ndpi_str->ip_version_limit & NDPI_DETECTION_ONLY_IPV4) == 0) { NDPI_LOG_DBG2(ndpi_str, "ipv6 header\n"); flow->packet.iphv6 = (struct ndpi_ipv6hdr *) flow->packet.iph; flow->packet.iph = NULL; } #endif else { flow->packet.iph = NULL; return(1); } /* needed: * - unfragmented packets * - ip header <= packet len * - ip total length >= packet len */ l4ptr = NULL; l4len = 0; l4protocol = 0; l4_result = ndpi_detection_get_l4_internal(ndpi_str, (const u_int8_t *) decaps_iph, l3len, &l4ptr, &l4len, &l4protocol, 0); if(l4_result != 0) { return(1); } flow->packet.l4_protocol = l4protocol; flow->packet.l4_packet_len = l4len; flow->l4_proto = l4protocol; /* tcp / udp detection */ if(l4protocol == IPPROTO_TCP && flow->packet.l4_packet_len >= 20 /* min size of tcp */) { /* tcp */ flow->packet.tcp = (struct ndpi_tcphdr *) l4ptr; if(flow->packet.l4_packet_len >= flow->packet.tcp->doff * 4) { 
flow->packet.payload_packet_len = flow->packet.l4_packet_len - flow->packet.tcp->doff * 4; flow->packet.actual_payload_len = flow->packet.payload_packet_len; flow->packet.payload = ((u_int8_t *) flow->packet.tcp) + (flow->packet.tcp->doff * 4); /* check for new tcp syn packets, here * idea: reset detection state if a connection is unknown */ if(flow->packet.tcp->syn != 0 && flow->packet.tcp->ack == 0 && flow->init_finished != 0 && flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) { u_int8_t backup; u_int16_t backup1, backup2; if(flow->http.url) { ndpi_free(flow->http.url); flow->http.url = NULL; } if(flow->http.content_type) { ndpi_free(flow->http.content_type); flow->http.content_type = NULL; } if(flow->http.user_agent) { ndpi_free(flow->http.user_agent); flow->http.user_agent = NULL; } if(flow->kerberos_buf.pktbuf) { ndpi_free(flow->kerberos_buf.pktbuf); flow->kerberos_buf.pktbuf = NULL; } if(flow->l4.tcp.tls.message.buffer) { ndpi_free(flow->l4.tcp.tls.message.buffer); flow->l4.tcp.tls.message.buffer = NULL; flow->l4.tcp.tls.message.buffer_len = flow->l4.tcp.tls.message.buffer_used = 0; } backup = flow->num_processed_pkts; backup1 = flow->guessed_protocol_id; backup2 = flow->guessed_host_protocol_id; memset(flow, 0, sizeof(*(flow))); flow->num_processed_pkts = backup; flow->guessed_protocol_id = backup1; flow->guessed_host_protocol_id = backup2; NDPI_LOG_DBG(ndpi_str, "tcp syn packet for unknown protocol, reset detection state\n"); } } else { /* tcp header not complete */ flow->packet.tcp = NULL; } } else if(l4protocol == IPPROTO_UDP && flow->packet.l4_packet_len >= 8 /* size of udp */) { flow->packet.udp = (struct ndpi_udphdr *) l4ptr; flow->packet.payload_packet_len = flow->packet.l4_packet_len - 8; flow->packet.payload = ((u_int8_t *) flow->packet.udp) + 8; } else { flow->packet.generic_l4_ptr = l4ptr; } return(0); } /* ************************************************ */ void ndpi_connection_tracking(struct ndpi_detection_module_struct *ndpi_str, 
struct ndpi_flow_struct *flow) { if(!flow) { return; } else { /* const for gcc code optimization and cleaner code */ struct ndpi_packet_struct *packet = &flow->packet; const struct ndpi_iphdr *iph = packet->iph; #ifdef NDPI_DETECTION_SUPPORT_IPV6 const struct ndpi_ipv6hdr *iphv6 = packet->iphv6; #endif const struct ndpi_tcphdr *tcph = packet->tcp; const struct ndpi_udphdr *udph = flow->packet.udp; packet->tcp_retransmission = 0, packet->packet_direction = 0; if(ndpi_str->direction_detect_disable) { packet->packet_direction = flow->packet_direction; } else { if(iph != NULL && ntohl(iph->saddr) < ntohl(iph->daddr)) packet->packet_direction = 1; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(iphv6 != NULL && NDPI_COMPARE_IPV6_ADDRESS_STRUCTS(&iphv6->ip6_src, &iphv6->ip6_dst) != 0) packet->packet_direction = 1; #endif } packet->packet_lines_parsed_complete = 0; if(flow->init_finished == 0) { flow->init_finished = 1; flow->setup_packet_direction = packet->packet_direction; } if(tcph != NULL) { /* reset retried bytes here before setting it */ packet->num_retried_bytes = 0; if(!ndpi_str->direction_detect_disable) packet->packet_direction = (ntohs(tcph->source) < ntohs(tcph->dest)) ? 1 : 0; if(tcph->syn != 0 && tcph->ack == 0 && flow->l4.tcp.seen_syn == 0 && flow->l4.tcp.seen_syn_ack == 0 && flow->l4.tcp.seen_ack == 0) { flow->l4.tcp.seen_syn = 1; } if(tcph->syn != 0 && tcph->ack != 0 && flow->l4.tcp.seen_syn == 1 && flow->l4.tcp.seen_syn_ack == 0 && flow->l4.tcp.seen_ack == 0) { flow->l4.tcp.seen_syn_ack = 1; } if(tcph->syn == 0 && tcph->ack == 1 && flow->l4.tcp.seen_syn == 1 && flow->l4.tcp.seen_syn_ack == 1 && flow->l4.tcp.seen_ack == 0) { flow->l4.tcp.seen_ack = 1; } if((flow->next_tcp_seq_nr[0] == 0 && flow->next_tcp_seq_nr[1] == 0) || (flow->next_tcp_seq_nr[0] == 0 || flow->next_tcp_seq_nr[1] == 0)) { /* initialize tcp sequence counters */ /* the ack flag needs to be set to get valid sequence numbers from the other * direction. 
Usually it will catch the second packet syn+ack but it works * also for asymmetric traffic where it will use the first data packet * * if the syn flag is set add one to the sequence number, * otherwise use the payload length. */ if(tcph->ack != 0) { flow->next_tcp_seq_nr[flow->packet.packet_direction] = ntohl(tcph->seq) + (tcph->syn ? 1 : packet->payload_packet_len); flow->next_tcp_seq_nr[1 - flow->packet.packet_direction] = ntohl(tcph->ack_seq); } } else if(packet->payload_packet_len > 0) { /* check tcp sequence counters */ if(((u_int32_t)(ntohl(tcph->seq) - flow->next_tcp_seq_nr[packet->packet_direction])) > ndpi_str->tcp_max_retransmission_window_size) { packet->tcp_retransmission = 1; /* CHECK IF PARTIAL RETRY IS HAPPENING */ if((flow->next_tcp_seq_nr[packet->packet_direction] - ntohl(tcph->seq) < packet->payload_packet_len)) { /* num_retried_bytes actual_payload_len hold info about the partial retry analyzer which require this info can make use of this info Other analyzer can use packet->payload_packet_len */ packet->num_retried_bytes = (u_int16_t)(flow->next_tcp_seq_nr[packet->packet_direction] - ntohl(tcph->seq)); packet->actual_payload_len = packet->payload_packet_len - packet->num_retried_bytes; flow->next_tcp_seq_nr[packet->packet_direction] = ntohl(tcph->seq) + packet->payload_packet_len; } } /* normal path actual_payload_len is initialized to payload_packet_len during tcp header parsing itself. It will be changed only in case of retransmission */ else { packet->num_retried_bytes = 0; flow->next_tcp_seq_nr[packet->packet_direction] = ntohl(tcph->seq) + packet->payload_packet_len; } } if(tcph->rst) { flow->next_tcp_seq_nr[0] = 0; flow->next_tcp_seq_nr[1] = 0; } } else if(udph != NULL) { if(!ndpi_str->direction_detect_disable) packet->packet_direction = (htons(udph->source) < htons(udph->dest)) ? 
1 : 0; } if(flow->packet_counter < MAX_PACKET_COUNTER && packet->payload_packet_len) { flow->packet_counter++; } if(flow->packet_direction_counter[packet->packet_direction] < MAX_PACKET_COUNTER && packet->payload_packet_len) { flow->packet_direction_counter[packet->packet_direction]++; } if(flow->byte_counter[packet->packet_direction] + packet->payload_packet_len > flow->byte_counter[packet->packet_direction]) { flow->byte_counter[packet->packet_direction] += packet->payload_packet_len; } } } /* ************************************************ */ void check_ndpi_other_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) { if(!flow) return; void *func = NULL; u_int32_t a; u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx; int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId; NDPI_PROTOCOL_BITMASK detection_bitmask; NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]); if((proto_id != NDPI_PROTOCOL_UNKNOWN) && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 && (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) { if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL)) ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow), func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func; } for (a = 0; a < ndpi_str->callback_buffer_size_non_tcp_udp; a++) { if((func != ndpi_str->callback_buffer_non_tcp_udp[a].func) && (ndpi_str->callback_buffer_non_tcp_udp[a].ndpi_selection_bitmask & *ndpi_selection_packet) == 
ndpi_str->callback_buffer_non_tcp_udp[a].ndpi_selection_bitmask && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer_non_tcp_udp[a].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_non_tcp_udp[a].detection_bitmask, detection_bitmask) != 0) { if(ndpi_str->callback_buffer_non_tcp_udp[a].func != NULL) ndpi_str->callback_buffer_non_tcp_udp[a].func(ndpi_str, flow); if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN) break; /* Stop after detecting the first protocol */ } } } /* ************************************************ */ void check_ndpi_udp_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) { void *func = NULL; u_int32_t a; u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx; int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId; NDPI_PROTOCOL_BITMASK detection_bitmask; NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]); if((proto_id != NDPI_PROTOCOL_UNKNOWN) && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 && (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) { if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL)) ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow), func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func; } if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) { for (a = 0; a < ndpi_str->callback_buffer_size_udp; a++) { if((func != ndpi_str->callback_buffer_udp[a].func) && 
(ndpi_str->callback_buffer_udp[a].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer_udp[a].ndpi_selection_bitmask && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer_udp[a].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_udp[a].detection_bitmask, detection_bitmask) != 0) { ndpi_str->callback_buffer_udp[a].func(ndpi_str, flow); // NDPI_LOG_DBG(ndpi_str, "[UDP,CALL] dissector of protocol as callback_buffer idx = %d\n",a); if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN) break; /* Stop after detecting the first protocol */ } else if(_ndpi_debug_callbacks) NDPI_LOG_DBG2(ndpi_str, "[UDP,SKIP] dissector of protocol as callback_buffer idx = %d\n", a); } } } /* ************************************************ */ void check_ndpi_tcp_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) { void *func = NULL; u_int32_t a; u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx; int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId; NDPI_PROTOCOL_BITMASK detection_bitmask; NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]); if(flow->packet.payload_packet_len != 0) { if((proto_id != NDPI_PROTOCOL_UNKNOWN) && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 && (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) { if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL)) ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow), func = 
ndpi_str->proto_defaults[flow->guessed_protocol_id].func; } if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) { for (a = 0; a < ndpi_str->callback_buffer_size_tcp_payload; a++) { if((func != ndpi_str->callback_buffer_tcp_payload[a].func) && (ndpi_str->callback_buffer_tcp_payload[a].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer_tcp_payload[a].ndpi_selection_bitmask && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer_tcp_payload[a].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_tcp_payload[a].detection_bitmask, detection_bitmask) != 0) { ndpi_str->callback_buffer_tcp_payload[a].func(ndpi_str, flow); if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN) break; /* Stop after detecting the first protocol */ } } } } else { /* no payload */ if((proto_id != NDPI_PROTOCOL_UNKNOWN) && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 && (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) { if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL) && ((ndpi_str->callback_buffer[flow->guessed_protocol_id].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD) == 0)) ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow), func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func; } for (a = 0; a < ndpi_str->callback_buffer_size_tcp_no_payload; a++) { if((func != ndpi_str->callback_buffer_tcp_payload[a].func) && (ndpi_str->callback_buffer_tcp_no_payload[a].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer_tcp_no_payload[a].ndpi_selection_bitmask && 
NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
			  ndpi_str->callback_buffer_tcp_no_payload[a].excluded_protocol_bitmask) == 0 &&
	 NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_tcp_no_payload[a].detection_bitmask, detection_bitmask) != 0) {
	ndpi_str->callback_buffer_tcp_no_payload[a].func(ndpi_str, flow);

	if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
	  break; /* Stop after detecting the first protocol */
      }
    }
  }
}

/* ********************************************************************************* */

/* Dispatch the packet to the dissector group matching its transport:
   TCP, UDP, or anything else (generic L4). */
void ndpi_check_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
			  NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  if(flow->packet.tcp != NULL)
    check_ndpi_tcp_flow_func(ndpi_str, flow, ndpi_selection_packet);
  else if(flow->packet.udp != NULL)
    check_ndpi_udp_flow_func(ndpi_str, flow, ndpi_selection_packet);
  else
    check_ndpi_other_flow_func(ndpi_str, flow, ndpi_selection_packet);
}

/* ********************************************************************************* */

/* Guess a protocol id from the IPv4 addresses and L4 ports of the flow via the
   network/port patricia tree: try the source address first, then the destination.
   Returns NDPI_PROTOCOL_UNKNOWN when neither matches (or the packet is not IPv4). */
u_int16_t ndpi_guess_host_protocol_id(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) {
  u_int16_t ret = NDPI_PROTOCOL_UNKNOWN;

  if(flow->packet.iph) {
    struct in_addr addr;
    u_int16_t sport, dport;

    addr.s_addr = flow->packet.iph->saddr;

    /* Ports are passed in network byte order (no ntohs here); presumably
       ndpi_network_port_ptree_match expects that — TODO confirm. */
    if((flow->l4_proto == IPPROTO_TCP) && flow->packet.tcp)
      sport = flow->packet.tcp->source, dport = flow->packet.tcp->dest;
    else if((flow->l4_proto == IPPROTO_UDP) && flow->packet.udp)
      sport = flow->packet.udp->source, dport = flow->packet.udp->dest;
    else
      sport = dport = 0;

    /* guess host protocol */
    ret = ndpi_network_port_ptree_match(ndpi_str, &addr, sport);

    if(ret == NDPI_PROTOCOL_UNKNOWN) {
      /* Source address did not match: retry with the destination address/port */
      addr.s_addr = flow->packet.iph->daddr;
      ret = ndpi_network_port_ptree_match(ndpi_str, &addr, dport);
    }
  }

  return(ret);
}

/* ********************************************************************************* */

ndpi_protocol ndpi_detection_giveup(struct ndpi_detection_module_struct *ndpi_str, struct
ndpi_flow_struct *flow, u_int8_t enable_guess, u_int8_t *protocol_was_guessed) { ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; *protocol_was_guessed = 0; if(flow == NULL) return(ret); /* Init defaults */ ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0]; ret.category = flow->category; /* Ensure that we don't change our mind if detection is already complete */ if((ret.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN)) return(ret); /* TODO: add the remaining stage_XXXX protocols */ if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) { u_int16_t guessed_protocol_id = NDPI_PROTOCOL_UNKNOWN, guessed_host_protocol_id = NDPI_PROTOCOL_UNKNOWN; if(flow->guessed_protocol_id == NDPI_PROTOCOL_STUN) goto check_stun_export; else if((flow->guessed_protocol_id == NDPI_PROTOCOL_HANGOUT_DUO) || (flow->guessed_protocol_id == NDPI_PROTOCOL_MESSENGER) || (flow->guessed_protocol_id == NDPI_PROTOCOL_WHATSAPP_CALL)) { *protocol_was_guessed = 1; ndpi_set_detected_protocol(ndpi_str, flow, flow->guessed_protocol_id, NDPI_PROTOCOL_UNKNOWN); } else if((flow->l4.tcp.tls.hello_processed == 1) && (flow->protos.stun_ssl.ssl.client_requested_server_name[0] != '\0')) { *protocol_was_guessed = 1; ndpi_set_detected_protocol(ndpi_str, flow, NDPI_PROTOCOL_TLS, NDPI_PROTOCOL_UNKNOWN); } else if(enable_guess) { if((flow->guessed_protocol_id == NDPI_PROTOCOL_UNKNOWN) && (flow->packet.l4_protocol == IPPROTO_TCP) && flow->l4.tcp.tls.hello_processed) flow->guessed_protocol_id = NDPI_PROTOCOL_TLS; guessed_protocol_id = flow->guessed_protocol_id, guessed_host_protocol_id = flow->guessed_host_protocol_id; if((guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) && ((flow->packet.l4_protocol == IPPROTO_UDP) && NDPI_ISSET(&flow->excluded_protocol_bitmask, guessed_host_protocol_id) && is_udp_guessable_protocol(guessed_host_protocol_id))) 
flow->guessed_host_protocol_id = guessed_host_protocol_id = NDPI_PROTOCOL_UNKNOWN; /* Ignore guessed protocol if they have been discarded */ if((guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) // && (guessed_host_protocol_id == NDPI_PROTOCOL_UNKNOWN) && (flow->packet.l4_protocol == IPPROTO_UDP) && NDPI_ISSET(&flow->excluded_protocol_bitmask, guessed_protocol_id) && is_udp_guessable_protocol(guessed_protocol_id)) flow->guessed_protocol_id = guessed_protocol_id = NDPI_PROTOCOL_UNKNOWN; if((guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) || (guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN)) { if((guessed_protocol_id == 0) && (flow->protos.stun_ssl.stun.num_binding_requests > 0) && (flow->protos.stun_ssl.stun.num_processed_pkts > 0)) guessed_protocol_id = NDPI_PROTOCOL_STUN; if(flow->host_server_name[0] != '\0') { ndpi_protocol_match_result ret_match; memset(&ret_match, 0, sizeof(ret_match)); ndpi_match_host_subprotocol(ndpi_str, flow, (char *) flow->host_server_name, strlen((const char *) flow->host_server_name), &ret_match, NDPI_PROTOCOL_DNS); if(ret_match.protocol_id != NDPI_PROTOCOL_UNKNOWN) guessed_host_protocol_id = ret_match.protocol_id; } *protocol_was_guessed = 1; ndpi_int_change_protocol(ndpi_str, flow, guessed_host_protocol_id, guessed_protocol_id); } } } else if(enable_guess) { if(flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) { *protocol_was_guessed = 1; flow->detected_protocol_stack[1] = flow->guessed_protocol_id; } if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) { *protocol_was_guessed = 1; flow->detected_protocol_stack[0] = flow->guessed_host_protocol_id; } if(flow->detected_protocol_stack[1] == flow->detected_protocol_stack[0]) { *protocol_was_guessed = 1; flow->detected_protocol_stack[1] = flow->guessed_host_protocol_id; } } if((flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) && (flow->guessed_protocol_id == NDPI_PROTOCOL_STUN)) { check_stun_export: if(flow->protos.stun_ssl.stun.num_processed_pkts || 
flow->protos.stun_ssl.stun.num_udp_pkts) { // if(/* (flow->protos.stun_ssl.stun.num_processed_pkts >= NDPI_MIN_NUM_STUN_DETECTION) */ *protocol_was_guessed = 1; ndpi_set_detected_protocol(ndpi_str, flow, flow->guessed_host_protocol_id, NDPI_PROTOCOL_STUN); } } ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0]; if(ret.master_protocol == NDPI_PROTOCOL_STUN) { if(ret.app_protocol == NDPI_PROTOCOL_FACEBOOK) ret.app_protocol = NDPI_PROTOCOL_MESSENGER; else if(ret.app_protocol == NDPI_PROTOCOL_GOOGLE) { /* As Google has recently introduced Duo, we need to distinguish between it and hangout thing that should be handled by the STUN dissector */ ret.app_protocol = NDPI_PROTOCOL_HANGOUT_DUO; } } if(ret.app_protocol != NDPI_PROTOCOL_UNKNOWN) { *protocol_was_guessed = 1; ndpi_fill_protocol_category(ndpi_str, flow, &ret); } return(ret); } /* ********************************************************************************* */ void ndpi_process_extra_packet(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, const unsigned char *packet, const unsigned short packetlen, const u_int64_t current_time_ms, struct ndpi_id_struct *src, struct ndpi_id_struct *dst) { if(flow == NULL) return; if(flow->server_id == NULL) flow->server_id = dst; /* Default */ /* need at least 20 bytes for ip header */ if(packetlen < 20) { return; } flow->packet.current_time_ms = current_time_ms; /* parse packet */ flow->packet.iph = (struct ndpi_iphdr *) packet; /* we are interested in ipv4 packet */ /* set up the packet headers for the extra packet function to use if it wants */ if(ndpi_init_packet_header(ndpi_str, flow, packetlen) != 0) return; /* detect traffic for tcp or udp only */ flow->src = src, flow->dst = dst; ndpi_connection_tracking(ndpi_str, flow); /* call the extra packet function (which may add more data/info to flow) */ if(flow->extra_packets_func) { if((flow->extra_packets_func(ndpi_str, flow)) == 0) 
flow->check_extra_packets = 0; if(++flow->num_extra_packets_checked == flow->max_extra_packets_to_check) flow->extra_packets_func = NULL; /* Enough packets detected */ } } /* ********************************************************************************* */ int ndpi_load_ip_category(struct ndpi_detection_module_struct *ndpi_str, const char *ip_address_and_mask, ndpi_protocol_category_t category) { patricia_node_t *node; struct in_addr pin; int bits = 32; char *ptr; char ipbuf[64]; strncpy(ipbuf, ip_address_and_mask, sizeof(ipbuf)); ipbuf[sizeof(ipbuf) - 1] = '\0'; ptr = strrchr(ipbuf, '/'); if(ptr) { *(ptr++) = '\0'; if(atoi(ptr) >= 0 && atoi(ptr) <= 32) bits = atoi(ptr); } if(inet_pton(AF_INET, ipbuf, &pin) != 1) { NDPI_LOG_DBG2(ndpi_str, "Invalid ip/ip+netmask: %s\n", ip_address_and_mask); return(-1); } if((node = add_to_ptree(ndpi_str->custom_categories.ipAddresses_shadow, AF_INET, &pin, bits)) != NULL) { node->value.uv.user_value = (u_int16_t)category, node->value.uv.additional_user_value = 0; } return(0); } /* ********************************************************************************* */ int ndpi_load_hostname_category(struct ndpi_detection_module_struct *ndpi_str, const char *name_to_add, ndpi_protocol_category_t category) { char *name; if(name_to_add == NULL) return(-1); name = ndpi_strdup(name_to_add); if(name == NULL) return(-1); #if 0 printf("===> %s() Loading %s as %u\n", __FUNCTION__, name, category); #endif AC_PATTERN_t ac_pattern; AC_ERROR_t rc; memset(&ac_pattern, 0, sizeof(ac_pattern)); if(ndpi_str->custom_categories.hostnames_shadow.ac_automa == NULL) { free(name); return(-1); } ac_pattern.astring = name, ac_pattern.length = strlen(ac_pattern.astring); ac_pattern.rep.number = (u_int32_t) category, ac_pattern.rep.category = category;; rc = ac_automata_add(ndpi_str->custom_categories.hostnames_shadow.ac_automa, &ac_pattern); if(rc != ACERR_DUPLICATE_PATTERN && rc != ACERR_SUCCESS) { free(name); return(-1); } if(rc == ACERR_DUPLICATE_PATTERN) 
/* NOTE(review): 'name' came from ndpi_strdup(); freeing with plain free() assumes
   nDPI's allocator wraps malloc — verify (other code in this file uses ndpi_free). */
free(name);

  return(0);
}

/* ********************************************************************************* */

/* Loads an IP or name category: try ip_or_name as an IPv4 address (optionally
   with /netmask) first, and fall back to loading it as a hostname pattern. */
int ndpi_load_category(struct ndpi_detection_module_struct *ndpi_struct, const char *ip_or_name,
		       ndpi_protocol_category_t category) {
  int rv;

  /* Try to load as IP address first */
  rv = ndpi_load_ip_category(ndpi_struct, ip_or_name, category);

  if(rv < 0) {
    /* IP load failed, load as hostname */
    rv = ndpi_load_hostname_category(ndpi_struct, ip_or_name, category);
  }

  return(rv);
}

/* ********************************************************************************* */

/* Activate the categories loaded so far: finalize the shadow hostname automaton
   and IP patricia tree, swap them in as the live copies, then allocate fresh
   shadow structures for the next (re)load cycle. Always returns 0. */
int ndpi_enable_loaded_categories(struct ndpi_detection_module_struct *ndpi_str) {
  int i;

  /* First add the nDPI known categories matches */
  for (i = 0; category_match[i].string_to_match != NULL; i++)
    ndpi_load_category(ndpi_str, category_match[i].string_to_match, category_match[i].protocol_category);

  /* Free */
  ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames.ac_automa,
		      1 /* free patterns strings memory */);

  /* Finalize */
  ac_automata_finalize((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames_shadow.ac_automa);

  /* Swap */
  ndpi_str->custom_categories.hostnames.ac_automa = ndpi_str->custom_categories.hostnames_shadow.ac_automa;

  /* Realloc */
  ndpi_str->custom_categories.hostnames_shadow.ac_automa = ac_automata_init(ac_match_handler);

  if(ndpi_str->custom_categories.ipAddresses != NULL)
    ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses, free_ptree_data);

  ndpi_str->custom_categories.ipAddresses = ndpi_str->custom_categories.ipAddresses_shadow;
  ndpi_str->custom_categories.ipAddresses_shadow = ndpi_New_Patricia(32 /* IPv4 */);
  ndpi_str->custom_categories.categories_loaded = 1;

  return(0);
}

/* ********************************************************************************* */

int ndpi_fill_ip_protocol_category(struct ndpi_detection_module_struct *ndpi_str, u_int32_t saddr, u_int32_t daddr, ndpi_protocol
*ret) {
  if(ndpi_str->custom_categories.categories_loaded) {
    prefix_t prefix;
    patricia_node_t *node;

    if(saddr == 0)
      node = NULL;
    else {
      /* Make sure all in network byte order otherwise compares wont work */
      fill_prefix_v4(&prefix, (struct in_addr *) &saddr, 32,
		     ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits);
      node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix);
    }

    if(!node) {
      /* Source address did not match a custom category: try the destination */
      if(daddr != 0) {
	fill_prefix_v4(&prefix, (struct in_addr *) &daddr, 32,
		       ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits);
	node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix);
      }
    }

    if(node) {
      ret->category = (ndpi_protocol_category_t) node->value.uv.user_value;
      return(1); /* Found a custom IP-based category */
    }
  }

  /* Fall back to the protocol's default category */
  ret->category = ndpi_get_proto_category(ndpi_str, *ret);

  return(0);
}

/* ********************************************************************************* */

/* Fill flow->category and ret->category: prefer the category guessed from the
   IP header, then custom hostname matches (host_server_name, then the TLS SNI),
   and finally fall back to the protocol's default category. */
void ndpi_fill_protocol_category(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
				 ndpi_protocol *ret) {
  if(ndpi_str->custom_categories.categories_loaded) {
    if(flow->guessed_header_category != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) {
      flow->category = ret->category = flow->guessed_header_category;
      return;
    }

    if(flow->host_server_name[0] != '\0') {
      u_int32_t id;
      int rc = ndpi_match_custom_category(ndpi_str, (char *) flow->host_server_name,
					  strlen((char *) flow->host_server_name), &id);

      if(rc == 0) {
	flow->category = ret->category = (ndpi_protocol_category_t) id;
	return;
      }
    }

    if(flow->l4.tcp.tls.hello_processed == 1 && flow->protos.stun_ssl.ssl.client_requested_server_name[0] != '\0') {
      u_int32_t id;
      int rc = ndpi_match_custom_category(ndpi_str, (char *) flow->protos.stun_ssl.ssl.client_requested_server_name,
					  strlen(flow->protos.stun_ssl.ssl.client_requested_server_name), &id);

      if(rc == 0) {
	flow->category = ret->category = (ndpi_protocol_category_t) id;
	return;
      }
    }
  }

  flow->category = ret->category = ndpi_get_proto_category(ndpi_str, *ret);
}

/*
********************************************************************************* */

/* Clear every per-packet HTTP header-line pointer/length parsed from the previous
   packet, so stale pointers into an old payload are never dereferenced. */
static void ndpi_reset_packet_line_info(struct ndpi_packet_struct *packet) {
  packet->parsed_lines = 0, packet->empty_line_position_set = 0, packet->host_line.ptr = NULL,
    packet->host_line.len = 0, packet->referer_line.ptr = NULL, packet->referer_line.len = 0,
    packet->content_line.ptr = NULL, packet->content_line.len = 0, packet->accept_line.ptr = NULL,
    packet->accept_line.len = 0, packet->user_agent_line.ptr = NULL, packet->user_agent_line.len = 0,
    packet->http_url_name.ptr = NULL, packet->http_url_name.len = 0, packet->http_encoding.ptr = NULL,
    packet->http_encoding.len = 0, packet->http_transfer_encoding.ptr = NULL, packet->http_transfer_encoding.len = 0,
    packet->http_contentlen.ptr = NULL, packet->http_contentlen.len = 0, packet->http_cookie.ptr = NULL,
    packet->http_cookie.len = 0, packet->http_origin.len = 0, packet->http_origin.ptr = NULL,
    packet->http_x_session_type.ptr = NULL, packet->http_x_session_type.len = 0, packet->server_line.ptr = NULL,
    packet->server_line.len = 0, packet->http_method.ptr = NULL, packet->http_method.len = 0,
    packet->http_response.ptr = NULL, packet->http_response.len = 0, packet->http_num_headers = 0;
}

/* ********************************************************************************* */

/* Returns 1 when a detected-protocol/expected-port mismatch is an acceptable
   exception (mail-over-TLS ports classified as generic TLS), 0 otherwise. */
static int ndpi_check_protocol_port_mismatch_exceptions(struct ndpi_detection_module_struct *ndpi_str,
							struct ndpi_flow_struct *flow,
							ndpi_default_ports_tree_node_t *expected_proto,
							ndpi_protocol *returned_proto) {
  /*
    For TLS (and other protocols) it is not simple to guess the exact protocol so before
    triggering an alert we need to make sure what we have exhausted all the possible
    options available
  */

  if(returned_proto->master_protocol == NDPI_PROTOCOL_TLS) {
    switch(expected_proto->proto->protoId) {
    case NDPI_PROTOCOL_MAIL_IMAPS:
    case NDPI_PROTOCOL_MAIL_POPS:
    case NDPI_PROTOCOL_MAIL_SMTPS:
      return(1); /* This is a reasonable exception */
      break;
    }
  }

  return(0);
}

/*
********************************************************************************* */

/* Post-detection reconciliation between related protocols:
   - drops the DGA risk flag when an application protocol was detected on top of
     the master protocol (e.g. DNS.Apple);
   - a client recently classified as MS Teams makes later Skype classifications
     from the same source IP be reported as MS Teams. */
static void ndpi_reconcile_protocols(struct ndpi_detection_module_struct *ndpi_str,
				     struct ndpi_flow_struct *flow, ndpi_protocol *ret) {
  /* Skype for a host doing MS Teams means MS Teams (MS Teams uses Skype as transport protocol for voice/video) */

  if(flow) {
    /* Do not go for DNS when there is an application protocol. Example DNS.Apple */
    if((flow->detected_protocol_stack[1] != NDPI_PROTOCOL_UNKNOWN) &&
       (flow->detected_protocol_stack[0] /* app */ != flow->detected_protocol_stack[1] /* major */))
      NDPI_CLR_BIT(flow->risk, NDPI_SUSPICIOUS_DGA_DOMAIN);
  }

  switch(ret->app_protocol) {
  case NDPI_PROTOCOL_MSTEAMS:
    if(flow->packet.iph && flow->packet.tcp) {
      // printf("====>> NDPI_PROTOCOL_MSTEAMS\n");

      /* Remember the source IP together with a truncated 16-bit timestamp */
      if(ndpi_str->msteams_cache == NULL)
	ndpi_str->msteams_cache = ndpi_lru_cache_init(1024);

      if(ndpi_str->msteams_cache)
	ndpi_lru_add_to_cache(ndpi_str->msteams_cache, flow->packet.iph->saddr,
			      (flow->packet.current_time_ms / 1000) & 0xFFFF /* 16 bit */);
    }
    break;

  case NDPI_PROTOCOL_SKYPE:
  case NDPI_PROTOCOL_SKYPE_CALL:
    if(flow->packet.iph && flow->packet.udp && ndpi_str->msteams_cache) {
      u_int16_t when;

      if(ndpi_lru_find_cache(ndpi_str->msteams_cache, flow->packet.iph->saddr, &when,
			     0 /* Don't remove it as it can be used for other connections */)) {
	/* 16-bit unsigned difference of truncated second timestamps */
	u_int16_t tdiff = ((flow->packet.current_time_ms /1000) & 0xFFFF) - when;

	if(tdiff < 60 /* sec */) {
	  // printf("====>> NDPI_PROTOCOL_SKYPE(_CALL) -> NDPI_PROTOCOL_MSTEAMS [%u]\n", tdiff);
	  ret->app_protocol = NDPI_PROTOCOL_MSTEAMS;

	  /* Refresh cache */
	  ndpi_lru_add_to_cache(ndpi_str->msteams_cache, flow->packet.iph->saddr,
				(flow->packet.current_time_ms / 1000) & 0xFFFF /* 16 bit */);
	}
      }
    }
    break;
  } /* switch */
}

/* ********************************************************************************* */

ndpi_protocol ndpi_detection_process_packet(struct ndpi_detection_module_struct *ndpi_str,
					    struct ndpi_flow_struct *flow, const unsigned char *packet, const
unsigned short packetlen, const u_int64_t current_time_ms, struct ndpi_id_struct *src, struct ndpi_id_struct *dst) { NDPI_SELECTION_BITMASK_PROTOCOL_SIZE ndpi_selection_packet; u_int32_t a; ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; if(ndpi_str->ndpi_log_level >= NDPI_LOG_TRACE) NDPI_LOG(flow ? flow->detected_protocol_stack[0] : NDPI_PROTOCOL_UNKNOWN, ndpi_str, NDPI_LOG_TRACE, "START packet processing\n"); if(flow == NULL) return(ret); else ret.category = flow->category; flow->num_processed_pkts++; /* Init default */ ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0]; if(flow->server_id == NULL) flow->server_id = dst; /* Default */ if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN) { if(flow->check_extra_packets) { ndpi_process_extra_packet(ndpi_str, flow, packet, packetlen, current_time_ms, src, dst); /* Update in case of new match */ ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0], ret.category = flow->category; goto invalidate_ptr; } else goto ret_protocols; } /* need at least 20 bytes for ip header */ if(packetlen < 20) { /* reset protocol which is normally done in init_packet_header */ ndpi_int_reset_packet_protocol(&flow->packet); goto invalidate_ptr; } flow->packet.current_time_ms = current_time_ms; /* parse packet */ flow->packet.iph = (struct ndpi_iphdr *) packet; /* we are interested in ipv4 packet */ if(ndpi_init_packet_header(ndpi_str, flow, packetlen) != 0) goto invalidate_ptr; /* detect traffic for tcp or udp only */ flow->src = src, flow->dst = dst; ndpi_connection_tracking(ndpi_str, flow); /* build ndpi_selection packet bitmask */ ndpi_selection_packet = NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC; if(flow->packet.iph != NULL) ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_IP | NDPI_SELECTION_BITMASK_PROTOCOL_IPV4_OR_IPV6; if(flow->packet.tcp != 
NULL) ndpi_selection_packet |= (NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP); if(flow->packet.udp != NULL) ndpi_selection_packet |= (NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP); if(flow->packet.payload_packet_len != 0) ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD; if(flow->packet.tcp_retransmission == 0) ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_NO_TCP_RETRANSMISSION; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(flow->packet.iphv6 != NULL) ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_IPV6 | NDPI_SELECTION_BITMASK_PROTOCOL_IPV4_OR_IPV6; #endif /* NDPI_DETECTION_SUPPORT_IPV6 */ if((!flow->protocol_id_already_guessed) && ( #ifdef NDPI_DETECTION_SUPPORT_IPV6 flow->packet.iphv6 || #endif flow->packet.iph)) { u_int16_t sport, dport; u_int8_t protocol; u_int8_t user_defined_proto; flow->protocol_id_already_guessed = 1; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(flow->packet.iphv6 != NULL) { protocol = flow->packet.iphv6->ip6_hdr.ip6_un1_nxt; } else #endif { protocol = flow->packet.iph->protocol; } if(flow->packet.udp) sport = ntohs(flow->packet.udp->source), dport = ntohs(flow->packet.udp->dest); else if(flow->packet.tcp) sport = ntohs(flow->packet.tcp->source), dport = ntohs(flow->packet.tcp->dest); else sport = dport = 0; /* guess protocol */ flow->guessed_protocol_id = (int16_t) ndpi_guess_protocol_id(ndpi_str, flow, protocol, sport, dport, &user_defined_proto); flow->guessed_host_protocol_id = ndpi_guess_host_protocol_id(ndpi_str, flow); if(ndpi_str->custom_categories.categories_loaded && flow->packet.iph) { ndpi_fill_ip_protocol_category(ndpi_str, flow->packet.iph->saddr, flow->packet.iph->daddr, &ret); flow->guessed_header_category = ret.category; } else flow->guessed_header_category = NDPI_PROTOCOL_CATEGORY_UNSPECIFIED; if(flow->guessed_protocol_id >= NDPI_MAX_SUPPORTED_PROTOCOLS) { /* This is a custom protocol and it has priority over 
everything else */ ret.master_protocol = NDPI_PROTOCOL_UNKNOWN, ret.app_protocol = flow->guessed_protocol_id ? flow->guessed_protocol_id : flow->guessed_host_protocol_id; ndpi_fill_protocol_category(ndpi_str, flow, &ret); goto invalidate_ptr; } if(user_defined_proto && flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) { if(flow->packet.iph) { if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) { u_int8_t protocol_was_guessed; /* ret.master_protocol = flow->guessed_protocol_id , ret.app_protocol = flow->guessed_host_protocol_id; /\* ****** *\/ */ ret = ndpi_detection_giveup(ndpi_str, flow, 0, &protocol_was_guessed); } ndpi_fill_protocol_category(ndpi_str, flow, &ret); goto invalidate_ptr; } } else { /* guess host protocol */ if(flow->packet.iph) { flow->guessed_host_protocol_id = ndpi_guess_host_protocol_id(ndpi_str, flow); /* We could implement a shortcut here skipping dissectors for protocols we have identified by other means such as with the IP However we do NOT stop here and skip invoking the dissectors because we want to dissect the flow (e.g. dissect the TLS) and extract metadata. 
*/ #if SKIP_INVOKING_THE_DISSECTORS if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) { /* We have identified a protocol using the IP address so it is not worth to dissect the traffic as we already have the solution */ ret.master_protocol = flow->guessed_protocol_id, ret.app_protocol = flow->guessed_host_protocol_id; } #endif } } } if(flow->guessed_host_protocol_id >= NDPI_MAX_SUPPORTED_PROTOCOLS) { /* This is a custom protocol and it has priority over everything else */ ret.master_protocol = flow->guessed_protocol_id, ret.app_protocol = flow->guessed_host_protocol_id; ndpi_check_flow_func(ndpi_str, flow, &ndpi_selection_packet); ndpi_fill_protocol_category(ndpi_str, flow, &ret); goto invalidate_ptr; } ndpi_check_flow_func(ndpi_str, flow, &ndpi_selection_packet); a = flow->packet.detected_protocol_stack[0]; if(NDPI_COMPARE_PROTOCOL_TO_BITMASK(ndpi_str->detection_bitmask, a) == 0) a = NDPI_PROTOCOL_UNKNOWN; if(a != NDPI_PROTOCOL_UNKNOWN) { int i; for (i = 0; i < sizeof(flow->host_server_name); i++) { if(flow->host_server_name[i] != '\0') flow->host_server_name[i] = tolower(flow->host_server_name[i]); else { flow->host_server_name[i] = '\0'; break; } } } ret_protocols: if(flow->detected_protocol_stack[1] != NDPI_PROTOCOL_UNKNOWN) { ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0]; if(ret.app_protocol == ret.master_protocol) ret.master_protocol = NDPI_PROTOCOL_UNKNOWN; } else ret.app_protocol = flow->detected_protocol_stack[0]; /* Don't overwrite the category if already set */ if((flow->category == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN)) ndpi_fill_protocol_category(ndpi_str, flow, &ret); else ret.category = flow->category; if((flow->num_processed_pkts == 1) && (ret.master_protocol == NDPI_PROTOCOL_UNKNOWN) && (ret.app_protocol == NDPI_PROTOCOL_UNKNOWN) && flow->packet.tcp && (flow->packet.tcp->syn == 0) && (flow->guessed_protocol_id == 0)) { u_int8_t 
protocol_was_guessed; /* This is a TCP flow - whose first packet is NOT a SYN - no protocol has been detected We don't see how future packets can match anything hence we giveup here */ ret = ndpi_detection_giveup(ndpi_str, flow, 0, &protocol_was_guessed); } if((ret.master_protocol == NDPI_PROTOCOL_UNKNOWN) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN) && (flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN)) { ret.master_protocol = ret.app_protocol; ret.app_protocol = flow->guessed_host_protocol_id; } if((!flow->risk_checked) && (ret.master_protocol != NDPI_PROTOCOL_UNKNOWN)) { ndpi_default_ports_tree_node_t *found; u_int16_t *default_ports, sport, dport; if(flow->packet.udp) found = ndpi_get_guessed_protocol_id(ndpi_str, IPPROTO_UDP, sport = ntohs(flow->packet.udp->source), dport = ntohs(flow->packet.udp->dest)), default_ports = ndpi_str->proto_defaults[ret.master_protocol].udp_default_ports; else if(flow->packet.tcp) found = ndpi_get_guessed_protocol_id(ndpi_str, IPPROTO_TCP, sport = ntohs(flow->packet.tcp->source), dport = ntohs(flow->packet.tcp->dest)), default_ports = ndpi_str->proto_defaults[ret.master_protocol].tcp_default_ports; else found = NULL, default_ports = NULL; if(found && (found->proto->protoId != NDPI_PROTOCOL_UNKNOWN) && (found->proto->protoId != ret.master_protocol)) { // printf("******** %u / %u\n", found->proto->protoId, ret.master_protocol); if(!ndpi_check_protocol_port_mismatch_exceptions(ndpi_str, flow, found, &ret)) NDPI_SET_BIT(flow->risk, NDPI_KNOWN_PROTOCOL_ON_NON_STANDARD_PORT); } else if(default_ports && (default_ports[0] != 0)) { u_int8_t found = 0, i; for(i=0; (i<MAX_DEFAULT_PORTS) && (default_ports[i] != 0); i++) { if((default_ports[i] == sport) || (default_ports[i] == dport)) { found = 1; break; } } /* for */ if(!found) { // printf("******** Invalid default port\n"); NDPI_SET_BIT(flow->risk, NDPI_KNOWN_PROTOCOL_ON_NON_STANDARD_PORT); } } flow->risk_checked = 1; } ndpi_reconcile_protocols(ndpi_str, flow, &ret); invalidate_ptr: 
  /*
    Invalidate packet memory to avoid accessing the pointers below
    when the packet is no longer accessible
  */
  flow->packet.iph = NULL, flow->packet.tcp = NULL, flow->packet.udp = NULL, flow->packet.payload = NULL;
  ndpi_reset_packet_line_info(&flow->packet);

  return(ret);
}

/* ********************************************************************************* */

/*
 * Parse an unsigned decimal number from a raw byte stream (no NUL terminator
 * required).  Reads at most `max_chars_to_read` digit characters, stops at the
 * first non-digit, accumulates the consumed count into *bytes_read, and
 * returns the value.
 *
 * NOTE(review): *str is dereferenced before the max_chars_to_read bound is
 * tested, so the function reads one byte even when max_chars_to_read is 0 —
 * callers appear to guarantee at least one readable byte; confirm.
 * No overflow protection: values beyond 32 bits wrap silently.
 */
u_int32_t ndpi_bytestream_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int32_t val;
  val = 0;

  // cancel if eof, ' ' or line end chars are reached
  while (*str >= '0' && *str <= '9' && max_chars_to_read > 0) {
    val *= 10;
    val += *str - '0';
    str++;
    max_chars_to_read = max_chars_to_read - 1;
    *bytes_read = *bytes_read + 1;
  }

  return(val);
}

/* ********************************************************************************* */

#ifdef CODE_UNUSED
/*
 * 32-bit variant of the dec-or-hex parser below; currently compiled out.
 * Accepts "0x"-prefixed hexadecimal, otherwise delegates to the decimal
 * parser above.
 */
u_int32_t ndpi_bytestream_dec_or_hex_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int32_t val;
  val = 0;

  if(max_chars_to_read <= 2 || str[0] != '0' || str[1] != 'x') {
    return(ndpi_bytestream_to_number(str, max_chars_to_read, bytes_read));
  } else {
    /*use base 16 system */
    str += 2;
    max_chars_to_read -= 2;
    *bytes_read = *bytes_read + 2;

    while (max_chars_to_read > 0) {
      if(*str >= '0' && *str <= '9') {
	val *= 16;
	val += *str - '0';
      } else if(*str >= 'a' && *str <= 'f') {
	val *= 16;
	val += *str + 10 - 'a';
      } else if(*str >= 'A' && *str <= 'F') {
	val *= 16;
	val += *str + 10 - 'A';
      } else {
	break;
      }

      str++;
      max_chars_to_read = max_chars_to_read - 1;
      *bytes_read = *bytes_read + 1;
    }
  }

  return(val);
}
#endif

/* ********************************************************************************* */

/*
 * 64-bit decimal parser: same contract as ndpi_bytestream_to_number() but
 * accumulates into a u_int64_t.  Here the bound IS checked before the byte
 * is dereferenced.
 */
u_int64_t ndpi_bytestream_to_number64(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int64_t val;
  val = 0;

  // cancel if eof, ' ' or line end chars are reached
  while (max_chars_to_read > 0 && *str >= '0' && *str <= '9') {
    val *= 10;
    val += *str - '0';
    str++;
    max_chars_to_read = max_chars_to_read - 1;
    *bytes_read = *bytes_read + 1;
  }

  return(val);
}

/* ********************************************************************************* */

/*
 * 64-bit dec-or-hex parser: accepts "0x"-prefixed hexadecimal, otherwise
 * delegates to ndpi_bytestream_to_number64().  Consumed characters are
 * accumulated into *bytes_read.
 */
u_int64_t ndpi_bytestream_dec_or_hex_to_number64(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int64_t val;
  val = 0;

  if(max_chars_to_read <= 2 || str[0] != '0' || str[1] != 'x') {
    return(ndpi_bytestream_to_number64(str, max_chars_to_read, bytes_read));
  } else {
    /*use base 16 system */
    str += 2;
    max_chars_to_read -= 2;
    *bytes_read = *bytes_read + 2;

    while (max_chars_to_read > 0) {
      if(*str >= '0' && *str <= '9') {
	val *= 16;
	val += *str - '0';
      } else if(*str >= 'a' && *str <= 'f') {
	val *= 16;
	val += *str + 10 - 'a';
      } else if(*str >= 'A' && *str <= 'F') {
	val *= 16;
	val += *str + 10 - 'A';
      } else {
	break;
      }

      str++;
      max_chars_to_read = max_chars_to_read - 1;
      *bytes_read = *bytes_read + 1;
    }
  }

  return(val);
}

/* ********************************************************************************* */

/*
 * Parse a dotted-quad IPv4 address ("X.X.X.X", each octet 0-255) from a raw
 * byte stream.  On success returns the address in network byte order and
 * adds the consumed character count to *bytes_read; on any malformed input
 * returns 0 (note: 0 is also the valid parse of "0.0.0.0").
 */
u_int32_t ndpi_bytestream_to_ipv4(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int32_t val;
  u_int16_t read = 0;
  u_int16_t oldread;
  u_int32_t c;

  /* ip address must be X.X.X.X with each X between 0 and 255 */
  oldread = read;
  c = ndpi_bytestream_to_number(str, max_chars_to_read, &read);

  if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.')
    return(0);

  read++;
  val = c << 24;
  oldread = read;
  c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read);

  if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.')
    return(0);

  read++;
  val = val + (c << 16);
  oldread = read;
  c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read);

  if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.')
    return(0);

  read++;
  val = val + (c << 8);
  oldread = read;
  c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read);

  if(c > 255 || oldread == read || max_chars_to_read ==
read)
    return(0);

  val = val + c;
  *bytes_read = *bytes_read + read;

  return(htonl(val));
}

/* ********************************************************************************* */

/* internal function for every detection to parse one packet and to increase the info buffer */
/*
 * Split the packet payload into CRLF-terminated lines, filling
 * packet->line[] (up to NDPI_MAX_PARSE_LINES_PER_PACKET entries), and
 * recognize the most common HTTP header fields, storing pointer/length
 * pairs into the corresponding packet->*_line / http_* fields.  All stored
 * pointers reference the packet payload buffer — they are only valid until
 * the payload is invalidated (see ndpi_reset_packet_line_info()).
 * Idempotent per packet via packet_lines_parsed_complete.
 */
void ndpi_parse_packet_line_info(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) {
  u_int32_t a;
  struct ndpi_packet_struct *packet = &flow->packet;

  if((packet->payload_packet_len < 3) || (packet->payload == NULL))
    return;

  if(packet->packet_lines_parsed_complete != 0)
    return;

  packet->packet_lines_parsed_complete = 1;
  ndpi_reset_packet_line_info(packet);

  packet->line[packet->parsed_lines].ptr = packet->payload;
  packet->line[packet->parsed_lines].len = 0;

  for (a = 0; ((a+1) < packet->payload_packet_len) && (packet->parsed_lines < NDPI_MAX_PARSE_LINES_PER_PACKET); a++) {
    if((packet->payload[a] == 0x0d) && (packet->payload[a+1] == 0x0a)) {
      /* If end of line char sequence CR+NL "\r\n", process line */

      if(((a + 3) < packet->payload_packet_len)
	 && (packet->payload[a+2] == 0x0d)
	 && (packet->payload[a+3] == 0x0a)) {
	/* \r\n\r\n : end of the headers — snapshot the start of the body. */
	int diff; /* No unsigned ! */
	u_int32_t a1 = a + 4;

	diff = packet->payload_packet_len - a1;

	if(diff > 0) {
	  diff = ndpi_min(diff, sizeof(flow->initial_binary_bytes));
	  memcpy(&flow->initial_binary_bytes, &packet->payload[a1], diff);
	  flow->initial_binary_bytes_len = diff;
	}
      }

      packet->line[packet->parsed_lines].len =
	(u_int16_t)(((unsigned long) &packet->payload[a]) -
		    ((unsigned long) packet->line[packet->parsed_lines].ptr));

      /* First line of a HTTP response parsing. Expected a "HTTP/1.? ???" */
      if(packet->parsed_lines == 0 && packet->line[0].len >= NDPI_STATICSTRING_LEN("HTTP/1.X 200 ") &&
	 strncasecmp((const char *) packet->line[0].ptr, "HTTP/1.", NDPI_STATICSTRING_LEN("HTTP/1.")) == 0 &&
	 packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] > '0' &&
	 /* response code between 000 and 699 */
	 packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] < '6') {
	packet->http_response.ptr = &packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.1 ")];
	packet->http_response.len = packet->line[0].len - NDPI_STATICSTRING_LEN("HTTP/1.1 ");
	packet->http_num_headers++;

	/* Set server HTTP response code */
	if(packet->payload_packet_len >= 12) {
	  char buf[4];

	  /* Set server HTTP response code */
	  strncpy(buf, (char *) &packet->payload[9], 3);
	  buf[3] = '\0';

	  flow->http.response_status_code = atoi(buf);
	  /* https://en.wikipedia.org/wiki/List_of_HTTP_status_codes */
	  if((flow->http.response_status_code < 100) || (flow->http.response_status_code > 509))
	    flow->http.response_status_code = 0; /* Out of range */
	}
      }

      /* "Server:" header line in HTTP response */
      if(packet->line[packet->parsed_lines].len > NDPI_STATICSTRING_LEN("Server:") + 1 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr,
		     "Server:", NDPI_STATICSTRING_LEN("Server:")) == 0) {
	// some stupid clients omit a space and place the servername directly after the colon
	if(packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")] == ' ') {
	  packet->server_line.ptr =
	    &packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:") + 1];
	  packet->server_line.len =
	    packet->line[packet->parsed_lines].len - (NDPI_STATICSTRING_LEN("Server:") + 1);
	} else {
	  packet->server_line.ptr = &packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")];
	  packet->server_line.len = packet->line[packet->parsed_lines].len - NDPI_STATICSTRING_LEN("Server:");
	}
	packet->http_num_headers++;
      }

      /* "Host:" header line in HTTP request */
      if(packet->line[packet->parsed_lines].len > 6 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Host:", 5) == 0) {
	// some stupid clients omit a space and place the hostname directly after the colon
	if(packet->line[packet->parsed_lines].ptr[5] == ' ') {
	  packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[6];
	  packet->host_line.len = packet->line[packet->parsed_lines].len - 6;
	} else {
	  packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[5];
	  packet->host_line.len = packet->line[packet->parsed_lines].len - 5;
	}
	packet->http_num_headers++;
      }

      /* "X-Forwarded-For:" header line in HTTP request. Commonly used for HTTP proxies. */
      if(packet->line[packet->parsed_lines].len > 17 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Forwarded-For:", 16) == 0) {
	// some stupid clients omit a space and place the hostname directly after the colon
	if(packet->line[packet->parsed_lines].ptr[16] == ' ') {
	  packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[17];
	  packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 17;
	} else {
	  packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[16];
	  packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 16;
	}
	packet->http_num_headers++;
      }

      /* "Content-Type:" header line in HTTP. */
      if(packet->line[packet->parsed_lines].len > 14 &&
	 (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Type: ", 14) == 0 ||
	  strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type: ", 14) == 0)) {
	packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[14];
	packet->content_line.len = packet->line[packet->parsed_lines].len - 14;

	while ((packet->content_line.len > 0) && (packet->content_line.ptr[0] == ' '))
	  packet->content_line.len--, packet->content_line.ptr++;

	packet->http_num_headers++;
      }

      /* "Content-Type:" header line in HTTP AGAIN. Probably a bogus response without space after ":" */
      if((packet->content_line.len == 0) && (packet->line[packet->parsed_lines].len > 13) &&
	 (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type:", 13) == 0)) {
	packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[13];
	packet->content_line.len = packet->line[packet->parsed_lines].len - 13;
	packet->http_num_headers++;
      }

      if(packet->content_line.len > 0) {
	/* application/json; charset=utf-8 — trim the media type at the first parameter separator. */
	char separator[] = {';', '\r', '\0'};
	int i;

	for (i = 0; separator[i] != '\0'; i++) {
	  char *c = memchr((char *) packet->content_line.ptr, separator[i], packet->content_line.len);

	  if(c != NULL)
	    packet->content_line.len = c - (char *) packet->content_line.ptr;
	}
      }

      /* "Accept:" header line in HTTP request. */
      if(packet->line[packet->parsed_lines].len > 8 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept: ", 8) == 0) {
	packet->accept_line.ptr = &packet->line[packet->parsed_lines].ptr[8];
	packet->accept_line.len = packet->line[packet->parsed_lines].len - 8;
	packet->http_num_headers++;
      }

      /* "Referer:" header line in HTTP request. */
      if(packet->line[packet->parsed_lines].len > 9 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Referer: ", 9) == 0) {
	packet->referer_line.ptr = &packet->line[packet->parsed_lines].ptr[9];
	packet->referer_line.len = packet->line[packet->parsed_lines].len - 9;
	packet->http_num_headers++;
      }

      /* "User-Agent:" header line in HTTP request. */
      if(packet->line[packet->parsed_lines].len > 12 &&
	 (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-Agent: ", 12) == 0 ||
	  strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-agent: ", 12) == 0)) {
	packet->user_agent_line.ptr = &packet->line[packet->parsed_lines].ptr[12];
	packet->user_agent_line.len = packet->line[packet->parsed_lines].len - 12;
	packet->http_num_headers++;
      }

      /* "Content-Encoding:" header line in HTTP response (and request?). */
      if(packet->line[packet->parsed_lines].len > 18 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Encoding: ", 18) == 0) {
	packet->http_encoding.ptr = &packet->line[packet->parsed_lines].ptr[18];
	packet->http_encoding.len = packet->line[packet->parsed_lines].len - 18;
	packet->http_num_headers++;
      }

      /* "Transfer-Encoding:" header line in HTTP. */
      if(packet->line[packet->parsed_lines].len > 19 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Transfer-Encoding: ", 19) == 0) {
	packet->http_transfer_encoding.ptr = &packet->line[packet->parsed_lines].ptr[19];
	packet->http_transfer_encoding.len = packet->line[packet->parsed_lines].len - 19;
	packet->http_num_headers++;
      }

      /* "Content-Length:" header line in HTTP. */
      if(packet->line[packet->parsed_lines].len > 16 &&
	 ((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Length: ", 16) == 0) ||
	  (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "content-length: ", 16) == 0))) {
	packet->http_contentlen.ptr = &packet->line[packet->parsed_lines].ptr[16];
	packet->http_contentlen.len = packet->line[packet->parsed_lines].len - 16;
	packet->http_num_headers++;
      }

      /* "Content-Disposition"*/
      if(packet->line[packet->parsed_lines].len > 21 &&
	 ((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Disposition: ", 21) == 0))) {
	packet->content_disposition_line.ptr = &packet->line[packet->parsed_lines].ptr[21];
	packet->content_disposition_line.len = packet->line[packet->parsed_lines].len - 21;
	packet->http_num_headers++;
      }

      /* "Cookie:" header line in HTTP. */
      if(packet->line[packet->parsed_lines].len > 8 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Cookie: ", 8) == 0) {
	packet->http_cookie.ptr = &packet->line[packet->parsed_lines].ptr[8];
	packet->http_cookie.len = packet->line[packet->parsed_lines].len - 8;
	packet->http_num_headers++;
      }

      /* "Origin:" header line in HTTP. */
      if(packet->line[packet->parsed_lines].len > 8 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Origin: ", 8) == 0) {
	packet->http_origin.ptr = &packet->line[packet->parsed_lines].ptr[8];
	packet->http_origin.len = packet->line[packet->parsed_lines].len - 8;
	packet->http_num_headers++;
      }

      /* "X-Session-Type:" header line in HTTP. */
      if(packet->line[packet->parsed_lines].len > 16 &&
	 strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Session-Type: ", 16) == 0) {
	packet->http_x_session_type.ptr = &packet->line[packet->parsed_lines].ptr[16];
	packet->http_x_session_type.len = packet->line[packet->parsed_lines].len - 16;
	packet->http_num_headers++;
      }

      /* Identification and counting of other HTTP headers.
       * We consider the most common headers, but there are many others,
       * which can be seen at references below:
       * - https://tools.ietf.org/html/rfc7230
       * - https://en.wikipedia.org/wiki/List_of_HTTP_header_fields
       */
      if((packet->line[packet->parsed_lines].len > 6 &&
	  (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Date: ", 6) == 0 ||
	   strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Vary: ", 6) == 0 ||
	   strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "ETag: ", 6) == 0)) ||
	 (packet->line[packet->parsed_lines].len > 8 &&
	  strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Pragma: ", 8) == 0) ||
	 (packet->line[packet->parsed_lines].len > 9 &&
	  strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Expires: ", 9) == 0) ||
	 (packet->line[packet->parsed_lines].len > 12 &&
	  (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Set-Cookie: ", 12) == 0 ||
	   strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Keep-Alive: ", 12) == 0 ||
	   strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Connection: ", 12) == 0)) ||
	 (packet->line[packet->parsed_lines].len > 15 &&
	  (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Last-Modified: ", 15) == 0 ||
	   strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Ranges: ", 15) == 0)) ||
	 (packet->line[packet->parsed_lines].len > 17 &&
	  (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Language: ", 17) == 0 ||
	   strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Encoding: ", 17) == 0)) ||
	 (packet->line[packet->parsed_lines].len > 27 &&
	  strncasecmp((const char *) packet->line[packet->parsed_lines].ptr,
		      "Upgrade-Insecure-Requests: ", 27) == 0)) {
	/* Just count. In the future, if needed, this if can be splited to parse these headers */
	packet->http_num_headers++;
      }

      if(packet->line[packet->parsed_lines].len == 0) {
	packet->empty_line_position = a;
	packet->empty_line_position_set = 1;
      }

      if(packet->parsed_lines >= (NDPI_MAX_PARSE_LINES_PER_PACKET - 1))
	return;

      packet->parsed_lines++;
      packet->line[packet->parsed_lines].ptr = &packet->payload[a + 2];
      packet->line[packet->parsed_lines].len = 0;

      a++; /* next char in the payload */
    }
  }

  /* The payload may end without a trailing CRLF: record the final partial line. */
  if(packet->parsed_lines >= 1) {
    packet->line[packet->parsed_lines].len =
      (u_int16_t)(((unsigned long) &packet->payload[packet->payload_packet_len]) -
		  ((unsigned long) packet->line[packet->parsed_lines].ptr));
    packet->parsed_lines++;
  }
}

/* ********************************************************************************* */

/*
 * Lightweight line splitter: cut the payload on bare LF (0x0a) characters,
 * trimming an optional preceding CR, and fill packet->line[] only — no HTTP
 * header recognition.  Idempotent per packet via packet_lines_parsed_complete.
 */
void ndpi_parse_packet_line_info_any(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) {
  struct ndpi_packet_struct *packet = &flow->packet;
  u_int32_t a;
  u_int16_t end = packet->payload_packet_len;

  if(packet->packet_lines_parsed_complete != 0)
    return;

  packet->packet_lines_parsed_complete = 1;
  packet->parsed_lines = 0;

  if(packet->payload_packet_len == 0)
    return;

  packet->line[packet->parsed_lines].ptr = packet->payload;
  packet->line[packet->parsed_lines].len = 0;

  for (a = 0; a < end; a++) {
    if(packet->payload[a] == 0x0a) {
      packet->line[packet->parsed_lines].len =
	(u_int16_t)(((unsigned long) &packet->payload[a]) -
		    ((unsigned long) packet->line[packet->parsed_lines].ptr));

      /* Drop a CR immediately before the LF so the stored line excludes it. */
      if(a > 0 && packet->payload[a - 1] == 0x0d)
	packet->line[packet->parsed_lines].len--;

      if(packet->parsed_lines >= (NDPI_MAX_PARSE_LINES_PER_PACKET - 1))
	break;

      packet->parsed_lines++;
      packet->line[packet->parsed_lines].ptr = &packet->payload[a + 1];
      packet->line[packet->parsed_lines].len = 0;

      if((a + 1) >= packet->payload_packet_len)
	break;

      //a++;
    }
  }
}

/* ********************************************************************************* */

/*
 * Hand-written scan for an email-address-like token starting at payload
 * offset `counter`: local part of [A-Za-z0-9_-] (dots allowed after the
 * first char), then '@', then a domain with a TLD of 2 to 4 lowercase
 * letters, terminated by ' ' or ';'.
 *
 * Returns the offset just past the TLD on a match, 0 otherwise.
 */
u_int16_t ndpi_check_for_email_address(struct ndpi_detection_module_struct *ndpi_str,
				       struct ndpi_flow_struct *flow, u_int16_t counter) {
  struct ndpi_packet_struct *packet = &flow->packet;

  NDPI_LOG_DBG2(ndpi_str, "called ndpi_check_for_email_address\n");

  if(packet->payload_packet_len > counter &&
     ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') ||
      (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') ||
      (packet->payload[counter] >= '0' && packet->payload[counter] <= '9') ||
      packet->payload[counter] == '-' || packet->payload[counter] == '_')) {
    NDPI_LOG_DBG2(ndpi_str, "first letter\n");
    counter++;

    while (packet->payload_packet_len > counter &&
	   ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') ||
	    (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') ||
	    (packet->payload[counter] >= '0' && packet->payload[counter] <= '9') ||
	    packet->payload[counter] == '-' || packet->payload[counter] == '_' ||
	    packet->payload[counter] == '.')) {
      NDPI_LOG_DBG2(ndpi_str, "further letter\n");
      counter++;

      if(packet->payload_packet_len > counter && packet->payload[counter] == '@') {
	NDPI_LOG_DBG2(ndpi_str, "@\n");
	counter++;

	while (packet->payload_packet_len > counter &&
	       ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') ||
		(packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') ||
		(packet->payload[counter] >= '0' && packet->payload[counter] <= '9') ||
		packet->payload[counter] == '-' || packet->payload[counter] == '_')) {
	  NDPI_LOG_DBG2(ndpi_str, "letter\n");
	  counter++;

	  if(packet->payload_packet_len > counter && packet->payload[counter] == '.') {
	    NDPI_LOG_DBG2(ndpi_str, ".\n");
	    counter++;

	    if(packet->payload_packet_len > counter + 1 &&
	       ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') &&
		(packet->payload[counter + 1] >= 'a' && packet->payload[counter + 1] <= 'z'))) {
	      NDPI_LOG_DBG2(ndpi_str, "two letters\n");
	      counter += 2;

	      if(packet->payload_packet_len > counter &&
		 (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) {
		NDPI_LOG_DBG2(ndpi_str, "whitespace1\n");
		return(counter);
	      } else if(packet->payload_packet_len > counter &&
			packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') {
		NDPI_LOG_DBG2(ndpi_str, "one letter\n");
		counter++;

		if(packet->payload_packet_len > counter &&
		   (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) {
		  NDPI_LOG_DBG2(ndpi_str, "whitespace2\n");
		  return(counter);
		} else if(packet->payload_packet_len > counter &&
			  packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') {
		  counter++;

		  if(packet->payload_packet_len > counter &&
		     (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) {
		    NDPI_LOG_DBG2(ndpi_str, "whitespace3\n");
		    return(counter);
		  } else {
		    return(0);
		  }
		} else {
		  return(0);
		}
	      } else {
		return(0);
	      }
	    } else {
	      return(0);
	    }
	  }
	}
	return(0);
      }
    }
  }
  return(0);
}

#ifdef NDPI_ENABLE_DEBUG_MESSAGES
/* ********************************************************************************* */

/*
 * Debug helper: copy the file name, function name and line number of the
 * last NDPI_LOG call out of the detection module (empty strings when unset).
 */
void ndpi_debug_get_last_log_function_line(struct ndpi_detection_module_struct *ndpi_str,
					   const char **file, const char **func, u_int32_t *line) {
  *file = "";
  *func = "";

  if(ndpi_str->ndpi_debug_print_file != NULL)
    *file = ndpi_str->ndpi_debug_print_file;

  if(ndpi_str->ndpi_debug_print_function != NULL)
    *func = ndpi_str->ndpi_debug_print_function;

  *line = ndpi_str->ndpi_debug_print_line;
}
#endif
/*
********************************************************************************* */

/*
 * Public wrapper: locate the L4 header inside an L3 packet.  Thin shim over
 * ndpi_detection_get_l4_internal() with no detection module context.
 */
u_int8_t ndpi_detection_get_l4(const u_int8_t *l3, u_int16_t l3_len, const u_int8_t **l4_return,
			       u_int16_t *l4_len_return, u_int8_t *l4_protocol_return, u_int32_t flags) {
  return(ndpi_detection_get_l4_internal(NULL, l3, l3_len, l4_return, l4_len_return, l4_protocol_return, flags));
}

/* ********************************************************************************* */

/*
 * Record the detected (upper, lower) protocol pair on the flow and mirror
 * it into the per-endpoint protocol bitmasks of both src and dst ids.
 */
void ndpi_set_detected_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
				u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) {
  struct ndpi_id_struct *src = flow->src, *dst = flow->dst;

  ndpi_int_change_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol);

  if(src != NULL) {
    NDPI_ADD_PROTOCOL_TO_BITMASK(src->detected_protocol_bitmask, upper_detected_protocol);

    if(lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN)
      NDPI_ADD_PROTOCOL_TO_BITMASK(src->detected_protocol_bitmask, lower_detected_protocol);
  }

  if(dst != NULL) {
    NDPI_ADD_PROTOCOL_TO_BITMASK(dst->detected_protocol_bitmask, upper_detected_protocol);

    if(lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN)
      NDPI_ADD_PROTOCOL_TO_BITMASK(dst->detected_protocol_bitmask, lower_detected_protocol);
  }
}

/* ********************************************************************************* */

/* Accessor: the flow's master (lower-stack) protocol id. */
u_int16_t ndpi_get_flow_masterprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) {
  return(flow->detected_protocol_stack[1]);
}

/* ********************************************************************************* */

/* Overwrite the flow's protocol stack; NULL flow is a no-op. */
void ndpi_int_change_flow_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
				   u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) {
  if(!flow)
    return;

  flow->detected_protocol_stack[0] = upper_detected_protocol,
    flow->detected_protocol_stack[1] = lower_detected_protocol;
}

/* ********************************************************************************* */

void ndpi_int_change_packet_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
				     u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) {
  struct ndpi_packet_struct *packet = &flow->packet;
  /* NOTE: everything below is identically to change_flow_protocol
   *        except flow->packet If you want to change something here,
   *        don't! Change it for the flow function and apply it here
   *        as well */

  if(!packet)
    return;

  packet->detected_protocol_stack[0] = upper_detected_protocol,
    packet->detected_protocol_stack[1] = lower_detected_protocol;
}

/* ********************************************************************************* */

/* generic function for changing the protocol
 *
 * what it does is:
 * 1.update the flow protocol stack with the new protocol
 * 2.update the packet protocol stack with the new protocol
 */
void ndpi_int_change_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
			      u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) {
  /* Normalize the pair: promote the lower protocol when the upper is unknown,
     and drop a redundant lower protocol equal to the upper one. */
  if((upper_detected_protocol == NDPI_PROTOCOL_UNKNOWN) && (lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN))
    upper_detected_protocol = lower_detected_protocol;

  if(upper_detected_protocol == lower_detected_protocol)
    lower_detected_protocol = NDPI_PROTOCOL_UNKNOWN;

  if((upper_detected_protocol != NDPI_PROTOCOL_UNKNOWN) && (lower_detected_protocol == NDPI_PROTOCOL_UNKNOWN)) {
    if((flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
       (upper_detected_protocol != flow->guessed_host_protocol_id)) {
      if(ndpi_str->proto_defaults[upper_detected_protocol].can_have_a_subprotocol) {
	/* The IP-guessed host protocol becomes the app protocol, the detected
	   one becomes its carrier (master). */
	lower_detected_protocol = upper_detected_protocol;
	upper_detected_protocol = flow->guessed_host_protocol_id;
      }
    }
  }

  ndpi_int_change_flow_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol);
  ndpi_int_change_packet_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol);
}

/* ********************************************************************************* */

/* Setter for the flow's protocol category. */
void ndpi_int_change_category(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
			      ndpi_protocol_category_t protocol_category) {
  flow->category = protocol_category;
}

/* ********************************************************************************* */

/* turns a packet back to unknown */
void ndpi_int_reset_packet_protocol(struct ndpi_packet_struct *packet) {
  int a;

  for (a = 0; a < NDPI_PROTOCOL_SIZE; a++)
    packet->detected_protocol_stack[a] = NDPI_PROTOCOL_UNKNOWN;
}

/* ********************************************************************************* */

/* Reset the flow's whole protocol stack to unknown; NULL flow is a no-op. */
void ndpi_int_reset_protocol(struct ndpi_flow_struct *flow) {
  if(flow) {
    int a;

    for (a = 0; a < NDPI_PROTOCOL_SIZE; a++)
      flow->detected_protocol_stack[a] = NDPI_PROTOCOL_UNKNOWN;
  }
}

/* ********************************************************************************* */

/* Zero an ndpi_ip_addr_t (works for both the IPv4 and IPv6 representation). */
void NDPI_PROTOCOL_IP_clear(ndpi_ip_addr_t *ip) {
  memset(ip, 0, sizeof(ndpi_ip_addr_t));
}

/* ********************************************************************************* */

#ifdef CODE_UNUSED
/* NTOP */
/* Non-zero iff the address is not all-zero bytes; currently compiled out. */
int NDPI_PROTOCOL_IP_is_set(const ndpi_ip_addr_t *ip) {
  return(memcmp(ip, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", sizeof(ndpi_ip_addr_t)) != 0);
}
#endif

/* ********************************************************************************* */

/* check if the source ip address in packet and ip are equal */
/* NTOP */
int ndpi_packet_src_ip_eql(const struct ndpi_packet_struct *packet, const ndpi_ip_addr_t *ip) {
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  /* IPv6 */
  if(packet->iphv6 != NULL) {
    if(packet->iphv6->ip6_src.u6_addr.u6_addr32[0] == ip->ipv6.u6_addr.u6_addr32[0] &&
       packet->iphv6->ip6_src.u6_addr.u6_addr32[1] == ip->ipv6.u6_addr.u6_addr32[1] &&
       packet->iphv6->ip6_src.u6_addr.u6_addr32[2] == ip->ipv6.u6_addr.u6_addr32[2] &&
       packet->iphv6->ip6_src.u6_addr.u6_addr32[3] == ip->ipv6.u6_addr.u6_addr32[3])
      return(1);
//else return(0); } #endif /* IPv4 */ if(packet->iph->saddr == ip->ipv4) return(1); return(0); } /* ********************************************************************************* */ /* check if the destination ip address in packet and ip are equal */ int ndpi_packet_dst_ip_eql(const struct ndpi_packet_struct *packet, const ndpi_ip_addr_t *ip) { #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* IPv6 */ if(packet->iphv6 != NULL) { if(packet->iphv6->ip6_dst.u6_addr.u6_addr32[0] == ip->ipv6.u6_addr.u6_addr32[0] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[1] == ip->ipv6.u6_addr.u6_addr32[1] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[2] == ip->ipv6.u6_addr.u6_addr32[2] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[3] == ip->ipv6.u6_addr.u6_addr32[3]) return(1); //else return(0); } #endif /* IPv4 */ if(packet->iph->saddr == ip->ipv4) return(1); return(0); } /* ********************************************************************************* */ /* get the source ip address from packet and put it into ip */ /* NTOP */ void ndpi_packet_src_ip_get(const struct ndpi_packet_struct *packet, ndpi_ip_addr_t *ip) { NDPI_PROTOCOL_IP_clear(ip); #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* IPv6 */ if(packet->iphv6 != NULL) { ip->ipv6.u6_addr.u6_addr32[0] = packet->iphv6->ip6_src.u6_addr.u6_addr32[0]; ip->ipv6.u6_addr.u6_addr32[1] = packet->iphv6->ip6_src.u6_addr.u6_addr32[1]; ip->ipv6.u6_addr.u6_addr32[2] = packet->iphv6->ip6_src.u6_addr.u6_addr32[2]; ip->ipv6.u6_addr.u6_addr32[3] = packet->iphv6->ip6_src.u6_addr.u6_addr32[3]; } else #endif /* IPv4 */ ip->ipv4 = packet->iph->saddr; } /* ********************************************************************************* */ /* get the destination ip address from packet and put it into ip */ /* NTOP */ void ndpi_packet_dst_ip_get(const struct ndpi_packet_struct *packet, ndpi_ip_addr_t *ip) { NDPI_PROTOCOL_IP_clear(ip); #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(packet->iphv6 != NULL) { ip->ipv6.u6_addr.u6_addr32[0] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[0]; 
ip->ipv6.u6_addr.u6_addr32[1] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[1]; ip->ipv6.u6_addr.u6_addr32[2] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[2]; ip->ipv6.u6_addr.u6_addr32[3] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[3]; } else #endif ip->ipv4 = packet->iph->daddr; } /* ********************************************************************************* */ u_int8_t ndpi_is_ipv6(const ndpi_ip_addr_t *ip) { #ifdef NDPI_DETECTION_SUPPORT_IPV6 return(ip->ipv6.u6_addr.u6_addr32[1] != 0 || ip->ipv6.u6_addr.u6_addr32[2] != 0 || ip->ipv6.u6_addr.u6_addr32[3] != 0); #else return(0); #endif } /* ********************************************************************************* */ char *ndpi_get_ip_string(const ndpi_ip_addr_t *ip, char *buf, u_int buf_len) { const u_int8_t *a = (const u_int8_t *) &ip->ipv4; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(ndpi_is_ipv6(ip)) { if(inet_ntop(AF_INET6, &ip->ipv6.u6_addr, buf, buf_len) == NULL) buf[0] = '\0'; return(buf); } #endif snprintf(buf, buf_len, "%u.%u.%u.%u", a[0], a[1], a[2], a[3]); return(buf); } /* ****************************************************** */ /* Returns -1 on failutre, otherwise fills parsed_ip and returns the IP version */ int ndpi_parse_ip_string(const char *ip_str, ndpi_ip_addr_t *parsed_ip) { int rv = -1; memset(parsed_ip, 0, sizeof(*parsed_ip)); if(strchr(ip_str, '.')) { if(inet_pton(AF_INET, ip_str, &parsed_ip->ipv4) > 0) rv = 4; #ifdef NDPI_DETECTION_SUPPORT_IPV6 } else { if(inet_pton(AF_INET6, ip_str, &parsed_ip->ipv6) > 0) rv = 6; #endif } return(rv); } /* ****************************************************** */ u_int16_t ntohs_ndpi_bytestream_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int16_t val = ndpi_bytestream_to_number(str, max_chars_to_read, bytes_read); return(ntohs(val)); } /* ****************************************************** */ u_int8_t ndpi_is_proto(ndpi_protocol proto, u_int16_t p) { return(((proto.app_protocol == p) || (proto.master_protocol 
== p)) ? 1 : 0); } /* ****************************************************** */ u_int16_t ndpi_get_lower_proto(ndpi_protocol proto) { return((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) ? proto.master_protocol : proto.app_protocol); } /* ****************************************************** */ ndpi_protocol ndpi_guess_undetected_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int8_t proto, u_int32_t shost /* host byte order */, u_int16_t sport, u_int32_t dhost /* host byte order */, u_int16_t dport) { u_int32_t rc; struct in_addr addr; ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; u_int8_t user_defined_proto; if((proto == IPPROTO_TCP) || (proto == IPPROTO_UDP)) { rc = ndpi_search_tcp_or_udp_raw(ndpi_str, flow, proto, shost, dhost, sport, dport); if(rc != NDPI_PROTOCOL_UNKNOWN) { if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, rc) && is_udp_guessable_protocol(rc)) ; else { ret.app_protocol = rc, ret.master_protocol = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); if(ret.app_protocol == ret.master_protocol) ret.master_protocol = NDPI_PROTOCOL_UNKNOWN; ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } } rc = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); if(rc != NDPI_PROTOCOL_UNKNOWN) { if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, rc) && is_udp_guessable_protocol(rc)) ; else { ret.app_protocol = rc; if(rc == NDPI_PROTOCOL_TLS) goto check_guessed_skype; else { ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } } } check_guessed_skype: addr.s_addr = htonl(shost); if(ndpi_network_ptree_match(ndpi_str, &addr) == NDPI_PROTOCOL_SKYPE) { ret.app_protocol = NDPI_PROTOCOL_SKYPE; } else { addr.s_addr = htonl(dhost); if(ndpi_network_ptree_match(ndpi_str, 
&addr) == NDPI_PROTOCOL_SKYPE) ret.app_protocol = NDPI_PROTOCOL_SKYPE; } } else ret.app_protocol = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } /* ****************************************************** */ char *ndpi_protocol2id(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol proto, char *buf, u_int buf_len) { if((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (proto.master_protocol != proto.app_protocol)) { if(proto.app_protocol != NDPI_PROTOCOL_UNKNOWN) snprintf(buf, buf_len, "%u.%u", proto.master_protocol, proto.app_protocol); else snprintf(buf, buf_len, "%u", proto.master_protocol); } else snprintf(buf, buf_len, "%u", proto.app_protocol); return(buf); } /* ****************************************************** */ char *ndpi_protocol2name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol proto, char *buf, u_int buf_len) { if((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (proto.master_protocol != proto.app_protocol)) { if(proto.app_protocol != NDPI_PROTOCOL_UNKNOWN) snprintf(buf, buf_len, "%s.%s", ndpi_get_proto_name(ndpi_str, proto.master_protocol), ndpi_get_proto_name(ndpi_str, proto.app_protocol)); else snprintf(buf, buf_len, "%s", ndpi_get_proto_name(ndpi_str, proto.master_protocol)); } else snprintf(buf, buf_len, "%s", ndpi_get_proto_name(ndpi_str, proto.app_protocol)); return(buf); } /* ****************************************************** */ int ndpi_is_custom_category(ndpi_protocol_category_t category) { switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: return(1); break; default: return(0); break; } } /* ****************************************************** */ void ndpi_category_set_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_category_t 
category, char *name) { if(!name) return; switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: snprintf(ndpi_str->custom_category_labels[0], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: snprintf(ndpi_str->custom_category_labels[1], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: snprintf(ndpi_str->custom_category_labels[2], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: snprintf(ndpi_str->custom_category_labels[3], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: snprintf(ndpi_str->custom_category_labels[4], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; default: break; } } /* ****************************************************** */ const char *ndpi_category_get_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_category_t category) { if((!ndpi_str) || (category >= NDPI_PROTOCOL_NUM_CATEGORIES)) { static char b[24]; if(!ndpi_str) snprintf(b, sizeof(b), "NULL nDPI"); else snprintf(b, sizeof(b), "Invalid category %d", (int) category); return(b); } if((category >= NDPI_PROTOCOL_CATEGORY_CUSTOM_1) && (category <= NDPI_PROTOCOL_CATEGORY_CUSTOM_5)) { switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: return(ndpi_str->custom_category_labels[0]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: return(ndpi_str->custom_category_labels[1]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: return(ndpi_str->custom_category_labels[2]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: return(ndpi_str->custom_category_labels[3]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: return(ndpi_str->custom_category_labels[4]); case NDPI_PROTOCOL_NUM_CATEGORIES: return("Code should not use this internal constant"); default: return("Unspecified"); } } else return(categories[category]); } /* ****************************************************** */ ndpi_protocol_category_t ndpi_get_proto_category(struct ndpi_detection_module_struct *ndpi_str, 
ndpi_protocol proto) { if(proto.category != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) return(proto.category); /* simple rule: sub protocol first, master after */ else if((proto.master_protocol == NDPI_PROTOCOL_UNKNOWN) || (ndpi_str->proto_defaults[proto.app_protocol].protoCategory != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED)) { if(proto.app_protocol < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) return(ndpi_str->proto_defaults[proto.app_protocol].protoCategory); } else if(proto.master_protocol < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) return(ndpi_str->proto_defaults[proto.master_protocol].protoCategory); return(NDPI_PROTOCOL_CATEGORY_UNSPECIFIED); } /* ****************************************************** */ char *ndpi_get_proto_name(struct ndpi_detection_module_struct *ndpi_str, u_int16_t proto_id) { if((proto_id >= ndpi_str->ndpi_num_supported_protocols) || (proto_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) || (ndpi_str->proto_defaults[proto_id].protoName == NULL)) proto_id = NDPI_PROTOCOL_UNKNOWN; return(ndpi_str->proto_defaults[proto_id].protoName); } /* ****************************************************** */ ndpi_protocol_breed_t ndpi_get_proto_breed(struct ndpi_detection_module_struct *ndpi_str, u_int16_t proto_id) { if((proto_id >= ndpi_str->ndpi_num_supported_protocols) || (proto_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) || (ndpi_str->proto_defaults[proto_id].protoName == NULL)) proto_id = NDPI_PROTOCOL_UNKNOWN; return(ndpi_str->proto_defaults[proto_id].protoBreed); } /* ****************************************************** */ char *ndpi_get_proto_breed_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_breed_t breed_id) { switch (breed_id) { case NDPI_PROTOCOL_SAFE: return("Safe"); break; case NDPI_PROTOCOL_ACCEPTABLE: return("Acceptable"); break; case NDPI_PROTOCOL_FUN: return("Fun"); break; case NDPI_PROTOCOL_UNSAFE: return("Unsafe"); break; case 
NDPI_PROTOCOL_POTENTIALLY_DANGEROUS: return("Potentially Dangerous"); break; case NDPI_PROTOCOL_DANGEROUS: return("Dangerous"); break; case NDPI_PROTOCOL_UNRATED: default: return("Unrated"); break; } } /* ****************************************************** */ int ndpi_get_protocol_id(struct ndpi_detection_module_struct *ndpi_str, char *proto) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) if(strcasecmp(proto, ndpi_str->proto_defaults[i].protoName) == 0) return(i); return(-1); } /* ****************************************************** */ int ndpi_get_category_id(struct ndpi_detection_module_struct *ndpi_str, char *cat) { int i; for (i = 0; i < NDPI_PROTOCOL_NUM_CATEGORIES; i++) { const char *name = ndpi_category_get_name(ndpi_str, i); if(strcasecmp(cat, name) == 0) return(i); } return(-1); } /* ****************************************************** */ void ndpi_dump_protocols(struct ndpi_detection_module_struct *ndpi_str) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) printf("%3d %-22s %-8s %-12s %s\n", i, ndpi_str->proto_defaults[i].protoName, ndpi_get_l4_proto_name(ndpi_get_l4_proto_info(ndpi_str, i)), ndpi_get_proto_breed_name(ndpi_str, ndpi_str->proto_defaults[i].protoBreed), ndpi_category_get_name(ndpi_str, ndpi_str->proto_defaults[i].protoCategory)); } /* ****************************************************** */ /* * Find the first occurrence of find in s, where the search is limited to the * first slen characters of s. 
*/
char *ndpi_strnstr(const char *s, const char *find, size_t slen) {
  char c;
  size_t len;

  if((c = *find++) != '\0') {
    len = strnlen(find, slen);

    do {
      char sc;

      do {
	if(slen-- < 1 || (sc = *s++) == '\0')
	  return(NULL);
      } while (sc != c);

      if(len > slen)
	return(NULL);
    } while (strncmp(s, find, len) != 0);

    s--;
  }

  return((char *) s);
}

/* ****************************************************** */

/*
 * Same as ndpi_strnstr but case-insensitive
 */
const char * ndpi_strncasestr(const char *str1, const char *str2, size_t len) {
  size_t str1_len = strnlen(str1, len);
  size_t str2_len = strlen(str2);
  size_t i;

  /* BUGFIX: when the needle is longer than the haystack, the unsigned
     expression (str1_len - str2_len + 1) underflows (size_t wraps),
     turning the loop bound into a huge value and reading past str1.
     No match is possible in that case, so bail out early. */
  if(str2_len > str1_len)
    return NULL;

  for(i = 0; i < (str1_len - str2_len + 1); i++){
    if(str1[0] == '\0')
      return NULL;
    else if(strncasecmp(str1, str2, str2_len) == 0)
      return(str1);

    str1++;
  }

  return NULL;
}

/* ****************************************************** */

int ndpi_match_prefix(const u_int8_t *payload, size_t payload_len, const char *str, size_t str_len) {
  int rc = str_len <= payload_len ? memcmp(payload, str, str_len) == 0 : 0;

  return(rc);
}

/* ****************************************************** */

int ndpi_match_string_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *string_to_match,
				  u_int string_to_match_len, ndpi_protocol_match_result *ret_match,
				  u_int8_t is_host_match) {
  AC_TEXT_t ac_input_text;
  ndpi_automa *automa = is_host_match ?
&ndpi_str->host_automa : &ndpi_str->content_automa; AC_REP_t match = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED}; int rc; if((automa->ac_automa == NULL) || (string_to_match_len == 0)) return(NDPI_PROTOCOL_UNKNOWN); if(!automa->ac_automa_finalized) { printf("[%s:%d] [NDPI] Internal error: please call ndpi_finalize_initalization()\n", __FILE__, __LINE__); return(0); /* No matches */ } ac_input_text.astring = string_to_match, ac_input_text.length = string_to_match_len; rc = ac_automata_search(((AC_AUTOMATA_t *) automa->ac_automa), &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; /* We need to take into account also rc == 0 that is used for partial matches */ ret_match->protocol_id = match.number, ret_match->protocol_category = match.category, ret_match->protocol_breed = match.breed; return(rc ? 
match.number : 0); } /* **************************************** */ static u_int8_t ndpi_is_more_generic_protocol(u_int16_t previous_proto, u_int16_t new_proto) { /* Sometimes certificates are more generic than previously identified protocols */ if((previous_proto == NDPI_PROTOCOL_UNKNOWN) || (previous_proto == new_proto)) return(0); switch (previous_proto) { case NDPI_PROTOCOL_WHATSAPP_CALL: case NDPI_PROTOCOL_WHATSAPP_FILES: if(new_proto == NDPI_PROTOCOL_WHATSAPP) return(1); } return(0); } /* ****************************************************** */ static u_int16_t ndpi_automa_match_string_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, u_int16_t master_protocol_id, ndpi_protocol_match_result *ret_match, u_int8_t is_host_match) { int matching_protocol_id; struct ndpi_packet_struct *packet = &flow->packet; matching_protocol_id = ndpi_match_string_subprotocol(ndpi_str, string_to_match, string_to_match_len, ret_match, is_host_match); #ifdef DEBUG { char m[256]; int len = ndpi_min(sizeof(m), string_to_match_len); strncpy(m, string_to_match, len); m[len] = '\0'; NDPI_LOG_DBG2(ndpi_str, "[NDPI] ndpi_match_host_subprotocol(%s): %s\n", m, ndpi_str->proto_defaults[matching_protocol_id].protoName); } #endif if((matching_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (!ndpi_is_more_generic_protocol(packet->detected_protocol_stack[0], matching_protocol_id))) { /* Move the protocol on slot 0 down one position */ packet->detected_protocol_stack[1] = master_protocol_id, packet->detected_protocol_stack[0] = matching_protocol_id; flow->detected_protocol_stack[0] = packet->detected_protocol_stack[0], flow->detected_protocol_stack[1] = packet->detected_protocol_stack[1]; if(flow->category == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) flow->category = ret_match->protocol_category; return(packet->detected_protocol_stack[0]); } #ifdef DEBUG string_to_match[string_to_match_len] = '\0'; 
NDPI_LOG_DBG2(ndpi_str, "[NTOP] Unable to find a match for '%s'\n", string_to_match); #endif ret_match->protocol_id = NDPI_PROTOCOL_UNKNOWN, ret_match->protocol_category = NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, ret_match->protocol_breed = NDPI_PROTOCOL_UNRATED; return(NDPI_PROTOCOL_UNKNOWN); } /* ****************************************************** */ u_int16_t ndpi_match_host_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, ndpi_protocol_match_result *ret_match, u_int16_t master_protocol_id) { u_int16_t rc = ndpi_automa_match_string_subprotocol(ndpi_str, flow, string_to_match, string_to_match_len, master_protocol_id, ret_match, 1); ndpi_protocol_category_t id = ret_match->protocol_category; if(ndpi_get_custom_category_match(ndpi_str, string_to_match, string_to_match_len, &id) != -1) { /* if(id != -1) */ { flow->category = ret_match->protocol_category = id; rc = master_protocol_id; } } return(rc); } /* **************************************** */ int ndpi_match_hostname_protocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t master_protocol, char *name, u_int name_len) { ndpi_protocol_match_result ret_match; u_int16_t subproto, what_len; char *what; if((name_len > 2) && (name[0] == '*') && (name[1] == '.')) what = &name[1], what_len = name_len - 1; else what = name, what_len = name_len; subproto = ndpi_match_host_subprotocol(ndpi_struct, flow, what, what_len, &ret_match, master_protocol); if(subproto != NDPI_PROTOCOL_UNKNOWN) { ndpi_set_detected_protocol(ndpi_struct, flow, subproto, master_protocol); ndpi_int_change_category(ndpi_struct, flow, ret_match.protocol_category); return(1); } else return(0); } /* ****************************************************** */ u_int16_t ndpi_match_content_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, 
ndpi_protocol_match_result *ret_match, u_int16_t master_protocol_id) { return(ndpi_automa_match_string_subprotocol(ndpi_str, flow, string_to_match, string_to_match_len, master_protocol_id, ret_match, 0)); } /* ****************************************************** */ int ndpi_match_bigram(struct ndpi_detection_module_struct *ndpi_str, ndpi_automa *automa, char *bigram_to_match) { AC_TEXT_t ac_input_text; AC_REP_t match = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED}; int rc; if((automa->ac_automa == NULL) || (bigram_to_match == NULL)) return(-1); if(!automa->ac_automa_finalized) { #if 1 ndpi_finalize_initalization(ndpi_str); #else printf("[%s:%d] [NDPI] Internal error: please call ndpi_finalize_initalization()\n", __FILE__, __LINE__); return(0); /* No matches */ #endif } ac_input_text.astring = bigram_to_match, ac_input_text.length = 2; rc = ac_automata_search(((AC_AUTOMATA_t *) automa->ac_automa), &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; return(rc ? 
match.number : 0); } /* ****************************************************** */ void ndpi_free_flow(struct ndpi_flow_struct *flow) { if(flow) { if(flow->http.url) ndpi_free(flow->http.url); if(flow->http.content_type) ndpi_free(flow->http.content_type); if(flow->http.user_agent) ndpi_free(flow->http.user_agent); if(flow->kerberos_buf.pktbuf) ndpi_free(flow->kerberos_buf.pktbuf); if(flow_is_proto(flow, NDPI_PROTOCOL_TLS)) { if(flow->protos.stun_ssl.ssl.server_names) ndpi_free(flow->protos.stun_ssl.ssl.server_names); if(flow->protos.stun_ssl.ssl.alpn) ndpi_free(flow->protos.stun_ssl.ssl.alpn); if(flow->protos.stun_ssl.ssl.tls_supported_versions) ndpi_free(flow->protos.stun_ssl.ssl.tls_supported_versions); if(flow->protos.stun_ssl.ssl.issuerDN) ndpi_free(flow->protos.stun_ssl.ssl.issuerDN); if(flow->protos.stun_ssl.ssl.subjectDN) ndpi_free(flow->protos.stun_ssl.ssl.subjectDN); if(flow->l4.tcp.tls.srv_cert_fingerprint_ctx) ndpi_free(flow->l4.tcp.tls.srv_cert_fingerprint_ctx); if(flow->protos.stun_ssl.ssl.encrypted_sni.esni) ndpi_free(flow->protos.stun_ssl.ssl.encrypted_sni.esni); } if(flow->l4_proto == IPPROTO_TCP) { if(flow->l4.tcp.tls.message.buffer) ndpi_free(flow->l4.tcp.tls.message.buffer); } ndpi_free(flow); } } /* ****************************************************** */ char *ndpi_revision() { return(NDPI_GIT_RELEASE); } /* ****************************************************** */ #ifdef WIN32 /* https://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows */ int gettimeofday(struct timeval *tp, struct timezone *tzp) { // Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC) // until 00:00:00 January 1, 1970 static const uint64_t EPOCH = ((uint64_t) 116444736000000000ULL); SYSTEMTIME system_time; FILETIME file_time; uint64_t time; GetSystemTime(&system_time); SystemTimeToFileTime(&system_time, &file_time); 
time = ((uint64_t) file_time.dwLowDateTime); time += ((uint64_t) file_time.dwHighDateTime) << 32; tp->tv_sec = (long) ((time - EPOCH) / 10000000L); tp->tv_usec = (long) (system_time.wMilliseconds * 1000); return(0); } #endif int NDPI_BITMASK_COMPARE(NDPI_PROTOCOL_BITMASK a, NDPI_PROTOCOL_BITMASK b) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) { if(a.fds_bits[i] & b.fds_bits[i]) return(1); } return(0); } #ifdef CODE_UNUSED int NDPI_BITMASK_IS_EMPTY(NDPI_PROTOCOL_BITMASK a) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) if(a.fds_bits[i] != 0) return(0); return(1); } void NDPI_DUMP_BITMASK(NDPI_PROTOCOL_BITMASK a) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) printf("[%d=%u]", i, a.fds_bits[i]); printf("\n"); } #endif u_int16_t ndpi_get_api_version() { return(NDPI_API_VERSION); } ndpi_proto_defaults_t *ndpi_get_proto_defaults(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->proto_defaults); } u_int ndpi_get_ndpi_num_supported_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_supported_protocols); } u_int ndpi_get_ndpi_num_custom_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_custom_protocols); } u_int ndpi_get_ndpi_detection_module_size() { return(sizeof(struct ndpi_detection_module_struct)); } void ndpi_set_log_level(struct ndpi_detection_module_struct *ndpi_str, u_int l){ ndpi_str->ndpi_log_level = l; } /* ******************************************************************** */ /* LRU cache */ struct ndpi_lru_cache *ndpi_lru_cache_init(u_int32_t num_entries) { struct ndpi_lru_cache *c = (struct ndpi_lru_cache *) ndpi_malloc(sizeof(struct ndpi_lru_cache)); if(!c) return(NULL); c->entries = (struct ndpi_lru_cache_entry *) ndpi_calloc(num_entries, sizeof(struct ndpi_lru_cache_entry)); if(!c->entries) { ndpi_free(c); return(NULL); } else c->num_entries = num_entries; return(c); } void ndpi_lru_free_cache(struct ndpi_lru_cache *c) { ndpi_free(c->entries); ndpi_free(c); } 
u_int8_t ndpi_lru_find_cache(struct ndpi_lru_cache *c, u_int32_t key, u_int16_t *value, u_int8_t clean_key_when_found) { u_int32_t slot = key % c->num_entries; if(c->entries[slot].is_full) { *value = c->entries[slot].value; if(clean_key_when_found) c->entries[slot].is_full = 0; return(1); } else return(0); } void ndpi_lru_add_to_cache(struct ndpi_lru_cache *c, u_int32_t key, u_int16_t value) { u_int32_t slot = key % c->num_entries; c->entries[slot].is_full = 1, c->entries[slot].key = key, c->entries[slot].value = value; } /* ******************************************************************** */ /* This function tells if it's possible to further dissect a given flow 0 - All possible dissection has been completed 1 - Additional dissection is possible */ u_int8_t ndpi_extra_dissection_possible(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { u_int16_t proto = flow->detected_protocol_stack[1] ? flow->detected_protocol_stack[1] : flow->detected_protocol_stack[0]; #if 0 printf("[DEBUG] %s(%u.%u): %u\n", __FUNCTION__, flow->detected_protocol_stack[0], flow->detected_protocol_stack[1], proto); #endif switch (proto) { case NDPI_PROTOCOL_TLS: if(!flow->l4.tcp.tls.certificate_processed) return(1); /* TODO: add check for TLS 1.3 */ break; case NDPI_PROTOCOL_HTTP: if((flow->host_server_name[0] == '\0') || (flow->http.response_status_code == 0)) return(1); break; case NDPI_PROTOCOL_DNS: if(flow->protos.dns.num_answers == 0) return(1); break; case NDPI_PROTOCOL_FTP_CONTROL: case NDPI_PROTOCOL_MAIL_POP: case NDPI_PROTOCOL_MAIL_IMAP: case NDPI_PROTOCOL_MAIL_SMTP: if(flow->protos.ftp_imap_pop_smtp.password[0] == '\0') return(1); break; case NDPI_PROTOCOL_SSH: if((flow->protos.ssh.hassh_client[0] == '\0') || (flow->protos.ssh.hassh_server[0] == '\0')) return(1); break; case NDPI_PROTOCOL_TELNET: if(!flow->protos.telnet.password_detected) return(1); break; } return(0); } /* ******************************************************************** */ const 
char *ndpi_get_l4_proto_name(ndpi_l4_proto_info proto) { switch (proto) { case ndpi_l4_proto_unknown: return(""); break; case ndpi_l4_proto_tcp_only: return("TCP"); break; case ndpi_l4_proto_udp_only: return("UDP"); break; case ndpi_l4_proto_tcp_and_udp: return("TCP/UDP"); break; } return(""); } /* ******************************************************************** */ ndpi_l4_proto_info ndpi_get_l4_proto_info(struct ndpi_detection_module_struct *ndpi_struct, u_int16_t ndpi_proto_id) { if(ndpi_proto_id < ndpi_struct->ndpi_num_supported_protocols) { u_int16_t idx = ndpi_struct->proto_defaults[ndpi_proto_id].protoIdx; NDPI_SELECTION_BITMASK_PROTOCOL_SIZE bm = ndpi_struct->callback_buffer[idx].ndpi_selection_bitmask; if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP) return(ndpi_l4_proto_tcp_only); else if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP) return(ndpi_l4_proto_udp_only); else if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP) return(ndpi_l4_proto_tcp_and_udp); } return(ndpi_l4_proto_unknown); /* default */ } /* ******************************************************************** */ ndpi_ptree_t *ndpi_ptree_create(void) { ndpi_ptree_t *tree = (ndpi_ptree_t *) ndpi_malloc(sizeof(ndpi_ptree_t)); if(tree) { tree->v4 = ndpi_New_Patricia(32); tree->v6 = ndpi_New_Patricia(128); if((!tree->v4) || (!tree->v6)) { ndpi_ptree_destroy(tree); return(NULL); } } return(tree); } /* ******************************************************************** */ void ndpi_ptree_destroy(ndpi_ptree_t *tree) { if(tree) { if(tree->v4) ndpi_Destroy_Patricia(tree->v4, free_ptree_data); if(tree->v6) ndpi_Destroy_Patricia(tree->v6, free_ptree_data); ndpi_free(tree); } } /* ******************************************************************** */ int ndpi_ptree_insert(ndpi_ptree_t *tree, const ndpi_ip_addr_t *addr, u_int8_t bits, uint user_data) { u_int8_t is_v6 = ndpi_is_ipv6(addr); patricia_tree_t *ptree = is_v6 ? 
tree->v6 : tree->v4; prefix_t prefix; patricia_node_t *node; if(bits > ptree->maxbits) return(-1); if(is_v6) fill_prefix_v6(&prefix, (const struct in6_addr *) &addr->ipv6, bits, ptree->maxbits); else fill_prefix_v4(&prefix, (const struct in_addr *) &addr->ipv4, bits, ptree->maxbits); /* Verify that the node does not already exist */ node = ndpi_patricia_search_best(ptree, &prefix); if(node && (node->prefix->bitlen == bits)) return(-2); node = ndpi_patricia_lookup(ptree, &prefix); if(node != NULL) { node->value.uv.user_value = user_data, node->value.uv.additional_user_value = 0; return(0); } return(-3); } /* ******************************************************************** */ int ndpi_ptree_match_addr(ndpi_ptree_t *tree, const ndpi_ip_addr_t *addr, uint *user_data) { u_int8_t is_v6 = ndpi_is_ipv6(addr); patricia_tree_t *ptree = is_v6 ? tree->v6 : tree->v4; prefix_t prefix; patricia_node_t *node; int bits = ptree->maxbits; if(is_v6) fill_prefix_v6(&prefix, (const struct in6_addr *) &addr->ipv6, bits, ptree->maxbits); else fill_prefix_v4(&prefix, (const struct in_addr *) &addr->ipv4, bits, ptree->maxbits); node = ndpi_patricia_search_best(ptree, &prefix); if(node) { *user_data = node->value.uv.user_value; return(0); } return(-1); } /* ******************************************************************** */ void ndpi_md5(const u_char *data, size_t data_len, u_char hash[16]) { ndpi_MD5_CTX ctx; ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, data, data_len); ndpi_MD5Final(hash, &ctx); } /* ******************************************************************** */ static int enough(int a, int b) { u_int8_t percentage = 20; if(b == 0) return(0); if(a == 0) return(1); if(b > (((a+1)*percentage)/100)) return(1); return(0); } /* ******************************************************************** */ // #define DGA_DEBUG 1 int ndpi_check_dga_name(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *name) { int len, rc = 0; len = strlen(name); if(len >= 
5) { int i, j, num_found = 0, num_impossible = 0, num_bigram_checks = 0, num_digits = 0, num_vowels = 0, num_words = 0; char tmp[128], *word, *tok_tmp; len = snprintf(tmp, sizeof(tmp)-1, "%s", name); if(len < 0) return(0); for(i=0, j=0; (i<len) && (j<(sizeof(tmp)-1)); i++) { tmp[j++] = tolower(name[i]); } tmp[j] = '\0'; len = j; for(word = strtok_r(tmp, ".", &tok_tmp); ; word = strtok_r(NULL, ".", &tok_tmp)) { if(!word) break; num_words++; if(strlen(word) < 3) continue; #ifdef DGA_DEBUG printf("-> %s [%s][len: %u]\n", word, name, (unsigned int)strlen(word)); #endif for(i = 0; word[i+1] != '\0'; i++) { if(isdigit(word[i])) { num_digits++; // if(!isdigit(word[i+1])) num_impossible++; continue; } switch(word[i]) { case '_': case '-': case ':': continue; break; case '.': continue; break; } switch(word[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': num_vowels++; break; } if(isdigit(word[i+1])) { num_digits++; // num_impossible++; continue; } num_bigram_checks++; if(ndpi_match_bigram(ndpi_str, &ndpi_str->bigrams_automa, &word[i])) { num_found++; } else { if(ndpi_match_bigram(ndpi_str, &ndpi_str->impossible_bigrams_automa, &word[i])) { #ifdef DGA_DEBUG printf("IMPOSSIBLE %s\n", &word[i]); #endif num_impossible++; } } } /* for */ } /* for */ #ifdef DGA_DEBUG printf("[num_found: %u][num_impossible: %u][num_digits: %u][num_bigram_checks: %u][num_vowels: %u/%u]\n", num_found, num_impossible, num_digits, num_bigram_checks, num_vowels, j-num_vowels); #endif if(num_bigram_checks && ((num_found == 0) || ((num_digits > 5) && (num_words <= 3)) || enough(num_found, num_impossible))) rc = 1; if(rc && flow) NDPI_SET_BIT(flow->risk, NDPI_SUSPICIOUS_DGA_DOMAIN); #ifdef DGA_DEBUG if(rc) printf("DGA %s [num_found: %u][num_impossible: %u]\n", name, num_found, num_impossible); #endif } return(rc); }
/* * ndpi_main.c * * Copyright (C) 2011-20 - ntop.org * * This file is part of nDPI, an open source deep packet inspection * library based on the OpenDPI and PACE technology by ipoque GmbH * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdlib.h> #include <errno.h> #include <sys/types.h> #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_UNKNOWN #include "ndpi_config.h" #include "ndpi_api.h" #include "ahocorasick.h" #include "libcache.h" #include <time.h> #ifndef WIN32 #include <unistd.h> #endif #if defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ #include <sys/endian.h> #endif #include "ndpi_content_match.c.inc" #include "third_party/include/ndpi_patricia.h" #include "third_party/include/ht_hash.h" #include "third_party/include/ndpi_md5.h" /* stun.c */ extern u_int32_t get_stun_lru_key(struct ndpi_flow_struct *flow, u_int8_t rev); static int _ndpi_debug_callbacks = 0; /* #define MATCH_DEBUG 1 */ /* ****************************************** */ static void *(*_ndpi_flow_malloc)(size_t size); static void (*_ndpi_flow_free)(void *ptr); static void *(*_ndpi_malloc)(size_t size); static void (*_ndpi_free)(void *ptr); /* ****************************************** */ /* Forward */ static void addDefaultPort(struct ndpi_detection_module_struct *ndpi_str, ndpi_port_range *range, ndpi_proto_defaults_t *def, u_int8_t customUserProto, ndpi_default_ports_tree_node_t **root, const char 
*_func, int _line); static int removeDefaultPort(ndpi_port_range *range, ndpi_proto_defaults_t *def, ndpi_default_ports_tree_node_t **root); /* ****************************************** */ static inline uint8_t flow_is_proto(struct ndpi_flow_struct *flow, u_int16_t p) { return((flow->detected_protocol_stack[0] == p) || (flow->detected_protocol_stack[1] == p)); } /* ****************************************** */ void *ndpi_malloc(size_t size) { return(_ndpi_malloc ? _ndpi_malloc(size) : malloc(size)); } void *ndpi_flow_malloc(size_t size) { return(_ndpi_flow_malloc ? _ndpi_flow_malloc(size) : ndpi_malloc(size)); } /* ****************************************** */ void *ndpi_calloc(unsigned long count, size_t size) { size_t len = count * size; void *p = ndpi_malloc(len); if(p) memset(p, 0, len); return(p); } /* ****************************************** */ void ndpi_free(void *ptr) { if(_ndpi_free) _ndpi_free(ptr); else free(ptr); } /* ****************************************** */ void ndpi_flow_free(void *ptr) { if(_ndpi_flow_free) _ndpi_flow_free(ptr); else ndpi_free_flow((struct ndpi_flow_struct *) ptr); } /* ****************************************** */ void *ndpi_realloc(void *ptr, size_t old_size, size_t new_size) { void *ret = ndpi_malloc(new_size); if(!ret) return(ret); else { memcpy(ret, ptr, old_size); ndpi_free(ptr); return(ret); } } /* ****************************************** */ char *ndpi_strdup(const char *s) { if(s == NULL ){ return NULL; } int len = strlen(s); char *m = ndpi_malloc(len + 1); if(m) { memcpy(m, s, len); m[len] = '\0'; } return(m); } /* *********************************************************************************** */ /* Opaque structure defined here */ struct ndpi_ptree { patricia_tree_t *v4; patricia_tree_t *v6; }; /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_struct(void) { return(sizeof(struct ndpi_flow_struct)); } /* 
*********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_id_struct(void) { return(sizeof(struct ndpi_id_struct)); } /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_tcp_struct(void) { return(sizeof(struct ndpi_flow_tcp_struct)); } /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_udp_struct(void) { return(sizeof(struct ndpi_flow_udp_struct)); } /* *********************************************************************************** */ char *ndpi_get_proto_by_id(struct ndpi_detection_module_struct *ndpi_str, u_int id) { return((id >= ndpi_str->ndpi_num_supported_protocols) ? NULL : ndpi_str->proto_defaults[id].protoName); } /* *********************************************************************************** */ u_int16_t ndpi_get_proto_by_name(struct ndpi_detection_module_struct *ndpi_str, const char *name) { u_int16_t i, num = ndpi_get_num_supported_protocols(ndpi_str); for (i = 0; i < num; i++) if(strcasecmp(ndpi_get_proto_by_id(ndpi_str, i), name) == 0) return(i); return(NDPI_PROTOCOL_UNKNOWN); } /* ************************************************************************************* */ #ifdef CODE_UNUSED ndpi_port_range *ndpi_build_default_ports_range(ndpi_port_range *ports, u_int16_t portA_low, u_int16_t portA_high, u_int16_t portB_low, u_int16_t portB_high, u_int16_t portC_low, u_int16_t portC_high, u_int16_t portD_low, u_int16_t portD_high, u_int16_t portE_low, u_int16_t portE_high) { int i = 0; ports[i].port_low = portA_low, ports[i].port_high = portA_high; i++; ports[i].port_low = portB_low, ports[i].port_high = portB_high; i++; ports[i].port_low = portC_low, ports[i].port_high = portC_high; i++; ports[i].port_low = portD_low, ports[i].port_high = portD_high; i++; ports[i].port_low = portE_low, ports[i].port_high = 
portE_high; return(ports); } #endif /* *********************************************************************************** */ ndpi_port_range *ndpi_build_default_ports(ndpi_port_range *ports, u_int16_t portA, u_int16_t portB, u_int16_t portC, u_int16_t portD, u_int16_t portE) { int i = 0; ports[i].port_low = portA, ports[i].port_high = portA; i++; ports[i].port_low = portB, ports[i].port_high = portB; i++; ports[i].port_low = portC, ports[i].port_high = portC; i++; ports[i].port_low = portD, ports[i].port_high = portD; i++; ports[i].port_low = portE, ports[i].port_high = portE; return(ports); } /* ********************************************************************************** */ void ndpi_set_proto_breed(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId, ndpi_protocol_breed_t breed) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return; else ndpi_str->proto_defaults[protoId].protoBreed = breed; } /* ********************************************************************************** */ void ndpi_set_proto_category(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId, ndpi_protocol_category_t protoCategory) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return; else ndpi_str->proto_defaults[protoId].protoCategory = protoCategory; } /* ********************************************************************************** */ /* There are some (master) protocols that are informative, meaning that it shows what is the subprotocol about, but also that the subprotocol isn't a real protocol. Example: - DNS is informative as if we see a DNS request for www.facebook.com, the returned protocol is DNS.Facebook, but Facebook isn't a real subprotocol but rather it indicates a query for Facebook and not Facebook traffic. - HTTP/SSL are NOT informative as SSL.Facebook (likely) means that this is SSL (HTTPS) traffic containg Facebook traffic. 
*/ u_int8_t ndpi_is_subprotocol_informative(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return(0); switch (protoId) { /* All dissectors that have calls to ndpi_match_host_subprotocol() */ case NDPI_PROTOCOL_DNS: return(1); break; default: return(0); } } /* ********************************************************************************** */ void ndpi_exclude_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t protocol_id, const char *_file, const char *_func, int _line) { if(protocol_id < NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES if(ndpi_str && ndpi_str->ndpi_log_level >= NDPI_LOG_DEBUG && ndpi_str->ndpi_debug_printf != NULL) { (*(ndpi_str->ndpi_debug_printf))(protocol_id, ndpi_str, NDPI_LOG_DEBUG, _file, _func, _line, "exclude %s\n", ndpi_get_proto_name(ndpi_str, protocol_id)); } #endif NDPI_ADD_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, protocol_id); } } /* ********************************************************************************** */ void ndpi_set_proto_defaults(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_breed_t breed, u_int16_t protoId, u_int8_t can_have_a_subprotocol, u_int16_t tcp_master_protoId[2], u_int16_t udp_master_protoId[2], char *protoName, ndpi_protocol_category_t protoCategory, ndpi_port_range *tcpDefPorts, ndpi_port_range *udpDefPorts) { char *name; int j; if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) { #ifdef DEBUG NDPI_LOG_ERR(ndpi_str, "[NDPI] %s/protoId=%d: INTERNAL ERROR\n", protoName, protoId); #endif return; } if(ndpi_str->proto_defaults[protoId].protoName != NULL) { #ifdef DEBUG NDPI_LOG_ERR(ndpi_str, "[NDPI] %s/protoId=%d: already initialized. 
Ignoring it\n", protoName, protoId); #endif return; } name = ndpi_strdup(protoName); if(ndpi_str->proto_defaults[protoId].protoName) ndpi_free(ndpi_str->proto_defaults[protoId].protoName); ndpi_str->proto_defaults[protoId].protoName = name, ndpi_str->proto_defaults[protoId].protoCategory = protoCategory, ndpi_str->proto_defaults[protoId].protoId = protoId, ndpi_str->proto_defaults[protoId].protoBreed = breed; ndpi_str->proto_defaults[protoId].can_have_a_subprotocol = can_have_a_subprotocol; memcpy(&ndpi_str->proto_defaults[protoId].master_tcp_protoId, tcp_master_protoId, 2 * sizeof(u_int16_t)); memcpy(&ndpi_str->proto_defaults[protoId].master_udp_protoId, udp_master_protoId, 2 * sizeof(u_int16_t)); for (j = 0; j < MAX_DEFAULT_PORTS; j++) { if(udpDefPorts[j].port_low != 0) addDefaultPort(ndpi_str, &udpDefPorts[j], &ndpi_str->proto_defaults[protoId], 0, &ndpi_str->udpRoot, __FUNCTION__, __LINE__); if(tcpDefPorts[j].port_low != 0) addDefaultPort(ndpi_str, &tcpDefPorts[j], &ndpi_str->proto_defaults[protoId], 0, &ndpi_str->tcpRoot, __FUNCTION__, __LINE__); /* No port range, just the lower port */ ndpi_str->proto_defaults[protoId].tcp_default_ports[j] = tcpDefPorts[j].port_low; ndpi_str->proto_defaults[protoId].udp_default_ports[j] = udpDefPorts[j].port_low; } } /* ******************************************************************** */ static int ndpi_default_ports_tree_node_t_cmp(const void *a, const void *b) { ndpi_default_ports_tree_node_t *fa = (ndpi_default_ports_tree_node_t *) a; ndpi_default_ports_tree_node_t *fb = (ndpi_default_ports_tree_node_t *) b; //printf("[NDPI] %s(%d, %d)\n", __FUNCTION__, fa->default_port, fb->default_port); return((fa->default_port == fb->default_port) ? 0 : ((fa->default_port < fb->default_port) ? 
-1 : 1)); } /* ******************************************************************** */ void ndpi_default_ports_tree_node_t_walker(const void *node, const ndpi_VISIT which, const int depth) { ndpi_default_ports_tree_node_t *f = *(ndpi_default_ports_tree_node_t **) node; printf("<%d>Walk on node %s (%u)\n", depth, which == ndpi_preorder ? "ndpi_preorder" : which == ndpi_postorder ? "ndpi_postorder" : which == ndpi_endorder ? "ndpi_endorder" : which == ndpi_leaf ? "ndpi_leaf" : "unknown", f->default_port); } /* ******************************************************************** */ static void addDefaultPort(struct ndpi_detection_module_struct *ndpi_str, ndpi_port_range *range, ndpi_proto_defaults_t *def, u_int8_t customUserProto, ndpi_default_ports_tree_node_t **root, const char *_func, int _line) { u_int16_t port; for (port = range->port_low; port <= range->port_high; port++) { ndpi_default_ports_tree_node_t *node = (ndpi_default_ports_tree_node_t *) ndpi_malloc(sizeof(ndpi_default_ports_tree_node_t)); ndpi_default_ports_tree_node_t *ret; if(!node) { NDPI_LOG_ERR(ndpi_str, "%s:%d not enough memory\n", _func, _line); break; } node->proto = def, node->default_port = port, node->customUserProto = customUserProto; ret = (ndpi_default_ports_tree_node_t *) ndpi_tsearch(node, (void *) root, ndpi_default_ports_tree_node_t_cmp); /* Add it to the tree */ if(ret != node) { NDPI_LOG_DBG(ndpi_str, "[NDPI] %s:%d found duplicate for port %u: overwriting it with new value\n", _func, _line, port); ret->proto = def; ndpi_free(node); } } } /* ****************************************************** */ /* NOTE This function must be called with a semaphore set, this in order to avoid changing the datastructures while using them */ static int removeDefaultPort(ndpi_port_range *range, ndpi_proto_defaults_t *def, ndpi_default_ports_tree_node_t **root) { ndpi_default_ports_tree_node_t node; u_int16_t port; for (port = range->port_low; port <= range->port_high; port++) { 
ndpi_default_ports_tree_node_t *ret; node.proto = def, node.default_port = port; ret = (ndpi_default_ports_tree_node_t *) ndpi_tdelete( &node, (void *) root, ndpi_default_ports_tree_node_t_cmp); /* Add it to the tree */ if(ret != NULL) { ndpi_free((ndpi_default_ports_tree_node_t *) ret); return(0); } } return(-1); } /* ****************************************************** */ static int ndpi_string_to_automa(struct ndpi_detection_module_struct *ndpi_str, ndpi_automa *automa, char *value, u_int16_t protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed, u_int8_t free_str_on_duplicate) { AC_PATTERN_t ac_pattern; AC_ERROR_t rc; if((value == NULL) || (protocol_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS))) { NDPI_LOG_ERR(ndpi_str, "[NDPI] protoId=%d: INTERNAL ERROR\n", protocol_id); return(-1); } if(automa->ac_automa == NULL) return(-2); ac_pattern.astring = value, ac_pattern.rep.number = protocol_id, ac_pattern.rep.category = (u_int16_t) category, ac_pattern.rep.breed = (u_int16_t) breed; #ifdef MATCH_DEBUG printf("Adding to automa [%s][protocol_id: %u][category: %u][breed: %u]\n", value, protocol_id, category, breed); #endif if(value == NULL) ac_pattern.length = 0; else ac_pattern.length = strlen(ac_pattern.astring); rc = ac_automata_add(((AC_AUTOMATA_t *) automa->ac_automa), &ac_pattern); if(rc != ACERR_DUPLICATE_PATTERN && rc != ACERR_SUCCESS) return(-2); if(rc == ACERR_DUPLICATE_PATTERN && free_str_on_duplicate) ndpi_free(value); return(0); } /* ****************************************************** */ static int ndpi_add_host_url_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *_value, int protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed) { int rv; char *value = ndpi_strdup(_value); if(!value) return(-1); #ifdef DEBUG NDPI_LOG_DBG2(ndpi_str, "[NDPI] Adding [%s][%d]\n", value, protocol_id); #endif rv = ndpi_string_to_automa(ndpi_str, &ndpi_str->host_automa, value, protocol_id, 
category, breed, 1); if(rv != 0) ndpi_free(value); return(rv); } /* ****************************************************** */ #ifdef CODE_UNUSED int ndpi_add_content_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value, int protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed) { return(ndpi_string_to_automa(ndpi_str, &ndpi_str->content_automa, value, protocol_id, category, breed, 0)); } #endif /* ****************************************************** */ /* NOTE This function must be called with a semaphore set, this in order to avoid changing the datastructures while using them */ static int ndpi_remove_host_url_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value, int protocol_id) { NDPI_LOG_ERR(ndpi_str, "[NDPI] Missing implementation for proto %s/%d\n", value, protocol_id); return(-1); } /* ******************************************************************** */ void ndpi_init_protocol_match(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_match *match) { u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}; ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; if(ndpi_str->proto_defaults[match->protocol_id].protoName == NULL) { ndpi_str->proto_defaults[match->protocol_id].protoName = ndpi_strdup(match->proto_name); ndpi_str->proto_defaults[match->protocol_id].protoId = match->protocol_id; ndpi_str->proto_defaults[match->protocol_id].protoCategory = match->protocol_category; ndpi_str->proto_defaults[match->protocol_id].protoBreed = match->protocol_breed; ndpi_set_proto_defaults(ndpi_str, ndpi_str->proto_defaults[match->protocol_id].protoBreed, ndpi_str->proto_defaults[match->protocol_id].protoId, 0 /* can_have_a_subprotocol */, no_master, no_master, ndpi_str->proto_defaults[match->protocol_id].protoName, ndpi_str->proto_defaults[match->protocol_id].protoCategory, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, 
ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); } ndpi_add_host_url_subprotocol(ndpi_str, match->string_to_match, match->protocol_id, match->protocol_category, match->protocol_breed); } /* ******************************************************************** */ /* Self check function to be called onli for testing purposes */ void ndpi_self_check_host_match() { u_int32_t i, j; for (i = 0; host_match[i].string_to_match != NULL; i++) { for (j = 0; host_match[j].string_to_match != NULL; j++) { if((i != j) && (strcmp(host_match[i].string_to_match, host_match[j].string_to_match) == 0)) { printf("[INTERNAL ERROR]: Duplicate string detected '%s' [id: %u, id %u]\n", host_match[i].string_to_match, i, j); printf("\nPlease fix host_match[] in ndpi_content_match.c.inc\n"); exit(0); } } } } /* ******************************************************************** */ static void init_string_based_protocols(struct ndpi_detection_module_struct *ndpi_str) { int i; for (i = 0; host_match[i].string_to_match != NULL; i++) ndpi_init_protocol_match(ndpi_str, &host_match[i]); ndpi_enable_loaded_categories(ndpi_str); #ifdef MATCH_DEBUG // ac_automata_display(ndpi_str->host_automa.ac_automa, 'n'); #endif #if 1 for (i = 0; ndpi_en_bigrams[i] != NULL; i++) ndpi_string_to_automa(ndpi_str, &ndpi_str->bigrams_automa, (char *) ndpi_en_bigrams[i], 1, 1, 1, 0); #else for (i = 0; ndpi_en_popular_bigrams[i] != NULL; i++) ndpi_string_to_automa(ndpi_str, &ndpi_str->bigrams_automa, (char *) ndpi_en_popular_bigrams[i], 1, 1, 1, 0); #endif for (i = 0; ndpi_en_impossible_bigrams[i] != NULL; i++) ndpi_string_to_automa(ndpi_str, &ndpi_str->impossible_bigrams_automa, (char *) ndpi_en_impossible_bigrams[i], 1, 1, 1, 0); } /* ******************************************************************** */ int ndpi_set_detection_preferences(struct ndpi_detection_module_struct *ndpi_str, ndpi_detection_preference pref, int value) { switch (pref) { case ndpi_pref_direction_detect_disable: 
ndpi_str->direction_detect_disable = (u_int8_t) value; break; default: return(-1); } return(0); } /* ******************************************************************** */ static void ndpi_validate_protocol_initialization(struct ndpi_detection_module_struct *ndpi_str) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) { if(ndpi_str->proto_defaults[i].protoName == NULL) { NDPI_LOG_ERR(ndpi_str, "[NDPI] INTERNAL ERROR missing protoName initialization for [protoId=%d]: recovering\n", i); } else { if((i != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[i].protoCategory == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED)) { NDPI_LOG_ERR(ndpi_str, "[NDPI] INTERNAL ERROR missing category [protoId=%d/%s] initialization: recovering\n", i, ndpi_str->proto_defaults[i].protoName ? ndpi_str->proto_defaults[i].protoName : "???"); } } } } /* ******************************************************************** */ /* This function is used to map protocol name and default ports and it MUST be updated whenever a new protocol is added to NDPI. Do NOT add web services (NDPI_SERVICE_xxx) here. 
*/ static void ndpi_init_protocol_defaults(struct ndpi_detection_module_struct *ndpi_str) { ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}, custom_master[2]; /* Reset all settings */ memset(ndpi_str->proto_defaults, 0, sizeof(ndpi_str->proto_defaults)); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNRATED, NDPI_PROTOCOL_UNKNOWN, 0 /* can_have_a_subprotocol */, no_master, no_master, "Unknown", NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_FTP_CONTROL, 0 /* can_have_a_subprotocol */, no_master, no_master, "FTP_CONTROL", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 21, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FTP_DATA, 0 /* can_have_a_subprotocol */, no_master, no_master, "FTP_DATA", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 20, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MAIL_POP, 0 /* can_have_a_subprotocol */, no_master, no_master, "POP3", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 110, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_POPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "POPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 995, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MAIL_SMTP, 0 /* can_have_a_subprotocol */, 
no_master, no_master, "SMTP", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 25, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_SMTPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMTPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 465, 587, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MAIL_IMAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMAP", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 143, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_IMAPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMAPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 993, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DNS, 1 /* can_have_a_subprotocol */, no_master, no_master, "DNS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 53, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 53, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IPP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IMO, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMO", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 80, 0 /* ntop */, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MDNS, 1 /* can_have_a_subprotocol */, no_master, no_master, "MDNS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5353, 5354, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "NTP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 123, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NETBIOS, 0 /* can_have_a_subprotocol */, no_master, no_master, "NetBIOS", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 139, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 137, 138, 139, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NFS, 0 /* can_have_a_subprotocol */, no_master, no_master, "NFS", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 2049, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2049, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SSDP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SSDP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BGP, 0 /* can_have_a_subprotocol */, no_master, no_master, "BGP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 179, 2605, 0, 
0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SNMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SNMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 161, 162, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_XDMCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "XDMCP", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 177, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 177, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_DANGEROUS, NDPI_PROTOCOL_SMBV1, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMBv1", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 445, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SYSLOG, 0 /* can_have_a_subprotocol */, no_master, no_master, "Syslog", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 514, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 514, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DHCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "DHCP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 67, 68, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_POSTGRES, 0 /* can_have_a_subprotocol */, no_master, no_master, "PostgreSQL", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 5432, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MYSQL, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "MySQL", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 3306, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_DIRECT_DOWNLOAD_LINK, 0 /* can_have_a_subprotocol */, no_master, no_master, "Direct_Download_Link", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_APPLEJUICE, 0 /* can_have_a_subprotocol */, no_master, no_master, "AppleJuice", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_DIRECTCONNECT, 0 /* can_have_a_subprotocol */, no_master, no_master, "DirectConnect", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NATS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Nats", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_NTOP, 0 /* can_have_a_subprotocol */, no_master, no_master, "ntop", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VMWARE, 0 /* can_have_a_subprotocol */, no_master, no_master, "VMware", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, 
ndpi_build_default_ports(ports_a, 903, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 902, 903, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_FBZERO, 0 /* can_have_a_subprotocol */, no_master, no_master, "FacebookZero", NDPI_PROTOCOL_CATEGORY_SOCIAL_NETWORK, ndpi_build_default_ports(ports_a, 443, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_KONTIKI, 0 /* can_have_a_subprotocol */, no_master, no_master, "Kontiki", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_OPENFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "OpenFT", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_FASTTRACK, 0 /* can_have_a_subprotocol */, no_master, no_master, "FastTrack", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_GNUTELLA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Gnutella", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_EDONKEY, 0 /* can_have_a_subprotocol */, no_master, no_master, "eDonkey", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_BITTORRENT, 0 /* can_have_a_subprotocol */, no_master, no_master, "BitTorrent", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 51413, 53646, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6771, 51413, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKYPE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Skype", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKYPE_CALL, 0 /* can_have_a_subprotocol */, no_master, no_master, "SkypeCall", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_TIKTOK, 0 /* can_have_a_subprotocol */, no_master, no_master, "TikTok", NDPI_PROTOCOL_CATEGORY_SOCIAL_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEREDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Teredo", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3544, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WECHAT, 0 /* can_have_a_subprotocol */, no_master, /* wechat.com */ no_master, "WeChat", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MEMCACHED, 0 /* can_have_a_subprotocol */, no_master, no_master, "Memcached", 
NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 11211, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 11211, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SMBV23, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMBv23", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 445, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MINING, 0 /* can_have_a_subprotocol */, no_master, no_master, "Mining", CUSTOM_CATEGORY_MINING, ndpi_build_default_ports(ports_a, 8333, 30303, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NEST_LOG_SINK, 0 /* can_have_a_subprotocol */, no_master, no_master, "NestLogSink", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 11095, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MODBUS, 1 /* no subprotocol */, no_master, no_master, "Modbus", NDPI_PROTOCOL_CATEGORY_NETWORK, /* Perhaps IoT in the future */ ndpi_build_default_ports(ports_a, 502, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP_CALL, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsAppCall", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DATASAVER, 0 /* can_have_a_subprotocol */, no_master, no_master, "DataSaver", NDPI_PROTOCOL_CATEGORY_WEB /* dummy */, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) 
/* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SIGNAL, 0 /* can_have_a_subprotocol */, no_master, /* https://signal.org */ no_master, "Signal", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DOH_DOT, 0 /* can_have_a_subprotocol */, no_master, no_master, "DoH_DoT", NDPI_PROTOCOL_CATEGORY_NETWORK /* dummy */, ndpi_build_default_ports(ports_a, 853, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_205, 0 /* can_have_a_subprotocol */, no_master, no_master, "FREE_205", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WIREGUARD, 0 /* can_have_a_subprotocol */, no_master, no_master, "WireGuard", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 51820, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPSTREAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPStream", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_XBOX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Xbox", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 3074, 3076, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3074, 3076, 500, 3544, 4500) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PLAYSTATION, 0 /* can_have_a_subprotocol */, no_master, no_master, "Playstation", 
NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 1935, 3478, 3479, 3480, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3478, 3479, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_QQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "QQ", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_RTSP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTSP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 554, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 554, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ICECAST, 0 /* can_have_a_subprotocol */, no_master, no_master, "IceCast", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPLIVE, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPLive", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPSTREAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPStream", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ZATTOO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Zattoo", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SHOUTCAST, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "ShoutCast", NDPI_PROTOCOL_CATEGORY_MUSIC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SOPCAST, 0 /* can_have_a_subprotocol */, no_master, no_master, "Sopcast", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_58, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free58", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_TVUPLAYER, 0 /* can_have_a_subprotocol */, no_master, no_master, "TVUplayer", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_DOWNLOAD, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Download", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_QQLIVE, 0 /* can_have_a_subprotocol */, no_master, no_master, "QQLive", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_THUNDER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Thunder", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SOULSEEK, 0 /* can_have_a_subprotocol */, no_master, no_master, "Soulseek", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_PS_VUE, 0 /* can_have_a_subprotocol */, no_master, no_master, "PS_VUE", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_IRC, 0 /* can_have_a_subprotocol */, no_master, no_master, "IRC", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 194, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 194, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AYIYA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Ayiya", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5072, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UNENCRYPTED_JABBER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Unencrypted_Jabber", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FREE_69, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free69", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_71, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free71", NDPI_PROTOCOL_CATEGORY_GAME, 
ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_VRRP, 0 /* can_have_a_subprotocol */, no_master, no_master, "VRRP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_STEAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Steam", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_HALFLIFE2, 0 /* can_have_a_subprotocol */, no_master, no_master, "HalfLife2", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WORLDOFWARCRAFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "WorldOfWarcraft", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_HOTSPOT_SHIELD, 0 /* can_have_a_subprotocol */, no_master, no_master, "HotspotShield", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_TELNET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Telnet", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 23, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); custom_master[0] = NDPI_PROTOCOL_SIP, custom_master[1] = 
NDPI_PROTOCOL_H323; ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_STUN, 0 /* can_have_a_subprotocol */, no_master, custom_master, "STUN", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3478, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_IP_IPSEC, 0 /* can_have_a_subprotocol */, no_master, no_master, "IPsec", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 500, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 500, 4500, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_GRE, 0 /* can_have_a_subprotocol */, no_master, no_master, "GRE", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_ICMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "ICMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_IGMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IGMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_EGP, 0 /* can_have_a_subprotocol */, no_master, no_master, "EGP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_SCTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SCTP", NDPI_PROTOCOL_CATEGORY_NETWORK, 
ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_OSPF, 0 /* can_have_a_subprotocol */, no_master, no_master, "OSPF", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 2604, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_IP_IN_IP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IP_in_IP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RDP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RDP", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 3389, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3389, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VNC, 0 /* can_have_a_subprotocol */, no_master, no_master, "VNC", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5900, 5901, 5800, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FREE90, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free90", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5900, 5901, 5800, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZOOM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Zoom", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP_FILES, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsAppFiles", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsApp", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_TLS, 1 /* can_have_a_subprotocol */, no_master, no_master, "TLS", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 443, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SSH, 0 /* can_have_a_subprotocol */, no_master, no_master, "SSH", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 22, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_USENET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Usenet", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MGCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "MGCP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, 
ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IAX, 0 /* can_have_a_subprotocol */, no_master, no_master, "IAX", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 4569, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 4569, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AFP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AFP", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 548, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 548, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_HULU, 0 /* can_have_a_subprotocol */, no_master, no_master, "Hulu", NDPI_PROTOCOL_CATEGORY_STREAMING, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CHECKMK, 0 /* can_have_a_subprotocol */, no_master, no_master, "CHECKMK", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 6556, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_STEALTHNET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Stealthnet", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_AIMINI, 0 /* can_have_a_subprotocol */, no_master, no_master, "Aimini", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SIP, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "SIP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 5060, 5061, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5060, 5061, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TRUPHONE, 0 /* can_have_a_subprotocol */, no_master, no_master, "TruPhone", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_ICMPV6, 0 /* can_have_a_subprotocol */, no_master, no_master, "ICMPV6", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DHCPV6, 0 /* can_have_a_subprotocol */, no_master, no_master, "DHCPV6", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ARMAGETRON, 0 /* can_have_a_subprotocol */, no_master, no_master, "Armagetron", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_CROSSFIRE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Crossfire", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DOFUS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Dofus", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FIESTA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Fiesta", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FLORENSIA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Florensia", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_GUILDWARS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Guildwars", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_ACTIVESYNC, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_ActiveSync", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_KERBEROS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Kerberos", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 88, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 88, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LDAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "LDAP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 389, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 389, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_MAPLESTORY, 0 /* can_have_a_subprotocol */, no_master, no_master, "MapleStory", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP 
*/, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MSSQL_TDS, 0 /* can_have_a_subprotocol */, no_master, no_master, "MsSQL-TDS", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 1433, 1434, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_PPTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPTP", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WARCRAFT3, 0 /* can_have_a_subprotocol */, no_master, no_master, "Warcraft3", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WORLD_OF_KUNG_FU, 0 /* can_have_a_subprotocol */, no_master, no_master, "WorldOfKungFu", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DCERPC, 0 /* can_have_a_subprotocol */, no_master, no_master, "DCE_RPC", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 135, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NETFLOW, 0 /* can_have_a_subprotocol */, no_master, no_master, "NetFlow", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2055, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SFLOW, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "sFlow", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6343, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_CONNECT, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Connect", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_PROXY, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Proxy", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 8080, 3128, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CITRIX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Citrix", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1494, 2598, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WEBEX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Webex", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RADIUS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Radius", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1812, 1813, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1812, 1813, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEAMVIEWER, 0 /* can_have_a_subprotocol */, no_master, no_master, "TeamViewer", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5938, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5938, 0, 
0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LOTUS_NOTES, 0 /* can_have_a_subprotocol */, no_master, no_master, "LotusNotes", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, ndpi_build_default_ports(ports_a, 1352, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SAP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 3201, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_GTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "GTP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2152, 2123, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UPNP, 0 /* can_have_a_subprotocol */, no_master, no_master, "UPnP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1780, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1900, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TELEGRAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Telegram", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_QUIC, 1 /* can_have_a_subprotocol */, no_master, no_master, "QUIC", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 443, 80, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DIAMETER, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "Diameter", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 3868, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_APPLE_PUSH, 0 /* can_have_a_subprotocol */, no_master, no_master, "ApplePush", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DROPBOX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Dropbox", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 17500, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SPOTIFY, 0 /* can_have_a_subprotocol */, no_master, no_master, "Spotify", NDPI_PROTOCOL_CATEGORY_MUSIC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MESSENGER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Messenger", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LISP, 0 /* can_have_a_subprotocol */, no_master, no_master, "LISP", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 4342, 4341, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_EAQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "EAQ", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6000, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_KAKAOTALK_VOICE, 0 /* can_have_a_subprotocol */, no_master, no_master, "KakaoTalk_Voice", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_MPEGTS, 0 /* can_have_a_subprotocol */, no_master, no_master, "MPEG_TS", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); /* http://en.wikipedia.org/wiki/Link-local_Multicast_Name_Resolution */ ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LLMNR, 0 /* can_have_a_subprotocol */, no_master, no_master, "LLMNR", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 5355, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5355, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */ ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_REMOTE_SCAN, 0 /* can_have_a_subprotocol */, no_master, no_master, "RemoteScan", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 6077, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6078, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_H323, 0 /* can_have_a_subprotocol */, no_master, no_master, "H323", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 1719, 1720, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1719, 1720, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_OPENVPN, 0 /* can_have_a_subprotocol */, no_master, no_master, "OpenVPN", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 1194, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1194, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NOE, 0 /* can_have_a_subprotocol */, no_master, no_master, "NOE", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CISCOVPN, 0 /* can_have_a_subprotocol */, no_master, no_master, "CiscoVPN", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 10000, 8008, 8009, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 10000, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEAMSPEAK, 0 /* can_have_a_subprotocol */, no_master, no_master, "TeamSpeak", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKINNY, 0 /* can_have_a_subprotocol */, no_master, no_master, "CiscoSkinny", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 2000, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RTCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTCP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RSYNC, 0 /* can_have_a_subprotocol */, no_master, no_master, "RSYNC", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 873, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ORACLE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Oracle", NDPI_PROTOCOL_CATEGORY_DATABASE, 
ndpi_build_default_ports(ports_a, 1521, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CORBA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Corba", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UBUNTUONE, 0 /* can_have_a_subprotocol */, no_master, no_master, "UbuntuONE", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHOIS_DAS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Whois-DAS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 43, 4343, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_COLLECTD, 0 /* can_have_a_subprotocol */, no_master, no_master, "Collectd", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 25826, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SOCKS, 0 /* can_have_a_subprotocol */, no_master, no_master, "SOCKS", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 1080, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 1080, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TFTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "TFTP", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 69, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, 
NDPI_PROTOCOL_RTMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTMP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 1935, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PANDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Pando_Media_Booster", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MEGACO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Megaco", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 2944, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_REDIS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Redis", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 6379, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZMQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "ZeroMQ", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_VHUA, 0 /* can_have_a_subprotocol */, no_master, no_master, "VHUA", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 58267, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_STARCRAFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "Starcraft", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 1119, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 1119, 0, 0, 
0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_UBNTAC2, 0 /* can_have_a_subprotocol */, no_master, no_master, "UBNTAC2", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 10001, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VIBER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Viber", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 7985, 5242, 5243, 4244, 0), /* TCP */ ndpi_build_default_ports(ports_b, 7985, 7987, 5242, 5243, 4244)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_COAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "COAP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 5683, 5684, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MQTT, 0 /* can_have_a_subprotocol */, no_master, no_master, "MQTT", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 1883, 8883, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SOMEIP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SOMEIP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 30491, 30501, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 30491, 30501, 30490, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RX, 0 /* can_have_a_subprotocol */, no_master, no_master, "RX", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_GIT, 0 /* can_have_a_subprotocol */, no_master, no_master, "Git", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, 
ndpi_build_default_ports(ports_a, 9418, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DRDA, 0 /* can_have_a_subprotocol */, no_master, no_master, "DRDA", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HANGOUT_DUO, 0 /* can_have_a_subprotocol */, no_master, no_master, "GoogleHangoutDuo", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BJNP, 0 /* can_have_a_subprotocol */, no_master, no_master, "BJNP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 8612, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SMPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMPP", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_OOKLA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Ookla", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AMQP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AMQP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_DNSCRYPT, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "DNScrypt", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TINC, 0 /* can_have_a_subprotocol */, no_master, no_master, "TINC", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 655, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 655, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_FIX, 0 /* can_have_a_subprotocol */, no_master, no_master, "FIX", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_NINTENDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Nintendo", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_CSGO, 0 /* can_have_a_subprotocol */, no_master, no_master, "CSGO", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AJP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AJP", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 8009, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TARGUS_GETDATA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Targus Dataspeed", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 5001, 5201, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5001, 5201, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AMAZON_VIDEO, 0 /* can_have_a_subprotocol */, no_master, no_master, "AmazonVideo", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DNP3, 1 /* no subprotocol */, no_master, no_master, "DNP3", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 20000, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IEC60870, 1 /* no subprotocol */, no_master, no_master, "IEC60870", NDPI_PROTOCOL_CATEGORY_NETWORK, /* Perhaps IoT in the future */ ndpi_build_default_ports(ports_a, 2404, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BLOOMBERG, 1 /* no subprotocol */, no_master, no_master, "Bloomberg", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CAPWAP, 1 /* no subprotocol */, no_master, no_master, "CAPWAP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5246, 5247, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZABBIX, 1 /* no subprotocol */, no_master, no_master, "Zabbix", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 10050, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_S7COMM, 1 /* no subprotocol */, no_master, no_master, "s7comm", NDPI_PROTOCOL_CATEGORY_NETWORK, 
ndpi_build_default_ports(ports_a, 102, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MSTEAMS, 1 /* no subprotocol */, no_master, no_master, "Teams", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WEBSOCKET, 1 /* can_have_a_subprotocol */, no_master, no_master, "WebSocket", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ANYDESK, 1 /* no subprotocol */, no_master, no_master, "AnyDesk", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_main.c" #endif /* calling function for host and content matched protocols */ init_string_based_protocols(ndpi_str); ndpi_validate_protocol_initialization(ndpi_str); } /* ****************************************************** */ #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_protocols.c" #endif /* ****************************************************** */ static int ac_match_handler(AC_MATCH_t *m, AC_TEXT_t *txt, AC_REP_t *match) { int min_len = (txt->length < m->patterns->length) ? txt->length : m->patterns->length; char buf[64] = {'\0'}, *whatfound; int min_buf_len = (txt->length > 63 /* sizeof(buf)-1 */) ? 
63 : txt->length; u_int buf_len = strlen(buf); strncpy(buf, txt->astring, min_buf_len); buf[min_buf_len] = '\0'; #ifdef MATCH_DEBUG printf("Searching [to search: %s/%u][pattern: %s/%u] [len: %d][match_num: %u][%s]\n", buf, (unigned int) txt->length, m->patterns->astring, (unigned int) m->patterns->length, min_len, m->match_num, m->patterns->astring); #endif whatfound = strstr(buf, m->patterns->astring); #ifdef MATCH_DEBUG printf("[NDPI] %s() [searching=%s][pattern=%s][%s][%c]\n", __FUNCTION__, buf, m->patterns->astring, whatfound ? whatfound : "<NULL>", whatfound[-1]); #endif if(whatfound) { /* The patch below allows in case of pattern ws.amazon.com to avoid matching aws.amazon.com whereas a.ws.amazon.com has to match */ if((whatfound != buf) && (m->patterns->astring[0] != '.') /* The searched pattern does not start with . */ && strchr(m->patterns->astring, '.') /* The matched pattern has a . (e.g. numeric or sym IPs) */) { int len = strlen(m->patterns->astring); if((whatfound[-1] != '.') || ((m->patterns->astring[len - 1] != '.') && (whatfound[len] != '\0') /* endsWith does not hold here */)) { return(0); } else { memcpy(match, &m->patterns[0].rep, sizeof(AC_REP_t)); /* Partial match? */ return(0); /* Keep searching as probably there is a better match */ } } } /* Return 1 for stopping to the first match. We might consider searching for the more specific match, paying more cpu cycles. 
*/ memcpy(match, &m->patterns[0].rep, sizeof(AC_REP_t)); if(((buf_len >= min_len) && (strncmp(&buf[buf_len - min_len], m->patterns->astring, min_len) == 0)) || (strncmp(buf, m->patterns->astring, min_len) == 0) /* begins with */ ) { #ifdef MATCH_DEBUG printf("Found match [%s][%s] [len: %d]" // "[proto_id: %u]" "\n", buf, m->patterns->astring, min_len /* , *matching_protocol_id */); #endif return(1); /* If the pattern found matches the string at the beginning we stop here */ } else { #ifdef MATCH_DEBUG printf("NO match found: continue\n"); #endif return(0); /* 0 to continue searching, !0 to stop */ } } /* ******************************************************************** */ static int fill_prefix_v4(prefix_t *p, const struct in_addr *a, int b, int mb) { if(b < 0 || b > mb) return(-1); memset(p, 0, sizeof(prefix_t)); memcpy(&p->add.sin, a, (mb + 7) / 8); p->family = AF_INET; p->bitlen = b; p->ref_count = 0; return(0); } /* ******************************************* */ static int fill_prefix_v6(prefix_t *prefix, const struct in6_addr *addr, int bits, int maxbits) { #ifdef PATRICIA_IPV6 if(bits < 0 || bits > maxbits) return -1; memcpy(&prefix->add.sin6, addr, (maxbits + 7) / 8); prefix->family = AF_INET6, prefix->bitlen = bits, prefix->ref_count = 0; return 0; #else return(-1); #endif } /* ******************************************* */ u_int16_t ndpi_network_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin /* network byte order */) { prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->protocols_ptree, &prefix); return(node ? 
node->value.uv.user_value : NDPI_PROTOCOL_UNKNOWN);
}

/* ******************************************* */

/* Like ndpi_network_ptree_match() but the node only matches when its stored
   port is 0 (wildcard) or equals the supplied port. */
u_int16_t ndpi_network_port_ptree_match(struct ndpi_detection_module_struct *ndpi_str,
					struct in_addr *pin /* network byte order */,
					u_int16_t port /* network byte order */) {
  prefix_t prefix;
  patricia_node_t *node;

  /* Make sure all in network byte order otherwise compares wont work */
  fill_prefix_v4(&prefix, pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits);
  node = ndpi_patricia_search_best(ndpi_str->protocols_ptree, &prefix);

  if(node) {
    if((node->value.uv.additional_user_value == 0) || (node->value.uv.additional_user_value == port))
      return(node->value.uv.user_value);
  }

  return(NDPI_PROTOCOL_UNKNOWN);
}

/* ******************************************* */

#if 0
static u_int8_t tor_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin) {
  return((ndpi_network_ptree_match(ndpi_str, pin) == NDPI_PROTOCOL_TOR) ? 1 : 0);
}
#endif

/* ******************************************* */

/* Returns 1 when the flow was guessed (by host) to be TOR; only meaningful
   for TCP/IPv4 flows. */
u_int8_t ndpi_is_tor_flow(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) {
  struct ndpi_packet_struct *packet = &flow->packet;

  if(packet->tcp != NULL) {
    if(packet->iph) {
      if(flow->guessed_host_protocol_id == NDPI_PROTOCOL_TOR)
	return(1);
    }
  }

  return(0);
}

/* ******************************************* */

/* Insert an IPv4 prefix into 'tree' and return the (zeroed) node, or NULL on
   failure. NOTE(review): the 'family' parameter is currently unused — the
   helper always fills an AF_INET prefix. */
static patricia_node_t *add_to_ptree(patricia_tree_t *tree, int family, void *addr, int bits) {
  prefix_t prefix;
  patricia_node_t *node;

  fill_prefix_v4(&prefix, (struct in_addr *) addr, bits, tree->maxbits);

  node = ndpi_patricia_lookup(tree, &prefix);
  if(node)
    memset(&node->value, 0, sizeof(node->value));

  return(node);
}

/* ******************************************* */

/*
  Load a file containing IPv4 addresses in CIDR format as 'protocol_id'

  Return: the number of entries loaded or -1 in case of error
*/
int ndpi_load_ipv4_ptree(struct ndpi_detection_module_struct *ndpi_str, const char *path, u_int16_t protocol_id) {
  char buffer[128], *line, *addr, *cidr, *saveptr;
  FILE *fd;
  int len;
  u_int num_loaded = 0;

  fd = fopen(path, "r");

  if(fd == NULL) {
    NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno));
    return(-1);
  }

  while (1) {
    line = fgets(buffer, sizeof(buffer), fd);

    if(line == NULL)
      break;

    len = strlen(line);

    /* Skip empty lines and '#' comments */
    if((len <= 1) || (line[0] == '#'))
      continue;

    line[len - 1] = '\0'; /* drop the trailing newline */
    addr = strtok_r(line, "/", &saveptr);

    if(addr) {
      struct in_addr pin;
      patricia_node_t *node;

      cidr = strtok_r(NULL, "\n", &saveptr); /* optional "/bits" suffix; default /32 */

      pin.s_addr = inet_addr(addr);
      if((node = add_to_ptree(ndpi_str->protocols_ptree, AF_INET, &pin, cidr ? atoi(cidr) : 32 /* bits */)) != NULL) {
	node->value.uv.user_value = protocol_id, node->value.uv.additional_user_value = 0 /* port */;
	num_loaded++;
      }
    }
  }

  fclose(fd);
  return(num_loaded);
}

/* ******************************************* */

/* Populate 'ptree' from a compiled-in host list; TOR entries can be skipped
   (see ndpi_dont_load_tor_hosts preference). */
static void ndpi_init_ptree_ipv4(struct ndpi_detection_module_struct *ndpi_str, void *ptree, ndpi_network host_list[],
				 u_int8_t skip_tor_hosts) {
  int i;

  /* The list is terminated by a 0x0 network entry */
  for (i = 0; host_list[i].network != 0x0; i++) {
    struct in_addr pin;
    patricia_node_t *node;

    if(skip_tor_hosts && (host_list[i].value == NDPI_PROTOCOL_TOR))
      continue;

    pin.s_addr = htonl(host_list[i].network);
    if((node = add_to_ptree(ptree, AF_INET, &pin, host_list[i].cidr /* bits */)) != NULL) {
      node->value.uv.user_value = host_list[i].value, node->value.uv.additional_user_value = 0;
    }
  }
}

/* ******************************************* */

/* Parse an "a.b.c.d[/bits][:port]" custom rule (the string is modified in
   place) and register it in the protocols ptree as 'protocol_id'. */
static int ndpi_add_host_ip_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value,
					u_int16_t protocol_id) {
  patricia_node_t *node;
  struct in_addr pin;
  int bits = 32;
  char *ptr = strrchr(value, '/');
  u_int16_t port = 0; /* Format ip:8.248.73.247:443 */
  char *double_column;

  if(ptr) {
    ptr[0] = '\0';
    ptr++;

    if((double_column = strrchr(ptr, ':')) != NULL) {
      double_column[0] = '\0';
      port = atoi(&double_column[1]);
    }

    if(atoi(ptr) >= 0 && atoi(ptr) <= 32)
      bits = atoi(ptr);
  } else {
    /*
      Let's check if there is the port defined

      Example:
      ip:8.248.73.247:443@AmazonPrime
    */
    double_column = strrchr(value, ':');

    if(double_column) {
      double_column[0] = '\0';
      port = atoi(&double_column[1]);
    }
  }

  inet_pton(AF_INET, value, &pin);

  if((node = add_to_ptree(ndpi_str->protocols_ptree, AF_INET, &pin, bits)) != NULL) {
    /* Port is stored in network byte order to match lookups */
    node->value.uv.user_value = protocol_id, node->value.uv.additional_user_value = htons(port);
  }

  return(0);
}

/* Allocator hooks: let the embedding application override how nDPI
   allocates/frees generic buffers and flow structures. */
void set_ndpi_malloc(void *(*__ndpi_malloc)(size_t size)) {
  _ndpi_malloc = __ndpi_malloc;
}
void set_ndpi_flow_malloc(void *(*__ndpi_flow_malloc)(size_t size)) {
  _ndpi_flow_malloc = __ndpi_flow_malloc;
}

void set_ndpi_free(void (*__ndpi_free)(void *ptr)) {
  _ndpi_free = __ndpi_free;
}
void set_ndpi_flow_free(void (*__ndpi_flow_free)(void *ptr)) {
  _ndpi_flow_free = __ndpi_flow_free;
}

/* Debug logger; a no-op unless NDPI_ENABLE_DEBUG_MESSAGES is defined.
   Messages below NDPI_LOG_ERROR are filtered per-protocol via debug_bitmask. */
void ndpi_debug_printf(unsigned int proto, struct ndpi_detection_module_struct *ndpi_str, ndpi_log_level_t log_level,
		       const char *file_name, const char *func_name, int line_number, const char *format, ...) {
#ifdef NDPI_ENABLE_DEBUG_MESSAGES
  va_list args;
#define MAX_STR_LEN 250
  char str[MAX_STR_LEN];
  if(ndpi_str != NULL && log_level > NDPI_LOG_ERROR && proto > 0 && proto < NDPI_MAX_SUPPORTED_PROTOCOLS &&
     !NDPI_ISSET(&ndpi_str->debug_bitmask, proto))
    return;
  va_start(args, format);
  vsnprintf(str, sizeof(str) - 1, format, args);
  va_end(args);

  if(ndpi_str != NULL) {
    printf("%s:%s:%-3d - [%s]: %s", file_name, func_name, line_number, ndpi_get_proto_name(ndpi_str, proto), str);
  } else {
    printf("Proto: %u, %s", proto, str);
  }
#endif
}

void set_ndpi_debug_function(struct ndpi_detection_module_struct *ndpi_str, ndpi_debug_function_ptr ndpi_debug_printf) {
#ifdef NDPI_ENABLE_DEBUG_MESSAGES
  ndpi_str->ndpi_debug_printf = ndpi_debug_printf;
#endif
}

/* ****************************************** */

/* Keep it in order and in sync with ndpi_protocol_category_t in ndpi_typedefs.h */
static const char *categories[] = {
  "Unspecified", "Media", "VPN", "Email", "DataTransfer", "Web", "SocialNetwork",
"Download-FileTransfer-FileSharing", "Game", "Chat", "VoIP", "Database", "RemoteAccess", "Cloud", "Network", "Collaborative", "RPC", "Streaming", "System", "SoftwareUpdate", "", "", "", "", "", "Music", "Video", "Shopping", "Productivity", "FileSharing", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "Mining", /* 99 */ "Malware", "Advertisement", "Banned_Site", "Site_Unavailable", "Allowed_Site", "Antimalware", }; /* ******************************************************************** */ struct ndpi_detection_module_struct *ndpi_init_detection_module(ndpi_init_prefs prefs) { struct ndpi_detection_module_struct *ndpi_str = ndpi_malloc(sizeof(struct ndpi_detection_module_struct)); int i; if(ndpi_str == NULL) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES NDPI_LOG_ERR(ndpi_str, "ndpi_init_detection_module initial malloc failed for ndpi_str\n"); #endif /* NDPI_ENABLE_DEBUG_MESSAGES */ return(NULL); } memset(ndpi_str, 0, sizeof(struct ndpi_detection_module_struct)); #ifdef NDPI_ENABLE_DEBUG_MESSAGES set_ndpi_debug_function(ndpi_str, (ndpi_debug_function_ptr) ndpi_debug_printf); #endif /* NDPI_ENABLE_DEBUG_MESSAGES */ if((ndpi_str->protocols_ptree = ndpi_New_Patricia(32 /* IPv4 */)) != NULL) ndpi_init_ptree_ipv4(ndpi_str, ndpi_str->protocols_ptree, host_protocol_list, prefs & ndpi_dont_load_tor_hosts); NDPI_BITMASK_RESET(ndpi_str->detection_bitmask); #ifdef NDPI_ENABLE_DEBUG_MESSAGES ndpi_str->user_data = NULL; #endif ndpi_str->ticks_per_second = 1000; /* ndpi_str->ticks_per_second */ ndpi_str->tcp_max_retransmission_window_size = NDPI_DEFAULT_MAX_TCP_RETRANSMISSION_WINDOW_SIZE; ndpi_str->directconnect_connection_ip_tick_timeout = NDPI_DIRECTCONNECT_CONNECTION_IP_TICK_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->rtsp_connection_timeout = 
NDPI_RTSP_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->irc_timeout = NDPI_IRC_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->gnutella_timeout = NDPI_GNUTELLA_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->thunder_timeout = NDPI_THUNDER_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->zattoo_connection_timeout = NDPI_ZATTOO_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->jabber_stun_timeout = NDPI_JABBER_STUN_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->jabber_file_transfer_timeout = NDPI_JABBER_FT_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->soulseek_connection_ip_tick_timeout = NDPI_SOULSEEK_CONNECTION_IP_TICK_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->ndpi_num_supported_protocols = NDPI_MAX_SUPPORTED_PROTOCOLS; ndpi_str->ndpi_num_custom_protocols = 0; ndpi_str->host_automa.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->content_automa.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->bigrams_automa.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->impossible_bigrams_automa.ac_automa = ac_automata_init(ac_match_handler); if((sizeof(categories) / sizeof(char *)) != NDPI_PROTOCOL_NUM_CATEGORIES) { NDPI_LOG_ERR(ndpi_str, "[NDPI] invalid categories length: expected %u, got %u\n", NDPI_PROTOCOL_NUM_CATEGORIES, (unsigned int) (sizeof(categories) / sizeof(char *))); return(NULL); } ndpi_str->custom_categories.hostnames.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->custom_categories.hostnames_shadow.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->custom_categories.ipAddresses = ndpi_New_Patricia(32 /* IPv4 */); ndpi_str->custom_categories.ipAddresses_shadow = ndpi_New_Patricia(32 /* IPv4 */); if((ndpi_str->custom_categories.ipAddresses == NULL) || (ndpi_str->custom_categories.ipAddresses_shadow == NULL)) return(NULL); ndpi_init_protocol_defaults(ndpi_str); for (i = 0; i < NUM_CUSTOM_CATEGORIES; i++) snprintf(ndpi_str->custom_category_labels[i], 
CUSTOM_CATEGORY_LABEL_LEN, "User custom category %u", (unsigned int) (i + 1)); return(ndpi_str); } /* *********************************************** */ void ndpi_finalize_initalization(struct ndpi_detection_module_struct *ndpi_str) { u_int i; for (i = 0; i < 4; i++) { ndpi_automa *automa; switch (i) { case 0: automa = &ndpi_str->host_automa; break; case 1: automa = &ndpi_str->content_automa; break; case 2: automa = &ndpi_str->bigrams_automa; break; case 3: automa = &ndpi_str->impossible_bigrams_automa; break; default: automa = NULL; break; } if(automa) { ac_automata_finalize((AC_AUTOMATA_t *) automa->ac_automa); automa->ac_automa_finalized = 1; } } } /* *********************************************** */ /* Wrappers */ void *ndpi_init_automa(void) { return(ac_automata_init(ac_match_handler)); } /* ****************************************************** */ int ndpi_add_string_value_to_automa(void *_automa, char *str, u_int32_t num) { AC_PATTERN_t ac_pattern; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_ERROR_t rc; if(automa == NULL) return(-1); memset(&ac_pattern, 0, sizeof(ac_pattern)); ac_pattern.astring = str; ac_pattern.rep.number = num; ac_pattern.length = strlen(ac_pattern.astring); rc = ac_automata_add(automa, &ac_pattern); return(rc == ACERR_SUCCESS || rc == ACERR_DUPLICATE_PATTERN ? 
0 : -1); } /* ****************************************************** */ int ndpi_add_string_to_automa(void *_automa, char *str) { return(ndpi_add_string_value_to_automa(_automa, str, 1)); } /* ****************************************************** */ void ndpi_free_automa(void *_automa) { ac_automata_release((AC_AUTOMATA_t *) _automa, 0); } /* ****************************************************** */ void ndpi_finalize_automa(void *_automa) { ac_automata_finalize((AC_AUTOMATA_t *) _automa); } /* ****************************************************** */ int ndpi_match_string(void *_automa, char *string_to_match) { AC_REP_t match = { NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; int rc; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = strlen(string_to_match); rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; return(rc ? 
match.number : 0); } /* ****************************************************** */ int ndpi_match_string_protocol_id(void *_automa, char *string_to_match, u_int match_len, u_int16_t *protocol_id, ndpi_protocol_category_t *category, ndpi_protocol_breed_t *breed) { AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_REP_t match = { 0, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; int rc; *protocol_id = (u_int16_t)-1; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = match_len; rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; if(rc) *protocol_id = (u_int16_t)match.number, *category = match.category, *breed = match.breed; else *protocol_id = NDPI_PROTOCOL_UNKNOWN; return((*protocol_id != NDPI_PROTOCOL_UNKNOWN) ? 0 : -1); } /* ****************************************************** */ int ndpi_match_string_value(void *_automa, char *string_to_match, u_int match_len, u_int32_t *num) { AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_REP_t match = { 0, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; int rc; *num = (u_int32_t)-1; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = match_len; rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; if(rc) *num = match.number; else *num = 0; return(rc ? 
0 : -1); } /* *********************************************** */ int ndpi_match_custom_category(struct ndpi_detection_module_struct *ndpi_str, char *name, u_int name_len, ndpi_protocol_category_t *category) { ndpi_protocol_breed_t breed; u_int16_t id; int rc = ndpi_match_string_protocol_id(ndpi_str->custom_categories.hostnames.ac_automa, name, name_len, &id, category, &breed); return(rc); } /* *********************************************** */ int ndpi_get_custom_category_match(struct ndpi_detection_module_struct *ndpi_str, char *name_or_ip, u_int name_len, ndpi_protocol_category_t *id) { char ipbuf[64], *ptr; struct in_addr pin; u_int cp_len = ndpi_min(sizeof(ipbuf) - 1, name_len); if(!ndpi_str->custom_categories.categories_loaded) return(-1); if(cp_len > 0) { memcpy(ipbuf, name_or_ip, cp_len); ipbuf[cp_len] = '\0'; } else ipbuf[0] = '\0'; ptr = strrchr(ipbuf, '/'); if(ptr) ptr[0] = '\0'; if(inet_pton(AF_INET, ipbuf, &pin) == 1) { /* Search IP */ prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, &pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix); if(node) { *id = node->value.uv.user_value; return(0); } return(-1); } else { /* Search Host */ return(ndpi_match_custom_category(ndpi_str, name_or_ip, name_len, id)); } } /* *********************************************** */ static void free_ptree_data(void *data) { ; } /* ****************************************************** */ void ndpi_exit_detection_module(struct ndpi_detection_module_struct *ndpi_str) { if(ndpi_str != NULL) { int i; for (i = 0; i < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS); i++) { if(ndpi_str->proto_defaults[i].protoName) ndpi_free(ndpi_str->proto_defaults[i].protoName); } /* NDPI_PROTOCOL_TINC */ if(ndpi_str->tinc_cache) cache_free((cache_t)(ndpi_str->tinc_cache)); if(ndpi_str->ookla_cache) 
ndpi_lru_free_cache(ndpi_str->ookla_cache); if(ndpi_str->stun_cache) ndpi_lru_free_cache(ndpi_str->stun_cache); if(ndpi_str->msteams_cache) ndpi_lru_free_cache(ndpi_str->msteams_cache); if(ndpi_str->protocols_ptree) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->protocols_ptree, free_ptree_data); if(ndpi_str->udpRoot != NULL) ndpi_tdestroy(ndpi_str->udpRoot, ndpi_free); if(ndpi_str->tcpRoot != NULL) ndpi_tdestroy(ndpi_str->tcpRoot, ndpi_free); if(ndpi_str->host_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->host_automa.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->content_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->content_automa.ac_automa, 0); if(ndpi_str->bigrams_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->bigrams_automa.ac_automa, 0); if(ndpi_str->impossible_bigrams_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->impossible_bigrams_automa.ac_automa, 0); if(ndpi_str->custom_categories.hostnames.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->custom_categories.hostnames_shadow.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames_shadow.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->custom_categories.ipAddresses != NULL) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses, free_ptree_data); if(ndpi_str->custom_categories.ipAddresses_shadow != NULL) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses_shadow, free_ptree_data); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/ndpi_exit_detection_module.c" #endif ndpi_free(ndpi_str); } } /* ****************************************************** */ int ndpi_get_protocol_id_master_proto(struct ndpi_detection_module_struct *ndpi_str, u_int16_t 
protocol_id, u_int16_t **tcp_master_proto, u_int16_t **udp_master_proto) { if(protocol_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) { *tcp_master_proto = ndpi_str->proto_defaults[NDPI_PROTOCOL_UNKNOWN].master_tcp_protoId, *udp_master_proto = ndpi_str->proto_defaults[NDPI_PROTOCOL_UNKNOWN].master_udp_protoId; return(-1); } *tcp_master_proto = ndpi_str->proto_defaults[protocol_id].master_tcp_protoId, *udp_master_proto = ndpi_str->proto_defaults[protocol_id].master_udp_protoId; return(0); } /* ****************************************************** */ static ndpi_default_ports_tree_node_t *ndpi_get_guessed_protocol_id(struct ndpi_detection_module_struct *ndpi_str, u_int8_t proto, u_int16_t sport, u_int16_t dport) { ndpi_default_ports_tree_node_t node; if(sport && dport) { int low = ndpi_min(sport, dport); int high = ndpi_max(sport, dport); const void *ret; node.default_port = low; /* Check server port first */ ret = ndpi_tfind(&node, (proto == IPPROTO_TCP) ? (void *) &ndpi_str->tcpRoot : (void *) &ndpi_str->udpRoot, ndpi_default_ports_tree_node_t_cmp); if(ret == NULL) { node.default_port = high; ret = ndpi_tfind(&node, (proto == IPPROTO_TCP) ? 
(void *) &ndpi_str->tcpRoot : (void *) &ndpi_str->udpRoot, ndpi_default_ports_tree_node_t_cmp); } if(ret) return(*(ndpi_default_ports_tree_node_t **) ret); } return(NULL); } /* ****************************************************** */ /* These are UDP protocols that must fit a single packet and thus that if have NOT been detected they cannot be guessed as they have been excluded */ u_int8_t is_udp_guessable_protocol(u_int16_t l7_guessed_proto) { switch (l7_guessed_proto) { case NDPI_PROTOCOL_QUIC: case NDPI_PROTOCOL_SNMP: case NDPI_PROTOCOL_NETFLOW: /* TODO: add more protocols (if any missing) */ return(1); } return(0); } /* ****************************************************** */ u_int16_t ndpi_guess_protocol_id(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int8_t proto, u_int16_t sport, u_int16_t dport, u_int8_t *user_defined_proto) { *user_defined_proto = 0; /* Default */ if(sport && dport) { ndpi_default_ports_tree_node_t *found = ndpi_get_guessed_protocol_id(ndpi_str, proto, sport, dport); if(found != NULL) { u_int16_t guessed_proto = found->proto->protoId; /* We need to check if the guessed protocol isn't excluded by nDPI */ if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, guessed_proto) && is_udp_guessable_protocol(guessed_proto)) return(NDPI_PROTOCOL_UNKNOWN); else { *user_defined_proto = found->customUserProto; return(guessed_proto); } } } else { /* No TCP/UDP */ switch (proto) { case NDPI_IPSEC_PROTOCOL_ESP: case NDPI_IPSEC_PROTOCOL_AH: return(NDPI_PROTOCOL_IP_IPSEC); break; case NDPI_GRE_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_GRE); break; case NDPI_ICMP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_ICMP); break; case NDPI_IGMP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_IGMP); break; case NDPI_EGP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_EGP); break; case NDPI_SCTP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_SCTP); break; case NDPI_OSPF_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_OSPF); 
break; case NDPI_IPIP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_IP_IN_IP); break; case NDPI_ICMPV6_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_ICMPV6); break; case 112: return(NDPI_PROTOCOL_IP_VRRP); break; } } return(NDPI_PROTOCOL_UNKNOWN); } /* ******************************************************************** */ u_int ndpi_get_num_supported_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_supported_protocols); } /* ******************************************************************** */ #ifdef WIN32 char *strsep(char **sp, char *sep) { char *p, *s; if(sp == NULL || *sp == NULL || **sp == '\0') return(NULL); s = *sp; p = s + strcspn(s, sep); if(*p != '\0') *p++ = '\0'; *sp = p; return(s); } #endif /* ******************************************************************** */ int ndpi_handle_rule(struct ndpi_detection_module_struct *ndpi_str, char *rule, u_int8_t do_add) { char *at, *proto, *elem; ndpi_proto_defaults_t *def; u_int16_t subprotocol_id, i; at = strrchr(rule, '@'); if(at == NULL) { NDPI_LOG_ERR(ndpi_str, "Invalid rule '%s'\n", rule); return(-1); } else at[0] = 0, proto = &at[1]; for (i = 0; proto[i] != '\0'; i++) { switch (proto[i]) { case '/': case '&': case '^': case ':': case ';': case '\'': case '"': case ' ': proto[i] = '_'; break; } } for (i = 0, def = NULL; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) { if(ndpi_str->proto_defaults[i].protoName && strcasecmp(ndpi_str->proto_defaults[i].protoName, proto) == 0) { def = &ndpi_str->proto_defaults[i]; subprotocol_id = i; break; } } if(def == NULL) { if(!do_add) { /* We need to remove a rule */ NDPI_LOG_ERR(ndpi_str, "Unable to find protocol '%s': skipping rule '%s'\n", proto, rule); return(-3); } else { ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}; if(ndpi_str->ndpi_num_custom_protocols >= (NDPI_MAX_NUM_CUSTOM_PROTOCOLS - 1)) { NDPI_LOG_ERR(ndpi_str, 
"Too many protocols defined (%u): skipping protocol %s\n", ndpi_str->ndpi_num_custom_protocols, proto); return(-2); } ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, ndpi_str->ndpi_num_supported_protocols, 0 /* can_have_a_subprotocol */, no_master, no_master, proto, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, /* TODO add protocol category support in rules */ ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); def = &ndpi_str->proto_defaults[ndpi_str->ndpi_num_supported_protocols]; subprotocol_id = ndpi_str->ndpi_num_supported_protocols; ndpi_str->ndpi_num_supported_protocols++, ndpi_str->ndpi_num_custom_protocols++; } } while ((elem = strsep(&rule, ",")) != NULL) { char *attr = elem, *value = NULL; ndpi_port_range range; int is_tcp = 0, is_udp = 0, is_ip = 0; if(strncmp(attr, "tcp:", 4) == 0) is_tcp = 1, value = &attr[4]; else if(strncmp(attr, "udp:", 4) == 0) is_udp = 1, value = &attr[4]; else if(strncmp(attr, "ip:", 3) == 0) is_ip = 1, value = &attr[3]; else if(strncmp(attr, "host:", 5) == 0) { /* host:"<value>",host:"<value>",.....@<subproto> */ value = &attr[5]; if(value[0] == '"') value++; /* remove leading " */ if(value[strlen(value) - 1] == '"') value[strlen(value) - 1] = '\0'; /* remove trailing " */ } if(is_tcp || is_udp) { u_int p_low, p_high; if(sscanf(value, "%u-%u", &p_low, &p_high) == 2) range.port_low = p_low, range.port_high = p_high; else range.port_low = range.port_high = atoi(&elem[4]); if(do_add) addDefaultPort(ndpi_str, &range, def, 1 /* Custom user proto */, is_tcp ? &ndpi_str->tcpRoot : &ndpi_str->udpRoot, __FUNCTION__, __LINE__); else removeDefaultPort(&range, def, is_tcp ? 
&ndpi_str->tcpRoot : &ndpi_str->udpRoot); } else if(is_ip) { /* NDPI_PROTOCOL_TOR */ ndpi_add_host_ip_subprotocol(ndpi_str, value, subprotocol_id); } else { if(do_add) ndpi_add_host_url_subprotocol(ndpi_str, value, subprotocol_id, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_ACCEPTABLE); else ndpi_remove_host_url_subprotocol(ndpi_str, value, subprotocol_id); } } return(0); } /* ******************************************************************** */ /* * Format: * * <host|ip> <category_id> * * Notes: * - host and category are separated by a single TAB * - empty lines or lines starting with # are ignored */ int ndpi_load_categories_file(struct ndpi_detection_module_struct *ndpi_str, const char *path) { char buffer[512], *line, *name, *category, *saveptr; FILE *fd; int len, num = 0; fd = fopen(path, "r"); if(fd == NULL) { NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); return(-1); } while (1) { line = fgets(buffer, sizeof(buffer), fd); if(line == NULL) break; len = strlen(line); if((len <= 1) || (line[0] == '#')) continue; line[len - 1] = '\0'; name = strtok_r(line, "\t", &saveptr); if(name) { category = strtok_r(NULL, "\t", &saveptr); if(category) { int rc = ndpi_load_category(ndpi_str, name, (ndpi_protocol_category_t) atoi(category)); if(rc >= 0) num++; } } } fclose(fd); ndpi_enable_loaded_categories(ndpi_str); return(num); } /* ******************************************************************** */ /* Format: <tcp|udp>:<port>,<tcp|udp>:<port>,.....@<proto> Subprotocols Format: host:"<value>",host:"<value>",.....@<subproto> IP based Subprotocols Format (<value> is IP or CIDR): ip:<value>,ip:<value>,.....@<subproto> Example: tcp:80,tcp:3128@HTTP udp:139@NETBIOS */ int ndpi_load_protocols_file(struct ndpi_detection_module_struct *ndpi_str, const char *path) { FILE *fd; char *buffer, *old_buffer; int chunk_len = 512, buffer_len = chunk_len, old_buffer_len; int i, rc = -1; fd = fopen(path, "r"); if(fd == NULL) { 
NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); goto error; } buffer = ndpi_malloc(buffer_len); if(buffer == NULL) { NDPI_LOG_ERR(ndpi_str, "Memory allocation failure\n"); goto close_fd; } while (1) { char *line = buffer; int line_len = buffer_len; while ((line = fgets(line, line_len, fd)) != NULL && line[strlen(line) - 1] != '\n') { i = strlen(line); old_buffer = buffer; old_buffer_len = buffer_len; buffer_len += chunk_len; buffer = ndpi_realloc(old_buffer, old_buffer_len, buffer_len); if(buffer == NULL) { NDPI_LOG_ERR(ndpi_str, "Memory allocation failure\n"); ndpi_free(old_buffer); goto close_fd; } line = &buffer[i]; line_len = chunk_len; } if(!line) /* safety check */ break; i = strlen(buffer); if((i <= 1) || (buffer[0] == '#')) continue; else buffer[i - 1] = '\0'; ndpi_handle_rule(ndpi_str, buffer, 1); } rc = 0; ndpi_free(buffer); close_fd: fclose(fd); error: return(rc); } /* ******************************************************************** */ /* ntop */ void ndpi_set_bitmask_protocol_detection(char *label, struct ndpi_detection_module_struct *ndpi_str, const NDPI_PROTOCOL_BITMASK *detection_bitmask, const u_int32_t idx, u_int16_t ndpi_protocol_id, void (*func)(struct ndpi_detection_module_struct *, struct ndpi_flow_struct *flow), const NDPI_SELECTION_BITMASK_PROTOCOL_SIZE ndpi_selection_bitmask, u_int8_t b_save_bitmask_unknow, u_int8_t b_add_detection_bitmask) { /* Compare specify protocol bitmask with main detection bitmask */ if(NDPI_COMPARE_PROTOCOL_TO_BITMASK(*detection_bitmask, ndpi_protocol_id) != 0) { #ifdef DEBUG NDPI_LOG_DBG2(ndpi_str, "[NDPI] ndpi_set_bitmask_protocol_detection: %s : [callback_buffer] idx= %u, [proto_defaults] " "protocol_id=%u\n", label, idx, ndpi_protocol_id); #endif if(ndpi_str->proto_defaults[ndpi_protocol_id].protoIdx != 0) { NDPI_LOG_DBG2(ndpi_str, "[NDPI] Internal error: protocol %s/%u has been already registered\n", label, ndpi_protocol_id); #ifdef DEBUG } else { NDPI_LOG_DBG2(ndpi_str, 
"[NDPI] Adding %s with protocol id %d\n", label, ndpi_protocol_id); #endif } /* Set function and index protocol within proto_default structure for port protocol detection and callback_buffer function for DPI protocol detection */ ndpi_str->proto_defaults[ndpi_protocol_id].protoIdx = idx; ndpi_str->proto_defaults[ndpi_protocol_id].func = ndpi_str->callback_buffer[idx].func = func; /* Set ndpi_selection_bitmask for protocol */ ndpi_str->callback_buffer[idx].ndpi_selection_bitmask = ndpi_selection_bitmask; /* Reset protocol detection bitmask via NDPI_PROTOCOL_UNKNOWN and than add specify protocol bitmast to callback buffer. */ if(b_save_bitmask_unknow) NDPI_SAVE_AS_BITMASK(ndpi_str->callback_buffer[idx].detection_bitmask, NDPI_PROTOCOL_UNKNOWN); if(b_add_detection_bitmask) NDPI_ADD_PROTOCOL_TO_BITMASK(ndpi_str->callback_buffer[idx].detection_bitmask, ndpi_protocol_id); NDPI_SAVE_AS_BITMASK(ndpi_str->callback_buffer[idx].excluded_protocol_bitmask, ndpi_protocol_id); } } /* ******************************************************************** */ void ndpi_set_protocol_detection_bitmask2(struct ndpi_detection_module_struct *ndpi_str, const NDPI_PROTOCOL_BITMASK *dbm) { NDPI_PROTOCOL_BITMASK detection_bitmask_local; NDPI_PROTOCOL_BITMASK *detection_bitmask = &detection_bitmask_local; u_int32_t a = 0; NDPI_BITMASK_SET(detection_bitmask_local, *dbm); NDPI_BITMASK_SET(ndpi_str->detection_bitmask, *dbm); /* set this here to zero to be interrupt safe */ ndpi_str->callback_buffer_size = 0; /* HTTP */ init_http_dissector(ndpi_str, &a, detection_bitmask); /* STARCRAFT */ init_starcraft_dissector(ndpi_str, &a, detection_bitmask); /* TLS */ init_tls_dissector(ndpi_str, &a, detection_bitmask); /* STUN */ init_stun_dissector(ndpi_str, &a, detection_bitmask); /* RTP */ init_rtp_dissector(ndpi_str, &a, detection_bitmask); /* RTSP */ init_rtsp_dissector(ndpi_str, &a, detection_bitmask); /* RDP */ init_rdp_dissector(ndpi_str, &a, detection_bitmask); /* SIP */ init_sip_dissector(ndpi_str, 
&a, detection_bitmask); /* IMO */ init_imo_dissector(ndpi_str, &a, detection_bitmask); /* Teredo */ init_teredo_dissector(ndpi_str, &a, detection_bitmask); /* EDONKEY */ init_edonkey_dissector(ndpi_str, &a, detection_bitmask); /* FASTTRACK */ init_fasttrack_dissector(ndpi_str, &a, detection_bitmask); /* GNUTELLA */ init_gnutella_dissector(ndpi_str, &a, detection_bitmask); /* DIRECTCONNECT */ init_directconnect_dissector(ndpi_str, &a, detection_bitmask); /* NATS */ init_nats_dissector(ndpi_str, &a, detection_bitmask); /* APPLEJUICE */ init_applejuice_dissector(ndpi_str, &a, detection_bitmask); /* SOULSEEK */ init_soulseek_dissector(ndpi_str, &a, detection_bitmask); /* SOCKS */ init_socks_dissector(ndpi_str, &a, detection_bitmask); /* IRC */ init_irc_dissector(ndpi_str, &a, detection_bitmask); /* JABBER */ init_jabber_dissector(ndpi_str, &a, detection_bitmask); /* MAIL_POP */ init_mail_pop_dissector(ndpi_str, &a, detection_bitmask); /* MAIL_IMAP */ init_mail_imap_dissector(ndpi_str, &a, detection_bitmask); /* MAIL_SMTP */ init_mail_smtp_dissector(ndpi_str, &a, detection_bitmask); /* USENET */ init_usenet_dissector(ndpi_str, &a, detection_bitmask); /* DNS */ init_dns_dissector(ndpi_str, &a, detection_bitmask); /* FILETOPIA */ init_fbzero_dissector(ndpi_str, &a, detection_bitmask); /* VMWARE */ init_vmware_dissector(ndpi_str, &a, detection_bitmask); /* NON_TCP_UDP */ init_non_tcp_udp_dissector(ndpi_str, &a, detection_bitmask); /* SOPCAST */ init_sopcast_dissector(ndpi_str, &a, detection_bitmask); /* TVUPLAYER */ init_tvuplayer_dissector(ndpi_str, &a, detection_bitmask); /* PPSTREAM */ init_ppstream_dissector(ndpi_str, &a, detection_bitmask); /* PPLIVE */ init_pplive_dissector(ndpi_str, &a, detection_bitmask); /* IAX */ init_iax_dissector(ndpi_str, &a, detection_bitmask); /* MGPC */ init_mgpc_dissector(ndpi_str, &a, detection_bitmask); /* ZATTOO */ init_zattoo_dissector(ndpi_str, &a, detection_bitmask); /* QQ */ init_qq_dissector(ndpi_str, &a, detection_bitmask); /* SSH 
*/ init_ssh_dissector(ndpi_str, &a, detection_bitmask); /* AYIYA */ init_ayiya_dissector(ndpi_str, &a, detection_bitmask); /* THUNDER */ init_thunder_dissector(ndpi_str, &a, detection_bitmask); /* VNC */ init_vnc_dissector(ndpi_str, &a, detection_bitmask); /* TEAMVIEWER */ init_teamviewer_dissector(ndpi_str, &a, detection_bitmask); /* DHCP */ init_dhcp_dissector(ndpi_str, &a, detection_bitmask); /* STEAM */ init_steam_dissector(ndpi_str, &a, detection_bitmask); /* HALFLIFE2 */ init_halflife2_dissector(ndpi_str, &a, detection_bitmask); /* XBOX */ init_xbox_dissector(ndpi_str, &a, detection_bitmask); /* HTTP_APPLICATION_ACTIVESYNC */ init_http_activesync_dissector(ndpi_str, &a, detection_bitmask); /* SMB */ init_smb_dissector(ndpi_str, &a, detection_bitmask); /* MINING */ init_mining_dissector(ndpi_str, &a, detection_bitmask); /* TELNET */ init_telnet_dissector(ndpi_str, &a, detection_bitmask); /* NTP */ init_ntp_dissector(ndpi_str, &a, detection_bitmask); /* NFS */ init_nfs_dissector(ndpi_str, &a, detection_bitmask); /* SSDP */ init_ssdp_dissector(ndpi_str, &a, detection_bitmask); /* WORLD_OF_WARCRAFT */ init_world_of_warcraft_dissector(ndpi_str, &a, detection_bitmask); /* POSTGRES */ init_postgres_dissector(ndpi_str, &a, detection_bitmask); /* MYSQL */ init_mysql_dissector(ndpi_str, &a, detection_bitmask); /* BGP */ init_bgp_dissector(ndpi_str, &a, detection_bitmask); /* SNMP */ init_snmp_dissector(ndpi_str, &a, detection_bitmask); /* KONTIKI */ init_kontiki_dissector(ndpi_str, &a, detection_bitmask); /* ICECAST */ init_icecast_dissector(ndpi_str, &a, detection_bitmask); /* SHOUTCAST */ init_shoutcast_dissector(ndpi_str, &a, detection_bitmask); /* KERBEROS */ init_kerberos_dissector(ndpi_str, &a, detection_bitmask); /* OPENFT */ init_openft_dissector(ndpi_str, &a, detection_bitmask); /* SYSLOG */ init_syslog_dissector(ndpi_str, &a, detection_bitmask); /* DIRECT_DOWNLOAD_LINK */ init_directdownloadlink_dissector(ndpi_str, &a, detection_bitmask); /* NETBIOS */ 
init_netbios_dissector(ndpi_str, &a, detection_bitmask); /* MDNS */ init_mdns_dissector(ndpi_str, &a, detection_bitmask); /* IPP */ init_ipp_dissector(ndpi_str, &a, detection_bitmask); /* LDAP */ init_ldap_dissector(ndpi_str, &a, detection_bitmask); /* WARCRAFT3 */ init_warcraft3_dissector(ndpi_str, &a, detection_bitmask); /* XDMCP */ init_xdmcp_dissector(ndpi_str, &a, detection_bitmask); /* TFTP */ init_tftp_dissector(ndpi_str, &a, detection_bitmask); /* MSSQL_TDS */ init_mssql_tds_dissector(ndpi_str, &a, detection_bitmask); /* PPTP */ init_pptp_dissector(ndpi_str, &a, detection_bitmask); /* STEALTHNET */ init_stealthnet_dissector(ndpi_str, &a, detection_bitmask); /* DHCPV6 */ init_dhcpv6_dissector(ndpi_str, &a, detection_bitmask); /* AFP */ init_afp_dissector(ndpi_str, &a, detection_bitmask); /* check_mk */ init_checkmk_dissector(ndpi_str, &a, detection_bitmask); /* AIMINI */ init_aimini_dissector(ndpi_str, &a, detection_bitmask); /* FLORENSIA */ init_florensia_dissector(ndpi_str, &a, detection_bitmask); /* MAPLESTORY */ init_maplestory_dissector(ndpi_str, &a, detection_bitmask); /* DOFUS */ init_dofus_dissector(ndpi_str, &a, detection_bitmask); /* WORLD_OF_KUNG_FU */ init_world_of_kung_fu_dissector(ndpi_str, &a, detection_bitmask); /* FIESTA */ init_fiesta_dissector(ndpi_str, &a, detection_bitmask); /* CROSSIFIRE */ init_crossfire_dissector(ndpi_str, &a, detection_bitmask); /* GUILDWARS */ init_guildwars_dissector(ndpi_str, &a, detection_bitmask); /* ARMAGETRON */ init_armagetron_dissector(ndpi_str, &a, detection_bitmask); /* DROPBOX */ init_dropbox_dissector(ndpi_str, &a, detection_bitmask); /* SPOTIFY */ init_spotify_dissector(ndpi_str, &a, detection_bitmask); /* RADIUS */ init_radius_dissector(ndpi_str, &a, detection_bitmask); /* CITRIX */ init_citrix_dissector(ndpi_str, &a, detection_bitmask); /* LOTUS_NOTES */ init_lotus_notes_dissector(ndpi_str, &a, detection_bitmask); /* GTP */ init_gtp_dissector(ndpi_str, &a, detection_bitmask); /* DCERPC */ 
init_dcerpc_dissector(ndpi_str, &a, detection_bitmask); /* NETFLOW */ init_netflow_dissector(ndpi_str, &a, detection_bitmask); /* SFLOW */ init_sflow_dissector(ndpi_str, &a, detection_bitmask); /* H323 */ init_h323_dissector(ndpi_str, &a, detection_bitmask); /* OPENVPN */ init_openvpn_dissector(ndpi_str, &a, detection_bitmask); /* NOE */ init_noe_dissector(ndpi_str, &a, detection_bitmask); /* CISCOVPN */ init_ciscovpn_dissector(ndpi_str, &a, detection_bitmask); /* TEAMSPEAK */ init_teamspeak_dissector(ndpi_str, &a, detection_bitmask); /* TOR */ init_tor_dissector(ndpi_str, &a, detection_bitmask); /* SKINNY */ init_skinny_dissector(ndpi_str, &a, detection_bitmask); /* RTCP */ init_rtcp_dissector(ndpi_str, &a, detection_bitmask); /* RSYNC */ init_rsync_dissector(ndpi_str, &a, detection_bitmask); /* WHOIS_DAS */ init_whois_das_dissector(ndpi_str, &a, detection_bitmask); /* ORACLE */ init_oracle_dissector(ndpi_str, &a, detection_bitmask); /* CORBA */ init_corba_dissector(ndpi_str, &a, detection_bitmask); /* RTMP */ init_rtmp_dissector(ndpi_str, &a, detection_bitmask); /* FTP_CONTROL */ init_ftp_control_dissector(ndpi_str, &a, detection_bitmask); /* FTP_DATA */ init_ftp_data_dissector(ndpi_str, &a, detection_bitmask); /* PANDO */ init_pando_dissector(ndpi_str, &a, detection_bitmask); /* MEGACO */ init_megaco_dissector(ndpi_str, &a, detection_bitmask); /* REDIS */ init_redis_dissector(ndpi_str, &a, detection_bitmask); /* UPnP */ init_upnp_dissector(ndpi_str, &a, detection_bitmask); /* VHUA */ init_vhua_dissector(ndpi_str, &a, detection_bitmask); /* ZMQ */ init_zmq_dissector(ndpi_str, &a, detection_bitmask); /* TELEGRAM */ init_telegram_dissector(ndpi_str, &a, detection_bitmask); /* QUIC */ init_quic_dissector(ndpi_str, &a, detection_bitmask); /* DIAMETER */ init_diameter_dissector(ndpi_str, &a, detection_bitmask); /* APPLE_PUSH */ init_apple_push_dissector(ndpi_str, &a, detection_bitmask); /* EAQ */ init_eaq_dissector(ndpi_str, &a, detection_bitmask); /* KAKAOTALK_VOICE 
*/ init_kakaotalk_voice_dissector(ndpi_str, &a, detection_bitmask); /* MPEGTS */ init_mpegts_dissector(ndpi_str, &a, detection_bitmask); /* UBNTAC2 */ init_ubntac2_dissector(ndpi_str, &a, detection_bitmask); /* COAP */ init_coap_dissector(ndpi_str, &a, detection_bitmask); /* MQTT */ init_mqtt_dissector(ndpi_str, &a, detection_bitmask); /* SOME/IP */ init_someip_dissector(ndpi_str, &a, detection_bitmask); /* RX */ init_rx_dissector(ndpi_str, &a, detection_bitmask); /* GIT */ init_git_dissector(ndpi_str, &a, detection_bitmask); /* HANGOUT */ init_hangout_dissector(ndpi_str, &a, detection_bitmask); /* DRDA */ init_drda_dissector(ndpi_str, &a, detection_bitmask); /* BJNP */ init_bjnp_dissector(ndpi_str, &a, detection_bitmask); /* SMPP */ init_smpp_dissector(ndpi_str, &a, detection_bitmask); /* TINC */ init_tinc_dissector(ndpi_str, &a, detection_bitmask); /* FIX */ init_fix_dissector(ndpi_str, &a, detection_bitmask); /* NINTENDO */ init_nintendo_dissector(ndpi_str, &a, detection_bitmask); /* MODBUS */ init_modbus_dissector(ndpi_str, &a, detection_bitmask); /* CAPWAP */ init_capwap_dissector(ndpi_str, &a, detection_bitmask); /* ZABBIX */ init_zabbix_dissector(ndpi_str, &a, detection_bitmask); /*** Put false-positive sensitive protocols at the end ***/ /* VIBER */ init_viber_dissector(ndpi_str, &a, detection_bitmask); /* SKYPE */ init_skype_dissector(ndpi_str, &a, detection_bitmask); /* BITTORRENT */ init_bittorrent_dissector(ndpi_str, &a, detection_bitmask); /* WHATSAPP */ init_whatsapp_dissector(ndpi_str, &a, detection_bitmask); /* OOKLA */ init_ookla_dissector(ndpi_str, &a, detection_bitmask); /* AMQP */ init_amqp_dissector(ndpi_str, &a, detection_bitmask); /* CSGO */ init_csgo_dissector(ndpi_str, &a, detection_bitmask); /* LISP */ init_lisp_dissector(ndpi_str, &a, detection_bitmask); /* AJP */ init_ajp_dissector(ndpi_str, &a, detection_bitmask); /* Memcached */ init_memcached_dissector(ndpi_str, &a, detection_bitmask); /* Nest Log Sink */ 
init_nest_log_sink_dissector(ndpi_str, &a, detection_bitmask); /* WireGuard VPN */ init_wireguard_dissector(ndpi_str, &a, detection_bitmask); /* Amazon_Video */ init_amazon_video_dissector(ndpi_str, &a, detection_bitmask); /* Targus Getdata */ init_targus_getdata_dissector(ndpi_str, &a, detection_bitmask); /* S7 comm */ init_s7comm_dissector(ndpi_str, &a, detection_bitmask); /* IEC 60870-5-104 */ init_104_dissector(ndpi_str, &a, detection_bitmask); /* WEBSOCKET */ init_websocket_dissector(ndpi_str, &a, detection_bitmask); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_main_init.c" #endif /* ----------------------------------------------------------------- */ ndpi_str->callback_buffer_size = a; NDPI_LOG_DBG2(ndpi_str, "callback_buffer_size is %u\n", ndpi_str->callback_buffer_size); /* now build the specific buffer for tcp, udp and non_tcp_udp */ ndpi_str->callback_buffer_size_tcp_payload = 0; ndpi_str->callback_buffer_size_tcp_no_payload = 0; for (a = 0; a < ndpi_str->callback_buffer_size; a++) { if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & (NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC)) != 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2(ndpi_str, "callback_buffer_tcp_payload, adding buffer %u as entry %u\n", a, ndpi_str->callback_buffer_size_tcp_payload); memcpy(&ndpi_str->callback_buffer_tcp_payload[ndpi_str->callback_buffer_size_tcp_payload], &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_tcp_payload++; if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD) == 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2( ndpi_str, "\tcallback_buffer_tcp_no_payload, additional adding buffer %u to no_payload process\n", a); memcpy(&ndpi_str->callback_buffer_tcp_no_payload[ndpi_str->callback_buffer_size_tcp_no_payload], &ndpi_str->callback_buffer[a], 
sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_tcp_no_payload++; } } } ndpi_str->callback_buffer_size_udp = 0; for (a = 0; a < ndpi_str->callback_buffer_size; a++) { if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & (NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC)) != 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2(ndpi_str, "callback_buffer_size_udp: adding buffer : %u as entry %u\n", a, ndpi_str->callback_buffer_size_udp); memcpy(&ndpi_str->callback_buffer_udp[ndpi_str->callback_buffer_size_udp], &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_udp++; } } ndpi_str->callback_buffer_size_non_tcp_udp = 0; for (a = 0; a < ndpi_str->callback_buffer_size; a++) { if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & (NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP)) == 0 || (ndpi_str->callback_buffer[a].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC) != 0) { if(_ndpi_debug_callbacks) NDPI_LOG_DBG2(ndpi_str, "callback_buffer_non_tcp_udp: adding buffer : %u as entry %u\n", a, ndpi_str->callback_buffer_size_non_tcp_udp); memcpy(&ndpi_str->callback_buffer_non_tcp_udp[ndpi_str->callback_buffer_size_non_tcp_udp], &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct)); ndpi_str->callback_buffer_size_non_tcp_udp++; } } } #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* handle extension headers in IPv6 packets * arguments: * l4ptr: pointer to the byte following the initial IPv6 header * l4len: the length of the IPv6 packet excluding the IPv6 header * nxt_hdr: next header value from the IPv6 header * result: * l4ptr: pointer to the start of the actual packet payload * l4len: length of the actual payload * nxt_hdr: protocol of the actual payload * returns 0 upon success and 1 upon 
failure */
int ndpi_handle_ipv6_extension_headers(struct ndpi_detection_module_struct *ndpi_str, const u_int8_t **l4ptr,
                                       u_int16_t *l4len, u_int8_t *nxt_hdr) {
  /* Walk the IPv6 extension-header chain: hop-by-hop (0), routing (43),
   * fragment (44), destination options (60), mobility (135), no-next-header (59).
   * On success *l4ptr/*l4len point at the transport payload and *nxt_hdr holds
   * its protocol number. Returns 0 on success, 1 on truncated/invalid chains. */
  while ((*nxt_hdr == 0 || *nxt_hdr == 43 || *nxt_hdr == 44 || *nxt_hdr == 60 || *nxt_hdr == 135 || *nxt_hdr == 59)) {
    u_int16_t ehdr_len;

    // no next header
    if(*nxt_hdr == 59) {
      return(1);
    }

    // fragment extension header has fixed size of 8 bytes and the first byte is the next header type
    if(*nxt_hdr == 44) {
      if(*l4len < 8) {
        return(1);
      }
      *nxt_hdr = (*l4ptr)[0];
      *l4len -= 8;
      (*l4ptr) += 8;
      continue;
    }

    // the other extension headers have one byte for the next header type
    // and one byte for the extension header length in 8 byte steps minus the first 8 bytes
    if(*l4len < 2) {
      return(1);
    }
    /* ehdr_len = (len-field * 8) + 8; max 255*8+8 = 2048, fits u_int16_t */
    ehdr_len = (*l4ptr)[1];
    ehdr_len *= 8;
    ehdr_len += 8;

    if(*l4len < ehdr_len) {
      return(1);
    }
    *nxt_hdr = (*l4ptr)[0];
    *l4len -= ehdr_len;
    (*l4ptr) += ehdr_len;
  }
  return(0);
}
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */

/* Sanity-check an IPv4 header against the captured size: rejects buffers
 * shorter than the declared header/total length, total length smaller than
 * the header itself, and non-first fragments (fragment offset != 0).
 * Returns 1 when the header is usable, 0 otherwise. */
static u_int8_t ndpi_iph_is_valid_and_not_fragmented(const struct ndpi_iphdr *iph, const u_int16_t ipsize) {
  //#ifdef REQUIRE_FULL_PACKETS
  if(ipsize < iph->ihl * 4 || ipsize < ntohs(iph->tot_len) || ntohs(iph->tot_len) < iph->ihl * 4 ||
     (iph->frag_off & htons(0x1FFF)) != 0) {
    return(0);
  }
  //#endif

  return(1);
}

/* Locate the L4 (transport) header inside an L3 (IPv4/IPv6) buffer.
 * l3/l3_len:  raw IP packet and its captured length.
 * l4_return / l4_len_return / l4_protocol_return: optional out-params for the
 *   transport payload pointer, its length, and its IP protocol number.
 * flags: NDPI_DETECTION_ONLY_IPV4 / NDPI_DETECTION_ONLY_IPV6 filters.
 * Returns 0 on success, 1 when the packet is not parseable. */
static u_int8_t ndpi_detection_get_l4_internal(struct ndpi_detection_module_struct *ndpi_str, const u_int8_t *l3,
                                               u_int16_t l3_len, const u_int8_t **l4_return, u_int16_t *l4_len_return,
                                               u_int8_t *l4_protocol_return, u_int32_t flags) {
  const struct ndpi_iphdr *iph = NULL;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  const struct ndpi_ipv6hdr *iph_v6 = NULL;
#endif
  u_int16_t l4len = 0;
  const u_int8_t *l4ptr = NULL;
  u_int8_t l4protocol = 0;

  if(l3 == NULL || l3_len < sizeof(struct ndpi_iphdr))
    return(1);

  if((iph = (const struct ndpi_iphdr *) l3) == NULL)
    return(1);

  if(iph->version == IPVERSION && iph->ihl >= 5) {
    NDPI_LOG_DBG2(ndpi_str, "ipv4 header\n");
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if(iph->version == 6 && l3_len >= sizeof(struct ndpi_ipv6hdr)) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header\n");
    iph_v6 = (const struct ndpi_ipv6hdr *) l3;
    iph = NULL;  /* exactly one of iph / iph_v6 is non-NULL below */
  }
#endif
  else {
    return(1);
  }

  if((flags & NDPI_DETECTION_ONLY_IPV6) && iph != NULL) {
    NDPI_LOG_DBG2(ndpi_str, "ipv4 header found but excluded by flag\n");
    return(1);
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if((flags & NDPI_DETECTION_ONLY_IPV4) && iph_v6 != NULL) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header found but excluded by flag\n");
    return(1);
  }
#endif

  if(iph != NULL && ndpi_iph_is_valid_and_not_fragmented(iph, l3_len)) {
    u_int16_t len = ntohs(iph->tot_len);
    u_int16_t hlen = (iph->ihl * 4);

    l4ptr = (((const u_int8_t *) iph) + iph->ihl * 4);

    /* tot_len == 0 can happen with TCP segmentation offload; fall back to
       the captured length */
    if(len == 0)
      len = l3_len;

    l4len = (len > hlen) ? (len - hlen) : 0;
    l4protocol = iph->protocol;
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if(iph_v6 != NULL && (l3_len - sizeof(struct ndpi_ipv6hdr)) >= ntohs(iph_v6->ip6_hdr.ip6_un1_plen)) {
    l4ptr = (((const u_int8_t *) iph_v6) + sizeof(struct ndpi_ipv6hdr));
    l4len = ntohs(iph_v6->ip6_hdr.ip6_un1_plen);
    l4protocol = iph_v6->ip6_hdr.ip6_un1_nxt;

    // we need to handle IPv6 extension headers if present
    if(ndpi_handle_ipv6_extension_headers(ndpi_str, &l4ptr, &l4len, &l4protocol) != 0) {
      return(1);
    }
  }
#endif
  else {
    return(1);
  }

  /* All three out-params are optional */
  if(l4_return != NULL) {
    *l4_return = l4ptr;
  }

  if(l4_len_return != NULL) {
    *l4_len_return = l4len;
  }

  if(l4_protocol_return != NULL) {
    *l4_protocol_return = l4protocol;
  }

  return(0);
}

/* ************************************************ */

/* Copy the flow's detected protocol stack (and stack info) into the packet,
 * so per-packet code sees the flow-level classification. */
void ndpi_apply_flow_protocol_to_packet(struct ndpi_flow_struct *flow, struct ndpi_packet_struct *packet) {
  memcpy(&packet->detected_protocol_stack, &flow->detected_protocol_stack, sizeof(packet->detected_protocol_stack));
  memcpy(&packet->protocol_stack_info, &flow->protocol_stack_info, sizeof(packet->protocol_stack_info));
}

/* ************************************************ */

/* Parse flow->packet.iph (set by the caller) into the per-packet L3/L4 fields.
 * Returns 0 on success, 1 when the packet cannot be parsed. */
static int ndpi_init_packet_header(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, unsigned
short packetlen) {
  const struct ndpi_iphdr *decaps_iph = NULL;
  u_int16_t l3len;
  u_int16_t l4len;
  const u_int8_t *l4ptr;
  u_int8_t l4protocol;
  u_int8_t l4_result;

  if(!flow)
    return(1);

  /* reset payload_packet_len, will be set if ipv4 tcp or udp */
  flow->packet.payload_packet_len = 0;
  flow->packet.l4_packet_len = 0;
  flow->packet.l3_packet_len = packetlen;

  flow->packet.tcp = NULL, flow->packet.udp = NULL;
  flow->packet.generic_l4_ptr = NULL;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  flow->packet.iphv6 = NULL;
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */

  ndpi_apply_flow_protocol_to_packet(flow, &flow->packet);

  l3len = flow->packet.l3_packet_len;

#ifdef NDPI_DETECTION_SUPPORT_IPV6
  if(flow->packet.iph != NULL) {
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */
    decaps_iph = flow->packet.iph;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  }
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */

  if(decaps_iph && decaps_iph->version == IPVERSION && decaps_iph->ihl >= 5) {
    NDPI_LOG_DBG2(ndpi_str, "ipv4 header\n");
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if(decaps_iph && decaps_iph->version == 6 && l3len >= sizeof(struct ndpi_ipv6hdr) &&
          (ndpi_str->ip_version_limit & NDPI_DETECTION_ONLY_IPV4) == 0) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header\n");
    /* iph was actually an IPv6 header: move it to iphv6 and clear iph */
    flow->packet.iphv6 = (struct ndpi_ipv6hdr *) flow->packet.iph;
    flow->packet.iph = NULL;
  }
#endif
  else {
    flow->packet.iph = NULL;
    return(1);
  }

  /* needed:
   *  - unfragmented packets
   *  - ip header <= packet len
   *  - ip total length >= packet len
   */

  l4ptr = NULL;
  l4len = 0;
  l4protocol = 0;

  l4_result =
    ndpi_detection_get_l4_internal(ndpi_str, (const u_int8_t *) decaps_iph, l3len, &l4ptr, &l4len, &l4protocol, 0);

  if(l4_result != 0) {
    return(1);
  }

  flow->packet.l4_protocol = l4protocol;
  flow->packet.l4_packet_len = l4len;
  flow->l4_proto = l4protocol;

  /* tcp / udp detection */
  if(l4protocol == IPPROTO_TCP && flow->packet.l4_packet_len >= 20 /* min size of tcp */) {
    /* tcp */
    flow->packet.tcp = (struct ndpi_tcphdr *) l4ptr;
    if(flow->packet.l4_packet_len >= flow->packet.tcp->doff * 4) {
      flow->packet.payload_packet_len = flow->packet.l4_packet_len - flow->packet.tcp->doff * 4;
      flow->packet.actual_payload_len = flow->packet.payload_packet_len;
      flow->packet.payload = ((u_int8_t *) flow->packet.tcp) + (flow->packet.tcp->doff * 4);

      /* check for new tcp syn packets, here
       * idea: reset detection state if a connection is unknown
       */
      if(flow->packet.tcp->syn != 0 && flow->packet.tcp->ack == 0 && flow->init_finished != 0 &&
         flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) {
        u_int8_t backup;
        u_int16_t backup1, backup2;

        /* Release all heap buffers owned by the flow before wiping it,
           otherwise the memset below would leak them */
        if(flow->http.url) {
          ndpi_free(flow->http.url);
          flow->http.url = NULL;
        }
        if(flow->http.content_type) {
          ndpi_free(flow->http.content_type);
          flow->http.content_type = NULL;
        }
        if(flow->http.user_agent) {
          ndpi_free(flow->http.user_agent);
          flow->http.user_agent = NULL;
        }
        if(flow->kerberos_buf.pktbuf) {
          ndpi_free(flow->kerberos_buf.pktbuf);
          flow->kerberos_buf.pktbuf = NULL;
        }
        if(flow->l4.tcp.tls.message.buffer) {
          ndpi_free(flow->l4.tcp.tls.message.buffer);
          flow->l4.tcp.tls.message.buffer = NULL;
          flow->l4.tcp.tls.message.buffer_len = flow->l4.tcp.tls.message.buffer_used = 0;
        }

        /* Preserve packet counter and guesses across the reset */
        backup = flow->num_processed_pkts;
        backup1 = flow->guessed_protocol_id;
        backup2 = flow->guessed_host_protocol_id;
        memset(flow, 0, sizeof(*(flow)));
        flow->num_processed_pkts = backup;
        flow->guessed_protocol_id = backup1;
        flow->guessed_host_protocol_id = backup2;

        NDPI_LOG_DBG(ndpi_str, "tcp syn packet for unknown protocol, reset detection state\n");
      }
    } else {
      /* tcp header not complete */
      flow->packet.tcp = NULL;
    }
  } else if(l4protocol == IPPROTO_UDP && flow->packet.l4_packet_len >= 8 /* size of udp */) {
    flow->packet.udp = (struct ndpi_udphdr *) l4ptr;
    flow->packet.payload_packet_len = flow->packet.l4_packet_len - 8;
    flow->packet.payload = ((u_int8_t *) flow->packet.udp) + 8;
  } else {
    /* neither TCP nor UDP: expose the raw L4 pointer */
    flow->packet.generic_l4_ptr = l4ptr;
  }
  return(0);
}

/* ************************************************ */

/* Update per-flow connection state (direction, TCP handshake flags, sequence
 * numbers / retransmission detection, packet and byte counters). */
void ndpi_connection_tracking(struct ndpi_detection_module_struct *ndpi_str,
struct ndpi_flow_struct *flow) {
  if(!flow) {
    return;
  } else {
    /* const for gcc code optimization and cleaner code */
    struct ndpi_packet_struct *packet = &flow->packet;
    const struct ndpi_iphdr *iph = packet->iph;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
    const struct ndpi_ipv6hdr *iphv6 = packet->iphv6;
#endif
    const struct ndpi_tcphdr *tcph = packet->tcp;
    const struct ndpi_udphdr *udph = flow->packet.udp;

    packet->tcp_retransmission = 0, packet->packet_direction = 0;

    if(ndpi_str->direction_detect_disable) {
      packet->packet_direction = flow->packet_direction;
    } else {
      /* Canonical direction by address ordering (lower address -> 0) */
      if(iph != NULL && ntohl(iph->saddr) < ntohl(iph->daddr))
        packet->packet_direction = 1;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
      if(iphv6 != NULL && NDPI_COMPARE_IPV6_ADDRESS_STRUCTS(&iphv6->ip6_src, &iphv6->ip6_dst) != 0)
        packet->packet_direction = 1;
#endif
    }

    packet->packet_lines_parsed_complete = 0;

    if(flow->init_finished == 0) {
      flow->init_finished = 1;
      flow->setup_packet_direction = packet->packet_direction;
    }

    if(tcph != NULL) {
      /* reset retried bytes here before setting it */
      packet->num_retried_bytes = 0;

      if(!ndpi_str->direction_detect_disable)
        packet->packet_direction = (ntohs(tcph->source) < ntohs(tcph->dest)) ? 1 : 0;

      /* Three-way-handshake tracking: SYN, SYN+ACK, then first ACK */
      if(tcph->syn != 0 && tcph->ack == 0 && flow->l4.tcp.seen_syn == 0 && flow->l4.tcp.seen_syn_ack == 0 &&
         flow->l4.tcp.seen_ack == 0) {
        flow->l4.tcp.seen_syn = 1;
      }
      if(tcph->syn != 0 && tcph->ack != 0 && flow->l4.tcp.seen_syn == 1 && flow->l4.tcp.seen_syn_ack == 0 &&
         flow->l4.tcp.seen_ack == 0) {
        flow->l4.tcp.seen_syn_ack = 1;
      }
      if(tcph->syn == 0 && tcph->ack == 1 && flow->l4.tcp.seen_syn == 1 && flow->l4.tcp.seen_syn_ack == 1 &&
         flow->l4.tcp.seen_ack == 0) {
        flow->l4.tcp.seen_ack = 1;
      }
      if((flow->next_tcp_seq_nr[0] == 0 && flow->next_tcp_seq_nr[1] == 0) ||
         (flow->next_tcp_seq_nr[0] == 0 || flow->next_tcp_seq_nr[1] == 0)) {
        /* initialize tcp sequence counters */
        /* the ack flag needs to be set to get valid sequence numbers from the other
         * direction. Usually it will catch the second packet syn+ack but it works
         * also for asymmetric traffic where it will use the first data packet
         *
         * if the syn flag is set add one to the sequence number,
         * otherwise use the payload length.
         */
        if(tcph->ack != 0) {
          flow->next_tcp_seq_nr[flow->packet.packet_direction] =
            ntohl(tcph->seq) + (tcph->syn ? 1 : packet->payload_packet_len);
          flow->next_tcp_seq_nr[1 - flow->packet.packet_direction] = ntohl(tcph->ack_seq);
        }
      } else if(packet->payload_packet_len > 0) {
        /* check tcp sequence counters */
        /* unsigned subtraction: wraps, so "old" segments show up as a huge
           forward distance beyond the retransmission window */
        if(((u_int32_t)(ntohl(tcph->seq) - flow->next_tcp_seq_nr[packet->packet_direction])) >
           ndpi_str->tcp_max_retransmission_window_size) {
          packet->tcp_retransmission = 1;

          /* CHECK IF PARTIAL RETRY IS HAPPENING */
          if((flow->next_tcp_seq_nr[packet->packet_direction] - ntohl(tcph->seq) < packet->payload_packet_len)) {
            /* num_retried_bytes actual_payload_len hold info about the partial retry
               analyzer which require this info can make use of this info
               Other analyzer can use packet->payload_packet_len */
            packet->num_retried_bytes = (u_int16_t)(flow->next_tcp_seq_nr[packet->packet_direction] - ntohl(tcph->seq));
            packet->actual_payload_len = packet->payload_packet_len - packet->num_retried_bytes;
            flow->next_tcp_seq_nr[packet->packet_direction] = ntohl(tcph->seq) + packet->payload_packet_len;
          }
        }
        /* normal path
           actual_payload_len is initialized to payload_packet_len during tcp header parsing itself.
           It will be changed only in case of retransmission */
        else {
          packet->num_retried_bytes = 0;
          flow->next_tcp_seq_nr[packet->packet_direction] = ntohl(tcph->seq) + packet->payload_packet_len;
        }
      }

      if(tcph->rst) {
        flow->next_tcp_seq_nr[0] = 0;
        flow->next_tcp_seq_nr[1] = 0;
      }
    } else if(udph != NULL) {
      if(!ndpi_str->direction_detect_disable)
        packet->packet_direction = (htons(udph->source) < htons(udph->dest)) ? 1 : 0;
    }

    if(flow->packet_counter < MAX_PACKET_COUNTER && packet->payload_packet_len) {
      flow->packet_counter++;
    }

    if(flow->packet_direction_counter[packet->packet_direction] < MAX_PACKET_COUNTER && packet->payload_packet_len) {
      flow->packet_direction_counter[packet->packet_direction]++;
    }

    /* Guard against byte-counter overflow before accumulating */
    if(flow->byte_counter[packet->packet_direction] + packet->payload_packet_len >
       flow->byte_counter[packet->packet_direction]) {
      flow->byte_counter[packet->packet_direction] += packet->payload_packet_len;
    }
  }
}

/* ************************************************ */

/* Run the registered dissectors for non-TCP/UDP traffic: first the guessed
 * protocol's own dissector (if eligible), then the non_tcp_udp callback list,
 * stopping at the first positive detection. */
void check_ndpi_other_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                                NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  if(!flow)
    return;

  void *func = NULL;
  u_int32_t a;
  u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx;
  int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId;
  NDPI_PROTOCOL_BITMASK detection_bitmask;

  NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]);

  /* Try the guessed protocol's dissector first, remembering its func pointer
     so the loop below does not invoke it a second time */
  if((proto_id != NDPI_PROTOCOL_UNKNOWN) &&
     NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                          ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 &&
     NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 &&
     (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) ==
       ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) {
    if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
       (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL))
      ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow),
        func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func;
  }

  for (a = 0; a < ndpi_str->callback_buffer_size_non_tcp_udp; a++) {
    if((func != ndpi_str->callback_buffer_non_tcp_udp[a].func) &&
       (ndpi_str->callback_buffer_non_tcp_udp[a].ndpi_selection_bitmask & *ndpi_selection_packet) ==
         ndpi_str->callback_buffer_non_tcp_udp[a].ndpi_selection_bitmask &&
       NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                            ndpi_str->callback_buffer_non_tcp_udp[a].excluded_protocol_bitmask) == 0 &&
       NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_non_tcp_udp[a].detection_bitmask, detection_bitmask) != 0) {
      if(ndpi_str->callback_buffer_non_tcp_udp[a].func != NULL)
        ndpi_str->callback_buffer_non_tcp_udp[a].func(ndpi_str, flow);

      if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
        break; /* Stop after detecting the first protocol */
    }
  }
}

/* ************************************************ */

/* Run the registered dissectors for UDP traffic: guessed-protocol dissector
 * first, then the UDP callback list until a protocol is detected. */
void check_ndpi_udp_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                              NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  void *func = NULL;
  u_int32_t a;
  u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx;
  int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId;
  NDPI_PROTOCOL_BITMASK detection_bitmask;

  NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]);

  if((proto_id != NDPI_PROTOCOL_UNKNOWN) &&
     NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                          ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 &&
     NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 &&
     (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) ==
       ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) {
    if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
       (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL))
      ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow),
        func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func;
  }

  if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) {
    for (a = 0; a < ndpi_str->callback_buffer_size_udp; a++) {
      if((func != ndpi_str->callback_buffer_udp[a].func) &&
(ndpi_str->callback_buffer_udp[a].ndpi_selection_bitmask & *ndpi_selection_packet) ==
           ndpi_str->callback_buffer_udp[a].ndpi_selection_bitmask &&
         NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                              ndpi_str->callback_buffer_udp[a].excluded_protocol_bitmask) == 0 &&
         NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_udp[a].detection_bitmask, detection_bitmask) != 0) {
        ndpi_str->callback_buffer_udp[a].func(ndpi_str, flow);
        // NDPI_LOG_DBG(ndpi_str, "[UDP,CALL] dissector of protocol as callback_buffer idx = %d\n",a);
        if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
          break; /* Stop after detecting the first protocol */
      } else if(_ndpi_debug_callbacks)
        NDPI_LOG_DBG2(ndpi_str, "[UDP,SKIP] dissector of protocol as callback_buffer idx = %d\n", a);
    }
  }
}

/* ************************************************ */

/* Run the registered dissectors for TCP traffic.  Packets with payload use the
 * tcp_payload callback list; empty segments (pure ACKs etc.) use the
 * tcp_no_payload list.  In both cases the guessed protocol's dissector is
 * tried first and the loop stops at the first positive detection. */
void check_ndpi_tcp_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                              NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  void *func = NULL;
  u_int32_t a;
  u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx;
  int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId;
  NDPI_PROTOCOL_BITMASK detection_bitmask;

  NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]);

  if(flow->packet.payload_packet_len != 0) {
    if((proto_id != NDPI_PROTOCOL_UNKNOWN) &&
       NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                            ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 &&
       NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 &&
       (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) ==
         ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) {
      if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
         (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL))
        ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow),
          func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func;
    }

    if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) {
      for (a = 0; a < ndpi_str->callback_buffer_size_tcp_payload; a++) {
        if((func != ndpi_str->callback_buffer_tcp_payload[a].func) &&
           (ndpi_str->callback_buffer_tcp_payload[a].ndpi_selection_bitmask & *ndpi_selection_packet) ==
             ndpi_str->callback_buffer_tcp_payload[a].ndpi_selection_bitmask &&
           NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                                ndpi_str->callback_buffer_tcp_payload[a].excluded_protocol_bitmask) == 0 &&
           NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_tcp_payload[a].detection_bitmask, detection_bitmask) != 0) {
          ndpi_str->callback_buffer_tcp_payload[a].func(ndpi_str, flow);

          if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
            break; /* Stop after detecting the first protocol */
        }
      }
    }
  } else {
    /* no payload */
    if((proto_id != NDPI_PROTOCOL_UNKNOWN) &&
       NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                            ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 &&
       NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 &&
       (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) ==
         ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) {
      if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
         (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL) &&
         ((ndpi_str->callback_buffer[flow->guessed_protocol_id].ndpi_selection_bitmask &
           NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD) == 0))
        ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow),
          func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func;
    }

    for (a = 0; a < ndpi_str->callback_buffer_size_tcp_no_payload; a++) {
      /* NOTE(review): the func comparison below reads callback_buffer_tcp_payload[a]
         while iterating the tcp_no_payload list; the two arrays are indexed
         differently, so this looks like a copy-paste slip — confirm against
         upstream before changing, as it only affects duplicate-call suppression */
      if((func != ndpi_str->callback_buffer_tcp_payload[a].func) &&
         (ndpi_str->callback_buffer_tcp_no_payload[a].ndpi_selection_bitmask & *ndpi_selection_packet) ==
           ndpi_str->callback_buffer_tcp_no_payload[a].ndpi_selection_bitmask &&
         NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                              ndpi_str->callback_buffer_tcp_no_payload[a].excluded_protocol_bitmask) == 0 &&
         NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_tcp_no_payload[a].detection_bitmask, detection_bitmask) != 0) {
        ndpi_str->callback_buffer_tcp_no_payload[a].func(ndpi_str, flow);

        if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
          break; /* Stop after detecting the first protocol */
      }
    }
  }
}

/* ********************************************************************************* */

/* Dispatch to the TCP / UDP / other dissector driver based on which L4 header
 * ndpi_init_packet_header() found. */
void ndpi_check_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                          NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  if(flow->packet.tcp != NULL)
    check_ndpi_tcp_flow_func(ndpi_str, flow, ndpi_selection_packet);
  else if(flow->packet.udp != NULL)
    check_ndpi_udp_flow_func(ndpi_str, flow, ndpi_selection_packet);
  else
    check_ndpi_other_flow_func(ndpi_str, flow, ndpi_selection_packet);
}

/* ********************************************************************************* */

/* Guess the host-based protocol from the IPv4 address/port tree: source
 * address+port first, then destination.  Returns NDPI_PROTOCOL_UNKNOWN when
 * no match is found (and always for non-IPv4 flows). */
u_int16_t ndpi_guess_host_protocol_id(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) {
  u_int16_t ret = NDPI_PROTOCOL_UNKNOWN;

  if(flow->packet.iph) {
    struct in_addr addr;
    u_int16_t sport, dport;

    addr.s_addr = flow->packet.iph->saddr;

    if((flow->l4_proto == IPPROTO_TCP) && flow->packet.tcp)
      sport = flow->packet.tcp->source, dport = flow->packet.tcp->dest;
    else if((flow->l4_proto == IPPROTO_UDP) && flow->packet.udp)
      sport = flow->packet.udp->source, dport = flow->packet.udp->dest;
    else
      sport = dport = 0;

    /* guess host protocol */
    ret = ndpi_network_port_ptree_match(ndpi_str, &addr, sport);

    if(ret == NDPI_PROTOCOL_UNKNOWN) {
      addr.s_addr = flow->packet.iph->daddr;
      ret = ndpi_network_port_ptree_match(ndpi_str, &addr, dport);
    }
  }

  return(ret);
}

/* ********************************************************************************* */

/* Final classification for a flow whose dissectors never concluded: fall back
 * to guessed protocols, TLS SNI, STUN heuristics etc.  *protocol_was_guessed
 * is set to 1 when the returned result comes from a guess rather than a
 * positive detection. */
ndpi_protocol ndpi_detection_giveup(struct ndpi_detection_module_struct *ndpi_str, struct
ndpi_flow_struct *flow, u_int8_t enable_guess, u_int8_t *protocol_was_guessed) { ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; *protocol_was_guessed = 0; if(flow == NULL) return(ret); /* Init defaults */ ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0]; ret.category = flow->category; /* Ensure that we don't change our mind if detection is already complete */ if((ret.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN)) return(ret); /* TODO: add the remaining stage_XXXX protocols */ if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) { u_int16_t guessed_protocol_id = NDPI_PROTOCOL_UNKNOWN, guessed_host_protocol_id = NDPI_PROTOCOL_UNKNOWN; if(flow->guessed_protocol_id == NDPI_PROTOCOL_STUN) goto check_stun_export; else if((flow->guessed_protocol_id == NDPI_PROTOCOL_HANGOUT_DUO) || (flow->guessed_protocol_id == NDPI_PROTOCOL_MESSENGER) || (flow->guessed_protocol_id == NDPI_PROTOCOL_WHATSAPP_CALL)) { *protocol_was_guessed = 1; ndpi_set_detected_protocol(ndpi_str, flow, flow->guessed_protocol_id, NDPI_PROTOCOL_UNKNOWN); } else if((flow->l4.tcp.tls.hello_processed == 1) && (flow->protos.stun_ssl.ssl.client_requested_server_name[0] != '\0')) { *protocol_was_guessed = 1; ndpi_set_detected_protocol(ndpi_str, flow, NDPI_PROTOCOL_TLS, NDPI_PROTOCOL_UNKNOWN); } else if(enable_guess) { if((flow->guessed_protocol_id == NDPI_PROTOCOL_UNKNOWN) && (flow->packet.l4_protocol == IPPROTO_TCP) && flow->l4.tcp.tls.hello_processed) flow->guessed_protocol_id = NDPI_PROTOCOL_TLS; guessed_protocol_id = flow->guessed_protocol_id, guessed_host_protocol_id = flow->guessed_host_protocol_id; if((guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) && ((flow->packet.l4_protocol == IPPROTO_UDP) && NDPI_ISSET(&flow->excluded_protocol_bitmask, guessed_host_protocol_id) && is_udp_guessable_protocol(guessed_host_protocol_id))) 
flow->guessed_host_protocol_id = guessed_host_protocol_id = NDPI_PROTOCOL_UNKNOWN; /* Ignore guessed protocol if they have been discarded */ if((guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) // && (guessed_host_protocol_id == NDPI_PROTOCOL_UNKNOWN) && (flow->packet.l4_protocol == IPPROTO_UDP) && NDPI_ISSET(&flow->excluded_protocol_bitmask, guessed_protocol_id) && is_udp_guessable_protocol(guessed_protocol_id)) flow->guessed_protocol_id = guessed_protocol_id = NDPI_PROTOCOL_UNKNOWN; if((guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) || (guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN)) { if((guessed_protocol_id == 0) && (flow->protos.stun_ssl.stun.num_binding_requests > 0) && (flow->protos.stun_ssl.stun.num_processed_pkts > 0)) guessed_protocol_id = NDPI_PROTOCOL_STUN; if(flow->host_server_name[0] != '\0') { ndpi_protocol_match_result ret_match; memset(&ret_match, 0, sizeof(ret_match)); ndpi_match_host_subprotocol(ndpi_str, flow, (char *) flow->host_server_name, strlen((const char *) flow->host_server_name), &ret_match, NDPI_PROTOCOL_DNS); if(ret_match.protocol_id != NDPI_PROTOCOL_UNKNOWN) guessed_host_protocol_id = ret_match.protocol_id; } *protocol_was_guessed = 1; ndpi_int_change_protocol(ndpi_str, flow, guessed_host_protocol_id, guessed_protocol_id); } } } else if(enable_guess) { if(flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) { *protocol_was_guessed = 1; flow->detected_protocol_stack[1] = flow->guessed_protocol_id; } if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) { *protocol_was_guessed = 1; flow->detected_protocol_stack[0] = flow->guessed_host_protocol_id; } if(flow->detected_protocol_stack[1] == flow->detected_protocol_stack[0]) { *protocol_was_guessed = 1; flow->detected_protocol_stack[1] = flow->guessed_host_protocol_id; } } if((flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) && (flow->guessed_protocol_id == NDPI_PROTOCOL_STUN)) { check_stun_export: if(flow->protos.stun_ssl.stun.num_processed_pkts || 
flow->protos.stun_ssl.stun.num_udp_pkts) {
      // if(/* (flow->protos.stun_ssl.stun.num_processed_pkts >= NDPI_MIN_NUM_STUN_DETECTION) */
      *protocol_was_guessed = 1;
      ndpi_set_detected_protocol(ndpi_str, flow, flow->guessed_host_protocol_id, NDPI_PROTOCOL_STUN);
    }
  }

  ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0];

  if(ret.master_protocol == NDPI_PROTOCOL_STUN) {
    if(ret.app_protocol == NDPI_PROTOCOL_FACEBOOK)
      ret.app_protocol = NDPI_PROTOCOL_MESSENGER;
    else if(ret.app_protocol == NDPI_PROTOCOL_GOOGLE) {
      /*
	As Google has recently introduced Duo,
	we need to distinguish between it and hangout
	thing that should be handled by the STUN dissector
      */
      ret.app_protocol = NDPI_PROTOCOL_HANGOUT_DUO;
    }
  }

  if(ret.app_protocol != NDPI_PROTOCOL_UNKNOWN) {
    *protocol_was_guessed = 1;
    ndpi_fill_protocol_category(ndpi_str, flow, &ret);
  }

  return(ret);
}

/* ********************************************************************************* */

/*
 * Feed one more packet of an already-classified flow to the flow's
 * extra-packets callback so it can extract additional metadata.
 * Re-parses the packet headers, then invokes flow->extra_packets_func;
 * the callback is disarmed once it returns 0 or after
 * max_extra_packets_to_check invocations.
 */
void ndpi_process_extra_packet(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
			       const unsigned char *packet, const unsigned short packetlen,
			       const u_int64_t current_time_ms, struct ndpi_id_struct *src, struct ndpi_id_struct *dst) {
  if(flow == NULL)
    return;

  if(flow->server_id == NULL)
    flow->server_id = dst; /* Default */

  /* need at least 20 bytes for ip header */
  if(packetlen < 20) {
    return;
  }

  flow->packet.current_time_ms = current_time_ms;

  /* parse packet */
  flow->packet.iph = (struct ndpi_iphdr *) packet;
  /* we are interested in ipv4 packet */

  /* set up the packet headers for the extra packet function to use if it wants */
  if(ndpi_init_packet_header(ndpi_str, flow, packetlen) != 0)
    return;

  /* detect traffic for tcp or udp only */
  flow->src = src, flow->dst = dst;

  ndpi_connection_tracking(ndpi_str, flow);

  /* call the extra packet function (which may add more data/info to flow) */
  if(flow->extra_packets_func) {
    if((flow->extra_packets_func(ndpi_str, flow)) == 0)
      flow->check_extra_packets = 0;

    if(++flow->num_extra_packets_checked == flow->max_extra_packets_to_check)
      flow->extra_packets_func = NULL; /* Enough packets detected */
  }
}

/* ********************************************************************************* */

/*
 * Register an IPv4 address (optionally with a /mask suffix, default /32)
 * under the given custom category. The entry goes into the shadow patricia
 * tree; it only becomes visible after ndpi_enable_loaded_categories() swaps
 * the shadow tree in.
 * Returns 0 on success, -1 if the address cannot be parsed.
 */
int ndpi_load_ip_category(struct ndpi_detection_module_struct *ndpi_str, const char *ip_address_and_mask,
			  ndpi_protocol_category_t category) {
  patricia_node_t *node;
  struct in_addr pin;
  int bits = 32;
  char *ptr;
  char ipbuf[64];

  /* Local copy so the '/mask' separator can be cut out in place */
  strncpy(ipbuf, ip_address_and_mask, sizeof(ipbuf));
  ipbuf[sizeof(ipbuf) - 1] = '\0';
  ptr = strrchr(ipbuf, '/');

  if(ptr) {
    *(ptr++) = '\0';
    if(atoi(ptr) >= 0 && atoi(ptr) <= 32)
      bits = atoi(ptr);
  }

  if(inet_pton(AF_INET, ipbuf, &pin) != 1) {
    NDPI_LOG_DBG2(ndpi_str, "Invalid ip/ip+netmask: %s\n", ip_address_and_mask);
    return(-1);
  }

  if((node = add_to_ptree(ndpi_str->custom_categories.ipAddresses_shadow, AF_INET, &pin, bits)) != NULL) {
    node->value.uv.user_value = (u_int16_t)category, node->value.uv.additional_user_value = 0;
  }

  return(0);
}

/* ********************************************************************************* */

/*
 * Register a hostname pattern under the given custom category by adding it
 * to the shadow Aho-Corasick automaton (activated later by
 * ndpi_enable_loaded_categories()). The duplicated name is owned by the
 * automaton on success; on failure (or duplicate pattern) it is freed here.
 * Returns 0 on success, -1 on error.
 */
int ndpi_load_hostname_category(struct ndpi_detection_module_struct *ndpi_str, const char *name_to_add,
				ndpi_protocol_category_t category) {
  char *name;

  if(name_to_add == NULL)
    return(-1);

  name = ndpi_strdup(name_to_add);

  if(name == NULL)
    return(-1);

#if 0
  printf("===> %s() Loading %s as %u\n", __FUNCTION__, name, category);
#endif

  AC_PATTERN_t ac_pattern;
  AC_ERROR_t rc;

  memset(&ac_pattern, 0, sizeof(ac_pattern));

  if(ndpi_str->custom_categories.hostnames_shadow.ac_automa == NULL) {
    free(name);
    return(-1);
  }

  ac_pattern.astring = name, ac_pattern.length = strlen(ac_pattern.astring);
  ac_pattern.rep.number = (u_int32_t) category, ac_pattern.rep.category = category;;

  rc = ac_automata_add(ndpi_str->custom_categories.hostnames_shadow.ac_automa, &ac_pattern);

  if(rc != ACERR_DUPLICATE_PATTERN && rc != ACERR_SUCCESS) {
    free(name);
    return(-1);
  }

  if(rc == ACERR_DUPLICATE_PATTERN)
free(name); /* Duplicate: the automaton keeps its original copy, release ours */

  return(0);
}

/* ********************************************************************************* */

/* Loads an IP or name category */
/*
 * Convenience loader: first tries to interpret ip_or_name as an IPv4
 * address (with optional netmask); if that fails, falls back to loading
 * it as a hostname pattern. Returns the result of the loader used.
 */
int ndpi_load_category(struct ndpi_detection_module_struct *ndpi_struct, const char *ip_or_name,
		       ndpi_protocol_category_t category) {
  int rv;

  /* Try to load as IP address first */
  rv = ndpi_load_ip_category(ndpi_struct, ip_or_name, category);

  if(rv < 0) {
    /* IP load failed, load as hostname */
    rv = ndpi_load_hostname_category(ndpi_struct, ip_or_name, category);
  }

  return(rv);
}

/* ********************************************************************************* */

/*
 * Activate all categories loaded so far: seeds the built-in category
 * matches, finalizes the shadow hostname automaton and shadow IP tree,
 * swaps them into the live slots and allocates fresh shadow structures
 * for future loads. Always returns 0.
 */
int ndpi_enable_loaded_categories(struct ndpi_detection_module_struct *ndpi_str) {
  int i;

  /* First add the nDPI known categories matches */
  for (i = 0; category_match[i].string_to_match != NULL; i++)
    ndpi_load_category(ndpi_str, category_match[i].string_to_match, category_match[i].protocol_category);

  /* Free */
  ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames.ac_automa,
		      1 /* free patterns strings memory */);

  /* Finalize */
  ac_automata_finalize((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames_shadow.ac_automa);

  /* Swap */
  ndpi_str->custom_categories.hostnames.ac_automa = ndpi_str->custom_categories.hostnames_shadow.ac_automa;

  /* Realloc */
  ndpi_str->custom_categories.hostnames_shadow.ac_automa = ac_automata_init(ac_match_handler);

  if(ndpi_str->custom_categories.ipAddresses != NULL)
    ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses, free_ptree_data);

  ndpi_str->custom_categories.ipAddresses = ndpi_str->custom_categories.ipAddresses_shadow;
  ndpi_str->custom_categories.ipAddresses_shadow = ndpi_New_Patricia(32 /* IPv4 */);

  ndpi_str->custom_categories.categories_loaded = 1;

  return(0);
}

/* ********************************************************************************* */

/*
 * Resolve the category of a flow from its IPv4 endpoints using the custom
 * category tree: the source address is looked up first, then the
 * destination. Writes the match into ret->category and returns 1 on a
 * custom hit; otherwise falls back to the protocol-based category and
 * returns 0.
 */
int ndpi_fill_ip_protocol_category(struct ndpi_detection_module_struct *ndpi_str, u_int32_t saddr, u_int32_t daddr, ndpi_protocol
*ret) {
  if(ndpi_str->custom_categories.categories_loaded) {
    prefix_t prefix;
    patricia_node_t *node;

    if(saddr == 0)
      node = NULL;
    else {
      /* Make sure all in network byte order otherwise compares wont work */
      fill_prefix_v4(&prefix, (struct in_addr *) &saddr, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits);
      node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix);
    }

    if(!node) {
      if(daddr != 0) {
	fill_prefix_v4(&prefix, (struct in_addr *) &daddr, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits);
	node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix);
      }
    }

    if(node) {
      ret->category = (ndpi_protocol_category_t) node->value.uv.user_value;
      return(1);
    }
  }

  ret->category = ndpi_get_proto_category(ndpi_str, *ret);

  return(0);
}

/* ********************************************************************************* */

/*
 * Fill flow->category / ret->category for a flow. Custom categories are
 * consulted in priority order: the category guessed from the IP header,
 * then the HTTP Host name, then the TLS SNI (only once the TLS hello has
 * been fully processed). If none matches, the category derived from the
 * detected protocol is used.
 */
void ndpi_fill_protocol_category(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
				 ndpi_protocol *ret) {
  if(ndpi_str->custom_categories.categories_loaded) {
    if(flow->guessed_header_category != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) {
      flow->category = ret->category = flow->guessed_header_category;
      return;
    }

    if(flow->host_server_name[0] != '\0') {
      u_int32_t id;
      int rc = ndpi_match_custom_category(ndpi_str, (char *) flow->host_server_name,
					  strlen((char *) flow->host_server_name), &id);

      if(rc == 0) {
	flow->category = ret->category = (ndpi_protocol_category_t) id;
	return;
      }
    }

    if(flow->l4.tcp.tls.hello_processed == 1 && flow->protos.stun_ssl.ssl.client_requested_server_name[0] != '\0') {
      u_int32_t id;
      int rc = ndpi_match_custom_category(ndpi_str, (char *) flow->protos.stun_ssl.ssl.client_requested_server_name,
					  strlen(flow->protos.stun_ssl.ssl.client_requested_server_name), &id);

      if(rc == 0) {
	flow->category = ret->category = (ndpi_protocol_category_t) id;
	return;
      }
    }
  }

  flow->category = ret->category = ndpi_get_proto_category(ndpi_str, *ret);
}

/*
********************************************************************************* */

/*
 * Reset all per-packet HTTP line-parsing state (parsed line pointers and
 * lengths, header fields, counters) so the next packet can be parsed from
 * scratch. Touches only the fields filled by ndpi_parse_packet_line_info().
 */
static void ndpi_reset_packet_line_info(struct ndpi_packet_struct *packet) {
  packet->parsed_lines = 0, packet->empty_line_position_set = 0, packet->host_line.ptr = NULL,
    packet->host_line.len = 0, packet->referer_line.ptr = NULL, packet->referer_line.len = 0,
    packet->content_line.ptr = NULL, packet->content_line.len = 0, packet->accept_line.ptr = NULL,
    packet->accept_line.len = 0, packet->user_agent_line.ptr = NULL, packet->user_agent_line.len = 0,
    packet->http_url_name.ptr = NULL, packet->http_url_name.len = 0, packet->http_encoding.ptr = NULL,
    packet->http_encoding.len = 0, packet->http_transfer_encoding.ptr = NULL, packet->http_transfer_encoding.len = 0,
    packet->http_contentlen.ptr = NULL, packet->http_contentlen.len = 0, packet->content_disposition_line.ptr = NULL,
    packet->content_disposition_line.len = 0, packet->http_cookie.ptr = NULL, packet->http_cookie.len = 0,
    packet->http_origin.len = 0, packet->http_origin.ptr = NULL, packet->http_x_session_type.ptr = NULL,
    packet->http_x_session_type.len = 0, packet->server_line.ptr = NULL, packet->server_line.len = 0,
    packet->http_method.ptr = NULL, packet->http_method.len = 0, packet->http_response.ptr = NULL,
    packet->http_response.len = 0, packet->http_num_headers = 0;
}

/* ********************************************************************************* */

/*
 * Decide whether a protocol/port mismatch is an acceptable exception
 * (e.g. TLS seen on a port registered for IMAPS/POPS/SMTPS) instead of a
 * risk worth flagging. Returns 1 for a known exception, 0 otherwise.
 */
static int ndpi_check_protocol_port_mismatch_exceptions(struct ndpi_detection_module_struct *ndpi_str,
							struct ndpi_flow_struct *flow,
							ndpi_default_ports_tree_node_t *expected_proto,
							ndpi_protocol *returned_proto) {
  /*
    For TLS (and other protocols) it is not simple to guess the exact protocol so before
    triggering an alert we need to make sure what we have exhausted all the possible
    options available
  */
  if(returned_proto->master_protocol == NDPI_PROTOCOL_TLS) {
    switch(expected_proto->proto->protoId) {
    case NDPI_PROTOCOL_MAIL_IMAPS:
    case NDPI_PROTOCOL_MAIL_POPS:
    case NDPI_PROTOCOL_MAIL_SMTPS:
      return(1); /* This is a reasonable exception */
      break;
    }
  }

  return(0);
}

/* ********************************************************************************* */

/*
 * Post-detection fix-ups between related protocols. Hosts recently seen
 * doing MS Teams over TCP are remembered in an LRU cache keyed by source
 * address; a later Skype/Skype-call classification from the same host
 * within 60 seconds is rewritten to MS Teams (Teams uses Skype as its
 * voice/video transport).
 */
static void ndpi_reconcile_protocols(struct ndpi_detection_module_struct *ndpi_str,
				     struct ndpi_flow_struct *flow, ndpi_protocol *ret) {
  /* Skype for a host doing MS Teams means MS Teams (MS Teams uses Skype as transport protocol for voice/video) */

  if(flow) {
    /* Do not go for DNS when there is an application protocol. Example DNS.Apple */
    if((flow->detected_protocol_stack[1] != NDPI_PROTOCOL_UNKNOWN)
       && (flow->detected_protocol_stack[0] /* app */ != flow->detected_protocol_stack[1] /* major */))
      NDPI_CLR_BIT(flow->risk, NDPI_SUSPICIOUS_DGA_DOMAIN);
  }

  switch(ret->app_protocol) {
  case NDPI_PROTOCOL_MSTEAMS:
    if(flow->packet.iph && flow->packet.tcp) {
      // printf("====>> NDPI_PROTOCOL_MSTEAMS\n");

      if(ndpi_str->msteams_cache == NULL)
	ndpi_str->msteams_cache = ndpi_lru_cache_init(1024);

      if(ndpi_str->msteams_cache)
	ndpi_lru_add_to_cache(ndpi_str->msteams_cache,
			      flow->packet.iph->saddr,
			      (flow->packet.current_time_ms / 1000) & 0xFFFF /* 16 bit */);
    }
    break;

  case NDPI_PROTOCOL_SKYPE:
  case NDPI_PROTOCOL_SKYPE_CALL:
    if(flow->packet.iph
       && flow->packet.udp
       && ndpi_str->msteams_cache) {
      u_int16_t when;

      if(ndpi_lru_find_cache(ndpi_str->msteams_cache, flow->packet.iph->saddr,
			     &when, 0 /* Don't remove it as it can be used for other connections */)) {
	u_int16_t tdiff = ((flow->packet.current_time_ms /1000) & 0xFFFF) - when;

	if(tdiff < 60 /* sec */) {
	  // printf("====>> NDPI_PROTOCOL_SKYPE(_CALL) -> NDPI_PROTOCOL_MSTEAMS [%u]\n", tdiff);
	  ret->app_protocol = NDPI_PROTOCOL_MSTEAMS;

	  /* Refresh cache */
	  ndpi_lru_add_to_cache(ndpi_str->msteams_cache,
				flow->packet.iph->saddr,
				(flow->packet.current_time_ms / 1000) & 0xFFFF /* 16 bit */);
	}
      }
    }
    break;
  } /* switch */
}

/* ********************************************************************************* */

ndpi_protocol ndpi_detection_process_packet(struct
ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
					    const unsigned char *packet, const unsigned short packetlen,
					    const u_int64_t current_time_ms, struct ndpi_id_struct *src,
					    struct ndpi_id_struct *dst) {
  /*
   * Main per-packet entry point: parses the packet, runs protocol
   * guessing and the registered dissectors, manages extra-packet
   * processing for already-classified flows, fills the category and
   * flags port-mismatch risks. Returns the (master, app, category)
   * classification for the flow so far.
   */
  NDPI_SELECTION_BITMASK_PROTOCOL_SIZE ndpi_selection_packet;
  u_int32_t a;
  ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED};

  if(ndpi_str->ndpi_log_level >= NDPI_LOG_TRACE)
    NDPI_LOG(flow ? flow->detected_protocol_stack[0] : NDPI_PROTOCOL_UNKNOWN,
	     ndpi_str, NDPI_LOG_TRACE, "START packet processing\n");

  if(flow == NULL)
    return(ret);
  else
    ret.category = flow->category;

  flow->num_processed_pkts++;

  /* Init default */
  ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0];

  if(flow->server_id == NULL)
    flow->server_id = dst; /* Default */

  if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN) {
    /* Flow already classified: at most run the extra-packets callback */
    if(flow->check_extra_packets) {
      ndpi_process_extra_packet(ndpi_str, flow, packet, packetlen, current_time_ms, src, dst);
      /* Update in case of new match */
      ret.master_protocol = flow->detected_protocol_stack[1],
	ret.app_protocol = flow->detected_protocol_stack[0],
	ret.category = flow->category;
      goto invalidate_ptr;
    } else
      goto ret_protocols;
  }

  /* need at least 20 bytes for ip header */
  if(packetlen < 20) {
    /* reset protocol which is normally done in init_packet_header */
    ndpi_int_reset_packet_protocol(&flow->packet);
    goto invalidate_ptr;
  }

  flow->packet.current_time_ms = current_time_ms;

  /* parse packet */
  flow->packet.iph = (struct ndpi_iphdr *) packet;
  /* we are interested in ipv4 packet */

  if(ndpi_init_packet_header(ndpi_str, flow, packetlen) != 0)
    goto invalidate_ptr;

  /* detect traffic for tcp or udp only */
  flow->src = src, flow->dst = dst;

  ndpi_connection_tracking(ndpi_str, flow);

  /* build ndpi_selection packet bitmask */
  ndpi_selection_packet = NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC;
  if(flow->packet.iph != NULL)
    ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_IP | NDPI_SELECTION_BITMASK_PROTOCOL_IPV4_OR_IPV6;

  if(flow->packet.tcp != NULL)
    ndpi_selection_packet |= (NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP);

  if(flow->packet.udp != NULL)
    ndpi_selection_packet |= (NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP);

  if(flow->packet.payload_packet_len != 0)
    ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD;

  if(flow->packet.tcp_retransmission == 0)
    ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_NO_TCP_RETRANSMISSION;

#ifdef NDPI_DETECTION_SUPPORT_IPV6
  if(flow->packet.iphv6 != NULL)
    ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_IPV6 | NDPI_SELECTION_BITMASK_PROTOCOL_IPV4_OR_IPV6;
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */

  /* Port/IP based guessing runs once per flow, on the first parsable packet */
  if((!flow->protocol_id_already_guessed) && (
#ifdef NDPI_DETECTION_SUPPORT_IPV6
					      flow->packet.iphv6 ||
#endif
					      flow->packet.iph)) {
    u_int16_t sport, dport;
    u_int8_t protocol;
    u_int8_t user_defined_proto;

    flow->protocol_id_already_guessed = 1;

#ifdef NDPI_DETECTION_SUPPORT_IPV6
    if(flow->packet.iphv6 != NULL) {
      protocol = flow->packet.iphv6->ip6_hdr.ip6_un1_nxt;
    } else
#endif
      {
	protocol = flow->packet.iph->protocol;
      }

    if(flow->packet.udp)
      sport = ntohs(flow->packet.udp->source), dport = ntohs(flow->packet.udp->dest);
    else if(flow->packet.tcp)
      sport = ntohs(flow->packet.tcp->source), dport = ntohs(flow->packet.tcp->dest);
    else
      sport = dport = 0;

    /* guess protocol */
    flow->guessed_protocol_id = (int16_t) ndpi_guess_protocol_id(ndpi_str, flow, protocol, sport, dport, &user_defined_proto);
    flow->guessed_host_protocol_id = ndpi_guess_host_protocol_id(ndpi_str, flow);

    if(ndpi_str->custom_categories.categories_loaded && flow->packet.iph) {
      ndpi_fill_ip_protocol_category(ndpi_str, flow->packet.iph->saddr, flow->packet.iph->daddr, &ret);
      flow->guessed_header_category = ret.category;
    } else
      flow->guessed_header_category = NDPI_PROTOCOL_CATEGORY_UNSPECIFIED;

    if(flow->guessed_protocol_id >= NDPI_MAX_SUPPORTED_PROTOCOLS) {
      /* This is a custom protocol and it has priority over everything else */
      ret.master_protocol = NDPI_PROTOCOL_UNKNOWN,
	ret.app_protocol = flow->guessed_protocol_id ? flow->guessed_protocol_id : flow->guessed_host_protocol_id;
      ndpi_fill_protocol_category(ndpi_str, flow, &ret);
      goto invalidate_ptr;
    }

    if(user_defined_proto && flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) {
      if(flow->packet.iph) {
	if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) {
	  u_int8_t protocol_was_guessed;

	  /* ret.master_protocol = flow->guessed_protocol_id , ret.app_protocol = flow->guessed_host_protocol_id; /\* ****** *\/ */
	  ret = ndpi_detection_giveup(ndpi_str, flow, 0, &protocol_was_guessed);
	}

	ndpi_fill_protocol_category(ndpi_str, flow, &ret);
	goto invalidate_ptr;
      }
    } else {
      /* guess host protocol */
      if(flow->packet.iph) {
	flow->guessed_host_protocol_id = ndpi_guess_host_protocol_id(ndpi_str, flow);

	/*
	  We could implement a shortcut here skipping dissectors for
	  protocols we have identified by other means such as with the IP

	  However we do NOT stop here and skip invoking the dissectors
	  because we want to dissect the flow (e.g. dissect the TLS)
	  and extract metadata.
	*/
#if SKIP_INVOKING_THE_DISSECTORS
	if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) {
	  /*
	    We have identified a protocol using the IP address so
	    it is not worth to dissect the traffic as we already have
	    the solution
	  */
	  ret.master_protocol = flow->guessed_protocol_id,
	    ret.app_protocol = flow->guessed_host_protocol_id;
	}
#endif
      }
    }
  }

  if(flow->guessed_host_protocol_id >= NDPI_MAX_SUPPORTED_PROTOCOLS) {
    /* This is a custom protocol and it has priority over everything else */
    ret.master_protocol = flow->guessed_protocol_id, ret.app_protocol = flow->guessed_host_protocol_id;

    ndpi_check_flow_func(ndpi_str, flow, &ndpi_selection_packet);
    ndpi_fill_protocol_category(ndpi_str, flow, &ret);
    goto invalidate_ptr;
  }

  /* Run the registered dissectors over this packet */
  ndpi_check_flow_func(ndpi_str, flow, &ndpi_selection_packet);

  a = flow->packet.detected_protocol_stack[0];
  if(NDPI_COMPARE_PROTOCOL_TO_BITMASK(ndpi_str->detection_bitmask, a) == 0)
    a = NDPI_PROTOCOL_UNKNOWN;

  if(a != NDPI_PROTOCOL_UNKNOWN) {
    int i;

    /* Lowercase the host name in place, stopping at the first NUL */
    for (i = 0; i < sizeof(flow->host_server_name); i++) {
      if(flow->host_server_name[i] != '\0')
	flow->host_server_name[i] = tolower(flow->host_server_name[i]);
      else {
	flow->host_server_name[i] = '\0';
	break;
      }
    }
  }

 ret_protocols:
  if(flow->detected_protocol_stack[1] != NDPI_PROTOCOL_UNKNOWN) {
    ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0];

    if(ret.app_protocol == ret.master_protocol)
      ret.master_protocol = NDPI_PROTOCOL_UNKNOWN;
  } else
    ret.app_protocol = flow->detected_protocol_stack[0];

  /* Don't overwrite the category if already set */
  if((flow->category == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN))
    ndpi_fill_protocol_category(ndpi_str, flow, &ret);
  else
    ret.category = flow->category;

  if((flow->num_processed_pkts == 1)
     && (ret.master_protocol == NDPI_PROTOCOL_UNKNOWN)
     && (ret.app_protocol == NDPI_PROTOCOL_UNKNOWN)
     && flow->packet.tcp
     && (flow->packet.tcp->syn == 0)
     && (flow->guessed_protocol_id == 0)) {
    u_int8_t protocol_was_guessed;

    /*
      This is a TCP flow

      - whose first packet is NOT a SYN
      - no protocol has been detected

      We don't see how future packets can match anything
      hence we giveup here
    */
    ret = ndpi_detection_giveup(ndpi_str, flow, 0, &protocol_was_guessed);
  }

  if((ret.master_protocol == NDPI_PROTOCOL_UNKNOWN)
     && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN)
     && (flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN)) {
    ret.master_protocol = ret.app_protocol;
    ret.app_protocol = flow->guessed_host_protocol_id;
  }

  /* One-shot check: flag known protocols running on non-standard ports */
  if((!flow->risk_checked) && (ret.master_protocol != NDPI_PROTOCOL_UNKNOWN)) {
    ndpi_default_ports_tree_node_t *found;
    u_int16_t *default_ports, sport, dport;

    if(flow->packet.udp)
      found = ndpi_get_guessed_protocol_id(ndpi_str, IPPROTO_UDP,
					   sport = ntohs(flow->packet.udp->source),
					   dport = ntohs(flow->packet.udp->dest)),
	default_ports = ndpi_str->proto_defaults[ret.master_protocol].udp_default_ports;
    else if(flow->packet.tcp)
      found = ndpi_get_guessed_protocol_id(ndpi_str, IPPROTO_TCP,
					   sport = ntohs(flow->packet.tcp->source),
					   dport = ntohs(flow->packet.tcp->dest)),
	default_ports = ndpi_str->proto_defaults[ret.master_protocol].tcp_default_ports;
    else
      found = NULL, default_ports = NULL;

    if(found
       && (found->proto->protoId != NDPI_PROTOCOL_UNKNOWN)
       && (found->proto->protoId != ret.master_protocol)) {
      // printf("******** %u / %u\n", found->proto->protoId, ret.master_protocol);
      if(!ndpi_check_protocol_port_mismatch_exceptions(ndpi_str, flow, found, &ret))
	NDPI_SET_BIT(flow->risk, NDPI_KNOWN_PROTOCOL_ON_NON_STANDARD_PORT);
    } else if(default_ports && (default_ports[0] != 0)) {
      u_int8_t found = 0, i;

      for(i=0; (i<MAX_DEFAULT_PORTS) && (default_ports[i] != 0); i++) {
	if((default_ports[i] == sport) || (default_ports[i] == dport)) {
	  found = 1;
	  break;
	}
      } /* for */

      if(!found) {
	// printf("******** Invalid default port\n");
	NDPI_SET_BIT(flow->risk, NDPI_KNOWN_PROTOCOL_ON_NON_STANDARD_PORT);
      }
    }

    flow->risk_checked = 1;
  }

  ndpi_reconcile_protocols(ndpi_str, flow, &ret);

 invalidate_ptr:
/* Invalidate packet memory to avoid accessing the pointers below when the packet is no longer accessible */ flow->packet.iph = NULL, flow->packet.tcp = NULL, flow->packet.udp = NULL, flow->packet.payload = NULL; ndpi_reset_packet_line_info(&flow->packet); return(ret); } /* ********************************************************************************* */ u_int32_t ndpi_bytestream_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int32_t val; val = 0; // cancel if eof, ' ' or line end chars are reached while (*str >= '0' && *str <= '9' && max_chars_to_read > 0) { val *= 10; val += *str - '0'; str++; max_chars_to_read = max_chars_to_read - 1; *bytes_read = *bytes_read + 1; } return(val); } /* ********************************************************************************* */ #ifdef CODE_UNUSED u_int32_t ndpi_bytestream_dec_or_hex_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int32_t val; val = 0; if(max_chars_to_read <= 2 || str[0] != '0' || str[1] != 'x') { return(ndpi_bytestream_to_number(str, max_chars_to_read, bytes_read)); } else { /*use base 16 system */ str += 2; max_chars_to_read -= 2; *bytes_read = *bytes_read + 2; while (max_chars_to_read > 0) { if(*str >= '0' && *str <= '9') { val *= 16; val += *str - '0'; } else if(*str >= 'a' && *str <= 'f') { val *= 16; val += *str + 10 - 'a'; } else if(*str >= 'A' && *str <= 'F') { val *= 16; val += *str + 10 - 'A'; } else { break; } str++; max_chars_to_read = max_chars_to_read - 1; *bytes_read = *bytes_read + 1; } } return(val); } #endif /* ********************************************************************************* */ u_int64_t ndpi_bytestream_to_number64(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int64_t val; val = 0; // cancel if eof, ' ' or line end chars are reached while (max_chars_to_read > 0 && *str >= '0' && *str <= '9') { val *= 10; val += *str - '0'; str++; max_chars_to_read = 
max_chars_to_read - 1; *bytes_read = *bytes_read + 1; } return(val); } /* ********************************************************************************* */ u_int64_t ndpi_bytestream_dec_or_hex_to_number64(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int64_t val; val = 0; if(max_chars_to_read <= 2 || str[0] != '0' || str[1] != 'x') { return(ndpi_bytestream_to_number64(str, max_chars_to_read, bytes_read)); } else { /*use base 16 system */ str += 2; max_chars_to_read -= 2; *bytes_read = *bytes_read + 2; while (max_chars_to_read > 0) { if(*str >= '0' && *str <= '9') { val *= 16; val += *str - '0'; } else if(*str >= 'a' && *str <= 'f') { val *= 16; val += *str + 10 - 'a'; } else if(*str >= 'A' && *str <= 'F') { val *= 16; val += *str + 10 - 'A'; } else { break; } str++; max_chars_to_read = max_chars_to_read - 1; *bytes_read = *bytes_read + 1; } } return(val); } /* ********************************************************************************* */ u_int32_t ndpi_bytestream_to_ipv4(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int32_t val; u_int16_t read = 0; u_int16_t oldread; u_int32_t c; /* ip address must be X.X.X.X with each X between 0 and 255 */ oldread = read; c = ndpi_bytestream_to_number(str, max_chars_to_read, &read); if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.') return(0); read++; val = c << 24; oldread = read; c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read); if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.') return(0); read++; val = val + (c << 16); oldread = read; c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read); if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.') return(0); read++; val = val + (c << 8); oldread = read; c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read); if(c > 255 || oldread == read || max_chars_to_read == 
read) return(0); val = val + c; *bytes_read = *bytes_read + read; return(htonl(val)); } /* ********************************************************************************* */ /* internal function for every detection to parse one packet and to increase the info buffer */ void ndpi_parse_packet_line_info(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { u_int32_t a; struct ndpi_packet_struct *packet = &flow->packet; if((packet->payload_packet_len < 3) || (packet->payload == NULL)) return; if(packet->packet_lines_parsed_complete != 0) return; packet->packet_lines_parsed_complete = 1; ndpi_reset_packet_line_info(packet); packet->line[packet->parsed_lines].ptr = packet->payload; packet->line[packet->parsed_lines].len = 0; for (a = 0; ((a+1) < packet->payload_packet_len) && (packet->parsed_lines < NDPI_MAX_PARSE_LINES_PER_PACKET); a++) { if((packet->payload[a] == 0x0d) && (packet->payload[a+1] == 0x0a)) { /* If end of line char sequence CR+NL "\r\n", process line */ if(((a + 3) < packet->payload_packet_len) && (packet->payload[a+2] == 0x0d) && (packet->payload[a+3] == 0x0a)) { /* \r\n\r\n */ int diff; /* No unsigned ! */ u_int32_t a1 = a + 4; diff = packet->payload_packet_len - a1; if(diff > 0) { diff = ndpi_min(diff, sizeof(flow->initial_binary_bytes)); memcpy(&flow->initial_binary_bytes, &packet->payload[a1], diff); flow->initial_binary_bytes_len = diff; } } packet->line[packet->parsed_lines].len = (u_int16_t)(((unsigned long) &packet->payload[a]) - ((unsigned long) packet->line[packet->parsed_lines].ptr)); /* First line of a HTTP response parsing. Expected a "HTTP/1.? ???" 
*/ if(packet->parsed_lines == 0 && packet->line[0].len >= NDPI_STATICSTRING_LEN("HTTP/1.X 200 ") && strncasecmp((const char *) packet->line[0].ptr, "HTTP/1.", NDPI_STATICSTRING_LEN("HTTP/1.")) == 0 && packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] > '0' && /* response code between 000 and 699 */ packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] < '6') { packet->http_response.ptr = &packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.1 ")]; packet->http_response.len = packet->line[0].len - NDPI_STATICSTRING_LEN("HTTP/1.1 "); packet->http_num_headers++; /* Set server HTTP response code */ if(packet->payload_packet_len >= 12) { char buf[4]; /* Set server HTTP response code */ strncpy(buf, (char *) &packet->payload[9], 3); buf[3] = '\0'; flow->http.response_status_code = atoi(buf); /* https://en.wikipedia.org/wiki/List_of_HTTP_status_codes */ if((flow->http.response_status_code < 100) || (flow->http.response_status_code > 509)) flow->http.response_status_code = 0; /* Out of range */ } } /* "Server:" header line in HTTP response */ if(packet->line[packet->parsed_lines].len > NDPI_STATICSTRING_LEN("Server:") + 1 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Server:", NDPI_STATICSTRING_LEN("Server:")) == 0) { // some stupid clients omit a space and place the servername directly after the colon if(packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")] == ' ') { packet->server_line.ptr = &packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:") + 1]; packet->server_line.len = packet->line[packet->parsed_lines].len - (NDPI_STATICSTRING_LEN("Server:") + 1); } else { packet->server_line.ptr = &packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")]; packet->server_line.len = packet->line[packet->parsed_lines].len - NDPI_STATICSTRING_LEN("Server:"); } packet->http_num_headers++; } /* "Host:" header line in HTTP request */ if(packet->line[packet->parsed_lines].len > 6 && strncasecmp((const 
char *) packet->line[packet->parsed_lines].ptr, "Host:", 5) == 0) { // some stupid clients omit a space and place the hostname directly after the colon if(packet->line[packet->parsed_lines].ptr[5] == ' ') { packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[6]; packet->host_line.len = packet->line[packet->parsed_lines].len - 6; } else { packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[5]; packet->host_line.len = packet->line[packet->parsed_lines].len - 5; } packet->http_num_headers++; } /* "X-Forwarded-For:" header line in HTTP request. Commonly used for HTTP proxies. */ if(packet->line[packet->parsed_lines].len > 17 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Forwarded-For:", 16) == 0) { // some stupid clients omit a space and place the hostname directly after the colon if(packet->line[packet->parsed_lines].ptr[16] == ' ') { packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[17]; packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 17; } else { packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[16]; packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 16; } packet->http_num_headers++; } /* "Content-Type:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 14 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Type: ", 14) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type: ", 14) == 0)) { packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[14]; packet->content_line.len = packet->line[packet->parsed_lines].len - 14; while ((packet->content_line.len > 0) && (packet->content_line.ptr[0] == ' ')) packet->content_line.len--, packet->content_line.ptr++; packet->http_num_headers++; } /* "Content-Type:" header line in HTTP AGAIN. 
Probably a bogus response without space after ":" */ if((packet->content_line.len == 0) && (packet->line[packet->parsed_lines].len > 13) && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type:", 13) == 0)) { packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[13]; packet->content_line.len = packet->line[packet->parsed_lines].len - 13; packet->http_num_headers++; } if(packet->content_line.len > 0) { /* application/json; charset=utf-8 */ char separator[] = {';', '\r', '\0'}; int i; for (i = 0; separator[i] != '\0'; i++) { char *c = memchr((char *) packet->content_line.ptr, separator[i], packet->content_line.len); if(c != NULL) packet->content_line.len = c - (char *) packet->content_line.ptr; } } /* "Accept:" header line in HTTP request. */ if(packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept: ", 8) == 0) { packet->accept_line.ptr = &packet->line[packet->parsed_lines].ptr[8]; packet->accept_line.len = packet->line[packet->parsed_lines].len - 8; packet->http_num_headers++; } /* "Referer:" header line in HTTP request. */ if(packet->line[packet->parsed_lines].len > 9 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Referer: ", 9) == 0) { packet->referer_line.ptr = &packet->line[packet->parsed_lines].ptr[9]; packet->referer_line.len = packet->line[packet->parsed_lines].len - 9; packet->http_num_headers++; } /* "User-Agent:" header line in HTTP request. */ if(packet->line[packet->parsed_lines].len > 12 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-Agent: ", 12) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-agent: ", 12) == 0)) { packet->user_agent_line.ptr = &packet->line[packet->parsed_lines].ptr[12]; packet->user_agent_line.len = packet->line[packet->parsed_lines].len - 12; packet->http_num_headers++; } /* "Content-Encoding:" header line in HTTP response (and request?). 
*/ if(packet->line[packet->parsed_lines].len > 18 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Encoding: ", 18) == 0) { packet->http_encoding.ptr = &packet->line[packet->parsed_lines].ptr[18]; packet->http_encoding.len = packet->line[packet->parsed_lines].len - 18; packet->http_num_headers++; } /* "Transfer-Encoding:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 19 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Transfer-Encoding: ", 19) == 0) { packet->http_transfer_encoding.ptr = &packet->line[packet->parsed_lines].ptr[19]; packet->http_transfer_encoding.len = packet->line[packet->parsed_lines].len - 19; packet->http_num_headers++; } /* "Content-Length:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 16 && ((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Length: ", 16) == 0) || (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "content-length: ", 16) == 0))) { packet->http_contentlen.ptr = &packet->line[packet->parsed_lines].ptr[16]; packet->http_contentlen.len = packet->line[packet->parsed_lines].len - 16; packet->http_num_headers++; } /* "Content-Disposition"*/ if(packet->line[packet->parsed_lines].len > 21 && ((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Disposition: ", 21) == 0))) { packet->content_disposition_line.ptr = &packet->line[packet->parsed_lines].ptr[21]; packet->content_disposition_line.len = packet->line[packet->parsed_lines].len - 21; packet->http_num_headers++; } /* "Cookie:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Cookie: ", 8) == 0) { packet->http_cookie.ptr = &packet->line[packet->parsed_lines].ptr[8]; packet->http_cookie.len = packet->line[packet->parsed_lines].len - 8; packet->http_num_headers++; } /* "Origin:" header line in HTTP. 
*/ if(packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Origin: ", 8) == 0) { packet->http_origin.ptr = &packet->line[packet->parsed_lines].ptr[8]; packet->http_origin.len = packet->line[packet->parsed_lines].len - 8; packet->http_num_headers++; } /* "X-Session-Type:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 16 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Session-Type: ", 16) == 0) { packet->http_x_session_type.ptr = &packet->line[packet->parsed_lines].ptr[16]; packet->http_x_session_type.len = packet->line[packet->parsed_lines].len - 16; packet->http_num_headers++; } /* Identification and counting of other HTTP headers. * We consider the most common headers, but there are many others, * which can be seen at references below: * - https://tools.ietf.org/html/rfc7230 * - https://en.wikipedia.org/wiki/List_of_HTTP_header_fields */ if((packet->line[packet->parsed_lines].len > 6 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Date: ", 6) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Vary: ", 6) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "ETag: ", 6) == 0)) || (packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Pragma: ", 8) == 0) || (packet->line[packet->parsed_lines].len > 9 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Expires: ", 9) == 0) || (packet->line[packet->parsed_lines].len > 12 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Set-Cookie: ", 12) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Keep-Alive: ", 12) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Connection: ", 12) == 0)) || (packet->line[packet->parsed_lines].len > 15 && (strncasecmp((const char *) 
packet->line[packet->parsed_lines].ptr, "Last-Modified: ", 15) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Ranges: ", 15) == 0)) || (packet->line[packet->parsed_lines].len > 17 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Language: ", 17) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Encoding: ", 17) == 0)) || (packet->line[packet->parsed_lines].len > 27 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Upgrade-Insecure-Requests: ", 27) == 0)) { /* Just count. In the future, if needed, this if can be splited to parse these headers */ packet->http_num_headers++; } if(packet->line[packet->parsed_lines].len == 0) { packet->empty_line_position = a; packet->empty_line_position_set = 1; } if(packet->parsed_lines >= (NDPI_MAX_PARSE_LINES_PER_PACKET - 1)) return; packet->parsed_lines++; packet->line[packet->parsed_lines].ptr = &packet->payload[a + 2]; packet->line[packet->parsed_lines].len = 0; a++; /* next char in the payload */ } } if(packet->parsed_lines >= 1) { packet->line[packet->parsed_lines].len = (u_int16_t)(((unsigned long) &packet->payload[packet->payload_packet_len]) - ((unsigned long) packet->line[packet->parsed_lines].ptr)); packet->parsed_lines++; } } /* ********************************************************************************* */ void ndpi_parse_packet_line_info_any(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int32_t a; u_int16_t end = packet->payload_packet_len; if(packet->packet_lines_parsed_complete != 0) return; packet->packet_lines_parsed_complete = 1; packet->parsed_lines = 0; if(packet->payload_packet_len == 0) return; packet->line[packet->parsed_lines].ptr = packet->payload; packet->line[packet->parsed_lines].len = 0; for (a = 0; a < end; a++) { if(packet->payload[a] == 0x0a) { packet->line[packet->parsed_lines].len = 
(u_int16_t)( ((unsigned long) &packet->payload[a]) - ((unsigned long) packet->line[packet->parsed_lines].ptr)); if(a > 0 && packet->payload[a - 1] == 0x0d) packet->line[packet->parsed_lines].len--; if(packet->parsed_lines >= (NDPI_MAX_PARSE_LINES_PER_PACKET - 1)) break; packet->parsed_lines++; packet->line[packet->parsed_lines].ptr = &packet->payload[a + 1]; packet->line[packet->parsed_lines].len = 0; if((a + 1) >= packet->payload_packet_len) break; //a++; } } } /* ********************************************************************************* */ u_int16_t ndpi_check_for_email_address(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t counter) { struct ndpi_packet_struct *packet = &flow->packet; NDPI_LOG_DBG2(ndpi_str, "called ndpi_check_for_email_address\n"); if(packet->payload_packet_len > counter && ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') || (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') || (packet->payload[counter] >= '0' && packet->payload[counter] <= '9') || packet->payload[counter] == '-' || packet->payload[counter] == '_')) { NDPI_LOG_DBG2(ndpi_str, "first letter\n"); counter++; while (packet->payload_packet_len > counter && ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') || (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') || (packet->payload[counter] >= '0' && packet->payload[counter] <= '9') || packet->payload[counter] == '-' || packet->payload[counter] == '_' || packet->payload[counter] == '.')) { NDPI_LOG_DBG2(ndpi_str, "further letter\n"); counter++; if(packet->payload_packet_len > counter && packet->payload[counter] == '@') { NDPI_LOG_DBG2(ndpi_str, "@\n"); counter++; while (packet->payload_packet_len > counter && ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') || (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') || (packet->payload[counter] >= '0' && packet->payload[counter] <= 
'9') || packet->payload[counter] == '-' || packet->payload[counter] == '_')) { NDPI_LOG_DBG2(ndpi_str, "letter\n"); counter++; if(packet->payload_packet_len > counter && packet->payload[counter] == '.') { NDPI_LOG_DBG2(ndpi_str, ".\n"); counter++; if(packet->payload_packet_len > counter + 1 && ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') && (packet->payload[counter + 1] >= 'a' && packet->payload[counter + 1] <= 'z'))) { NDPI_LOG_DBG2(ndpi_str, "two letters\n"); counter += 2; if(packet->payload_packet_len > counter && (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) { NDPI_LOG_DBG2(ndpi_str, "whitespace1\n"); return(counter); } else if(packet->payload_packet_len > counter && packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') { NDPI_LOG_DBG2(ndpi_str, "one letter\n"); counter++; if(packet->payload_packet_len > counter && (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) { NDPI_LOG_DBG2(ndpi_str, "whitespace2\n"); return(counter); } else if(packet->payload_packet_len > counter && packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') { counter++; if(packet->payload_packet_len > counter && (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) { NDPI_LOG_DBG2(ndpi_str, "whitespace3\n"); return(counter); } else { return(0); } } else { return(0); } } else { return(0); } } else { return(0); } } } return(0); } } } return(0); } #ifdef NDPI_ENABLE_DEBUG_MESSAGES /* ********************************************************************************* */ void ndpi_debug_get_last_log_function_line(struct ndpi_detection_module_struct *ndpi_str, const char **file, const char **func, u_int32_t *line) { *file = ""; *func = ""; if(ndpi_str->ndpi_debug_print_file != NULL) *file = ndpi_str->ndpi_debug_print_file; if(ndpi_str->ndpi_debug_print_function != NULL) *func = ndpi_str->ndpi_debug_print_function; *line = ndpi_str->ndpi_debug_print_line; } #endif /* 
********************************************************************************* */ u_int8_t ndpi_detection_get_l4(const u_int8_t *l3, u_int16_t l3_len, const u_int8_t **l4_return, u_int16_t *l4_len_return, u_int8_t *l4_protocol_return, u_int32_t flags) { return(ndpi_detection_get_l4_internal(NULL, l3, l3_len, l4_return, l4_len_return, l4_protocol_return, flags)); } /* ********************************************************************************* */ void ndpi_set_detected_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) { struct ndpi_id_struct *src = flow->src, *dst = flow->dst; ndpi_int_change_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol); if(src != NULL) { NDPI_ADD_PROTOCOL_TO_BITMASK(src->detected_protocol_bitmask, upper_detected_protocol); if(lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN) NDPI_ADD_PROTOCOL_TO_BITMASK(src->detected_protocol_bitmask, lower_detected_protocol); } if(dst != NULL) { NDPI_ADD_PROTOCOL_TO_BITMASK(dst->detected_protocol_bitmask, upper_detected_protocol); if(lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN) NDPI_ADD_PROTOCOL_TO_BITMASK(dst->detected_protocol_bitmask, lower_detected_protocol); } } /* ********************************************************************************* */ u_int16_t ndpi_get_flow_masterprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { return(flow->detected_protocol_stack[1]); } /* ********************************************************************************* */ void ndpi_int_change_flow_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) { if(!flow) return; flow->detected_protocol_stack[0] = upper_detected_protocol, flow->detected_protocol_stack[1] = lower_detected_protocol; } /* 
********************************************************************************* */ void ndpi_int_change_packet_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) { struct ndpi_packet_struct *packet = &flow->packet; /* NOTE: everything below is identically to change_flow_protocol * except flow->packet If you want to change something here, * don't! Change it for the flow function and apply it here * as well */ if(!packet) return; packet->detected_protocol_stack[0] = upper_detected_protocol, packet->detected_protocol_stack[1] = lower_detected_protocol; } /* ********************************************************************************* */ /* generic function for changing the protocol * * what it does is: * 1.update the flow protocol stack with the new protocol * 2.update the packet protocol stack with the new protocol */ void ndpi_int_change_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) { if((upper_detected_protocol == NDPI_PROTOCOL_UNKNOWN) && (lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN)) upper_detected_protocol = lower_detected_protocol; if(upper_detected_protocol == lower_detected_protocol) lower_detected_protocol = NDPI_PROTOCOL_UNKNOWN; if((upper_detected_protocol != NDPI_PROTOCOL_UNKNOWN) && (lower_detected_protocol == NDPI_PROTOCOL_UNKNOWN)) { if((flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (upper_detected_protocol != flow->guessed_host_protocol_id)) { if(ndpi_str->proto_defaults[upper_detected_protocol].can_have_a_subprotocol) { lower_detected_protocol = upper_detected_protocol; upper_detected_protocol = flow->guessed_host_protocol_id; } } } ndpi_int_change_flow_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol); ndpi_int_change_packet_protocol(ndpi_str, flow, upper_detected_protocol, 
lower_detected_protocol); } /* ********************************************************************************* */ void ndpi_int_change_category(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, ndpi_protocol_category_t protocol_category) { flow->category = protocol_category; } /* ********************************************************************************* */ /* turns a packet back to unknown */ void ndpi_int_reset_packet_protocol(struct ndpi_packet_struct *packet) { int a; for (a = 0; a < NDPI_PROTOCOL_SIZE; a++) packet->detected_protocol_stack[a] = NDPI_PROTOCOL_UNKNOWN; } /* ********************************************************************************* */ void ndpi_int_reset_protocol(struct ndpi_flow_struct *flow) { if(flow) { int a; for (a = 0; a < NDPI_PROTOCOL_SIZE; a++) flow->detected_protocol_stack[a] = NDPI_PROTOCOL_UNKNOWN; } } /* ********************************************************************************* */ void NDPI_PROTOCOL_IP_clear(ndpi_ip_addr_t *ip) { memset(ip, 0, sizeof(ndpi_ip_addr_t)); } /* ********************************************************************************* */ #ifdef CODE_UNUSED /* NTOP */ int NDPI_PROTOCOL_IP_is_set(const ndpi_ip_addr_t *ip) { return(memcmp(ip, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", sizeof(ndpi_ip_addr_t)) != 0); } #endif /* ********************************************************************************* */ /* check if the source ip address in packet and ip are equal */ /* NTOP */ int ndpi_packet_src_ip_eql(const struct ndpi_packet_struct *packet, const ndpi_ip_addr_t *ip) { #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* IPv6 */ if(packet->iphv6 != NULL) { if(packet->iphv6->ip6_src.u6_addr.u6_addr32[0] == ip->ipv6.u6_addr.u6_addr32[0] && packet->iphv6->ip6_src.u6_addr.u6_addr32[1] == ip->ipv6.u6_addr.u6_addr32[1] && packet->iphv6->ip6_src.u6_addr.u6_addr32[2] == ip->ipv6.u6_addr.u6_addr32[2] && packet->iphv6->ip6_src.u6_addr.u6_addr32[3] == ip->ipv6.u6_addr.u6_addr32[3]) return(1); 
//else return(0); } #endif /* IPv4 */ if(packet->iph->saddr == ip->ipv4) return(1); return(0); } /* ********************************************************************************* */ /* check if the destination ip address in packet and ip are equal */ int ndpi_packet_dst_ip_eql(const struct ndpi_packet_struct *packet, const ndpi_ip_addr_t *ip) { #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* IPv6 */ if(packet->iphv6 != NULL) { if(packet->iphv6->ip6_dst.u6_addr.u6_addr32[0] == ip->ipv6.u6_addr.u6_addr32[0] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[1] == ip->ipv6.u6_addr.u6_addr32[1] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[2] == ip->ipv6.u6_addr.u6_addr32[2] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[3] == ip->ipv6.u6_addr.u6_addr32[3]) return(1); //else return(0); } #endif /* IPv4 */ if(packet->iph->saddr == ip->ipv4) return(1); return(0); } /* ********************************************************************************* */ /* get the source ip address from packet and put it into ip */ /* NTOP */ void ndpi_packet_src_ip_get(const struct ndpi_packet_struct *packet, ndpi_ip_addr_t *ip) { NDPI_PROTOCOL_IP_clear(ip); #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* IPv6 */ if(packet->iphv6 != NULL) { ip->ipv6.u6_addr.u6_addr32[0] = packet->iphv6->ip6_src.u6_addr.u6_addr32[0]; ip->ipv6.u6_addr.u6_addr32[1] = packet->iphv6->ip6_src.u6_addr.u6_addr32[1]; ip->ipv6.u6_addr.u6_addr32[2] = packet->iphv6->ip6_src.u6_addr.u6_addr32[2]; ip->ipv6.u6_addr.u6_addr32[3] = packet->iphv6->ip6_src.u6_addr.u6_addr32[3]; } else #endif /* IPv4 */ ip->ipv4 = packet->iph->saddr; } /* ********************************************************************************* */ /* get the destination ip address from packet and put it into ip */ /* NTOP */ void ndpi_packet_dst_ip_get(const struct ndpi_packet_struct *packet, ndpi_ip_addr_t *ip) { NDPI_PROTOCOL_IP_clear(ip); #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(packet->iphv6 != NULL) { ip->ipv6.u6_addr.u6_addr32[0] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[0]; 
ip->ipv6.u6_addr.u6_addr32[1] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[1]; ip->ipv6.u6_addr.u6_addr32[2] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[2]; ip->ipv6.u6_addr.u6_addr32[3] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[3]; } else #endif ip->ipv4 = packet->iph->daddr; } /* ********************************************************************************* */ u_int8_t ndpi_is_ipv6(const ndpi_ip_addr_t *ip) { #ifdef NDPI_DETECTION_SUPPORT_IPV6 return(ip->ipv6.u6_addr.u6_addr32[1] != 0 || ip->ipv6.u6_addr.u6_addr32[2] != 0 || ip->ipv6.u6_addr.u6_addr32[3] != 0); #else return(0); #endif } /* ********************************************************************************* */ char *ndpi_get_ip_string(const ndpi_ip_addr_t *ip, char *buf, u_int buf_len) { const u_int8_t *a = (const u_int8_t *) &ip->ipv4; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(ndpi_is_ipv6(ip)) { if(inet_ntop(AF_INET6, &ip->ipv6.u6_addr, buf, buf_len) == NULL) buf[0] = '\0'; return(buf); } #endif snprintf(buf, buf_len, "%u.%u.%u.%u", a[0], a[1], a[2], a[3]); return(buf); } /* ****************************************************** */ /* Returns -1 on failutre, otherwise fills parsed_ip and returns the IP version */ int ndpi_parse_ip_string(const char *ip_str, ndpi_ip_addr_t *parsed_ip) { int rv = -1; memset(parsed_ip, 0, sizeof(*parsed_ip)); if(strchr(ip_str, '.')) { if(inet_pton(AF_INET, ip_str, &parsed_ip->ipv4) > 0) rv = 4; #ifdef NDPI_DETECTION_SUPPORT_IPV6 } else { if(inet_pton(AF_INET6, ip_str, &parsed_ip->ipv6) > 0) rv = 6; #endif } return(rv); } /* ****************************************************** */ u_int16_t ntohs_ndpi_bytestream_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int16_t val = ndpi_bytestream_to_number(str, max_chars_to_read, bytes_read); return(ntohs(val)); } /* ****************************************************** */ u_int8_t ndpi_is_proto(ndpi_protocol proto, u_int16_t p) { return(((proto.app_protocol == p) || (proto.master_protocol 
== p)) ? 1 : 0); } /* ****************************************************** */ u_int16_t ndpi_get_lower_proto(ndpi_protocol proto) { return((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) ? proto.master_protocol : proto.app_protocol); } /* ****************************************************** */ ndpi_protocol ndpi_guess_undetected_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int8_t proto, u_int32_t shost /* host byte order */, u_int16_t sport, u_int32_t dhost /* host byte order */, u_int16_t dport) { u_int32_t rc; struct in_addr addr; ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; u_int8_t user_defined_proto; if((proto == IPPROTO_TCP) || (proto == IPPROTO_UDP)) { rc = ndpi_search_tcp_or_udp_raw(ndpi_str, flow, proto, shost, dhost, sport, dport); if(rc != NDPI_PROTOCOL_UNKNOWN) { if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, rc) && is_udp_guessable_protocol(rc)) ; else { ret.app_protocol = rc, ret.master_protocol = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); if(ret.app_protocol == ret.master_protocol) ret.master_protocol = NDPI_PROTOCOL_UNKNOWN; ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } } rc = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); if(rc != NDPI_PROTOCOL_UNKNOWN) { if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, rc) && is_udp_guessable_protocol(rc)) ; else { ret.app_protocol = rc; if(rc == NDPI_PROTOCOL_TLS) goto check_guessed_skype; else { ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } } } check_guessed_skype: addr.s_addr = htonl(shost); if(ndpi_network_ptree_match(ndpi_str, &addr) == NDPI_PROTOCOL_SKYPE) { ret.app_protocol = NDPI_PROTOCOL_SKYPE; } else { addr.s_addr = htonl(dhost); if(ndpi_network_ptree_match(ndpi_str, 
&addr) == NDPI_PROTOCOL_SKYPE) ret.app_protocol = NDPI_PROTOCOL_SKYPE; } } else ret.app_protocol = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } /* ****************************************************** */ char *ndpi_protocol2id(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol proto, char *buf, u_int buf_len) { if((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (proto.master_protocol != proto.app_protocol)) { if(proto.app_protocol != NDPI_PROTOCOL_UNKNOWN) snprintf(buf, buf_len, "%u.%u", proto.master_protocol, proto.app_protocol); else snprintf(buf, buf_len, "%u", proto.master_protocol); } else snprintf(buf, buf_len, "%u", proto.app_protocol); return(buf); } /* ****************************************************** */ char *ndpi_protocol2name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol proto, char *buf, u_int buf_len) { if((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (proto.master_protocol != proto.app_protocol)) { if(proto.app_protocol != NDPI_PROTOCOL_UNKNOWN) snprintf(buf, buf_len, "%s.%s", ndpi_get_proto_name(ndpi_str, proto.master_protocol), ndpi_get_proto_name(ndpi_str, proto.app_protocol)); else snprintf(buf, buf_len, "%s", ndpi_get_proto_name(ndpi_str, proto.master_protocol)); } else snprintf(buf, buf_len, "%s", ndpi_get_proto_name(ndpi_str, proto.app_protocol)); return(buf); } /* ****************************************************** */ int ndpi_is_custom_category(ndpi_protocol_category_t category) { switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: return(1); break; default: return(0); break; } } /* ****************************************************** */ void ndpi_category_set_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_category_t 
category, char *name) { if(!name) return; switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: snprintf(ndpi_str->custom_category_labels[0], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: snprintf(ndpi_str->custom_category_labels[1], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: snprintf(ndpi_str->custom_category_labels[2], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: snprintf(ndpi_str->custom_category_labels[3], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: snprintf(ndpi_str->custom_category_labels[4], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; default: break; } } /* ****************************************************** */ const char *ndpi_category_get_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_category_t category) { if((!ndpi_str) || (category >= NDPI_PROTOCOL_NUM_CATEGORIES)) { static char b[24]; if(!ndpi_str) snprintf(b, sizeof(b), "NULL nDPI"); else snprintf(b, sizeof(b), "Invalid category %d", (int) category); return(b); } if((category >= NDPI_PROTOCOL_CATEGORY_CUSTOM_1) && (category <= NDPI_PROTOCOL_CATEGORY_CUSTOM_5)) { switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: return(ndpi_str->custom_category_labels[0]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: return(ndpi_str->custom_category_labels[1]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: return(ndpi_str->custom_category_labels[2]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: return(ndpi_str->custom_category_labels[3]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: return(ndpi_str->custom_category_labels[4]); case NDPI_PROTOCOL_NUM_CATEGORIES: return("Code should not use this internal constant"); default: return("Unspecified"); } } else return(categories[category]); } /* ****************************************************** */ ndpi_protocol_category_t ndpi_get_proto_category(struct ndpi_detection_module_struct *ndpi_str, 
ndpi_protocol proto) { if(proto.category != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) return(proto.category); /* simple rule: sub protocol first, master after */ else if((proto.master_protocol == NDPI_PROTOCOL_UNKNOWN) || (ndpi_str->proto_defaults[proto.app_protocol].protoCategory != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED)) { if(proto.app_protocol < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) return(ndpi_str->proto_defaults[proto.app_protocol].protoCategory); } else if(proto.master_protocol < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) return(ndpi_str->proto_defaults[proto.master_protocol].protoCategory); return(NDPI_PROTOCOL_CATEGORY_UNSPECIFIED); } /* ****************************************************** */ char *ndpi_get_proto_name(struct ndpi_detection_module_struct *ndpi_str, u_int16_t proto_id) { if((proto_id >= ndpi_str->ndpi_num_supported_protocols) || (proto_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) || (ndpi_str->proto_defaults[proto_id].protoName == NULL)) proto_id = NDPI_PROTOCOL_UNKNOWN; return(ndpi_str->proto_defaults[proto_id].protoName); } /* ****************************************************** */ ndpi_protocol_breed_t ndpi_get_proto_breed(struct ndpi_detection_module_struct *ndpi_str, u_int16_t proto_id) { if((proto_id >= ndpi_str->ndpi_num_supported_protocols) || (proto_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) || (ndpi_str->proto_defaults[proto_id].protoName == NULL)) proto_id = NDPI_PROTOCOL_UNKNOWN; return(ndpi_str->proto_defaults[proto_id].protoBreed); } /* ****************************************************** */ char *ndpi_get_proto_breed_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_breed_t breed_id) { switch (breed_id) { case NDPI_PROTOCOL_SAFE: return("Safe"); break; case NDPI_PROTOCOL_ACCEPTABLE: return("Acceptable"); break; case NDPI_PROTOCOL_FUN: return("Fun"); break; case NDPI_PROTOCOL_UNSAFE: return("Unsafe"); break; case 
NDPI_PROTOCOL_POTENTIALLY_DANGEROUS: return("Potentially Dangerous"); break; case NDPI_PROTOCOL_DANGEROUS: return("Dangerous"); break; case NDPI_PROTOCOL_UNRATED: default: return("Unrated"); break; } } /* ****************************************************** */ int ndpi_get_protocol_id(struct ndpi_detection_module_struct *ndpi_str, char *proto) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) if(strcasecmp(proto, ndpi_str->proto_defaults[i].protoName) == 0) return(i); return(-1); } /* ****************************************************** */ int ndpi_get_category_id(struct ndpi_detection_module_struct *ndpi_str, char *cat) { int i; for (i = 0; i < NDPI_PROTOCOL_NUM_CATEGORIES; i++) { const char *name = ndpi_category_get_name(ndpi_str, i); if(strcasecmp(cat, name) == 0) return(i); } return(-1); } /* ****************************************************** */ void ndpi_dump_protocols(struct ndpi_detection_module_struct *ndpi_str) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) printf("%3d %-22s %-8s %-12s %s\n", i, ndpi_str->proto_defaults[i].protoName, ndpi_get_l4_proto_name(ndpi_get_l4_proto_info(ndpi_str, i)), ndpi_get_proto_breed_name(ndpi_str, ndpi_str->proto_defaults[i].protoBreed), ndpi_category_get_name(ndpi_str, ndpi_str->proto_defaults[i].protoCategory)); } /* ****************************************************** */ /* * Find the first occurrence of find in s, where the search is limited to the * first slen characters of s. 
*/ char *ndpi_strnstr(const char *s, const char *find, size_t slen) { char c; size_t len; if((c = *find++) != '\0') { len = strnlen(find, slen); do { char sc; do { if(slen-- < 1 || (sc = *s++) == '\0') return(NULL); } while (sc != c); if(len > slen) return(NULL); } while (strncmp(s, find, len) != 0); s--; } return((char *) s); } /* ****************************************************** */ /* * Same as ndpi_strnstr but case-insensitive */ const char * ndpi_strncasestr(const char *str1, const char *str2, size_t len) { size_t str1_len = strnlen(str1, len); size_t str2_len = strlen(str2); size_t i; for(i = 0; i < (str1_len - str2_len + 1); i++){ if(str1[0] == '\0') return NULL; else if(strncasecmp(str1, str2, str2_len) == 0) return(str1); str1++; } return NULL; } /* ****************************************************** */ int ndpi_match_prefix(const u_int8_t *payload, size_t payload_len, const char *str, size_t str_len) { int rc = str_len <= payload_len ? memcmp(payload, str, str_len) == 0 : 0; return(rc); } /* ****************************************************** */ int ndpi_match_string_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *string_to_match, u_int string_to_match_len, ndpi_protocol_match_result *ret_match, u_int8_t is_host_match) { AC_TEXT_t ac_input_text; ndpi_automa *automa = is_host_match ? 
&ndpi_str->host_automa : &ndpi_str->content_automa; AC_REP_t match = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED}; int rc; if((automa->ac_automa == NULL) || (string_to_match_len == 0)) return(NDPI_PROTOCOL_UNKNOWN); if(!automa->ac_automa_finalized) { printf("[%s:%d] [NDPI] Internal error: please call ndpi_finalize_initalization()\n", __FILE__, __LINE__); return(0); /* No matches */ } ac_input_text.astring = string_to_match, ac_input_text.length = string_to_match_len; rc = ac_automata_search(((AC_AUTOMATA_t *) automa->ac_automa), &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; /* We need to take into account also rc == 0 that is used for partial matches */ ret_match->protocol_id = match.number, ret_match->protocol_category = match.category, ret_match->protocol_breed = match.breed; return(rc ? 
match.number : 0); } /* **************************************** */ static u_int8_t ndpi_is_more_generic_protocol(u_int16_t previous_proto, u_int16_t new_proto) { /* Sometimes certificates are more generic than previously identified protocols */ if((previous_proto == NDPI_PROTOCOL_UNKNOWN) || (previous_proto == new_proto)) return(0); switch (previous_proto) { case NDPI_PROTOCOL_WHATSAPP_CALL: case NDPI_PROTOCOL_WHATSAPP_FILES: if(new_proto == NDPI_PROTOCOL_WHATSAPP) return(1); } return(0); } /* ****************************************************** */ static u_int16_t ndpi_automa_match_string_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, u_int16_t master_protocol_id, ndpi_protocol_match_result *ret_match, u_int8_t is_host_match) { int matching_protocol_id; struct ndpi_packet_struct *packet = &flow->packet; matching_protocol_id = ndpi_match_string_subprotocol(ndpi_str, string_to_match, string_to_match_len, ret_match, is_host_match); #ifdef DEBUG { char m[256]; int len = ndpi_min(sizeof(m), string_to_match_len); strncpy(m, string_to_match, len); m[len] = '\0'; NDPI_LOG_DBG2(ndpi_str, "[NDPI] ndpi_match_host_subprotocol(%s): %s\n", m, ndpi_str->proto_defaults[matching_protocol_id].protoName); } #endif if((matching_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (!ndpi_is_more_generic_protocol(packet->detected_protocol_stack[0], matching_protocol_id))) { /* Move the protocol on slot 0 down one position */ packet->detected_protocol_stack[1] = master_protocol_id, packet->detected_protocol_stack[0] = matching_protocol_id; flow->detected_protocol_stack[0] = packet->detected_protocol_stack[0], flow->detected_protocol_stack[1] = packet->detected_protocol_stack[1]; if(flow->category == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) flow->category = ret_match->protocol_category; return(packet->detected_protocol_stack[0]); } #ifdef DEBUG string_to_match[string_to_match_len] = '\0'; 
NDPI_LOG_DBG2(ndpi_str, "[NTOP] Unable to find a match for '%s'\n", string_to_match); #endif ret_match->protocol_id = NDPI_PROTOCOL_UNKNOWN, ret_match->protocol_category = NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, ret_match->protocol_breed = NDPI_PROTOCOL_UNRATED; return(NDPI_PROTOCOL_UNKNOWN); } /* ****************************************************** */ u_int16_t ndpi_match_host_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, ndpi_protocol_match_result *ret_match, u_int16_t master_protocol_id) { u_int16_t rc = ndpi_automa_match_string_subprotocol(ndpi_str, flow, string_to_match, string_to_match_len, master_protocol_id, ret_match, 1); ndpi_protocol_category_t id = ret_match->protocol_category; if(ndpi_get_custom_category_match(ndpi_str, string_to_match, string_to_match_len, &id) != -1) { /* if(id != -1) */ { flow->category = ret_match->protocol_category = id; rc = master_protocol_id; } } return(rc); } /* **************************************** */ int ndpi_match_hostname_protocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t master_protocol, char *name, u_int name_len) { ndpi_protocol_match_result ret_match; u_int16_t subproto, what_len; char *what; if((name_len > 2) && (name[0] == '*') && (name[1] == '.')) what = &name[1], what_len = name_len - 1; else what = name, what_len = name_len; subproto = ndpi_match_host_subprotocol(ndpi_struct, flow, what, what_len, &ret_match, master_protocol); if(subproto != NDPI_PROTOCOL_UNKNOWN) { ndpi_set_detected_protocol(ndpi_struct, flow, subproto, master_protocol); ndpi_int_change_category(ndpi_struct, flow, ret_match.protocol_category); return(1); } else return(0); } /* ****************************************************** */ u_int16_t ndpi_match_content_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, 
ndpi_protocol_match_result *ret_match, u_int16_t master_protocol_id) { return(ndpi_automa_match_string_subprotocol(ndpi_str, flow, string_to_match, string_to_match_len, master_protocol_id, ret_match, 0)); } /* ****************************************************** */ int ndpi_match_bigram(struct ndpi_detection_module_struct *ndpi_str, ndpi_automa *automa, char *bigram_to_match) { AC_TEXT_t ac_input_text; AC_REP_t match = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED}; int rc; if((automa->ac_automa == NULL) || (bigram_to_match == NULL)) return(-1); if(!automa->ac_automa_finalized) { #if 1 ndpi_finalize_initalization(ndpi_str); #else printf("[%s:%d] [NDPI] Internal error: please call ndpi_finalize_initalization()\n", __FILE__, __LINE__); return(0); /* No matches */ #endif } ac_input_text.astring = bigram_to_match, ac_input_text.length = 2; rc = ac_automata_search(((AC_AUTOMATA_t *) automa->ac_automa), &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; return(rc ? 
match.number : 0); } /* ****************************************************** */ void ndpi_free_flow(struct ndpi_flow_struct *flow) { if(flow) { if(flow->http.url) ndpi_free(flow->http.url); if(flow->http.content_type) ndpi_free(flow->http.content_type); if(flow->http.user_agent) ndpi_free(flow->http.user_agent); if(flow->kerberos_buf.pktbuf) ndpi_free(flow->kerberos_buf.pktbuf); if(flow_is_proto(flow, NDPI_PROTOCOL_TLS)) { if(flow->protos.stun_ssl.ssl.server_names) ndpi_free(flow->protos.stun_ssl.ssl.server_names); if(flow->protos.stun_ssl.ssl.alpn) ndpi_free(flow->protos.stun_ssl.ssl.alpn); if(flow->protos.stun_ssl.ssl.tls_supported_versions) ndpi_free(flow->protos.stun_ssl.ssl.tls_supported_versions); if(flow->protos.stun_ssl.ssl.issuerDN) ndpi_free(flow->protos.stun_ssl.ssl.issuerDN); if(flow->protos.stun_ssl.ssl.subjectDN) ndpi_free(flow->protos.stun_ssl.ssl.subjectDN); if(flow->l4.tcp.tls.srv_cert_fingerprint_ctx) ndpi_free(flow->l4.tcp.tls.srv_cert_fingerprint_ctx); if(flow->protos.stun_ssl.ssl.encrypted_sni.esni) ndpi_free(flow->protos.stun_ssl.ssl.encrypted_sni.esni); } if(flow->l4_proto == IPPROTO_TCP) { if(flow->l4.tcp.tls.message.buffer) ndpi_free(flow->l4.tcp.tls.message.buffer); } ndpi_free(flow); } } /* ****************************************************** */ char *ndpi_revision() { return(NDPI_GIT_RELEASE); } /* ****************************************************** */ #ifdef WIN32 /* https://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows */ int gettimeofday(struct timeval *tp, struct timezone *tzp) { // Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC) // until 00:00:00 January 1, 1970 static const uint64_t EPOCH = ((uint64_t) 116444736000000000ULL); SYSTEMTIME system_time; FILETIME file_time; uint64_t time; GetSystemTime(&system_time); SystemTimeToFileTime(&system_time, &file_time); 
time = ((uint64_t) file_time.dwLowDateTime); time += ((uint64_t) file_time.dwHighDateTime) << 32; tp->tv_sec = (long) ((time - EPOCH) / 10000000L); tp->tv_usec = (long) (system_time.wMilliseconds * 1000); return(0); } #endif int NDPI_BITMASK_COMPARE(NDPI_PROTOCOL_BITMASK a, NDPI_PROTOCOL_BITMASK b) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) { if(a.fds_bits[i] & b.fds_bits[i]) return(1); } return(0); } #ifdef CODE_UNUSED int NDPI_BITMASK_IS_EMPTY(NDPI_PROTOCOL_BITMASK a) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) if(a.fds_bits[i] != 0) return(0); return(1); } void NDPI_DUMP_BITMASK(NDPI_PROTOCOL_BITMASK a) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) printf("[%d=%u]", i, a.fds_bits[i]); printf("\n"); } #endif u_int16_t ndpi_get_api_version() { return(NDPI_API_VERSION); } ndpi_proto_defaults_t *ndpi_get_proto_defaults(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->proto_defaults); } u_int ndpi_get_ndpi_num_supported_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_supported_protocols); } u_int ndpi_get_ndpi_num_custom_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_custom_protocols); } u_int ndpi_get_ndpi_detection_module_size() { return(sizeof(struct ndpi_detection_module_struct)); } void ndpi_set_log_level(struct ndpi_detection_module_struct *ndpi_str, u_int l){ ndpi_str->ndpi_log_level = l; } /* ******************************************************************** */ /* LRU cache */ struct ndpi_lru_cache *ndpi_lru_cache_init(u_int32_t num_entries) { struct ndpi_lru_cache *c = (struct ndpi_lru_cache *) ndpi_malloc(sizeof(struct ndpi_lru_cache)); if(!c) return(NULL); c->entries = (struct ndpi_lru_cache_entry *) ndpi_calloc(num_entries, sizeof(struct ndpi_lru_cache_entry)); if(!c->entries) { ndpi_free(c); return(NULL); } else c->num_entries = num_entries; return(c); } void ndpi_lru_free_cache(struct ndpi_lru_cache *c) { ndpi_free(c->entries); ndpi_free(c); } 
u_int8_t ndpi_lru_find_cache(struct ndpi_lru_cache *c, u_int32_t key, u_int16_t *value, u_int8_t clean_key_when_found) { u_int32_t slot = key % c->num_entries; if(c->entries[slot].is_full) { *value = c->entries[slot].value; if(clean_key_when_found) c->entries[slot].is_full = 0; return(1); } else return(0); } void ndpi_lru_add_to_cache(struct ndpi_lru_cache *c, u_int32_t key, u_int16_t value) { u_int32_t slot = key % c->num_entries; c->entries[slot].is_full = 1, c->entries[slot].key = key, c->entries[slot].value = value; } /* ******************************************************************** */ /* This function tells if it's possible to further dissect a given flow 0 - All possible dissection has been completed 1 - Additional dissection is possible */ u_int8_t ndpi_extra_dissection_possible(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { u_int16_t proto = flow->detected_protocol_stack[1] ? flow->detected_protocol_stack[1] : flow->detected_protocol_stack[0]; #if 0 printf("[DEBUG] %s(%u.%u): %u\n", __FUNCTION__, flow->detected_protocol_stack[0], flow->detected_protocol_stack[1], proto); #endif switch (proto) { case NDPI_PROTOCOL_TLS: if(!flow->l4.tcp.tls.certificate_processed) return(1); /* TODO: add check for TLS 1.3 */ break; case NDPI_PROTOCOL_HTTP: if((flow->host_server_name[0] == '\0') || (flow->http.response_status_code == 0)) return(1); break; case NDPI_PROTOCOL_DNS: if(flow->protos.dns.num_answers == 0) return(1); break; case NDPI_PROTOCOL_FTP_CONTROL: case NDPI_PROTOCOL_MAIL_POP: case NDPI_PROTOCOL_MAIL_IMAP: case NDPI_PROTOCOL_MAIL_SMTP: if(flow->protos.ftp_imap_pop_smtp.password[0] == '\0') return(1); break; case NDPI_PROTOCOL_SSH: if((flow->protos.ssh.hassh_client[0] == '\0') || (flow->protos.ssh.hassh_server[0] == '\0')) return(1); break; case NDPI_PROTOCOL_TELNET: if(!flow->protos.telnet.password_detected) return(1); break; } return(0); } /* ******************************************************************** */ const 
char *ndpi_get_l4_proto_name(ndpi_l4_proto_info proto) { switch (proto) { case ndpi_l4_proto_unknown: return(""); break; case ndpi_l4_proto_tcp_only: return("TCP"); break; case ndpi_l4_proto_udp_only: return("UDP"); break; case ndpi_l4_proto_tcp_and_udp: return("TCP/UDP"); break; } return(""); } /* ******************************************************************** */ ndpi_l4_proto_info ndpi_get_l4_proto_info(struct ndpi_detection_module_struct *ndpi_struct, u_int16_t ndpi_proto_id) { if(ndpi_proto_id < ndpi_struct->ndpi_num_supported_protocols) { u_int16_t idx = ndpi_struct->proto_defaults[ndpi_proto_id].protoIdx; NDPI_SELECTION_BITMASK_PROTOCOL_SIZE bm = ndpi_struct->callback_buffer[idx].ndpi_selection_bitmask; if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP) return(ndpi_l4_proto_tcp_only); else if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP) return(ndpi_l4_proto_udp_only); else if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP) return(ndpi_l4_proto_tcp_and_udp); } return(ndpi_l4_proto_unknown); /* default */ } /* ******************************************************************** */ ndpi_ptree_t *ndpi_ptree_create(void) { ndpi_ptree_t *tree = (ndpi_ptree_t *) ndpi_malloc(sizeof(ndpi_ptree_t)); if(tree) { tree->v4 = ndpi_New_Patricia(32); tree->v6 = ndpi_New_Patricia(128); if((!tree->v4) || (!tree->v6)) { ndpi_ptree_destroy(tree); return(NULL); } } return(tree); } /* ******************************************************************** */ void ndpi_ptree_destroy(ndpi_ptree_t *tree) { if(tree) { if(tree->v4) ndpi_Destroy_Patricia(tree->v4, free_ptree_data); if(tree->v6) ndpi_Destroy_Patricia(tree->v6, free_ptree_data); ndpi_free(tree); } } /* ******************************************************************** */ int ndpi_ptree_insert(ndpi_ptree_t *tree, const ndpi_ip_addr_t *addr, u_int8_t bits, uint user_data) { u_int8_t is_v6 = ndpi_is_ipv6(addr); patricia_tree_t *ptree = is_v6 ? 
tree->v6 : tree->v4; prefix_t prefix; patricia_node_t *node; if(bits > ptree->maxbits) return(-1); if(is_v6) fill_prefix_v6(&prefix, (const struct in6_addr *) &addr->ipv6, bits, ptree->maxbits); else fill_prefix_v4(&prefix, (const struct in_addr *) &addr->ipv4, bits, ptree->maxbits); /* Verify that the node does not already exist */ node = ndpi_patricia_search_best(ptree, &prefix); if(node && (node->prefix->bitlen == bits)) return(-2); node = ndpi_patricia_lookup(ptree, &prefix); if(node != NULL) { node->value.uv.user_value = user_data, node->value.uv.additional_user_value = 0; return(0); } return(-3); } /* ******************************************************************** */ int ndpi_ptree_match_addr(ndpi_ptree_t *tree, const ndpi_ip_addr_t *addr, uint *user_data) { u_int8_t is_v6 = ndpi_is_ipv6(addr); patricia_tree_t *ptree = is_v6 ? tree->v6 : tree->v4; prefix_t prefix; patricia_node_t *node; int bits = ptree->maxbits; if(is_v6) fill_prefix_v6(&prefix, (const struct in6_addr *) &addr->ipv6, bits, ptree->maxbits); else fill_prefix_v4(&prefix, (const struct in_addr *) &addr->ipv4, bits, ptree->maxbits); node = ndpi_patricia_search_best(ptree, &prefix); if(node) { *user_data = node->value.uv.user_value; return(0); } return(-1); } /* ******************************************************************** */ void ndpi_md5(const u_char *data, size_t data_len, u_char hash[16]) { ndpi_MD5_CTX ctx; ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, data, data_len); ndpi_MD5Final(hash, &ctx); } /* ******************************************************************** */ static int enough(int a, int b) { u_int8_t percentage = 20; if(b == 0) return(0); if(a == 0) return(1); if(b > (((a+1)*percentage)/100)) return(1); return(0); } /* ******************************************************************** */ // #define DGA_DEBUG 1 int ndpi_check_dga_name(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *name) { int len, rc = 0; len = strlen(name); if(len >= 
5) { int i, j, num_found = 0, num_impossible = 0, num_bigram_checks = 0, num_digits = 0, num_vowels = 0, num_words = 0; char tmp[128], *word, *tok_tmp; len = snprintf(tmp, sizeof(tmp)-1, "%s", name); if(len < 0) return(0); for(i=0, j=0; (i<len) && (j<(sizeof(tmp)-1)); i++) { tmp[j++] = tolower(name[i]); } tmp[j] = '\0'; len = j; for(word = strtok_r(tmp, ".", &tok_tmp); ; word = strtok_r(NULL, ".", &tok_tmp)) { if(!word) break; num_words++; if(strlen(word) < 3) continue; #ifdef DGA_DEBUG printf("-> %s [%s][len: %u]\n", word, name, (unsigned int)strlen(word)); #endif for(i = 0; word[i+1] != '\0'; i++) { if(isdigit(word[i])) { num_digits++; // if(!isdigit(word[i+1])) num_impossible++; continue; } switch(word[i]) { case '_': case '-': case ':': continue; break; case '.': continue; break; } switch(word[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': num_vowels++; break; } if(isdigit(word[i+1])) { num_digits++; // num_impossible++; continue; } num_bigram_checks++; if(ndpi_match_bigram(ndpi_str, &ndpi_str->bigrams_automa, &word[i])) { num_found++; } else { if(ndpi_match_bigram(ndpi_str, &ndpi_str->impossible_bigrams_automa, &word[i])) { #ifdef DGA_DEBUG printf("IMPOSSIBLE %s\n", &word[i]); #endif num_impossible++; } } } /* for */ } /* for */ #ifdef DGA_DEBUG printf("[num_found: %u][num_impossible: %u][num_digits: %u][num_bigram_checks: %u][num_vowels: %u/%u]\n", num_found, num_impossible, num_digits, num_bigram_checks, num_vowels, j-num_vowels); #endif if(num_bigram_checks && ((num_found == 0) || ((num_digits > 5) && (num_words <= 3)) || enough(num_found, num_impossible))) rc = 1; if(rc && flow) NDPI_SET_BIT(flow->risk, NDPI_SUSPICIOUS_DGA_DOMAIN); #ifdef DGA_DEBUG if(rc) printf("DGA %s [num_found: %u][num_impossible: %u]\n", name, num_found, num_impossible); #endif } return(rc); }
static void ndpi_reset_packet_line_info(struct ndpi_packet_struct *packet) { packet->parsed_lines = 0, packet->empty_line_position_set = 0, packet->host_line.ptr = NULL, packet->host_line.len = 0, packet->referer_line.ptr = NULL, packet->referer_line.len = 0, packet->content_line.ptr = NULL, packet->content_line.len = 0, packet->accept_line.ptr = NULL, packet->accept_line.len = 0, packet->user_agent_line.ptr = NULL, packet->user_agent_line.len = 0, packet->http_url_name.ptr = NULL, packet->http_url_name.len = 0, packet->http_encoding.ptr = NULL, packet->http_encoding.len = 0, packet->http_transfer_encoding.ptr = NULL, packet->http_transfer_encoding.len = 0, packet->http_contentlen.ptr = NULL, packet->http_contentlen.len = 0, packet->http_cookie.ptr = NULL, packet->http_cookie.len = 0, packet->http_origin.len = 0, packet->http_origin.ptr = NULL, packet->http_x_session_type.ptr = NULL, packet->http_x_session_type.len = 0, packet->server_line.ptr = NULL, packet->server_line.len = 0, packet->http_method.ptr = NULL, packet->http_method.len = 0, packet->http_response.ptr = NULL, packet->http_response.len = 0, packet->http_num_headers = 0; }
static void ndpi_reset_packet_line_info(struct ndpi_packet_struct *packet) { packet->parsed_lines = 0, packet->empty_line_position_set = 0, packet->host_line.ptr = NULL, packet->host_line.len = 0, packet->referer_line.ptr = NULL, packet->referer_line.len = 0, packet->content_line.ptr = NULL, packet->content_line.len = 0, packet->accept_line.ptr = NULL, packet->accept_line.len = 0, packet->user_agent_line.ptr = NULL, packet->user_agent_line.len = 0, packet->http_url_name.ptr = NULL, packet->http_url_name.len = 0, packet->http_encoding.ptr = NULL, packet->http_encoding.len = 0, packet->http_transfer_encoding.ptr = NULL, packet->http_transfer_encoding.len = 0, packet->http_contentlen.ptr = NULL, packet->http_contentlen.len = 0, packet->content_disposition_line.ptr = NULL, packet->content_disposition_line.len = 0, packet->http_cookie.ptr = NULL, packet->http_cookie.len = 0, packet->http_origin.len = 0, packet->http_origin.ptr = NULL, packet->http_x_session_type.ptr = NULL, packet->http_x_session_type.len = 0, packet->server_line.ptr = NULL, packet->server_line.len = 0, packet->http_method.ptr = NULL, packet->http_method.len = 0, packet->http_response.ptr = NULL, packet->http_response.len = 0, packet->http_num_headers = 0; }
{'added': [(4339, ' packet->http_contentlen.ptr = NULL, packet->http_contentlen.len = 0, packet->content_disposition_line.ptr = NULL,'), (4340, ' packet->content_disposition_line.len = 0, packet->http_cookie.ptr = NULL,')], 'deleted': [(4339, ' packet->http_contentlen.ptr = NULL, packet->http_contentlen.len = 0, packet->http_cookie.ptr = NULL,')]}
2
1
4,575
40,634
13
267
1
https://github.com/ntop/nDPI
CVE-2020-15475
CWE-416
771
cast.cc
C++
tflite::ops::builtin::cast::Prepare
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <algorithm> #include <complex> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" namespace tflite { namespace ops { namespace builtin { namespace cast { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): these two checks would make the new implementation // incompatible with some existing models, where params is not specified. It // is OK not to have them because toco would have set input and output types // to match the parameters. 
// auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data); // TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type); // TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <typename FromT, typename ToT> void copyCast(const FromT* in, ToT* out, int num_elements) { std::transform(in, in + num_elements, out, [](FromT a) { return static_cast<ToT>(a); }); } template <typename ToT> void copyCast(const std::complex<float>* in, ToT* out, int num_elements) { std::transform(in, in + num_elements, out, [](std::complex<float> a) { return static_cast<ToT>(std::real(a)); }); } template <> void copyCast(const std::complex<float>* in, std::complex<float>* out, int num_elements) { std::transform(in, in + num_elements, out, [](std::complex<float> a) { return a; }); } template <typename FromT> TfLiteStatus copyToTensor(TfLiteContext* context, const FromT* in, TfLiteTensor* out, int num_elements) { switch (out->type) { case kTfLiteInt64: copyCast(in, out->data.i64, num_elements); break; case kTfLiteInt32: copyCast(in, out->data.i32, num_elements); break; case kTfLiteUInt8: copyCast(in, out->data.uint8, num_elements); break; case kTfLiteFloat32: copyCast(in, GetTensorData<float>(out), num_elements); break; case kTfLiteBool: copyCast(in, out->data.b, num_elements); break; case kTfLiteComplex64: copyCast(in, reinterpret_cast<std::complex<float>*>(out->data.c64), num_elements); break; default: // Unsupported type. 
TF_LITE_UNSUPPORTED_TYPE(context, out->type, "Cast"); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output)); switch (input->type) { case kTfLiteInt64: return copyToTensor(context, input->data.i64, output, num_elements); case kTfLiteInt32: return copyToTensor(context, input->data.i32, output, num_elements); case kTfLiteUInt8: return copyToTensor(context, input->data.uint8, output, num_elements); case kTfLiteFloat32: return copyToTensor(context, GetTensorData<float>(input), output, num_elements); case kTfLiteBool: return copyToTensor(context, input->data.b, output, num_elements); case kTfLiteComplex64: return copyToTensor( context, reinterpret_cast<std::complex<float>*>(input->data.c64), output, num_elements); default: // Unsupported type. TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast"); } return kTfLiteOk; } } // namespace cast TfLiteRegistration* Register_CAST() { static TfLiteRegistration r = {nullptr, nullptr, cast::Prepare, cast::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <algorithm> #include <complex> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" namespace tflite { namespace ops { namespace builtin { namespace cast { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // TODO(ahentz): these two checks would make the new implementation // incompatible with some existing models, where params is not specified. It // is OK not to have them because toco would have set input and output types // to match the parameters. 
// auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data); // TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type); // TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <typename FromT, typename ToT> void copyCast(const FromT* in, ToT* out, int num_elements) { std::transform(in, in + num_elements, out, [](FromT a) { return static_cast<ToT>(a); }); } template <typename ToT> void copyCast(const std::complex<float>* in, ToT* out, int num_elements) { std::transform(in, in + num_elements, out, [](std::complex<float> a) { return static_cast<ToT>(std::real(a)); }); } template <> void copyCast(const std::complex<float>* in, std::complex<float>* out, int num_elements) { std::transform(in, in + num_elements, out, [](std::complex<float> a) { return a; }); } template <typename FromT> TfLiteStatus copyToTensor(TfLiteContext* context, const FromT* in, TfLiteTensor* out, int num_elements) { switch (out->type) { case kTfLiteInt64: copyCast(in, out->data.i64, num_elements); break; case kTfLiteInt32: copyCast(in, out->data.i32, num_elements); break; case kTfLiteUInt8: copyCast(in, out->data.uint8, num_elements); break; case kTfLiteFloat32: copyCast(in, GetTensorData<float>(out), num_elements); break; case kTfLiteBool: copyCast(in, out->data.b, num_elements); break; case kTfLiteComplex64: copyCast(in, reinterpret_cast<std::complex<float>*>(out->data.c64), num_elements); break; default: // Unsupported type. 
TF_LITE_UNSUPPORTED_TYPE(context, out->type, "Cast"); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); const int num_elements = NumElements(input); TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output)); switch (input->type) { case kTfLiteInt64: return copyToTensor(context, input->data.i64, output, num_elements); case kTfLiteInt32: return copyToTensor(context, input->data.i32, output, num_elements); case kTfLiteUInt8: return copyToTensor(context, input->data.uint8, output, num_elements); case kTfLiteFloat32: return copyToTensor(context, GetTensorData<float>(input), output, num_elements); case kTfLiteBool: return copyToTensor(context, input->data.b, output, num_elements); case kTfLiteComplex64: return copyToTensor( context, reinterpret_cast<std::complex<float>*>(input->data.c64), output, num_elements); default: // Unsupported type. TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast"); } return kTfLiteOk; } } // namespace cast TfLiteRegistration* Register_CAST() { static TfLiteRegistration r = {nullptr, nullptr, cast::Prepare, cast::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): these two checks would make the new implementation // incompatible with some existing models, where params is not specified. It // is OK not to have them because toco would have set input and output types // to match the parameters. // auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data); // TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type); // TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // TODO(ahentz): these two checks would make the new implementation // incompatible with some existing models, where params is not specified. It // is OK not to have them because toco would have set input and output types // to match the parameters. // auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data); // TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type); // TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
{'added': [(35, ' const TfLiteTensor* input;'), (36, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (37, ' TfLiteTensor* output;'), (38, ' TF_LITE_ENSURE_OK(context,'), (39, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (104, ' const TfLiteTensor* input;'), (105, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (106, ' TfLiteTensor* output;'), (107, ' TF_LITE_ENSURE_OK(context,'), (108, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(35, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (36, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (101, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (102, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
10
4
107
755
8
80
1
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,220
cipso_ipv4.c
C
cipso_v4_validate
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). 
*/ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). */ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. 
*/ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. 
* */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. * */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. 
The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. 
It is important to note that there is * currently no checking for duplicate keys. Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. 
*/ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. * */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; 
break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). 
* */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. 
* */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. 
Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. * */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. 
* */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. 
 *
 */
static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
				      const unsigned char *net_cat,
				      u32 net_cat_len,
				      struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 iter;

	/* the list is a sequence of 16-bit big-endian category values;
	 * set the matching bit in the host catmap for each entry */
	for (iter = 0; iter < net_cat_len; iter += 2) {
		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
				get_unaligned_be16(&net_cat[iter]),
				GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return 0;
}

/**
 * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid
 * @doi_def: the DOI definition
 * @rngcat: category list
 * @rngcat_len: length of the category list in bytes
 *
 * Description:
 * Checks the given categories against the given DOI definition and returns a
 * negative value if any of the categories do not have a valid mapping and a
 * zero value if all of the categories are valid.
 *
 */
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *rngcat,
				      u32 rngcat_len)
{
	u16 cat_high;
	u16 cat_low;
	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
	u32 iter;

	/* ranged tags are only defined for pass-through DOIs and the list
	 * must be a whole number of 16-bit entries */
	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < rngcat_len; iter += 4) {
		cat_high = get_unaligned_be16(&rngcat[iter]);
		/* a range may omit the low end, in which case it is zero */
		if ((iter + 4) <= rngcat_len)
			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
		else
			cat_low = 0;

		/* ranges must be listed in descending, non-overlapping order */
		if (cat_high > cat_prev)
			return -EFAULT;

		cat_prev = cat_low;
	}

	return 0;
}

/**
 * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO category list using the given DOI definition.  Returns the
 * size in bytes of the network category bitmap on success, negative values
 * otherwise.
 *
 */
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int iter = -1;
	u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
	u32 array_cnt = 0;
	u32 cat_size = 0;

	/* make sure we don't overflow the 'array[]' variable */
	if (net_cat_len >
	    (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
		return -ENOSPC;

	/* first pass: collect (low, high) pairs in ascending order */
	for (;;) {
		iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						  iter + 1);
		if (iter < 0)
			break;
		/* a low end of zero is implicit on the wire and costs
		 * nothing; every other low end costs one u16 */
		cat_size += (iter == 0 ? 0 : sizeof(u16));
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;

		/* find the end of this contiguous run of categories */
		iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat,
						      iter);
		if (iter < 0)
			return -EFAULT;
		cat_size += sizeof(u16);
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;
	}

	/* second pass: emit the pairs in reverse (descending) order as
	 * required by the ranged tag wire format, writing high then low
	 * and omitting a low end of zero */
	for (iter = 0; array_cnt > 0;) {
		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
		iter += 2;
		array_cnt--;
		if (array[array_cnt] != 0) {
			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
			iter += 2;
		}
	}

	return cat_size;
}

/**
 * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO category list to the correct
 * local MLS category bitmap using the given DOI definition.  Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 net_iter;
	u16 cat_low;
	u16 cat_high;

	/* each wire entry is a (high, low) pair of 16-bit values; a
	 * missing low end means the range starts at category zero */
	for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
		cat_high = get_unaligned_be16(&net_cat[net_iter]);
		if ((net_iter + 4) <= net_cat_len)
			cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
		else
			cat_low = 0;

		ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat,
						       cat_low,
						       cat_high,
						       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return 0;
}

/*
 * Protocol Handling Functions
 */

/**
 * cipso_v4_gentag_hdr - Generate a CIPSO option header
 * @doi_def: the DOI definition
 * @len: the total tag length in bytes, not including this header
 * @buf: the CIPSO option buffer
 *
 * Description:
 * Write a CIPSO header into the beginning of @buffer.
 *
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
				unsigned char *buf,
				u32 len)
{
	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + len;
	/* bytes 2-5 carry the DOI in network byte order */
	*(__be32 *)&buf[2] = htonl(doi_def->doi);
}

/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the restricted bitmap tag, tag type #1.  The
 * actual buffer length may be larger than the indicated size due to
 * translation between host and network category bitmaps.  Returns the size of
 * the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* an MLS level is mandatory for this tag type */
	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		/* categories start at byte 4, after type/len/align/level */
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* This will send packets using the "optimized" format when
		 * possible as specified in section 3.4.2.6 of the
		 * CIPSO draft. */
		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security
 * attributes in @secattr.  Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything beyond the 4 byte tag header is category bitmap data */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
		                       netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak the catmap on a mapping failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the enumerated tag, tag type #2.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *buffer,
				u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* an MLS level is mandatory for this tag type */
	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
						     secattr,
						     &buffer[4],
						     buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_ENUM;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO enumerated tag (tag type #2) and return the security
 * attributes in @secattr.
Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
				  const unsigned char *tag,
				  struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything beyond the 4 byte tag header is the category list */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
						     &tag[4],
						     tag_len - 4,
						     secattr);
		if (ret_val != 0) {
			/* don't leak the catmap on a mapping failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the ranged tag, tag type #5.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* an MLS level is mandatory for this tag type */
	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RANGE;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO ranged tag (tag type #5) and return the security attributes
 * in @secattr.  Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything beyond the 4 byte tag header is the range list */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak the catmap on a mapping failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the local tag.
Returns the size of the tag
 * on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	/* the local tag simply embeds the LSM secid, so one is required */
	if (!(secattr->flags & NETLBL_SECATTR_SECID))
		return -EPERM;

	buffer[0] = CIPSO_V4_TAG_LOCAL;
	buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
	/* host byte order - this tag never leaves the local machine */
	*(u32 *)&buffer[2] = secattr->attr.secid;

	return CIPSO_V4_TAG_LOC_BLEN;
}

/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO local tag and return the security attributes in @secattr.
 * Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	secattr->attr.secid = *(u32 *)&tag[2];
	secattr->flags |= NETLBL_SECATTR_SECID;

	return 0;
}

/**
 * cipso_v4_validate - Validate a CIPSO option
 * @option: the start of the option, on error it is set to point to the error
 *
 * Description:
 * This routine is called to validate a CIPSO option, it checks all of the
 * fields to ensure that they are at least valid, see the draft snippet below
 * for details.  If the option is valid then a zero value is returned and
 * the value of @option is unchanged.  If the option is invalid then a
 * non-zero value is returned and @option is adjusted to point to the
 * offending portion of the option.  From the IETF draft ...
 *
 * "If any field within the CIPSO options, such as the DOI identifier, is not
 * recognized the IP datagram is discarded and an ICMP 'parameter problem'
 * (type 12) is generated and returned.  The ICMP code field is set to 'bad
 * parameter' (code 0) and the pointer is set to the start of the CIPSO field
 * that is unrecognized."
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. 
*/ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. 
*/ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. * */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). 
*/ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) kfree_rcu(old, rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. 
 *
 */
int cipso_v4_req_setattr(struct request_sock *req,
			 const struct cipso_v4_doi *doi_def,
			 const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *opt = NULL;
	struct inet_request_sock *req_inet;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto req_setattr_failure;
	buf_len = ret_val;

	/* We can't use ip_options_get() directly because it makes a call to
	 * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
	 * we won't always have CAP_NET_RAW even though we _always_ want to
	 * set the IPOPT_CIPSO option. */
	opt_len = (buf_len + 3) & ~3;	/* pad option data to 4 bytes */
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	/* swap in the new options and free any previous ones after a
	 * grace period */
	req_inet = inet_rsk(req);
	opt = xchg(&req_inet->opt, opt);
	if (opt)
		kfree_rcu(opt, rcu);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}

/**
 * cipso_v4_delopt - Delete the CIPSO option from a set of IP options
 * @opt_ptr: IP option pointer
 *
 * Description:
 * Deletes the CIPSO IP option from a set of IP options and makes the necessary
 * adjustments to the IP option structure.  Returns zero on success, negative
 * values on failure.
 *
 */
static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
{
	int hdr_delta = 0;
	struct ip_options_rcu *opt = *opt_ptr;

	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
		/* other options are present so we have to compact the
		 * option block in place rather than dropping it wholesale */
		u8 cipso_len;
		u8 cipso_off;
		unsigned char *cipso_ptr;
		int iter;
		int optlen_new;

		cipso_off = opt->opt.cipso - sizeof(struct iphdr);
		cipso_ptr = &opt->opt.__data[cipso_off];
		cipso_len = cipso_ptr[1];

		/* fix up the cached offsets of options that followed the
		 * CIPSO option before shifting everything down */
		if (opt->opt.srr > opt->opt.cipso)
			opt->opt.srr -= cipso_len;
		if (opt->opt.rr > opt->opt.cipso)
			opt->opt.rr -= cipso_len;
		if (opt->opt.ts > opt->opt.cipso)
			opt->opt.ts -= cipso_len;
		if (opt->opt.router_alert > opt->opt.cipso)
			opt->opt.router_alert -= cipso_len;
		opt->opt.cipso = 0;

		memmove(cipso_ptr, cipso_ptr + cipso_len,
			opt->opt.optlen - cipso_off - cipso_len);

		/* determining the new total option length is tricky because of
		 * the padding necessary, the only thing i can think to do at
		 * this point is walk the options one-by-one, skipping the
		 * padding at the end to determine the actual option size and
		 * from there we can determine the new total option length */
		iter = 0;
		optlen_new = 0;
		while (iter < opt->opt.optlen)
			if (opt->opt.__data[iter] != IPOPT_NOP) {
				iter += opt->opt.__data[iter + 1];
				optlen_new = iter;
			} else
				iter++;
		hdr_delta = opt->opt.optlen;
		opt->opt.optlen = (optlen_new + 3) & ~3;
		hdr_delta -= opt->opt.optlen;
	} else {
		/* only the cipso option was present on the socket so we can
		 * remove the entire option struct */
		*opt_ptr = NULL;
		hdr_delta = opt->opt.optlen;
		kfree_rcu(opt, rcu);
	}

	return hdr_delta;
}

/**
 * cipso_v4_sock_delattr - Delete the CIPSO option from a socket
 * @sk: the socket
 *
 * Description:
 * Removes the CIPSO option from a socket, if present.
 *
 */
void cipso_v4_sock_delattr(struct sock *sk)
{
	int hdr_delta;
	struct ip_options_rcu *opt;
	struct inet_sock *sk_inet;

	sk_inet = inet_sk(sk);
	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
	if (opt == NULL || opt->opt.cipso == 0)
		return;

	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
	if (sk_inet->is_icsk && hdr_delta > 0) {
		/* shrink the connection's extension header accounting and
		 * resync the MSS to match the smaller option block */
		struct inet_connection_sock *sk_conn = inet_csk(sk);
		sk_conn->icsk_ext_hdr_len -= hdr_delta;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
}

/**
 * cipso_v4_req_delattr - Delete the CIPSO option from a request socket
 * @reg: the request socket
 *
 * Description:
 * Removes the CIPSO option from a request socket, if present.
 *
 */
void cipso_v4_req_delattr(struct request_sock *req)
{
	struct ip_options_rcu *opt;
	struct inet_request_sock *req_inet;

	req_inet = inet_rsk(req);
	opt = req_inet->opt;
	if (opt == NULL || opt->opt.cipso == 0)
		return;

	cipso_v4_delopt(&req_inet->opt);
}

/**
 * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
 * @cipso: the CIPSO v4 option
 * @secattr: the security attributes
 *
 * Description:
 * Inspect @cipso and return the security attributes in @secattr.  Returns zero
 * on success and negative values on failure.
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
			    struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -ENOMSG;
	u32 doi;
	struct cipso_v4_doi *doi_def;

	/* try the label mapping cache first to avoid a full parse */
	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
		return 0;

	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	switch (cipso[6]) {
	case CIPSO_V4_TAG_RBITMAP:
		ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_ENUM:
		ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_RANGE:
		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_LOCAL:
		ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
		break;
	}
	if (ret_val == 0)
		secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * cipso_v4_sock_getattr - Get the security attributes from a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Query @sk to see if there is a CIPSO option attached to the sock and if
 * there is return the CIPSO security attributes in @secattr.  This function
 * requires that @sk be locked, or privately held, but it does not do any
 * locking itself.  Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
	struct ip_options_rcu *opt;
	int res = -ENOMSG;

	rcu_read_lock();
	opt = rcu_dereference(inet_sk(sk)->inet_opt);
	if (opt && opt->opt.cipso)
		res = cipso_v4_getattr(opt->opt.__data +
						opt->opt.cipso -
						sizeof(struct iphdr),
				       secattr);
	rcu_read_unlock();
	return res;
}

/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes.
 * Returns a pointer to the IP header on success and NULL on failure.
 *
 */
int cipso_v4_skbuff_setattr(struct sk_buff *skb,
			    const struct cipso_v4_doi *doi_def,
			    const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char buf[CIPSO_V4_OPT_LEN_MAX];
	u32 buf_len = CIPSO_V4_OPT_LEN_MAX;
	u32 opt_len;
	int len_delta;

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		return ret_val;
	buf_len = ret_val;
	opt_len = (buf_len + 3) & ~3;	/* pad option data to 4 bytes */

	/* we overwrite any existing options to ensure that we have enough
	 * room for the CIPSO option, the reason is that we _need_ to guarantee
	 * that the security label is applied to the packet - we do the same
	 * thing when using the socket options and it hasn't caused a problem,
	 * if we need to we can always revisit this choice later */

	len_delta = opt_len - opt->optlen;
	/* if we don't ensure enough headroom we could panic on the skb_push()
	 * call below so make sure we have enough, we are also "mangling" the
	 * packet so we should probably do a copy-on-write call anyway */
	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
	if (ret_val < 0)
		return ret_val;

	if (len_delta > 0) {
		/* we assume that the header + opt->optlen have already been
		 * "pushed" in ip_options_build() or similar */
		iph = ip_hdr(skb);
		skb_push(skb, len_delta);
		memmove((char *)iph - len_delta, iph, iph->ihl << 2);
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
	} else if (len_delta < 0) {
		/* new option block is smaller - pad out the old space with
		 * no-ops before overwriting */
		iph = ip_hdr(skb);
		memset(iph + 1, IPOPT_NOP, opt->optlen);
	} else
		iph = ip_hdr(skb);

	if (opt->optlen > 0)
		memset(opt, 0, sizeof(*opt));
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	opt->is_changed = 1;

	/* we have to do the following because we are being called from a
	 * netfilter hook which means the packet already has had the header
	 * fields populated and the checksum calculated - yes this means we
	 * are doing more work than needed but we do it to keep the core
	 * stack clean and tidy */
	memcpy(iph + 1, buf, buf_len);
	if (opt_len > buf_len)
		memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
	if (len_delta != 0) {
		iph->ihl = 5 + (opt_len >> 2);
		iph->tot_len = htons(skb->len);
	}
	ip_send_check(iph);

	return 0;
}

/**
 * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet
 * @skb: the packet
 *
 * Description:
 * Removes any and all CIPSO options from the given packet.  Returns zero on
 * success, negative values on failure.
 *
 */
int cipso_v4_skbuff_delattr(struct sk_buff *skb)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char *cipso_ptr;

	if (opt->cipso == 0)
		return 0;

	/* since we are changing the packet we should make a copy */
	ret_val = skb_cow(skb, skb_headroom(skb));
	if (ret_val < 0)
		return ret_val;

	/* the easiest thing to do is just replace the cipso option with noop
	 * options since we don't change the size of the packet, although we
	 * still need to recalculate the checksum */

	iph = ip_hdr(skb);
	cipso_ptr = (unsigned char *)iph + opt->cipso;
	memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
	opt->cipso = 0;
	opt->is_changed = 1;

	ip_send_check(iph);

	return 0;
}

/**
 * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option
 * @skb: the packet
 * @secattr: the security attributes
 *
 * Description:
 * Parse the given packet's CIPSO option and return the security attributes.
 * Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
			    struct netlbl_lsm_secattr *secattr)
{
	return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr);
}

/*
 * Setup Functions
 */

/**
 * cipso_v4_init - Initialize the CIPSO module
 *
 * Description:
 * Initialize the CIPSO module and prepare it for use.  Returns zero on success
 * and negative values on failure.
 *
 */
static int __init cipso_v4_init(void)
{
	int ret_val;

	ret_val = cipso_v4_cache_init();
	if (ret_val != 0)
		/* the label cache is required for operation - fail hard */
		panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
		      ret_val);

	return 0;
}

subsys_initcall(cipso_v4_init);
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). 
*/ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). */ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. 
 */
static int cipso_v4_bitmap_walk(const unsigned char *bitmap,
				u32 bitmap_len,
				u32 offset,
				u8 state)
{
	u32 bit_spot;
	u32 byte_offset;
	unsigned char bitmask;
	unsigned char byte;

	/* gcc always rounds to zero when doing integer division */
	byte_offset = offset / 8;
	byte = bitmap[byte_offset];
	bit_spot = offset;
	bitmask = 0x80 >> (offset % 8);

	while (bit_spot < bitmap_len) {
		/* return as soon as the bit at 'bit_spot' matches the
		 * requested 'state' (set when state != 0, clear otherwise) */
		if ((state && (byte & bitmask) == bitmask) ||
		    (state == 0 && (byte & bitmask) == 0))
			return bit_spot;

		bit_spot++;
		bitmask >>= 1;
		if (bitmask == 0) {
			/* exhausted this byte, move on to the next one */
			byte = bitmap[++byte_offset];
			bitmask = 0x80;
		}
	}

	/* NOTE(review): the kernel-doc above mentions a "-2 if error" return,
	 * but this implementation only ever returns the matching bit offset
	 * or -1 when no matching bit is found */
	return -1;
}

/**
 * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap
 * @bitmap: the bitmap
 * @bit: the bit
 * @state: if non-zero, set the bit (1) else clear the bit (0)
 *
 * Description:
 * Set or clear a single bit in the bitmap.  The caller must ensure @bit is
 * within the bounds of @bitmap.
 *
 */
static void cipso_v4_bitmap_setbit(unsigned char *bitmap,
				   u32 bit,
				   u8 state)
{
	u32 byte_spot;
	u8 bitmask;

	/* gcc always rounds to zero when doing integer division */
	byte_spot = bit / 8;
	bitmask = 0x80 >> (bit % 8);
	if (state)
		bitmap[byte_spot] |= bitmask;
	else
		bitmap[byte_spot] &= ~bitmask;
}

/**
 * cipso_v4_cache_entry_free - Frees a cache entry
 * @entry: the entry to free
 *
 * Description:
 * This function frees the memory associated with a cache entry including the
 * LSM cache data if there are no longer any users, i.e. reference count == 0.
 *
 */
static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
{
	/* release our reference on the LSM data before freeing the key and
	 * the entry itself */
	if (entry->lsm_data)
		netlbl_secattr_cache_free(entry->lsm_data);
	kfree(entry->key);
	kfree(entry);
}

/**
 * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache
 * @key: the hash key
 * @key_len: the length of the key in bytes
 *
 * Description:
 * The CIPSO tag hashing function.  Returns a 32-bit hash value.
 *
 */
static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
{
	/* delegate to the kernel's Jenkins hash with a zero initval */
	return jhash(key, key_len, 0);
}

/*
 * Label Mapping Cache Functions
 */

/**
 * cipso_v4_cache_init - Initialize the CIPSO cache
 *
 * Description:
 * Initializes the CIPSO label mapping cache, this function should be called
 * before any of the other functions defined in this file.  Returns zero on
 * success, negative values on error.
 *
 */
static int cipso_v4_cache_init(void)
{
	u32 iter;

	/* one bucket per hash slot; kcalloc() zeroes the array and checks
	 * the multiplication for overflow */
	cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
				 sizeof(struct cipso_v4_map_cache_bkt),
				 GFP_KERNEL);
	if (cipso_v4_cache == NULL)
		return -ENOMEM;

	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
		spin_lock_init(&cipso_v4_cache[iter].lock);
		cipso_v4_cache[iter].size = 0;
		INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
	}

	return 0;
}

/**
 * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache
 *
 * Description:
 * Invalidates and frees any entries in the CIPSO cache.
 *
 */
void cipso_v4_cache_invalidate(void)
{
	struct cipso_v4_map_cache_entry *entry, *tmp_entry;
	u32 iter;

	/* walk every bucket, emptying it under its own lock; _bh variants
	 * are used because the cache is also touched from softirq context */
	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
		spin_lock_bh(&cipso_v4_cache[iter].lock);
		list_for_each_entry_safe(entry,
					 tmp_entry,
					 &cipso_v4_cache[iter].list, list) {
			list_del(&entry->list);
			cipso_v4_cache_entry_free(entry);
		}
		cipso_v4_cache[iter].size = 0;
		spin_unlock_bh(&cipso_v4_cache[iter].lock);
	}
}

/**
 * cipso_v4_cache_check - Check the CIPSO cache for a label mapping
 * @key: the buffer to check
 * @key_len: buffer length in bytes
 * @secattr: the security attribute struct to use
 *
 * Description:
 * This function checks the cache to see if a label mapping already exists for
 * the given key.  If there is a match then the cache is adjusted and the
 * @secattr struct is populated with the correct LSM security attributes.  The
 * cache is adjusted in the following manner if the entry is not already the
 * first in the cache bucket:
 *
 * 1.
The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. 
It is important to note that there is * currently no checking for duplicate keys. Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. 
*/ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. * */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; 
break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). 
* */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. 
* */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. 
Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. * */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. 
* */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. 
* */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. 
* */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. 
* */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. * */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. 
* */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negatives values on * failure. 
* */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. 
Return zero on success, negatives values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. 
* */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. 
Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negatives values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized." 
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. 
*/ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. Further, * there is no legitimate reason for setting this from * userspace so reject it if skb is NULL. 
*/ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... * * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." 
* */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. 
*/ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. * */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). 
*/ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) kfree_rcu(old, rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. 
* */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) kfree_rcu(opt, rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. 
* */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; kfree_rcu(opt, rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. 
* */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @reg: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. * */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. 
*/ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char 
*)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. * */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. 
*/ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; }
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. 
*/ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. Further, * there is no legitimate reason for setting this from * userspace so reject it if skb is NULL. */ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; }
{'added': [(1728, '\t\t\t * not the loopback device drop the packet. Further,'), (1729, '\t\t\t * there is no legitimate reason for setting this from'), (1730, '\t\t\t * userspace so reject it if skb is NULL. */'), (1731, '\t\t\tif (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {')], 'deleted': [(1728, '\t\t\t * not the loopback device drop the packet. */'), (1729, '\t\t\tif (!(skb->dev->flags & IFF_LOOPBACK)) {')]}
4
2
1,356
7,452
115
536
27
https://github.com/torvalds/linux
CVE-2013-0310
CWE-119
1,165
root.c
C
pid_ns_release_proc
/* * linux/fs/proc/root.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc root directory handling functions */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include "internal.h" static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; } static int proc_set_super(struct super_block *sb, void *data) { int err = set_anon_super(sb, NULL); if (!err) { struct pid_namespace *ns = (struct pid_namespace *)data; sb->s_fs_info = get_pid_ns(ns); } return err; } static struct dentry *proc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int err; struct super_block *sb; struct pid_namespace *ns; struct proc_inode *ei; if (flags & MS_KERNMOUNT) ns = (struct pid_namespace *)data; else ns = current->nsproxy->pid_ns; sb = sget(fs_type, proc_test_super, proc_set_super, ns); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { sb->s_flags = flags; err = proc_fill_super(sb); if (err) { deactivate_locked_super(sb); return ERR_PTR(err); } sb->s_flags |= MS_ACTIVE; } ei = PROC_I(sb->s_root->d_inode); if (!ei->pid) { rcu_read_lock(); ei->pid = get_pid(find_pid_ns(1, ns)); rcu_read_unlock(); } return dget(sb->s_root); } static void proc_kill_sb(struct super_block *sb) { struct pid_namespace *ns; ns = (struct pid_namespace *)sb->s_fs_info; kill_anon_super(sb); put_pid_ns(ns); } static struct file_system_type proc_fs_type = { .name = "proc", .mount = proc_mount, .kill_sb = proc_kill_sb, }; void __init proc_root_init(void) { struct vfsmount *mnt; int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); if (IS_ERR(mnt)) { unregister_filesystem(&proc_fs_type); return; } init_pid_ns.proc_mnt = 
mnt; proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat ) { generic_fillattr(dentry->d_inode, stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { if (!proc_lookup(dir, dentry, nd)) { return NULL; } return proc_pid_lookup(dir, dentry, nd); } static int proc_root_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned int nr = filp->f_pos; int ret; if (nr < FIRST_PROCESS_ENTRY) { int error = proc_readdir(filp, dirent, filldir); if (error <= 0) return error; filp->f_pos = FIRST_PROCESS_ENTRY; } ret = proc_pid_readdir(filp, dirent, filldir); return ret; } /* * The root /proc directory is special, as it has the * <pid> directories. Thus we don't use the generic * directory handling functions for that.. */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, .readdir = proc_root_readdir, .llseek = default_llseek, }; /* * proc root can do almost nothing.. */ static const struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, .getattr = proc_root_getattr, }; /* * This is the root "inode" in the /proc tree.. 
*/ struct proc_dir_entry proc_root = { .low_ino = PROC_ROOT_INO, .namelen = 5, .mode = S_IFDIR | S_IRUGO | S_IXUGO, .nlink = 2, .count = ATOMIC_INIT(1), .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, .name = "/proc", }; int pid_ns_prepare_proc(struct pid_namespace *ns) { struct vfsmount *mnt; mnt = kern_mount_data(&proc_fs_type, ns); if (IS_ERR(mnt)) return PTR_ERR(mnt); ns->proc_mnt = mnt; return 0; } void pid_ns_release_proc(struct pid_namespace *ns) { mntput(ns->proc_mnt); }
/* * linux/fs/proc/root.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc root directory handling functions */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include "internal.h" static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; } static int proc_set_super(struct super_block *sb, void *data) { int err = set_anon_super(sb, NULL); if (!err) { struct pid_namespace *ns = (struct pid_namespace *)data; sb->s_fs_info = get_pid_ns(ns); } return err; } static struct dentry *proc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int err; struct super_block *sb; struct pid_namespace *ns; struct proc_inode *ei; if (flags & MS_KERNMOUNT) ns = (struct pid_namespace *)data; else ns = current->nsproxy->pid_ns; sb = sget(fs_type, proc_test_super, proc_set_super, ns); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { sb->s_flags = flags; err = proc_fill_super(sb); if (err) { deactivate_locked_super(sb); return ERR_PTR(err); } sb->s_flags |= MS_ACTIVE; } ei = PROC_I(sb->s_root->d_inode); if (!ei->pid) { rcu_read_lock(); ei->pid = get_pid(find_pid_ns(1, ns)); rcu_read_unlock(); } return dget(sb->s_root); } static void proc_kill_sb(struct super_block *sb) { struct pid_namespace *ns; ns = (struct pid_namespace *)sb->s_fs_info; kill_anon_super(sb); put_pid_ns(ns); } static struct file_system_type proc_fs_type = { .name = "proc", .mount = proc_mount, .kill_sb = proc_kill_sb, }; void __init proc_root_init(void) { int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; err = pid_ns_prepare_proc(&init_pid_ns); if (err) { unregister_filesystem(&proc_fs_type); return; } proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); 
#ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat ) { generic_fillattr(dentry->d_inode, stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { if (!proc_lookup(dir, dentry, nd)) { return NULL; } return proc_pid_lookup(dir, dentry, nd); } static int proc_root_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned int nr = filp->f_pos; int ret; if (nr < FIRST_PROCESS_ENTRY) { int error = proc_readdir(filp, dirent, filldir); if (error <= 0) return error; filp->f_pos = FIRST_PROCESS_ENTRY; } ret = proc_pid_readdir(filp, dirent, filldir); return ret; } /* * The root /proc directory is special, as it has the * <pid> directories. Thus we don't use the generic * directory handling functions for that.. */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, .readdir = proc_root_readdir, .llseek = default_llseek, }; /* * proc root can do almost nothing.. */ static const struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, .getattr = proc_root_getattr, }; /* * This is the root "inode" in the /proc tree.. 
*/ struct proc_dir_entry proc_root = { .low_ino = PROC_ROOT_INO, .namelen = 5, .mode = S_IFDIR | S_IRUGO | S_IXUGO, .nlink = 2, .count = ATOMIC_INIT(1), .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, .name = "/proc", }; int pid_ns_prepare_proc(struct pid_namespace *ns) { struct vfsmount *mnt; mnt = kern_mount_data(&proc_fs_type, ns); if (IS_ERR(mnt)) return PTR_ERR(mnt); ns->proc_mnt = mnt; return 0; } void pid_ns_release_proc(struct pid_namespace *ns) { kern_unmount(ns->proc_mnt); }
void pid_ns_release_proc(struct pid_namespace *ns) { mntput(ns->proc_mnt); }
void pid_ns_release_proc(struct pid_namespace *ns) { kern_unmount(ns->proc_mnt); }
{'added': [(100, '\terr = pid_ns_prepare_proc(&init_pid_ns);'), (101, '\tif (err) {'), (210, '\tkern_unmount(ns->proc_mnt);')], 'deleted': [(94, '\tstruct vfsmount *mnt;'), (101, '\tmnt = kern_mount_data(&proc_fs_type, &init_pid_ns);'), (102, '\tif (IS_ERR(mnt)) {'), (107, '\tinit_pid_ns.proc_mnt = mnt;'), (212, '\tmntput(ns->proc_mnt);')]}
3
5
153
810
4
16
1
https://github.com/torvalds/linux
CVE-2012-2127
CWE-119
1,305
topk_v2.cc
C++
tflite::ops::builtin::topk_v2::ResizeOutput
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <algorithm> #include <iterator> #include <vector> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace topk_v2 { constexpr int kInputTensor = 0; constexpr int kInputTopK = 1; constexpr int kOutputValues = 0; constexpr int kOutputIndexes = 1; namespace { TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* top_k = GetInput(context, node, kInputTopK); // INT32 number of top results is supported. TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32); // Check that the tensor contains only one value. TF_LITE_ENSURE_EQ(context, NumElements(top_k), 1); const int32 k = *GetTensorData<int32_t>(top_k); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const int num_dimensions = NumDimensions(input); // Check that input has one or more dimensions. TF_LITE_ENSURE_MSG(context, input->dims->size >= 1, "TopK k input must have 1 or more dimensions."); // Check that k is less or equal the internal dimension. 
TF_LITE_ENSURE_MSG(context, k <= input->dims->data[num_dimensions - 1], "TopK k is higher than the internal dimension."); TfLiteIntArray* output_indexes_shape = TfLiteIntArrayCreate(num_dimensions); TfLiteIntArray* output_values_shape = TfLiteIntArrayCreate(num_dimensions); for (int i = 0; i < num_dimensions - 1; ++i) { output_indexes_shape->data[i] = input->dims->data[i]; output_values_shape->data[i] = input->dims->data[i]; } output_indexes_shape->data[num_dimensions - 1] = k; output_values_shape->data[num_dimensions - 1] = k; TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes); TfLiteTensor* output_values = GetOutput(context, node, kOutputValues); // Force output types. output_indexes->type = kTfLiteInt32; output_values->type = input->type; auto resize_tensor = [context](TfLiteTensor* tensor, TfLiteIntArray* new_size, TfLiteIntArray* delete_on_error) { TfLiteStatus status = context->ResizeTensor(context, tensor, new_size); if (status != kTfLiteOk) { if (delete_on_error != nullptr) { TfLiteIntArrayFree(delete_on_error); } } return status; }; TF_LITE_ENSURE_OK(context, resize_tensor(output_indexes, output_indexes_shape, output_values_shape)); TF_LITE_ENSURE_OK(context, resize_tensor(output_values, output_values_shape, nullptr)); return kTfLiteOk; } // Class that collects indices of top k values. Based on template // tensorflow::gtl::TopN<> but, for optimization, it re-uses the same container. 
template <typename T> class TopContainer { public: TopContainer() = delete; TopContainer(int32 k, int32 row_size) : k_(k) { container_.reserve(std::min(k, row_size) + 1); } void start_collecting(const T* values) { values_ = values; container_.clear(); } void push(int32 a) { auto comparator = [this](int32 a, int32 b) { return compare_fun(a, b); }; if (container_.size() <= k_) { container_.push_back(a); if (container_.size() == k_ + 1) { std::make_heap(container_.begin(), container_.end(), comparator); std::pop_heap(container_.begin(), container_.end(), comparator); } } else if (comparator(a, container_.front())) { // Due to how we defined comparator / compare_fun, container_.front() // contains the index of the smallest of the top-k elements seen so far. // // If control reaches this point, we know that the current index a // corresponds to an element which is bigger than the smallest of the // top-k elements seen so far. Hence, we have to update the indices of // the top-k elements, by removing the index of the smallest top-k // element, adding a, and making sure container_[0:k] is still a heap. // Store index a into container_[k]. container_.back() = a; // Swap container_[0] and container_[k], and rearrange elements from // container_[0,k) such that they are a heap according to comparator. For // more info, see https://en.cppreference.com/w/cpp/algorithm/pop_heap. std::pop_heap(container_.begin(), container_.end(), comparator); } } const std::vector<int32>& sorted_result() { auto comparator = [this](int32 a, int32 b) { return compare_fun(a, b); }; if (container_.size() <= k_) { // Note: due to the way we defined compare_fun (see comments for that // function) std::sort puts the indices from container_ in decreasing // order of the corresponding elements. 
std::sort(container_.begin(), container_.end(), comparator); } else { std::sort_heap(container_.begin(), container_.end() - 1, comparator); container_.resize(k_); } return container_; } private: const int32 k_; // container_[0,k) holds the indices of the largest k elements from values_ // seen so far and are maintained in a min-heap order: container_.front() is // the index of the smallest of the top-k elements see so far. // // container_[k] is used as temporary space (not part of the min-heap). std::vector<int32> container_; const T* values_ = nullptr; // Compares indices a and b based on the corresponding elements from values_. // // Intuitively, compare_fun(a, b) returns true iff values_[b] < values_[a] // (notice the inversion of direction, not a typo); ties (==) are broken in // favor of earlier elements (i.e., a < b). bool compare_fun(int32 a, int32 b) const { if (values_[b] < values_[a]) { return true; } else if (values_[b] > values_[a]) { return false; } else { return a < b; } } }; // Mostly modeled on tensorflow/core/kernels/topk_op.cc for CPU. template <typename T> void TopK(int32 row_size, int32 num_rows, const T* data, int32 k, int32* output_indexes, T* output_values) { TopContainer<T> topc(k, row_size); for (int row = 0; row < num_rows; ++row) { const T* values_row = data + row * row_size; topc.start_collecting(values_row); for (int32 c = 0; c < row_size; ++c) { topc.push(c); } // Prepare output buffers. int32* indexes_row = output_indexes + row * k; T* output_row = output_values + row * k; // We always assume that the output is sorted. const auto& top_k = topc.sorted_result(); std::copy(top_k.begin(), top_k.end(), indexes_row); std::transform(top_k.begin(), top_k.end(), output_row, [values_row](const int32 loc) { return values_row[loc]; }); } } } // namespace TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // Check that the inputs and outputs have the right sizes and types. 
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output_values = GetOutput(context, node, kOutputValues); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output_values->type); const TfLiteTensor* top_k = GetInput(context, node, kInputTopK); TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32); // Set output dynamic if the input is not const. if (IsConstantTensor(top_k)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } else { TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes); TfLiteTensor* output_values = GetOutput(context, node, kOutputValues); SetTensorToDynamic(output_indexes); SetTensorToDynamic(output_values); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output_values = GetOutput(context, node, kOutputValues); TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes); if (IsDynamicTensor(output_values)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } const TfLiteTensor* top_k = GetInput(context, node, kInputTopK); const int32 k = top_k->data.i32[0]; // The tensor can have more than 2 dimensions or even be a vector, the code // anyway calls the internal dimension as row; const TfLiteTensor* input = GetInput(context, node, kInputTensor); const int32 row_size = input->dims->data[input->dims->size - 1]; int32 num_rows = 1; for (int i = 0; i < input->dims->size - 1; ++i) { num_rows *= input->dims->data[i]; } switch (output_values->type) { case kTfLiteFloat32: TopK(row_size, num_rows, GetTensorData<float>(input), k, output_indexes->data.i32, GetTensorData<float>(output_values)); break; case kTfLiteUInt8: TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32, output_values->data.uint8); break; case kTfLiteInt8: TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32, 
output_values->data.int8); break; case kTfLiteInt32: TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32, output_values->data.i32); break; case kTfLiteInt64: TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32, output_values->data.i64); break; default: TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.", TfLiteTypeGetName(output_values->type)); return kTfLiteError; } return kTfLiteOk; } } // namespace topk_v2 TfLiteRegistration* Register_TOPK_V2() { static TfLiteRegistration r = {nullptr, nullptr, topk_v2::Prepare, topk_v2::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <algorithm> #include <iterator> #include <vector> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace topk_v2 { constexpr int kInputTensor = 0; constexpr int kInputTopK = 1; constexpr int kOutputValues = 0; constexpr int kOutputIndexes = 1; namespace { TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* top_k; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k)); // INT32 number of top results is supported. TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32); // Check that the tensor contains only one value. TF_LITE_ENSURE_EQ(context, NumElements(top_k), 1); const int32 k = *GetTensorData<int32_t>(top_k); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const int num_dimensions = NumDimensions(input); // Check that input has one or more dimensions. 
TF_LITE_ENSURE_MSG(context, input->dims->size >= 1, "TopK k input must have 1 or more dimensions."); // Check that k is less or equal the internal dimension. TF_LITE_ENSURE_MSG(context, k <= input->dims->data[num_dimensions - 1], "TopK k is higher than the internal dimension."); TfLiteIntArray* output_indexes_shape = TfLiteIntArrayCreate(num_dimensions); TfLiteIntArray* output_values_shape = TfLiteIntArrayCreate(num_dimensions); for (int i = 0; i < num_dimensions - 1; ++i) { output_indexes_shape->data[i] = input->dims->data[i]; output_values_shape->data[i] = input->dims->data[i]; } output_indexes_shape->data[num_dimensions - 1] = k; output_values_shape->data[num_dimensions - 1] = k; TfLiteTensor* output_indexes; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes)); TfLiteTensor* output_values; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, kOutputValues, &output_values)); // Force output types. output_indexes->type = kTfLiteInt32; output_values->type = input->type; auto resize_tensor = [context](TfLiteTensor* tensor, TfLiteIntArray* new_size, TfLiteIntArray* delete_on_error) { TfLiteStatus status = context->ResizeTensor(context, tensor, new_size); if (status != kTfLiteOk) { if (delete_on_error != nullptr) { TfLiteIntArrayFree(delete_on_error); } } return status; }; TF_LITE_ENSURE_OK(context, resize_tensor(output_indexes, output_indexes_shape, output_values_shape)); TF_LITE_ENSURE_OK(context, resize_tensor(output_values, output_values_shape, nullptr)); return kTfLiteOk; } // Class that collects indices of top k values. Based on template // tensorflow::gtl::TopN<> but, for optimization, it re-uses the same container. 
template <typename T> class TopContainer { public: TopContainer() = delete; TopContainer(int32 k, int32 row_size) : k_(k) { container_.reserve(std::min(k, row_size) + 1); } void start_collecting(const T* values) { values_ = values; container_.clear(); } void push(int32 a) { auto comparator = [this](int32 a, int32 b) { return compare_fun(a, b); }; if (container_.size() <= k_) { container_.push_back(a); if (container_.size() == k_ + 1) { std::make_heap(container_.begin(), container_.end(), comparator); std::pop_heap(container_.begin(), container_.end(), comparator); } } else if (comparator(a, container_.front())) { // Due to how we defined comparator / compare_fun, container_.front() // contains the index of the smallest of the top-k elements seen so far. // // If control reaches this point, we know that the current index a // corresponds to an element which is bigger than the smallest of the // top-k elements seen so far. Hence, we have to update the indices of // the top-k elements, by removing the index of the smallest top-k // element, adding a, and making sure container_[0:k] is still a heap. // Store index a into container_[k]. container_.back() = a; // Swap container_[0] and container_[k], and rearrange elements from // container_[0,k) such that they are a heap according to comparator. For // more info, see https://en.cppreference.com/w/cpp/algorithm/pop_heap. std::pop_heap(container_.begin(), container_.end(), comparator); } } const std::vector<int32>& sorted_result() { auto comparator = [this](int32 a, int32 b) { return compare_fun(a, b); }; if (container_.size() <= k_) { // Note: due to the way we defined compare_fun (see comments for that // function) std::sort puts the indices from container_ in decreasing // order of the corresponding elements. 
std::sort(container_.begin(), container_.end(), comparator); } else { std::sort_heap(container_.begin(), container_.end() - 1, comparator); container_.resize(k_); } return container_; } private: const int32 k_; // container_[0,k) holds the indices of the largest k elements from values_ // seen so far and are maintained in a min-heap order: container_.front() is // the index of the smallest of the top-k elements see so far. // // container_[k] is used as temporary space (not part of the min-heap). std::vector<int32> container_; const T* values_ = nullptr; // Compares indices a and b based on the corresponding elements from values_. // // Intuitively, compare_fun(a, b) returns true iff values_[b] < values_[a] // (notice the inversion of direction, not a typo); ties (==) are broken in // favor of earlier elements (i.e., a < b). bool compare_fun(int32 a, int32 b) const { if (values_[b] < values_[a]) { return true; } else if (values_[b] > values_[a]) { return false; } else { return a < b; } } }; // Mostly modeled on tensorflow/core/kernels/topk_op.cc for CPU. template <typename T> void TopK(int32 row_size, int32 num_rows, const T* data, int32 k, int32* output_indexes, T* output_values) { TopContainer<T> topc(k, row_size); for (int row = 0; row < num_rows; ++row) { const T* values_row = data + row * row_size; topc.start_collecting(values_row); for (int32 c = 0; c < row_size; ++c) { topc.push(c); } // Prepare output buffers. int32* indexes_row = output_indexes + row * k; T* output_row = output_values + row * k; // We always assume that the output is sorted. const auto& top_k = topc.sorted_result(); std::copy(top_k.begin(), top_k.end(), indexes_row); std::transform(top_k.begin(), top_k.end(), output_row, [values_row](const int32 loc) { return values_row[loc]; }); } } } // namespace TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // Check that the inputs and outputs have the right sizes and types. 
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output_values; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, kOutputValues, &output_values)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output_values->type); const TfLiteTensor* top_k; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k)); TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32); // Set output dynamic if the input is not const. if (IsConstantTensor(top_k)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } else { TfLiteTensor* output_indexes; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes)); TfLiteTensor* output_values; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, kOutputValues, &output_values)); SetTensorToDynamic(output_indexes); SetTensorToDynamic(output_values); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output_values; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, kOutputValues, &output_values)); TfLiteTensor* output_indexes; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes)); if (IsDynamicTensor(output_values)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } const TfLiteTensor* top_k; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k)); const int32 k = top_k->data.i32[0]; // The tensor can have more than 2 dimensions or even be a vector, the code // anyway calls the internal dimension as row; const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const int32 row_size = input->dims->data[input->dims->size - 1]; int32 num_rows = 1; for (int i = 0; i < input->dims->size - 1; ++i) { num_rows *= input->dims->data[i]; } switch 
(output_values->type) { case kTfLiteFloat32: TopK(row_size, num_rows, GetTensorData<float>(input), k, output_indexes->data.i32, GetTensorData<float>(output_values)); break; case kTfLiteUInt8: TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32, output_values->data.uint8); break; case kTfLiteInt8: TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32, output_values->data.int8); break; case kTfLiteInt32: TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32, output_values->data.i32); break; case kTfLiteInt64: TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32, output_values->data.i64); break; default: TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.", TfLiteTypeGetName(output_values->type)); return kTfLiteError; } return kTfLiteOk; } } // namespace topk_v2 TfLiteRegistration* Register_TOPK_V2() { static TfLiteRegistration r = {nullptr, nullptr, topk_v2::Prepare, topk_v2::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
// Resizes the TopK outputs (values and indexes) to the input shape with the
// innermost dimension replaced by k, and forces their types (indexes: INT32,
// values: same as input).
//
// Fix: the original used GetInput()/GetOutput(), whose results were
// dereferenced without a null check; a model with missing or invalid tensor
// indices caused a null-pointer dereference / out-of-bounds read
// (CVE-2020-15211). All tensor lookups now go through the *Safe variants and
// propagate failure via TF_LITE_ENSURE_OK.
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* top_k;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));
  // INT32 number of top results is supported.
  TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32);
  // Check that the tensor contains only one value.
  TF_LITE_ENSURE_EQ(context, NumElements(top_k), 1);
  const int32 k = *GetTensorData<int32_t>(top_k);

  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const int num_dimensions = NumDimensions(input);
  // Check that input has one or more dimensions.
  TF_LITE_ENSURE_MSG(context, input->dims->size >= 1,
                     "TopK k input must have 1 or more dimensions.");
  // Check that k is less or equal the internal dimension.
  TF_LITE_ENSURE_MSG(context, k <= input->dims->data[num_dimensions - 1],
                     "TopK k is higher than the internal dimension.");

  // Output shape = input shape with the last dimension replaced by k.
  TfLiteIntArray* output_indexes_shape = TfLiteIntArrayCreate(num_dimensions);
  TfLiteIntArray* output_values_shape = TfLiteIntArrayCreate(num_dimensions);
  for (int i = 0; i < num_dimensions - 1; ++i) {
    output_indexes_shape->data[i] = input->dims->data[i];
    output_values_shape->data[i] = input->dims->data[i];
  }
  output_indexes_shape->data[num_dimensions - 1] = k;
  output_values_shape->data[num_dimensions - 1] = k;

  TfLiteTensor* output_indexes;
  TF_LITE_ENSURE_OK(
      context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));
  TfLiteTensor* output_values;
  TF_LITE_ENSURE_OK(
      context, GetOutputSafe(context, node, kOutputValues, &output_values));
  // Force output types.
  output_indexes->type = kTfLiteInt32;
  output_values->type = input->type;

  // ResizeTensor takes ownership of new_size on success; on failure, free the
  // other (not yet consumed) shape array so it does not leak.
  auto resize_tensor = [context](TfLiteTensor* tensor, TfLiteIntArray* new_size,
                                 TfLiteIntArray* delete_on_error) {
    TfLiteStatus status = context->ResizeTensor(context, tensor, new_size);
    if (status != kTfLiteOk) {
      if (delete_on_error != nullptr) {
        TfLiteIntArrayFree(delete_on_error);
      }
    }
    return status;
  };
  TF_LITE_ENSURE_OK(context, resize_tensor(output_indexes, output_indexes_shape,
                                           output_values_shape));
  TF_LITE_ENSURE_OK(context,
                    resize_tensor(output_values, output_values_shape, nullptr));
  return kTfLiteOk;
}
// Shapes the two TopK outputs: same dims as the input, except the innermost
// dimension becomes k. Index output is forced to INT32; value output takes
// the input's type. All tensor lookups use the *Safe accessors and bail out
// through TF_LITE_ENSURE_OK on failure.
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
  // k must be a single INT32 value.
  const TfLiteTensor* top_k;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));
  TF_LITE_ENSURE_TYPES_EQ(context, top_k->type, kTfLiteInt32);
  TF_LITE_ENSURE_EQ(context, NumElements(top_k), 1);
  const int32 k = *GetTensorData<int32_t>(top_k);

  // The data input must be at least rank 1 and its innermost extent must
  // accommodate k.
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const int rank = NumDimensions(input);
  TF_LITE_ENSURE_MSG(context, input->dims->size >= 1,
                     "TopK k input must have 1 or more dimensions.");
  TF_LITE_ENSURE_MSG(context, k <= input->dims->data[rank - 1],
                     "TopK k is higher than the internal dimension.");

  // Build both output shapes: copy all leading dims, replace the last by k.
  TfLiteIntArray* idx_shape = TfLiteIntArrayCreate(rank);
  TfLiteIntArray* val_shape = TfLiteIntArrayCreate(rank);
  for (int d = 0; d + 1 < rank; ++d) {
    idx_shape->data[d] = input->dims->data[d];
    val_shape->data[d] = input->dims->data[d];
  }
  idx_shape->data[rank - 1] = k;
  val_shape->data[rank - 1] = k;

  TfLiteTensor* output_indexes;
  TF_LITE_ENSURE_OK(
      context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));
  TfLiteTensor* output_values;
  TF_LITE_ENSURE_OK(
      context, GetOutputSafe(context, node, kOutputValues, &output_values));
  output_indexes->type = kTfLiteInt32;
  output_values->type = input->type;

  // ResizeTensor consumes `shape`; if it fails, release the sibling shape
  // array that would otherwise be orphaned.
  auto resize_tensor = [context](TfLiteTensor* t, TfLiteIntArray* shape,
                                 TfLiteIntArray* orphan_on_error) {
    const TfLiteStatus s = context->ResizeTensor(context, t, shape);
    if (s != kTfLiteOk && orphan_on_error != nullptr) {
      TfLiteIntArrayFree(orphan_on_error);
    }
    return s;
  };
  TF_LITE_ENSURE_OK(context,
                    resize_tensor(output_indexes, idx_shape, val_shape));
  TF_LITE_ENSURE_OK(context, resize_tensor(output_values, val_shape, nullptr));
  return kTfLiteOk;
}
{'added': [(38, ' const TfLiteTensor* top_k;'), (39, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));'), (46, ' const TfLiteTensor* input;'), (47, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (64, ' TfLiteTensor* output_indexes;'), (65, ' TF_LITE_ENSURE_OK('), (66, ' context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));'), (67, ' TfLiteTensor* output_values;'), (68, ' TF_LITE_ENSURE_OK('), (69, ' context, GetOutputSafe(context, node, kOutputValues, &output_values));'), (204, ' const TfLiteTensor* input;'), (205, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (206, ' TfLiteTensor* output_values;'), (207, ' TF_LITE_ENSURE_OK('), (208, ' context, GetOutputSafe(context, node, kOutputValues, &output_values));'), (211, ' const TfLiteTensor* top_k;'), (212, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));'), (219, ' TfLiteTensor* output_indexes;'), (220, ' TF_LITE_ENSURE_OK('), (221, ' context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));'), (222, ' TfLiteTensor* output_values;'), (223, ' TF_LITE_ENSURE_OK('), (224, ' context, GetOutputSafe(context, node, kOutputValues, &output_values));'), (232, ' TfLiteTensor* output_values;'), (233, ' TF_LITE_ENSURE_OK('), (234, ' context, GetOutputSafe(context, node, kOutputValues, &output_values));'), (235, ' TfLiteTensor* output_indexes;'), (236, ' TF_LITE_ENSURE_OK('), (237, ' context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));'), (241, ' const TfLiteTensor* top_k;'), (242, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));'), (246, ' const TfLiteTensor* input;'), (247, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));')], 'deleted': [(38, ' const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);'), (45, ' const TfLiteTensor* input = GetInput(context, node, 
kInputTensor);'), (62, ' TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);'), (63, ' TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);'), (198, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (199, ' TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);'), (202, ' const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);'), (209, ' TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);'), (210, ' TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);'), (218, ' TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);'), (219, ' TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);'), (223, ' const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);'), (227, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);')]}
33
13
213
1,633
39
341
4
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
2,787
jsiArray.c
C
jsi_ArraySizeOfCmd
#ifndef JSI_LITE_ONLY #ifndef JSI_AMALGAMATION #include "jsiInt.h" #endif #if JSI__MUSL==1 || defined(__FreeBSD__) #define NO_QSORT_R 1 #endif static Jsi_RC jsi_ArrayPushCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { Jsi_Obj *obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); if (!ov) { Jsi_LogBug("Arguments Error"); ov = Jsi_ValueNew(interp); } Jsi_ValueInsertArray(interp, _this, curlen + i, ov, 0); } Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayPopCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_Value *v; Jsi_Obj *obj; obj = _this->d.obj; int i = Jsi_ObjGetLength(interp, obj) - 1; if (i < 0) { Jsi_ValueMakeUndef(interp, ret); return JSI_OK; } if (obj->arr) { if ((v = obj->arr[i])) { obj->arr[i] = NULL; obj->arrCnt--; } } else { v = Jsi_ValueArrayIndex(interp, _this, i); } if (v) { Jsi_DecrRefCount(interp, *ret); *ret = v; } Jsi_ObjSetLength(interp, obj, i); return JSI_OK; } static Jsi_RC jsi_ArrayJoinCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); const char *jstr = ""; int argc, curlen; Jsi_DString dStr = {}; curlen = Jsi_ObjGetLength(interp, _this->d.obj); if (curlen == 0) { goto bail; } if (Jsi_ValueGetLength(interp, args) >= 1) { Jsi_Value *sc = Jsi_ValueArrayIndex(interp, args, 
0); if (sc != NULL) jstr = Jsi_ValueToString(interp, sc, NULL); } if (0 == (argc=Jsi_ObjGetLength(interp, _this->d.obj))) { goto bail; } int i; for (i = 0; i < argc; ++i) { const char *cp; Jsi_Value *ov = Jsi_ValueArrayIndex(interp, _this, i); if (!ov) { /* TODO: are NULL args ok? */ continue; cp = ""; } else cp = Jsi_ValueToString(interp, ov, NULL); if (i && jstr[0]) Jsi_DSAppend(&dStr, jstr, NULL); Jsi_DSAppend(&dStr, cp, NULL); } Jsi_ValueMakeStringDup(interp, ret, Jsi_DSValue(&dStr)); Jsi_DSFree(&dStr); return JSI_OK; bail: Jsi_ValueMakeStringDup(interp, ret, ""); return JSI_OK; } Jsi_Value* Jsi_ValueArrayConcat(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Value *va; Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) { return NULL; } if (arg2->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg2->d.obj)) { return NULL; } int len1 = arg1->d.obj->arrCnt; int len2 = arg2->d.obj->arrCnt; Jsi_Obj *nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ObjArraySizer(interp, nobj, len1+len2); int i, j = 0; obj = arg1->d.obj; for (i = 0; i<len1; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } obj = arg2->d.obj; for (i = 0; i<len2; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, len1+len2); va = Jsi_ValueMakeArrayObject(interp, NULL, nobj); return va; } Jsi_RC Jsi_ValueArrayPush(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) return JSI_ERROR; if (!arg2) return JSI_ERROR; int len1 = arg1->d.obj->arrCnt; obj = arg1->d.obj; Jsi_ObjArraySizer(interp, obj, len1); obj->arr[len1] = arg2; Jsi_IncrRefCount(interp, arg2); obj->arrCnt++; return JSI_OK; } Jsi_Value *Jsi_ValueArrayPop(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayPop, target is not 
object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayPop, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; int idx = o->arrCnt-1; if (!o->arr[idx]) return NULL; Jsi_DecrRefCount(interp, o->arr[idx]); Jsi_Value *ret = o->arr[idx]; o->arr[idx] = NULL; o->arrCnt--; return ret; } Jsi_Value *Jsi_ValueArrayUnshift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; if (!o->arr[0]) return NULL; Jsi_DecrRefCount(interp, o->arr[0]); Jsi_Value *ret = o->arr[0]; o->arr[0] = NULL; o->arrCnt--; return ret; } /* delete array[0], array[1]->array[0] */ void Jsi_ValueArrayShift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayShift, target is not object"); return; } Jsi_Obj *o = v->d.obj; if (o->isarrlist) { uint i; if (!o->arrCnt) return; if (o->arr[0]) Jsi_DecrRefCount(interp, o->arr[0]); for (i=1; i<o->arrCnt; i++) { o->arr[i-1] = o->arr[i]; } o->arr[o->arrCnt--] = NULL; return; } int len = Jsi_ObjGetLength(interp, v->d.obj); if (len <= 0) return; Jsi_Value *v0 = Jsi_ValueArrayIndex(interp, v, 0); if (!v0) return; Jsi_ValueReset(interp, &v0); int i; Jsi_Value *last = v0; for (i = 1; i < len; ++i) { Jsi_Value *t = Jsi_ValueArrayIndex(interp, v, i); if (!t) return; Jsi_ValueCopy(interp, last, t); Jsi_ValueReset(interp, &t); last = t; } Jsi_ObjSetLength(interp, v->d.obj, len - 1); } static Jsi_RC jsi_ArrayFlatSub(Jsi_Interp *interp, Jsi_Obj* nobj, Jsi_Value *arr, int depth) { int i, n = 0, len = Jsi_ObjGetLength(interp, arr->d.obj); if (len <= 0) return JSI_OK; Jsi_RC rc = JSI_OK; int clen = Jsi_ObjGetLength(interp, nobj); for (i = 0; i < len && rc == JSI_OK; i++) { Jsi_Value *t = Jsi_ValueArrayIndex(interp, arr, i); if (t && depth>0 && 
Jsi_ValueIsArray(interp, t)) rc = jsi_ArrayFlatSub(interp, nobj, t , depth-1); else if (!Jsi_ValueIsUndef(interp, t)) Jsi_ObjArrayAdd(interp, nobj, t); if ((++n + clen)>interp->maxArrayList) return Jsi_LogError("array size exceeded"); } return rc; } static Jsi_RC jsi_ArrayFlatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Number ndepth = 1; Jsi_Obj *nobj; Jsi_Value *depth = Jsi_ValueArrayIndex(interp, args, 0); if (depth && Jsi_GetNumberFromValue(interp,depth, &ndepth) != JSI_OK) return JSI_ERROR; if (ndepth < 0 || ndepth>1000) return Jsi_LogError("bad depth: %d", (int)ndepth); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj ); if (ndepth>0) return jsi_ArrayFlatSub(interp, nobj, _this, ndepth); return JSI_OK; } static Jsi_RC jsi_ArrayConcatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, argc, nsiz; Jsi_Obj *obj, *nobj; Jsi_Value *va; obj = _this->d.obj; argc = Jsi_ValueGetLength(interp, args); curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrMaxSize; if (nsiz<=0) nsiz = 100; if (Jsi_ObjArraySizer(interp, nobj, nsiz+1) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", nsiz+1); goto bail; } int i, j, m; for (i = 0; i<curlen; i++) { if (!obj->arr[i]) continue; nobj->arr[i] = NULL; Jsi_ValueDup2(interp, nobj->arr+i, obj->arr[i]); } m = i; for (i = 0; i < argc; i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (va->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, va->d.obj)) { int margc = Jsi_ValueGetLength(interp, va); 
Jsi_Obj *mobj = va->d.obj; Jsi_ObjListifyArray(interp, mobj); if (Jsi_ObjArraySizer(interp, nobj, curlen += margc) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } for (j = 0; j<margc; j++, m++) { if (!mobj->arr[j]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, mobj->arr[j]); } } else { if (Jsi_ObjArraySizer(interp, nobj, ++curlen) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m++, va); } } Jsi_ObjSetLength(interp, nobj, curlen); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayMapCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_Value *vobjs[3]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = 
Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); nobj->arr[i] = Jsi_ValueNew1(interp); rc = Jsi_FunctionInvoke(interp, func, vpargs, nobj->arr+i, sthis); Jsi_DecrRefCount(interp, vpargs); if( JSI_OK!=rc ) { goto bail; } } Jsi_ObjSetLength(interp, nobj, curlen); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFilterCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, fval, n = 0, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis, *nrPtr = NULL; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[4]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, 
&nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, &nrPtr); if( JSI_OK!=rc ) { goto bail; } if (fval) { nobj->arr[n++] = obj->arr[i]; Jsi_IncrRefCount(interp, obj->arr[i]); } } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); Jsi_ObjSetLength(interp, nobj, n); return JSI_OK; bail: if (nthis) Jsi_DecrRefCount(interp, nthis); if (nrPtr) Jsi_DecrRefCount(interp, nrPtr); Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayReverseCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); int i, n, m; Jsi_Obj *obj; Jsi_Value *tval, *nthis = NULL, *sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); m = obj->arrCnt/2; for (i = 0, n=obj->arrCnt-1; i < m; i++, n--) { tval = obj->arr[i]; obj->arr[i] = obj->arr[n]; obj->arr[n] = tval; } Jsi_ValueDup2(interp, ret, _this); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; } static Jsi_RC jsi_ArrayForeachCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Obj *obj; int curlen; uint i; Jsi_Value *func, *vpargs; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *sthis = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); Jsi_RC rc = JSI_OK; Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = 
(fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, ret, sthis); Jsi_DecrRefCount(interp, vpargs); } if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFindSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_Obj *obj; int curlen; uint i; Jsi_RC rc = JSI_OK; Jsi_Value *func, *vpargs, *sthis = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); int fval = 0; Jsi_Value *nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, &nrPtr); if (op == 3) { if (!fval) break; } else if (fval) break; } if (rc == JSI_OK) { 
if (op == 1 && fval) // Find Jsi_ValueCopy(interp, *ret, obj->arr[i]); else if (op == 2 || op == 3) // Some/Every Jsi_ValueMakeBool(interp, ret, fval); else if (op == 4) Jsi_ValueMakeNumber(interp, ret, (Jsi_Number)(fval?(int)i:-1)); } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); return rc; } static Jsi_RC jsi_ArrayReduceSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_RC rc = JSI_OK; int curlen, i; Jsi_Obj *obj; Jsi_Value *func, *vpargs, *ini = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nrPtr = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) Jsi_ObjSetLength(interp, obj, 0); Jsi_ObjListifyArray(interp, obj); Jsi_Value *vobjs[4]; int n, rev = (op==2); Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>4) maa = 4; for (n = 0, i = (rev?obj->arrCnt-1:0); (rev?i>=0:i < (int)obj->arrCnt) && rc == JSI_OK; n++, i = (rev?i-1:i+1)) { if (!obj->arr[i]) continue; if (n==0 && !ini) { ini = obj->arr[i]; continue; } vobjs[0] = ini; vobjs[1] = obj->arr[i]; vobjs[2] = (maa>2?Jsi_ValueNewNumber(interp, i):NULL); vobjs[3] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, NULL); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; ini = nrPtr; } if (rc == JSI_OK && ini) Jsi_ValueCopy(interp, *ret, ini); Jsi_DecrRefCount(interp, nrPtr); return rc; } static Jsi_RC jsi_ArrayFindCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, 
_this, ret, funcPtr, 1); } static Jsi_RC jsi_ArraySomeCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayEveryCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC jsi_ArrayFindIndexCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 4); } static Jsi_RC jsi_ArrayReduceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayReduceRightCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIsArrayCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { bool b = 0; Jsi_Value *sthis = _this; if (_this->vt == JSI_VT_OBJECT && _this->d.obj->ot == JSI_OT_FUNCTION && _this->d.obj->__proto__ == interp->Array_prototype->d.obj->__proto__ ) sthis = Jsi_ValueArrayIndex(interp, args, 0); if (sthis && sthis->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, sthis->d.obj)) b = 1; Jsi_ValueMakeBool(interp, ret, b); return JSI_OK; } static Jsi_RC jsi_ArrayIndexSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { int istart = 0, n, i = 0, dir=1, idx=-1; Jsi_Value *seq = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Obj *obj = _this->d.obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); if (!seq) { goto bail; } n = Jsi_ObjGetLength(interp, obj); if (n == 0) { goto 
bail; } Jsi_Number nstart; if (op == 2) { istart = n-1; } if (start && Jsi_GetNumberFromValue(interp,start, &nstart)==JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (op == 2) { istart = n-1; dir = -1; } Jsi_ObjListifyArray(interp, obj); for (i = istart; ; i+=dir) { if ((dir>0 && i>=n) || (dir<0 && i<0) || i>=(int)obj->arrCnt) break; if (obj->arr[i] && Jsi_ValueCmp(interp, obj->arr[i], seq, JSI_CMP_EXACT)==0) { idx = i; break; } } bail: if (op == 3) Jsi_ValueMakeBool(interp, ret, (idx!=-1)); else Jsi_ValueMakeNumber(interp, ret, idx); return JSI_OK; } static Jsi_RC jsi_ArrayIndexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayLastindexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIncludesCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC jsi_ArraySizeOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int i = Jsi_ObjGetLength(interp, _this->d.obj); Jsi_ValueMakeNumber(interp, ret, i); return JSI_OK; } static Jsi_RC jsi_ArrayShiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Value *v; Jsi_Obj *obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); uint n = Jsi_ObjGetLength(interp, obj); assert(n <= obj->arrCnt); if (n<=0) { Jsi_ValueMakeUndef(interp, 
ret); } else { n--; v = obj->arr[0]; memmove(obj->arr, obj->arr+1, n*sizeof(Jsi_Value*)); obj->arr[n] = NULL; Jsi_ValueDup2(interp, ret, v); Jsi_DecrRefCount(interp, v); Jsi_ObjSetLength(interp, obj, n); } return JSI_OK; } static Jsi_RC jsi_ArrayUnshiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Obj *obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } if (argc <= 0) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_ObjListifyArray(interp, obj); if (Jsi_ObjArraySizer(interp, obj, curlen+argc)<=0) return Jsi_LogError("too long"); memmove(obj->arr+argc, obj->arr, (curlen)*sizeof(Jsi_Value*)); obj->arrCnt += argc; int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); obj->arr[i] = NULL; if (!ov) { Jsi_LogBug("Arguments Error"); continue; } obj->arr[i] = ov; Jsi_IncrRefCount(interp, ov); } Jsi_ObjSetLength(interp, obj, curlen+argc); Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayFillCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart = 0, nend = 0; // TODO: merge with code in ArraySliceCmd. 
Jsi_Value *value = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1), *end = Jsi_ValueArrayIndex(interp, args, 2); Jsi_Obj *obj = _this->d.obj; n = Jsi_ObjGetLength(interp, obj); if (start && Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { goto bail; } iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if (nsiz<=0) goto bail; int i; for (i = istart; i <= iend; i++) { if (obj->arr[i]) Jsi_ValueCopy(interp, obj->arr[i], value); else obj->arr[i] = Jsi_ValueDup(interp, value); } bail: if (_this != *ret) { Jsi_ValueMove(interp, *ret, _this); /*if (*ret) Jsi_DecrRefCount(interp, *ret); *ret = _this; Jsi_IncrRefCount(interp, *ret);*/ } return rc; } static Jsi_RC jsi_ArraySliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart; Jsi_Obj *nobj, *obj; Jsi_Value *start = Jsi_ValueArrayIndex(interp, args, 0), *end = Jsi_ValueArrayIndex(interp, args, 1); if (!start) { goto bail; } obj = _this->d.obj; n = Jsi_ObjGetLength(interp, obj); if (Jsi_GetNumberFromValue(interp,start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto done; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { done: Jsi_ValueMakeArrayObject(interp, ret, Jsi_ObjNewType(interp, JSI_OT_ARRAY)); return JSI_OK; } Jsi_Number nend; iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if 
(nsiz<=0) goto done; Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { rc = Jsi_LogError("index too large: %d", nsiz); goto bail; } int i, m; for (m = 0, i = istart; i <= iend; i++, m++) { if (!obj->arr[i]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, nsiz); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } typedef struct { Jsi_Interp *interp; int flags; int mode; bool unique; Jsi_Value *compare; int errCnt; } SortInfo; static const char *sortArrayStrs[] = {"default", "desc", "dict", "nocase", 0}; static Jsi_OptionSpec jsi_ArraySortOptions[] = { JSI_OPT(CUSTOM, SortInfo, mode, .help="Mode to sort by", .flags=0, .custom=Jsi_Opt_SwitchEnum, .data=sortArrayStrs), JSI_OPT(FUNC, SortInfo, compare, .help="Function to do comparison", .flags=0, .custom=0, .data=(void*)"val1,val2"), JSI_OPT(BOOL, SortInfo, unique, .help="Eliminate duplicate items"), JSI_OPT_END(SortInfo) }; #ifdef NO_QSORT_R SortInfo *curSortInfo = NULL; static int SortSubCmd(const void *p1, const void *p2) { SortInfo *si = curSortInfo; #else #ifdef __WIN32 static int SortSubCmd(void *thunk, const void *p1, const void *p2) #else static int SortSubCmd(const void *p1, const void *p2, void *thunk) #endif { SortInfo *si = (SortInfo *)thunk; #endif Jsi_Interp *interp = si->interp; int sortFlags = si->flags; if (interp == NULL || interp->deleting) return 0; Jsi_Value *v1 = *(Jsi_Value**)p1, *v2 = *(Jsi_Value**)p2; int rc = 0; if (v1 != NULL && v2 != NULL) { VALCHK(v1); VALCHK(v2); if (!si->compare) rc = Jsi_ValueCmp(interp, v1, v2, sortFlags); else { Jsi_Value *vv[2] = {v1, v2}; Jsi_Value *retP = Jsi_ValueNew1(interp); Jsi_Value *vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vv, 2, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, si->compare, vpargs, &retP, NULL); 
Jsi_DecrRefCount(interp, vpargs); if (rc == JSI_OK) { Jsi_Number d = 0; if (Jsi_ValueGetNumber(interp, retP, &d) == JSI_OK) rc = -(int)d; else { if (!si->errCnt) Jsi_LogWarn("invalid function return"); si->errCnt++; } } Jsi_DecrRefCount(interp, retP); } } else { if (v1 == v2) rc = 0; else if (v1 == NULL) rc = 1; else rc = -1; } if ((sortFlags&JSI_SORT_DESCEND)) return rc; return -rc; } Jsi_RC Jsi_ValueArraySort(Jsi_Interp *interp, Jsi_Value *val, int flags) { if (val->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, val->d.obj)) { return JSI_ERROR; } Jsi_Obj *obj = val->d.obj; Jsi_ObjListifyArray(interp, obj); if (obj->arrCnt <= 0) { return JSI_OK; } #ifdef __WIN32 #define qsort_r qsort_s #endif SortInfo si = {}; si.interp = interp; si.flags = flags; #ifdef NO_QSORT_R curSortInfo = &si; qsort(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd); curSortInfo = NULL; #else qsort_r(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd, &si); #endif return JSI_OK; } static Jsi_RC jsi_ArraySortCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int flags = 0, i, curlen, hasopt = 0; Jsi_Value *v, *arg = NULL; SortInfo si = {}; si.interp = interp; Jsi_Obj *obj = _this->d.obj; curlen = obj->arrCnt; if (curlen <= 1) { goto done; } arg = Jsi_ValueArrayIndex(interp, args, 0); if (arg) { if (Jsi_ValueIsObjType(interp, arg, JSI_OT_OBJECT)) { if (Jsi_OptionsProcess(interp, jsi_ArraySortOptions, &si, arg, 0) < 0) return JSI_ERROR; hasopt = 1; switch (si.mode) { case 1: flags |= JSI_SORT_DESCEND; break; case 2: flags |= JSI_SORT_DICT; break; case 3: flags |= JSI_SORT_NOCASE; break; } } else if (Jsi_ValueIsObjType(interp, arg, JSI_OT_FUNCTION)) si.compare = arg; else return Jsi_LogError("expected object or function"); } si.flags = flags; Jsi_ObjListifyArray(interp, obj); #ifdef NO_QSORT_R /* TODO: mutex. 
*/ curSortInfo = &si; qsort(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd); #else qsort_r(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd, &si); #endif if (interp->deleting) { #ifdef NO_QSORT_R curSortInfo = NULL; #endif return JSI_ERROR; } if (si.unique) { int n, diff = 1, dupCnt=0; for (n=0, i=1; i<(int)obj->arrCnt; i++) { if (obj->arr[n] == obj->arr[i]) diff = 1; else #ifdef NO_QSORT_R diff = SortSubCmd(&obj->arr[n], &obj->arr[i]); #else #ifdef __WIN32 diff = SortSubCmd(&si, &obj->arr[n], &obj->arr[i]); #else diff = SortSubCmd(&obj->arr[n], &obj->arr[i], &si); #endif #endif if (diff) { n++; if (n!=i) obj->arr[n] = obj->arr[i]; } else { dupCnt++; if (obj->arr[i]) Jsi_DecrRefCount(interp, obj->arr[i]); obj->arr[i] = 0; } } obj->arrCnt -= dupCnt; } #ifdef NO_QSORT_R curSortInfo = NULL; #endif if (hasopt) Jsi_OptionsFree(interp, jsi_ArraySortOptions, &si, 0); done: v = Jsi_ValueMakeObject(interp, NULL, obj); Jsi_ValueReplace(interp, ret, v); return JSI_OK; Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArraySpliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int newlen, argc, istart, n, rhowmany, ilen, curlen; Jsi_Value *va, *start, *howmany; Jsi_Obj *nobj, *obj = _this->d.obj; start = Jsi_ValueArrayIndex(interp, args, 0); howmany = Jsi_ValueArrayIndex(interp, args, 1); argc = Jsi_ValueGetLength(interp, args); istart = 0; ilen = (argc>=2 ? argc - 2 : 0); n = Jsi_ObjGetLength(interp, obj); curlen = n; if (!start) { goto bail2; } nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_ObjSetLength(interp, nobj, 0); /* Determine start index. 
*/ Jsi_Number nstart; if (Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) istart=0; } Jsi_Number nhow; rhowmany = n-istart; if (howmany && Jsi_GetNumberFromValue(interp, howmany, &nhow) == JSI_OK) { rhowmany = (int)nhow; if (rhowmany >= (n-istart)) rhowmany = n-istart; if (rhowmany < 0) rhowmany = (n-istart); if (rhowmany<0) goto bail; } if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); Jsi_ObjArraySizer(interp, nobj, rhowmany); /* Move elements to return object. */ int i, j, m; for (m=0, j = 0, i = istart; m<rhowmany && m<curlen; m++, i++, j++) { if (!obj->arr[i]) continue; nobj->arr[m] = obj->arr[i]; obj->arr[i] = NULL; } Jsi_ObjSetLength(interp, nobj, m); /* Shift remaining down. */ for (; rhowmany && i<curlen; i++) { obj->arr[i-rhowmany] = obj->arr[i]; obj->arr[i] = NULL; } curlen -= j; /* Add elements. */ newlen = curlen + argc - (argc>=2?2:1); if (Jsi_ObjArraySizer(interp, obj, newlen+3) <= 0) { Jsi_LogError("too long"); Jsi_ValueMakeUndef(interp, ret); return JSI_ERROR; } if (ilen>0) { for (i = curlen-1; i>=istart; i--) { obj->arr[i+ilen] = obj->arr[i]; obj->arr[i] = NULL; } for (m=istart, i = 2; i<argc; m++,i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (!va) continue; obj->arr[m] = NULL; Jsi_ValueDup2(interp, obj->arr+m, va); } } Jsi_ObjSetLength(interp, obj, newlen); bail: return JSI_OK; bail2: Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArrayConstructor(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { int argc = Jsi_ValueGetLength(interp, args), iscons = Jsi_FunctionIsConstructor(funcPtr); Jsi_Value *target; Jsi_Value *v = Jsi_ValueArrayIndex(interp, args, 0); if (iscons) { target = _this; Jsi_ValueMakeArrayObject(interp, &_this, Jsi_ObjNewArray(interp, NULL, 0, 0)); } else { Jsi_Obj *o = Jsi_ObjNewType(interp, JSI_OT_ARRAY); 
o->__proto__ = interp->Array_prototype; Jsi_ValueMakeObject(interp, ret, o); target = *ret; } if (argc == 1 && v && Jsi_ValueIsNumber(interp, v)) { Jsi_Number nv; Jsi_GetNumberFromValue(interp,v, &nv); int len = (int)nv; if (!Jsi_NumberIsInteger(v->d.num) || len < 0) return Jsi_LogError("Invalid array length"); target->d.obj->isarrlist = 1; if (Jsi_ObjArraySizer(interp, target->d.obj, len) <= 0) return JSI_ERROR; } else { int i; target->d.obj->isarrlist = 1; if (Jsi_ObjArraySizer(interp, target->d.obj, 0) <= 0) return JSI_ERROR; for (i = 0; i < argc; ++i) { Jsi_Value *argv = Jsi_ValueArrayIndex(interp, args, i); ; Jsi_ValueInsertArray(interp, _this, i, argv, 0); } } if (iscons) Jsi_ValueDup2(interp, ret, target); return JSI_OK; } static Jsi_CmdSpec arrayCmds[] = { { "Array", jsi_ArrayConstructor, 0,-1, "...", .help="jsi_Array constructor", .retType=(uint)JSI_TT_ARRAY, .flags=JSI_CMD_IS_CONSTRUCTOR }, { "concat", jsi_ArrayConcatCmd, 0,-1, "...", .help="Return array with args appended", .retType=(uint)JSI_TT_ARRAY }, { "every", jsi_ArrayEveryCmd, 1, 1, "callback:function", .help="Returns true if every value in array satisfies the test", .retType=(uint)JSI_TT_ANY }, { "fill", jsi_ArrayFillCmd, 1, 3, "value:any, start:number=0, end:number=-1", .help="Fill an array with values", .retType=(uint)JSI_TT_ARRAY }, { "filter", jsi_ArrayFilterCmd, 1, 2, "callback:function, this:object=void", .help="Return a filtered array", .retType=(uint)JSI_TT_ARRAY }, { "find", jsi_ArrayFindCmd, 1, 1, "callback:function", .help="Returns the value of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY }, { "findIndex", jsi_ArrayFindIndexCmd, 1, 1, "callback:function", .help="Returns the index of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY }, { "flat", jsi_ArrayFlatCmd, 0, 1, "depth:number=1", .help="Flatten an arra", .retType=(uint)JSI_TT_ARRAY }, { "forEach", jsi_ArrayForeachCmd, 1, 2, "callback:function, 
this:object=void", .help="Invoke function with each item in object", .retType=(uint)JSI_TT_VOID }, { "includes", jsi_ArrayIncludesCmd, 1, 1, "val:any", .help="Returns true if array contains value", .retType=(uint)JSI_TT_ANY }, { "indexOf", jsi_ArrayIndexOfCmd, 1, 2, "str:any, startIdx:number=0", .help="Return index of first occurrance in array", .retType=(uint)JSI_TT_NUMBER }, { "isArray", jsi_ArrayIsArrayCmd, 0, 0, "", .help="True if val array", .retType=(uint)JSI_TT_BOOLEAN }, { "join", jsi_ArrayJoinCmd, 0, 1, "sep:string=''", .help="Return elements joined by char", .retType=(uint)JSI_TT_STRING }, { "lastIndexOf",jsi_ArrayLastindexOfCmd,1, 2, "val:any, start:number=0", .help="Return index of last occurence in array", .retType=(uint)JSI_TT_NUMBER }, { "map", jsi_ArrayMapCmd, 1, 2, "callback:function, this:object=void", .help="Creates a new array with the results of calling a provided function on every element in this array", .retType=(uint)JSI_TT_ARRAY }, { "pop", jsi_ArrayPopCmd, 0, 0, "", .help="Remove and return last element of array", .retType=(uint)JSI_TT_ANY }, { "push", jsi_ArrayPushCmd, 1,-1, "val:any, ...", .help="Push one or more elements onto array and return size", .retType=(uint)JSI_TT_NUMBER }, { "reduce", jsi_ArrayReduceCmd, 1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY }, { "reduceRight",jsi_ArrayReduceRightCmd,1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY }, { "shift", jsi_ArrayShiftCmd, 0, 0, "", .help="Remove first element and shift downwards", .retType=(uint)JSI_TT_ANY }, { "sizeOf", jsi_ArraySizeOfCmd, 0, 0, "", .help="Return size of array", .retType=(uint)JSI_TT_NUMBER }, { "slice", jsi_ArraySliceCmd, 1, 2, "start:number, end:number=void", .help="Return sub-array", .retType=(uint)JSI_TT_ARRAY }, { "some", jsi_ArraySomeCmd, 1, 2, "callback:function, this:object=void", .help="Return true if function returns true some element", 
.retType=(uint)JSI_TT_BOOLEAN }, { "sort", jsi_ArraySortCmd, 0, 1, "options:function|object=void", .help="Sort an array", .retType=(uint)JSI_TT_ARRAY, .flags=0, .info=0, .opts=jsi_ArraySortOptions }, { "splice", jsi_ArraySpliceCmd, 1,-1, "start:number, howmany:number=void, ...", .help="Change the content of an array, adding new elements while removing old elements", .retType=(uint)JSI_TT_ARRAY }, { "reverse", jsi_ArrayReverseCmd, 0, 0, "", .help="Reverse order of all elements in an array", .retType=(uint)JSI_TT_ARRAY }, { "unshift", jsi_ArrayUnshiftCmd, 0,-1, "...", .help="Add new elements to start of array and return size", .retType=(uint)JSI_TT_NUMBER }, { NULL, 0,0,0,0, .help="Provide access to array objects" } }; Jsi_RC jsi_InitArray(Jsi_Interp *interp, int release) { if (release) return JSI_OK; interp->Array_prototype = Jsi_CommandCreateSpecs(interp, "Array", arrayCmds, NULL, JSI_CMDSPEC_ISOBJ); return JSI_OK; } #endif
#ifndef JSI_LITE_ONLY #ifndef JSI_AMALGAMATION #include "jsiInt.h" #endif #if JSI__MUSL==1 || defined(__FreeBSD__) #define NO_QSORT_R 1 #endif static uint jsi_SizeOfArray(Jsi_Interp *interp, Jsi_Obj *obj) { if (!obj || !obj->arr) return 0; return obj->arrCnt; } static Jsi_RC jsi_ArrayPushCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { Jsi_Obj *obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = jsi_SizeOfArray(interp, obj); int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); if (!ov) { Jsi_LogBug("Arguments Error"); ov = Jsi_ValueNew(interp); } Jsi_ValueInsertArray(interp, _this, curlen + i, ov, 0); } Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayPopCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_Value *v; Jsi_Obj *obj; obj = _this->d.obj; int i = jsi_SizeOfArray(interp, obj) - 1; if (i < 0) { Jsi_ValueMakeUndef(interp, ret); return JSI_OK; } if (obj->arr) { if ((v = obj->arr[i])) { obj->arr[i] = NULL; obj->arrCnt--; } } else { v = Jsi_ValueArrayIndex(interp, _this, i); } if (v) { Jsi_DecrRefCount(interp, *ret); *ret = v; } Jsi_ObjSetLength(interp, obj, i); return JSI_OK; } static Jsi_RC jsi_ArrayJoinCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); const char *jstr = ""; int argc, curlen; Jsi_DString dStr = {}; curlen = jsi_SizeOfArray(interp, _this->d.obj); if (curlen == 0) { goto bail; } if (Jsi_ValueGetLength(interp, 
args) >= 1) { Jsi_Value *sc = Jsi_ValueArrayIndex(interp, args, 0); if (sc != NULL) jstr = Jsi_ValueToString(interp, sc, NULL); } if (0 == (argc=jsi_SizeOfArray(interp, _this->d.obj))) { goto bail; } int i; for (i = 0; i < argc; ++i) { const char *cp; Jsi_Value *ov = Jsi_ValueArrayIndex(interp, _this, i); if (!ov) { /* TODO: are NULL args ok? */ continue; cp = ""; } else cp = Jsi_ValueToString(interp, ov, NULL); if (i && jstr[0]) Jsi_DSAppend(&dStr, jstr, NULL); Jsi_DSAppend(&dStr, cp, NULL); } Jsi_ValueMakeStringDup(interp, ret, Jsi_DSValue(&dStr)); Jsi_DSFree(&dStr); return JSI_OK; bail: Jsi_ValueMakeStringDup(interp, ret, ""); return JSI_OK; } Jsi_Value* Jsi_ValueArrayConcat(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Value *va; Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) { return NULL; } if (arg2->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg2->d.obj)) { return NULL; } int len1 = arg1->d.obj->arrCnt; int len2 = arg2->d.obj->arrCnt; Jsi_Obj *nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ObjArraySizer(interp, nobj, len1+len2); int i, j = 0; obj = arg1->d.obj; for (i = 0; i<len1; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } obj = arg2->d.obj; for (i = 0; i<len2; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, len1+len2); va = Jsi_ValueMakeArrayObject(interp, NULL, nobj); return va; } Jsi_RC Jsi_ValueArrayPush(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) return JSI_ERROR; if (!arg2) return JSI_ERROR; int len1 = arg1->d.obj->arrCnt; obj = arg1->d.obj; Jsi_ObjArraySizer(interp, obj, len1); obj->arr[len1] = arg2; Jsi_IncrRefCount(interp, arg2); obj->arrCnt++; return JSI_OK; } Jsi_Value *Jsi_ValueArrayPop(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt 
!= JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayPop, target is not object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayPop, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; int idx = o->arrCnt-1; if (!o->arr[idx]) return NULL; Jsi_DecrRefCount(interp, o->arr[idx]); Jsi_Value *ret = o->arr[idx]; o->arr[idx] = NULL; o->arrCnt--; return ret; } Jsi_Value *Jsi_ValueArrayUnshift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; if (!o->arr[0]) return NULL; Jsi_DecrRefCount(interp, o->arr[0]); Jsi_Value *ret = o->arr[0]; o->arr[0] = NULL; o->arrCnt--; return ret; } /* delete array[0], array[1]->array[0] */ void Jsi_ValueArrayShift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayShift, target is not object"); return; } Jsi_Obj *o = v->d.obj; if (o->isarrlist) { uint i; if (!o->arrCnt) return; if (o->arr[0]) Jsi_DecrRefCount(interp, o->arr[0]); for (i=1; i<o->arrCnt; i++) { o->arr[i-1] = o->arr[i]; } o->arr[o->arrCnt--] = NULL; return; } int len = jsi_SizeOfArray(interp, v->d.obj); if (len <= 0) return; Jsi_Value *v0 = Jsi_ValueArrayIndex(interp, v, 0); if (!v0) return; Jsi_ValueReset(interp, &v0); int i; Jsi_Value *last = v0; for (i = 1; i < len; ++i) { Jsi_Value *t = Jsi_ValueArrayIndex(interp, v, i); if (!t) return; Jsi_ValueCopy(interp, last, t); Jsi_ValueReset(interp, &t); last = t; } Jsi_ObjSetLength(interp, v->d.obj, len - 1); } static Jsi_RC jsi_ArrayFlatSub(Jsi_Interp *interp, Jsi_Obj* nobj, Jsi_Value *arr, int depth) { int i, n = 0, len = jsi_SizeOfArray(interp, arr->d.obj); if (len <= 0) return JSI_OK; Jsi_RC rc = JSI_OK; int clen = jsi_SizeOfArray(interp, nobj); for (i = 0; i < len && rc == JSI_OK; i++) { Jsi_Value *t = 
Jsi_ValueArrayIndex(interp, arr, i); if (t && depth>0 && Jsi_ValueIsArray(interp, t)) rc = jsi_ArrayFlatSub(interp, nobj, t , depth-1); else if (!Jsi_ValueIsUndef(interp, t)) Jsi_ObjArrayAdd(interp, nobj, t); if ((++n + clen)>interp->maxArrayList) return Jsi_LogError("array size exceeded"); } return rc; } static Jsi_RC jsi_ArrayFlatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Number ndepth = 1; Jsi_Obj *nobj; Jsi_Value *depth = Jsi_ValueArrayIndex(interp, args, 0); if (depth && Jsi_GetNumberFromValue(interp,depth, &ndepth) != JSI_OK) return JSI_ERROR; if (ndepth < 0 || ndepth>1000) return Jsi_LogError("bad depth: %d", (int)ndepth); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj ); if (ndepth>0) return jsi_ArrayFlatSub(interp, nobj, _this, ndepth); return JSI_OK; } static Jsi_RC jsi_ArrayConcatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, argc, nsiz; Jsi_Obj *obj, *nobj; Jsi_Value *va; obj = _this->d.obj; argc = Jsi_ValueGetLength(interp, args); curlen = jsi_SizeOfArray(interp, obj); Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrMaxSize; if (nsiz<=0) nsiz = 100; if (Jsi_ObjArraySizer(interp, nobj, nsiz+1) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", nsiz+1); goto bail; } int i, j, m; for (i = 0; i<curlen; i++) { if (!obj->arr[i]) continue; nobj->arr[i] = NULL; Jsi_ValueDup2(interp, nobj->arr+i, obj->arr[i]); } m = i; for (i = 0; i < argc; i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (va->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, va->d.obj)) { int margc = Jsi_ValueGetLength(interp, 
va); Jsi_Obj *mobj = va->d.obj; Jsi_ObjListifyArray(interp, mobj); if (Jsi_ObjArraySizer(interp, nobj, curlen += margc) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } for (j = 0; j<margc; j++, m++) { if (!mobj->arr[j]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, mobj->arr[j]); } } else { if (Jsi_ObjArraySizer(interp, nobj, ++curlen) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m++, va); } } Jsi_ObjSetLength(interp, nobj, curlen); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayMapCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = jsi_SizeOfArray(interp, obj); Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_Value *vobjs[3]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, 
vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); nobj->arr[i] = Jsi_ValueNew1(interp); rc = Jsi_FunctionInvoke(interp, func, vpargs, nobj->arr+i, sthis); Jsi_DecrRefCount(interp, vpargs); if( JSI_OK!=rc ) { goto bail; } } Jsi_ObjSetLength(interp, nobj, curlen); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFilterCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, fval, n = 0, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis, *nrPtr = NULL; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = jsi_SizeOfArray(interp, obj); Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[4]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, 
&nrPtr); if( JSI_OK!=rc ) { goto bail; } if (fval) { nobj->arr[n++] = obj->arr[i]; Jsi_IncrRefCount(interp, obj->arr[i]); } } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); Jsi_ObjSetLength(interp, nobj, n); return JSI_OK; bail: if (nthis) Jsi_DecrRefCount(interp, nthis); if (nrPtr) Jsi_DecrRefCount(interp, nrPtr); Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayReverseCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); int i, n, m; Jsi_Obj *obj; Jsi_Value *tval, *nthis = NULL, *sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); m = obj->arrCnt/2; for (i = 0, n=obj->arrCnt-1; i < m; i++, n--) { tval = obj->arr[i]; obj->arr[i] = obj->arr[n]; obj->arr[n] = tval; } Jsi_ValueDup2(interp, ret, _this); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; } static Jsi_RC jsi_ArrayForeachCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Obj *obj; uint i; Jsi_Value *func, *vpargs; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *sthis = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); Jsi_RC rc = JSI_OK; Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); 
vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, ret, sthis); Jsi_DecrRefCount(interp, vpargs); } if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFindSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_Obj *obj; uint i; Jsi_RC rc = JSI_OK; Jsi_Value *func, *vpargs, *sthis = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); int fval = 0; Jsi_Value *nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, &nrPtr); if (op == 3) { if (!fval) break; } else if (fval) break; } if (rc == JSI_OK) { if (op == 1 && fval) // Find Jsi_ValueCopy(interp, *ret, obj->arr[i]); else if (op == 2 || op == 3) // Some/Every Jsi_ValueMakeBool(interp, ret, fval); else if (op == 4) Jsi_ValueMakeNumber(interp, ret, (Jsi_Number)(fval?(int)i:-1)); } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); return rc; } 
static Jsi_RC jsi_ArrayReduceSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_RC rc = JSI_OK; int i; Jsi_Obj *obj; Jsi_Value *func, *vpargs, *ini = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nrPtr = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); Jsi_Value *vobjs[4]; int n, rev = (op==2); Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>4) maa = 4; for (n = 0, i = (rev?obj->arrCnt-1:0); (rev?i>=0:i < (int)obj->arrCnt) && rc == JSI_OK; n++, i = (rev?i-1:i+1)) { if (!obj->arr[i]) continue; if (n==0 && !ini) { ini = obj->arr[i]; continue; } vobjs[0] = ini; vobjs[1] = obj->arr[i]; vobjs[2] = (maa>2?Jsi_ValueNewNumber(interp, i):NULL); vobjs[3] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, NULL); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; ini = nrPtr; } if (rc == JSI_OK && ini) Jsi_ValueCopy(interp, *ret, ini); Jsi_DecrRefCount(interp, nrPtr); return rc; } static Jsi_RC jsi_ArrayFindCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArraySomeCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayEveryCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC 
jsi_ArrayFindIndexCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 4); } static Jsi_RC jsi_ArrayReduceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayReduceRightCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIsArrayCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { bool b = 0; Jsi_Value *sthis = _this; if (_this->vt == JSI_VT_OBJECT && _this->d.obj->ot == JSI_OT_FUNCTION && _this->d.obj->__proto__ == interp->Array_prototype->d.obj->__proto__ ) sthis = Jsi_ValueArrayIndex(interp, args, 0); if (sthis && sthis->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, sthis->d.obj)) b = 1; Jsi_ValueMakeBool(interp, ret, b); return JSI_OK; } static Jsi_RC jsi_ArrayIndexSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { int istart = 0, n, i = 0, dir=1, idx=-1; Jsi_Value *seq = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Obj *obj = _this->d.obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); if (!seq) { goto bail; } n = jsi_SizeOfArray(interp, obj); if (n == 0) { goto bail; } Jsi_Number nstart; if (op == 2) { istart = n-1; } if (start && Jsi_GetNumberFromValue(interp,start, &nstart)==JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (op == 2) { istart = n-1; dir = -1; } Jsi_ObjListifyArray(interp, obj); for (i = istart; ; i+=dir) { if ((dir>0 && i>=n) || (dir<0 && i<0) || i>=(int)obj->arrCnt) break; if 
(obj->arr[i] && Jsi_ValueCmp(interp, obj->arr[i], seq, JSI_CMP_EXACT)==0) { idx = i; break; } } bail: if (op == 3) Jsi_ValueMakeBool(interp, ret, (idx!=-1)); else Jsi_ValueMakeNumber(interp, ret, idx); return JSI_OK; } static Jsi_RC jsi_ArrayIndexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayLastindexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIncludesCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC jsi_ArraySizeOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int i = jsi_SizeOfArray(interp, _this->d.obj); Jsi_ValueMakeNumber(interp, ret, i); return JSI_OK; } static Jsi_RC jsi_ArrayShiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Value *v; Jsi_Obj *obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); uint n = jsi_SizeOfArray(interp, obj); if (n<=0) { Jsi_ValueMakeUndef(interp, ret); } else { n--; v = obj->arr[0]; memmove(obj->arr, obj->arr+1, n*sizeof(Jsi_Value*)); obj->arr[n] = NULL; Jsi_ValueDup2(interp, ret, v); Jsi_DecrRefCount(interp, v); Jsi_ObjSetLength(interp, obj, n); } return JSI_OK; } static Jsi_RC jsi_ArrayUnshiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return 
Jsi_LogError("expected array object"); Jsi_Obj *obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = jsi_SizeOfArray(interp, obj); if (argc <= 0) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_ObjListifyArray(interp, obj); if (Jsi_ObjArraySizer(interp, obj, curlen+argc)<=0) return Jsi_LogError("too long"); memmove(obj->arr+argc, obj->arr, (curlen)*sizeof(Jsi_Value*)); obj->arrCnt += argc; int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); obj->arr[i] = NULL; if (!ov) { Jsi_LogBug("Arguments Error"); continue; } obj->arr[i] = ov; Jsi_IncrRefCount(interp, ov); } Jsi_ObjSetLength(interp, obj, curlen+argc); Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayFillCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart = 0, nend = 0; // TODO: merge with code in ArraySliceCmd. 
Jsi_Value *value = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1), *end = Jsi_ValueArrayIndex(interp, args, 2); Jsi_Obj *obj = _this->d.obj; n = jsi_SizeOfArray(interp, obj); if (start && Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { goto bail; } iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if (nsiz<=0) goto bail; int i; for (i = istart; i <= iend; i++) { if (obj->arr[i]) Jsi_ValueCopy(interp, obj->arr[i], value); else obj->arr[i] = Jsi_ValueDup(interp, value); } bail: if (_this != *ret) { Jsi_ValueMove(interp, *ret, _this); /*if (*ret) Jsi_DecrRefCount(interp, *ret); *ret = _this; Jsi_IncrRefCount(interp, *ret);*/ } return rc; } static Jsi_RC jsi_ArraySliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart; Jsi_Obj *nobj, *obj; Jsi_Value *start = Jsi_ValueArrayIndex(interp, args, 0), *end = Jsi_ValueArrayIndex(interp, args, 1); if (!start) { goto bail; } obj = _this->d.obj; n = jsi_SizeOfArray(interp, obj); if (Jsi_GetNumberFromValue(interp,start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto done; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { done: Jsi_ValueMakeArrayObject(interp, ret, Jsi_ObjNewType(interp, JSI_OT_ARRAY)); return JSI_OK; } Jsi_Number nend; iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if 
(nsiz<=0) goto done; Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { rc = Jsi_LogError("index too large: %d", nsiz); goto bail; } int i, m; for (m = 0, i = istart; i <= iend; i++, m++) { if (!obj->arr[i]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, nsiz); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } typedef struct { Jsi_Interp *interp; int flags; int mode; bool unique; Jsi_Value *compare; int errCnt; } SortInfo; static const char *sortArrayStrs[] = {"default", "desc", "dict", "nocase", 0}; static Jsi_OptionSpec jsi_ArraySortOptions[] = { JSI_OPT(CUSTOM, SortInfo, mode, .help="Mode to sort by", .flags=0, .custom=Jsi_Opt_SwitchEnum, .data=sortArrayStrs), JSI_OPT(FUNC, SortInfo, compare, .help="Function to do comparison", .flags=0, .custom=0, .data=(void*)"val1,val2"), JSI_OPT(BOOL, SortInfo, unique, .help="Eliminate duplicate items"), JSI_OPT_END(SortInfo) }; #ifdef NO_QSORT_R SortInfo *curSortInfo = NULL; static int SortSubCmd(const void *p1, const void *p2) { SortInfo *si = curSortInfo; #else #ifdef __WIN32 static int SortSubCmd(void *thunk, const void *p1, const void *p2) #else static int SortSubCmd(const void *p1, const void *p2, void *thunk) #endif { SortInfo *si = (SortInfo *)thunk; #endif Jsi_Interp *interp = si->interp; int sortFlags = si->flags; if (interp == NULL || interp->deleting) return 0; Jsi_Value *v1 = *(Jsi_Value**)p1, *v2 = *(Jsi_Value**)p2; int rc = 0; if (v1 != NULL && v2 != NULL) { VALCHK(v1); VALCHK(v2); if (!si->compare) rc = Jsi_ValueCmp(interp, v1, v2, sortFlags); else { Jsi_Value *vv[2] = {v1, v2}; Jsi_Value *retP = Jsi_ValueNew1(interp); Jsi_Value *vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vv, 2, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, si->compare, vpargs, &retP, NULL); 
Jsi_DecrRefCount(interp, vpargs); if (rc == JSI_OK) { Jsi_Number d = 0; if (Jsi_ValueGetNumber(interp, retP, &d) == JSI_OK) rc = -(int)d; else { if (!si->errCnt) Jsi_LogWarn("invalid function return"); si->errCnt++; } } Jsi_DecrRefCount(interp, retP); } } else { if (v1 == v2) rc = 0; else if (v1 == NULL) rc = 1; else rc = -1; } if ((sortFlags&JSI_SORT_DESCEND)) return rc; return -rc; } Jsi_RC Jsi_ValueArraySort(Jsi_Interp *interp, Jsi_Value *val, int flags) { if (val->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, val->d.obj)) { return JSI_ERROR; } Jsi_Obj *obj = val->d.obj; Jsi_ObjListifyArray(interp, obj); if (obj->arrCnt <= 0) { return JSI_OK; } #ifdef __WIN32 #define qsort_r qsort_s #endif SortInfo si = {}; si.interp = interp; si.flags = flags; #ifdef NO_QSORT_R curSortInfo = &si; qsort(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd); curSortInfo = NULL; #else qsort_r(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd, &si); #endif return JSI_OK; } static Jsi_RC jsi_ArraySortCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int flags = 0, i, curlen, hasopt = 0; Jsi_Value *v, *arg = NULL; SortInfo si = {}; si.interp = interp; Jsi_Obj *obj = _this->d.obj; curlen = obj->arrCnt; if (curlen <= 1) { goto done; } arg = Jsi_ValueArrayIndex(interp, args, 0); if (arg) { if (Jsi_ValueIsObjType(interp, arg, JSI_OT_OBJECT)) { if (Jsi_OptionsProcess(interp, jsi_ArraySortOptions, &si, arg, 0) < 0) return JSI_ERROR; hasopt = 1; switch (si.mode) { case 1: flags |= JSI_SORT_DESCEND; break; case 2: flags |= JSI_SORT_DICT; break; case 3: flags |= JSI_SORT_NOCASE; break; } } else if (Jsi_ValueIsObjType(interp, arg, JSI_OT_FUNCTION)) si.compare = arg; else return Jsi_LogError("expected object or function"); } si.flags = flags; Jsi_ObjListifyArray(interp, obj); #ifdef NO_QSORT_R /* TODO: mutex. 
*/ curSortInfo = &si; qsort(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd); #else qsort_r(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd, &si); #endif if (interp->deleting) { #ifdef NO_QSORT_R curSortInfo = NULL; #endif return JSI_ERROR; } if (si.unique) { int n, diff = 1, dupCnt=0; for (n=0, i=1; i<(int)obj->arrCnt; i++) { if (obj->arr[n] == obj->arr[i]) diff = 1; else #ifdef NO_QSORT_R diff = SortSubCmd(&obj->arr[n], &obj->arr[i]); #else #ifdef __WIN32 diff = SortSubCmd(&si, &obj->arr[n], &obj->arr[i]); #else diff = SortSubCmd(&obj->arr[n], &obj->arr[i], &si); #endif #endif if (diff) { n++; if (n!=i) obj->arr[n] = obj->arr[i]; } else { dupCnt++; if (obj->arr[i]) Jsi_DecrRefCount(interp, obj->arr[i]); obj->arr[i] = 0; } } obj->arrCnt -= dupCnt; } #ifdef NO_QSORT_R curSortInfo = NULL; #endif if (hasopt) Jsi_OptionsFree(interp, jsi_ArraySortOptions, &si, 0); done: v = Jsi_ValueMakeObject(interp, NULL, obj); Jsi_ValueReplace(interp, ret, v); return JSI_OK; Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArraySpliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int newlen, argc, istart, n, rhowmany, ilen, curlen; Jsi_Value *va, *start, *howmany; Jsi_Obj *nobj, *obj = _this->d.obj; start = Jsi_ValueArrayIndex(interp, args, 0); howmany = Jsi_ValueArrayIndex(interp, args, 1); argc = Jsi_ValueGetLength(interp, args); istart = 0; ilen = (argc>=2 ? argc - 2 : 0); n = jsi_SizeOfArray(interp, obj); curlen = n; if (!start) { goto bail2; } nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_ObjSetLength(interp, nobj, 0); /* Determine start index. 
*/ Jsi_Number nstart; if (Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) istart=0; } Jsi_Number nhow; rhowmany = n-istart; if (howmany && Jsi_GetNumberFromValue(interp, howmany, &nhow) == JSI_OK) { rhowmany = (int)nhow; if (rhowmany >= (n-istart)) rhowmany = n-istart; if (rhowmany < 0) rhowmany = (n-istart); if (rhowmany<0) goto bail; } if (curlen < 0) { Jsi_ObjSetLength(interp, obj, curlen=0); } Jsi_ObjListifyArray(interp, obj); Jsi_ObjArraySizer(interp, nobj, rhowmany); /* Move elements to return object. */ int i, j, m; for (m=0, j = 0, i = istart; m<rhowmany && m<curlen; m++, i++, j++) { if (!obj->arr[i]) continue; nobj->arr[m] = obj->arr[i]; obj->arr[i] = NULL; } Jsi_ObjSetLength(interp, nobj, m); /* Shift remaining down. */ for (; rhowmany && i<curlen; i++) { obj->arr[i-rhowmany] = obj->arr[i]; obj->arr[i] = NULL; } curlen -= j; /* Add elements. */ newlen = curlen + argc - (argc>=2?2:1); if (Jsi_ObjArraySizer(interp, obj, newlen+3) <= 0) { Jsi_LogError("too long"); Jsi_ValueMakeUndef(interp, ret); return JSI_ERROR; } if (ilen>0) { for (i = curlen-1; i>=istart; i--) { obj->arr[i+ilen] = obj->arr[i]; obj->arr[i] = NULL; } for (m=istart, i = 2; i<argc; m++,i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (!va) continue; obj->arr[m] = NULL; Jsi_ValueDup2(interp, obj->arr+m, va); } } Jsi_ObjSetLength(interp, obj, newlen); bail: return JSI_OK; bail2: Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArrayConstructor(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { int argc = Jsi_ValueGetLength(interp, args), iscons = Jsi_FunctionIsConstructor(funcPtr); Jsi_Value *target; Jsi_Value *v = Jsi_ValueArrayIndex(interp, args, 0); if (iscons) { target = _this; Jsi_ValueMakeArrayObject(interp, &_this, Jsi_ObjNewArray(interp, NULL, 0, 0)); } else { Jsi_Obj *o = Jsi_ObjNewType(interp, JSI_OT_ARRAY); 
o->__proto__ = interp->Array_prototype; Jsi_ValueMakeObject(interp, ret, o); target = *ret; } if (argc == 1 && v && Jsi_ValueIsNumber(interp, v)) { Jsi_Number nv; Jsi_GetNumberFromValue(interp,v, &nv); int len = (int)nv; if (!Jsi_NumberIsInteger(v->d.num) || len < 0) return Jsi_LogError("Invalid array length"); target->d.obj->isarrlist = 1; if (Jsi_ObjArraySizer(interp, target->d.obj, len) <= 0) return JSI_ERROR; } else { int i; target->d.obj->isarrlist = 1; if (Jsi_ObjArraySizer(interp, target->d.obj, 0) <= 0) return JSI_ERROR; for (i = 0; i < argc; ++i) { Jsi_Value *argv = Jsi_ValueArrayIndex(interp, args, i); ; Jsi_ValueInsertArray(interp, _this, i, argv, 0); } } if (iscons) Jsi_ValueDup2(interp, ret, target); return JSI_OK; } static Jsi_CmdSpec arrayCmds[] = { { "Array", jsi_ArrayConstructor, 0,-1, "...", .help="jsi_Array constructor", .retType=(uint)JSI_TT_ARRAY, .flags=JSI_CMD_IS_CONSTRUCTOR }, { "concat", jsi_ArrayConcatCmd, 0,-1, "...", .help="Return array with args appended", .retType=(uint)JSI_TT_ARRAY }, { "every", jsi_ArrayEveryCmd, 1, 1, "callback:function", .help="Returns true if every value in array satisfies the test", .retType=(uint)JSI_TT_ANY }, { "fill", jsi_ArrayFillCmd, 1, 3, "value:any, start:number=0, end:number=-1", .help="Fill an array with values", .retType=(uint)JSI_TT_ARRAY }, { "filter", jsi_ArrayFilterCmd, 1, 2, "callback:function, this:object=void", .help="Return a filtered array", .retType=(uint)JSI_TT_ARRAY }, { "find", jsi_ArrayFindCmd, 1, 1, "callback:function", .help="Returns the value of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY }, { "findIndex", jsi_ArrayFindIndexCmd, 1, 1, "callback:function", .help="Returns the index of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY }, { "flat", jsi_ArrayFlatCmd, 0, 1, "depth:number=1", .help="Flatten an arra", .retType=(uint)JSI_TT_ARRAY }, { "forEach", jsi_ArrayForeachCmd, 1, 2, "callback:function, 
this:object=void", .help="Invoke function with each item in object", .retType=(uint)JSI_TT_VOID }, { "includes", jsi_ArrayIncludesCmd, 1, 1, "val:any", .help="Returns true if array contains value", .retType=(uint)JSI_TT_ANY }, { "indexOf", jsi_ArrayIndexOfCmd, 1, 2, "str:any, startIdx:number=0", .help="Return index of first occurrance in array", .retType=(uint)JSI_TT_NUMBER }, { "isArray", jsi_ArrayIsArrayCmd, 0, 0, "", .help="True if val array", .retType=(uint)JSI_TT_BOOLEAN }, { "join", jsi_ArrayJoinCmd, 0, 1, "sep:string=''", .help="Return elements joined by char", .retType=(uint)JSI_TT_STRING }, { "lastIndexOf",jsi_ArrayLastindexOfCmd,1, 2, "val:any, start:number=0", .help="Return index of last occurence in array", .retType=(uint)JSI_TT_NUMBER }, { "map", jsi_ArrayMapCmd, 1, 2, "callback:function, this:object=void", .help="Creates a new array with the results of calling a provided function on every element in this array", .retType=(uint)JSI_TT_ARRAY }, { "pop", jsi_ArrayPopCmd, 0, 0, "", .help="Remove and return last element of array", .retType=(uint)JSI_TT_ANY }, { "push", jsi_ArrayPushCmd, 1,-1, "val:any, ...", .help="Push one or more elements onto array and return size", .retType=(uint)JSI_TT_NUMBER }, { "reduce", jsi_ArrayReduceCmd, 1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY }, { "reduceRight",jsi_ArrayReduceRightCmd,1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY }, { "shift", jsi_ArrayShiftCmd, 0, 0, "", .help="Remove first element and shift downwards", .retType=(uint)JSI_TT_ANY }, { "sizeOf", jsi_ArraySizeOfCmd, 0, 0, "", .help="Return size of array", .retType=(uint)JSI_TT_NUMBER }, { "slice", jsi_ArraySliceCmd, 1, 2, "start:number, end:number=void", .help="Return sub-array", .retType=(uint)JSI_TT_ARRAY }, { "some", jsi_ArraySomeCmd, 1, 2, "callback:function, this:object=void", .help="Return true if function returns true some element", 
.retType=(uint)JSI_TT_BOOLEAN }, { "sort", jsi_ArraySortCmd, 0, 1, "options:function|object=void", .help="Sort an array", .retType=(uint)JSI_TT_ARRAY, .flags=0, .info=0, .opts=jsi_ArraySortOptions }, { "splice", jsi_ArraySpliceCmd, 1,-1, "start:number, howmany:number=void, ...", .help="Change the content of an array, adding new elements while removing old elements", .retType=(uint)JSI_TT_ARRAY }, { "reverse", jsi_ArrayReverseCmd, 0, 0, "", .help="Reverse order of all elements in an array", .retType=(uint)JSI_TT_ARRAY }, { "unshift", jsi_ArrayUnshiftCmd, 0,-1, "...", .help="Add new elements to start of array and return size", .retType=(uint)JSI_TT_NUMBER }, { NULL, 0,0,0,0, .help="Provide access to array objects" } }; Jsi_RC jsi_InitArray(Jsi_Interp *interp, int release) { if (release) return JSI_OK; interp->Array_prototype = Jsi_CommandCreateSpecs(interp, "Array", arrayCmds, NULL, JSI_CMDSPEC_ISOBJ); return JSI_OK; } #endif
static Jsi_RC jsi_ArraySizeOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int i = Jsi_ObjGetLength(interp, _this->d.obj); Jsi_ValueMakeNumber(interp, ret, i); return JSI_OK; }
static Jsi_RC jsi_ArraySizeOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int i = jsi_SizeOfArray(interp, _this->d.obj); Jsi_ValueMakeNumber(interp, ret, i); return JSI_OK; }
{'added': [(10, 'static uint jsi_SizeOfArray(Jsi_Interp *interp, Jsi_Obj *obj) {'), (11, ' if (!obj || !obj->arr)'), (12, ' return 0;'), (13, ' return obj->arrCnt;'), (14, '}'), (15, ''), (28, ' int curlen = jsi_SizeOfArray(interp, obj);'), (36, ' Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj));'), (50, ' int i = jsi_SizeOfArray(interp, obj) - 1;'), (83, ' curlen = jsi_SizeOfArray(interp, _this->d.obj);'), (94, ' if (0 == (argc=jsi_SizeOfArray(interp, _this->d.obj))) {'), (238, ' int len = jsi_SizeOfArray(interp, v->d.obj);'), (260, ' int i, n = 0, len = jsi_SizeOfArray(interp, arr->d.obj);'), (263, ' int clen = jsi_SizeOfArray(interp, nobj);'), (307, ' curlen = jsi_SizeOfArray(interp, obj);'), (379, ' curlen = jsi_SizeOfArray(interp, obj);'), (438, ' curlen = jsi_SizeOfArray(interp, obj);'), (614, ' int i;'), (700, ' n = jsi_SizeOfArray(interp, obj);'), (752, ' int i = jsi_SizeOfArray(interp, _this->d.obj);'), (763, ' uint n = jsi_SizeOfArray(interp, obj);'), (784, ' int curlen = jsi_SizeOfArray(interp, obj);'), (804, ' Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj));'), (818, ' n = jsi_SizeOfArray(interp, obj);'), (881, ' n = jsi_SizeOfArray(interp, obj);'), (1145, ' n = jsi_SizeOfArray(interp, obj);'), (1181, ' Jsi_ObjSetLength(interp, obj, curlen=0);')], 'deleted': [(22, ' int curlen = Jsi_ObjGetLength(interp, obj);'), (23, ' if (curlen < 0) {'), (24, ' Jsi_ObjSetLength(interp, obj, 0);'), (25, ' }'), (26, ''), (34, ' Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj));'), (48, ' int i = Jsi_ObjGetLength(interp, obj) - 1;'), (81, ' curlen = Jsi_ObjGetLength(interp, _this->d.obj);'), (92, ' if (0 == (argc=Jsi_ObjGetLength(interp, _this->d.obj))) {'), (236, ' int len = Jsi_ObjGetLength(interp, v->d.obj);'), (258, ' int i, n = 0, len = Jsi_ObjGetLength(interp, arr->d.obj);'), (261, ' int clen = Jsi_ObjGetLength(interp, nobj);'), (305, ' curlen = Jsi_ObjGetLength(interp, obj);'), (306, ' if (curlen < 0) {'), (307, ' 
Jsi_ObjSetLength(interp, obj, 0);'), (308, ' }'), (380, ' curlen = Jsi_ObjGetLength(interp, obj);'), (381, ' if (curlen < 0) {'), (382, ' Jsi_ObjSetLength(interp, obj, 0);'), (383, ' }'), (442, ' curlen = Jsi_ObjGetLength(interp, obj);'), (443, ' if (curlen < 0) {'), (444, ' Jsi_ObjSetLength(interp, obj, 0);'), (445, ' }'), (525, ' int curlen;'), (538, ' curlen = Jsi_ObjGetLength(interp, obj);'), (539, ' if (curlen < 0) {'), (540, ' Jsi_ObjSetLength(interp, obj, 0);'), (541, ' }'), (569, ' int curlen;'), (582, ' curlen = Jsi_ObjGetLength(interp, obj);'), (583, ' if (curlen < 0) {'), (584, ' Jsi_ObjSetLength(interp, obj, 0);'), (585, ' }'), (631, ' int curlen, i;'), (641, ' curlen = Jsi_ObjGetLength(interp, obj);'), (642, ' if (curlen < 0)'), (643, ' Jsi_ObjSetLength(interp, obj, 0);'), (720, ' n = Jsi_ObjGetLength(interp, obj);'), (772, ' int i = Jsi_ObjGetLength(interp, _this->d.obj);'), (783, ' uint n = Jsi_ObjGetLength(interp, obj);'), (784, ' assert(n <= obj->arrCnt);'), (805, ' int curlen = Jsi_ObjGetLength(interp, obj);'), (806, ' if (curlen < 0) {'), (807, ' Jsi_ObjSetLength(interp, obj, 0);'), (808, ' }'), (828, ' Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj));'), (842, ' n = Jsi_ObjGetLength(interp, obj);'), (905, ' n = Jsi_ObjGetLength(interp, obj);'), (1169, ' n = Jsi_ObjGetLength(interp, obj);'), (1205, ' Jsi_ObjSetLength(interp, obj, 0);')]}
27
51
1,148
9,934
7
76
3
https://github.com/pcmacdon/jsish
CVE-2020-22875
CWE-190
549
rza1_eth_driver.c
C
rza1EthInit
/** * @file rza1_eth_driver.c * @brief Renesas RZ/A1 Ethernet MAC driver * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "iodefine.h" #include "cpg_iobitmask.h" #include "intc.h" #include "core/net.h" #include "drivers/mac/rza1_eth_driver.h" #include "debug.h" //Underlying network interface static NetInterface *nicDriverInterface; //IAR EWARM compiler? #if defined(__ICCARM__) //Transmit buffer #pragma data_alignment = 32 static uint8_t txBuffer[RZA1_ETH_TX_BUFFER_COUNT][RZA1_ETH_TX_BUFFER_SIZE]; //Receive buffer #pragma data_alignment = 32 static uint8_t rxBuffer[RZA1_ETH_RX_BUFFER_COUNT][RZA1_ETH_RX_BUFFER_SIZE]; //Transmit DMA descriptors #pragma data_alignment = 32 static Rza1TxDmaDesc txDmaDesc[RZA1_ETH_TX_BUFFER_COUNT]; //Receive DMA descriptors #pragma data_alignment = 32 static Rza1RxDmaDesc rxDmaDesc[RZA1_ETH_RX_BUFFER_COUNT]; //ARM or GCC compiler? 
#else //Transmit buffer static uint8_t txBuffer[RZA1_ETH_TX_BUFFER_COUNT][RZA1_ETH_TX_BUFFER_SIZE] __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32))); //Receive buffer static uint8_t rxBuffer[RZA1_ETH_RX_BUFFER_COUNT][RZA1_ETH_RX_BUFFER_SIZE] __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32))); //Transmit DMA descriptors static Rza1TxDmaDesc txDmaDesc[RZA1_ETH_TX_BUFFER_COUNT] __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32))); //Receive DMA descriptors static Rza1RxDmaDesc rxDmaDesc[RZA1_ETH_RX_BUFFER_COUNT] __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32))); #endif //Current transmit descriptor static uint_t txIndex; //Current receive descriptor static uint_t rxIndex; /** * @brief RZ/A1 Ethernet MAC driver **/ const NicDriver rza1EthDriver = { NIC_TYPE_ETHERNET, ETH_MTU, rza1EthInit, rza1EthTick, rza1EthEnableIrq, rza1EthDisableIrq, rza1EthEventHandler, rza1EthSendPacket, rza1EthUpdateMacAddrFilter, rza1EthUpdateMacConfig, rza1EthWritePhyReg, rza1EthReadPhyReg, TRUE, TRUE, TRUE, TRUE }; /** * @brief RZ/A1 Ethernet MAC initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t rza1EthInit(NetInterface *interface) { error_t error; //Debug message TRACE_INFO("Initializing RZ/A1 Ethernet MAC...\r\n"); //Save underlying network interface nicDriverInterface = interface; //Enable Ethernet peripheral clock CPG.STBCR7 &= ~CPG_STBCR7_MSTP74; //GPIO configuration rza1EthInitGpio(interface); //Perform software reset ETHER.ARSTR = ETHER_ARSTR_ARST; //Wait for the reset to complete sleep(10); //Start EDMAC transmitting and receiving units ETHER.EDSR0 = ETHER_EDSR0_ENT | ETHER_EDSR0_ENR; //To execute a software reset with this register, 1 must be //written to both the SWRT and SWRR bits simultaneously ETHER.EDMR0 = ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR; //Wait for the reset to complete while(ETHER.EDMR0 & (ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR)) { } //Valid 
Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? if(error) { return error; } //Initialize DMA descriptor lists rza1EthInitDmaDesc(interface); //Select little endian mode and set descriptor length (16 bytes) ETHER.EDMR0 = ETHER_EDMR0_DE | ETHER_EDMR0_DL_16; //Error masks ETHER.TRSCER0 = 0; //Use store and forward mode ETHER.TFTR0 = 0; //Set transmit FIFO size and receive FIFO size (2048 bytes) ETHER.FDR0 = ETHER_FDR0_TFD_2048 | ETHER_FDR0_RFD_2048; //Enable continuous reception of multiple frames ETHER.RMCR0 = ETHER_RMCR0_RNC; //No padding insertion into receive data ETHER.RPADIR0 = 0; //Receive FIFO threshold (8 frames or 2048-64 bytes) ETHER.FCFTR0 = ETHER_FCFTR0_RFF_8 | ETHER_FCFTR0_RFD_2048; //Intelligent checksum operation mode ETHER.CSMR = 0; //Enable multicast address filtering ETHER.ECMR0 |= ETH_ECMR0_MCT; //Set the upper 32 bits of the MAC address ETHER.MAHR0 = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHER.MALR0 = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //Disable all CAM entries ETHER.TSU_TEN = 0; //Maximum frame length that can be accepted ETHER.RFLR0 = RZA1_ETH_RX_BUFFER_SIZE; //Automatic pause frame ETHER.APR0 = 0; //Manual pause frame ETHER.MPR0 = 0; //Automatic pause frame retransmit count ETHER.TPAUSER0 = 0; //Disable all EMAC interrupts ETHER.ECSIPR0 = 0; //Enable the desired EDMAC interrupts ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP; //Register interrupt handler R_INTC_Regist_Int_Func(INTC_ID_ETHERI, rza1EthIrqHandler); //Configure interrupt priority 
   R_INTC_Set_Priority(INTC_ID_ETHERI, RZA1_ETH_IRQ_PRIORITY);

   //Enable EDMAC transmission and reception
   ETHER.ECMR0 |= ETH_ECMR0_RE | ETH_ECMR0_TE;

   //Instruct the DMA to poll the receive descriptor list
   ETHER.EDRRR0 = ETHER_EDRRR0_RR;

   //Accept any packets from the upper layer
   osSetEvent(&interface->nicTxEvent);

   //Successful initialization
   return NO_ERROR;
}


//RSK RZ/A1H, Stream it! RZ, Hachiko or VK-RZ/A1H evaluation board?
#if defined(USE_RSK_RZA1H) || defined(USE_STREAM_IT_RZ) || \
   defined(USE_HACHIKO) || defined(USE_VK_RZA1H)

/**
 * @brief GPIO configuration
 * @param[in] interface Underlying network interface
 *
 * Routes the Ethernet MAC signals to the pins used by the selected
 * evaluation board. For each pin, the PMC/PFC/PFCE/PFCAE bits select the
 * alternate function and PIPC hands direction control to the peripheral
 * (pin mapping per the board schematic — confirm against the board docs)
 **/

void rza1EthInitGpio(NetInterface *interface)
{
//RSK RZ/A1H or Hachiko evaluation board?
#if defined(USE_RSK_RZA1H) || defined(USE_HACHIKO)
   //Configure ET_COL (P1_14)
   PORT1.PMCn.BIT.PMCn14 = 1; PORT1.PFCn.BIT.PFCn14 = 1; PORT1.PFCEn.BIT.PFCEn14 = 1; PORT1.PFCAEn.BIT.PFCAEn14 = 0; PORT1.PIPCn.BIT.PIPCn14 = 1;
   //Configure ET_TXCLK (P2_0)
   PORT2.PMCn.BIT.PMCn0 = 1; PORT2.PFCn.BIT.PFCn0 = 1; PORT2.PFCEn.BIT.PFCEn0 = 0; PORT2.PFCAEn.BIT.PFCAEn0 = 0; PORT2.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_TXER (P2_1)
   PORT2.PMCn.BIT.PMCn1 = 1; PORT2.PFCn.BIT.PFCn1 = 1; PORT2.PFCEn.BIT.PFCEn1 = 0; PORT2.PFCAEn.BIT.PFCAEn1 = 0; PORT2.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_TXEN (P2_2)
   PORT2.PMCn.BIT.PMCn2 = 1; PORT2.PFCn.BIT.PFCn2 = 1; PORT2.PFCEn.BIT.PFCEn2 = 0; PORT2.PFCAEn.BIT.PFCAEn2 = 0; PORT2.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_CRS (P2_3)
   PORT2.PMCn.BIT.PMCn3 = 1; PORT2.PFCn.BIT.PFCn3 = 1; PORT2.PFCEn.BIT.PFCEn3 = 0; PORT2.PFCAEn.BIT.PFCAEn3 = 0; PORT2.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_TXD0 (P2_4)
   PORT2.PMCn.BIT.PMCn4 = 1; PORT2.PFCn.BIT.PFCn4 = 1; PORT2.PFCEn.BIT.PFCEn4 = 0; PORT2.PFCAEn.BIT.PFCAEn4 = 0; PORT2.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_TXD1 (P2_5)
   PORT2.PMCn.BIT.PMCn5 = 1; PORT2.PFCn.BIT.PFCn5 = 1; PORT2.PFCEn.BIT.PFCEn5 = 0; PORT2.PFCAEn.BIT.PFCAEn5 = 0; PORT2.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_TXD2 (P2_6)
   PORT2.PMCn.BIT.PMCn6 = 1; PORT2.PFCn.BIT.PFCn6 = 1; PORT2.PFCEn.BIT.PFCEn6 = 0; PORT2.PFCAEn.BIT.PFCAEn6 = 0; PORT2.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_TXD3 (P2_7)
   PORT2.PMCn.BIT.PMCn7 = 1; PORT2.PFCn.BIT.PFCn7 = 1; PORT2.PFCEn.BIT.PFCEn7 = 0; PORT2.PFCAEn.BIT.PFCAEn7 = 0; PORT2.PIPCn.BIT.PIPCn7 = 1;
   //Configure ET_RXD0 (P2_8)
   PORT2.PMCn.BIT.PMCn8 = 1; PORT2.PFCn.BIT.PFCn8 = 1; PORT2.PFCEn.BIT.PFCEn8 = 0; PORT2.PFCAEn.BIT.PFCAEn8 = 0; PORT2.PIPCn.BIT.PIPCn8 = 1;
   //Configure ET_RXD1 (P2_9)
   PORT2.PMCn.BIT.PMCn9 = 1; PORT2.PFCn.BIT.PFCn9 = 1; PORT2.PFCEn.BIT.PFCEn9 = 0; PORT2.PFCAEn.BIT.PFCAEn9 = 0; PORT2.PIPCn.BIT.PIPCn9 = 1;
   //Configure ET_RXD2 (P2_10)
   PORT2.PMCn.BIT.PMCn10 = 1; PORT2.PFCn.BIT.PFCn10 = 1; PORT2.PFCEn.BIT.PFCEn10 = 0; PORT2.PFCAEn.BIT.PFCAEn10 = 0; PORT2.PIPCn.BIT.PIPCn10 = 1;
   //Configure ET_RXD3 (P2_11)
   PORT2.PMCn.BIT.PMCn11 = 1; PORT2.PFCn.BIT.PFCn11 = 1; PORT2.PFCEn.BIT.PFCEn11 = 0; PORT2.PFCAEn.BIT.PFCAEn11 = 0; PORT2.PIPCn.BIT.PIPCn11 = 1;
   //Configure ET_MDIO (P3_3)
   PORT3.PMCn.BIT.PMCn3 = 1; PORT3.PFCn.BIT.PFCn3 = 1; PORT3.PFCEn.BIT.PFCEn3 = 0; PORT3.PFCAEn.BIT.PFCAEn3 = 0; PORT3.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_RXCLK (P3_4)
   PORT3.PMCn.BIT.PMCn4 = 1; PORT3.PFCn.BIT.PFCn4 = 1; PORT3.PFCEn.BIT.PFCEn4 = 0; PORT3.PFCAEn.BIT.PFCAEn4 = 0; PORT3.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_RXER (P3_5)
   PORT3.PMCn.BIT.PMCn5 = 1; PORT3.PFCn.BIT.PFCn5 = 1; PORT3.PFCEn.BIT.PFCEn5 = 0; PORT3.PFCAEn.BIT.PFCAEn5 = 0; PORT3.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_RXDV (P3_6)
   PORT3.PMCn.BIT.PMCn6 = 1; PORT3.PFCn.BIT.PFCn6 = 1; PORT3.PFCEn.BIT.PFCEn6 = 0; PORT3.PFCAEn.BIT.PFCAEn6 = 0; PORT3.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_MDC (P5_9)
   PORT5.PMCn.BIT.PMCn9 = 1; PORT5.PFCn.BIT.PFCn9 = 1; PORT5.PFCEn.BIT.PFCEn9 = 0; PORT5.PFCAEn.BIT.PFCAEn9 = 0; PORT5.PIPCn.BIT.PIPCn9 = 1;

//VK-RZ/A1H evaluation board?
#elif defined(USE_VK_RZA1H)
   //Configure ET_COL (P1_14)
   PORT1.PMCn.BIT.PMCn14 = 1; PORT1.PFCn.BIT.PFCn14 = 1; PORT1.PFCEn.BIT.PFCEn14 = 1; PORT1.PFCAEn.BIT.PFCAEn14 = 0; PORT1.PIPCn.BIT.PIPCn14 = 1;
   //Configure ET_TXCLK (P2_0)
   PORT2.PMCn.BIT.PMCn0 = 1; PORT2.PFCn.BIT.PFCn0 = 1; PORT2.PFCEn.BIT.PFCEn0 = 0; PORT2.PFCAEn.BIT.PFCAEn0 = 0; PORT2.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_TXER (P2_1)
   PORT2.PMCn.BIT.PMCn1 = 1; PORT2.PFCn.BIT.PFCn1 = 1; PORT2.PFCEn.BIT.PFCEn1 = 0; PORT2.PFCAEn.BIT.PFCAEn1 = 0; PORT2.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_TXEN (P2_2)
   PORT2.PMCn.BIT.PMCn2 = 1; PORT2.PFCn.BIT.PFCn2 = 1; PORT2.PFCEn.BIT.PFCEn2 = 0; PORT2.PFCAEn.BIT.PFCAEn2 = 0; PORT2.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_CRS (P2_3)
   PORT2.PMCn.BIT.PMCn3 = 1; PORT2.PFCn.BIT.PFCn3 = 1; PORT2.PFCEn.BIT.PFCEn3 = 0; PORT2.PFCAEn.BIT.PFCAEn3 = 0; PORT2.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_TXD0 (P2_4)
   PORT2.PMCn.BIT.PMCn4 = 1; PORT2.PFCn.BIT.PFCn4 = 1; PORT2.PFCEn.BIT.PFCEn4 = 0; PORT2.PFCAEn.BIT.PFCAEn4 = 0; PORT2.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_TXD1 (P2_5)
   PORT2.PMCn.BIT.PMCn5 = 1; PORT2.PFCn.BIT.PFCn5 = 1; PORT2.PFCEn.BIT.PFCEn5 = 0; PORT2.PFCAEn.BIT.PFCAEn5 = 0; PORT2.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_TXD2 (P2_6)
   PORT2.PMCn.BIT.PMCn6 = 1; PORT2.PFCn.BIT.PFCn6 = 1; PORT2.PFCEn.BIT.PFCEn6 = 0; PORT2.PFCAEn.BIT.PFCAEn6 = 0; PORT2.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_TXD3 (P2_7)
   PORT2.PMCn.BIT.PMCn7 = 1; PORT2.PFCn.BIT.PFCn7 = 1; PORT2.PFCEn.BIT.PFCEn7 = 0; PORT2.PFCAEn.BIT.PFCAEn7 = 0; PORT2.PIPCn.BIT.PIPCn7 = 1;
   //Configure ET_RXD0 (P2_8)
   PORT2.PMCn.BIT.PMCn8 = 1; PORT2.PFCn.BIT.PFCn8 = 1; PORT2.PFCEn.BIT.PFCEn8 = 0; PORT2.PFCAEn.BIT.PFCAEn8 = 0; PORT2.PIPCn.BIT.PIPCn8 = 1;
   //Configure ET_RXD1 (P2_9)
   PORT2.PMCn.BIT.PMCn9 = 1; PORT2.PFCn.BIT.PFCn9 = 1; PORT2.PFCEn.BIT.PFCEn9 = 0; PORT2.PFCAEn.BIT.PFCAEn9 = 0; PORT2.PIPCn.BIT.PIPCn9 = 1;
   //Configure ET_RXD2 (P2_10)
   PORT2.PMCn.BIT.PMCn10 = 1; PORT2.PFCn.BIT.PFCn10 = 1; PORT2.PFCEn.BIT.PFCEn10 = 0; PORT2.PFCAEn.BIT.PFCAEn10 = 0; PORT2.PIPCn.BIT.PIPCn10 = 1;
   //Configure ET_RXD3 (P2_11)
   PORT2.PMCn.BIT.PMCn11 = 1; PORT2.PFCn.BIT.PFCn11 = 1; PORT2.PFCEn.BIT.PFCEn11 = 0; PORT2.PFCAEn.BIT.PFCAEn11 = 0; PORT2.PIPCn.BIT.PIPCn11 = 1;
   //Configure ET_MDIO (P3_3)
   PORT3.PMCn.BIT.PMCn3 = 1; PORT3.PFCn.BIT.PFCn3 = 1; PORT3.PFCEn.BIT.PFCEn3 = 0; PORT3.PFCAEn.BIT.PFCAEn3 = 0; PORT3.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_RXCLK (P3_4)
   PORT3.PMCn.BIT.PMCn4 = 1; PORT3.PFCn.BIT.PFCn4 = 1; PORT3.PFCEn.BIT.PFCEn4 = 0; PORT3.PFCAEn.BIT.PFCAEn4 = 0; PORT3.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_RXER (P3_5)
   PORT3.PMCn.BIT.PMCn5 = 1; PORT3.PFCn.BIT.PFCn5 = 1; PORT3.PFCEn.BIT.PFCEn5 = 0; PORT3.PFCAEn.BIT.PFCAEn5 = 0; PORT3.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_RXDV (P3_6)
   PORT3.PMCn.BIT.PMCn6 = 1; PORT3.PFCn.BIT.PFCn6 = 1; PORT3.PFCEn.BIT.PFCEn6 = 0; PORT3.PFCAEn.BIT.PFCAEn6 = 0; PORT3.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_MDC (P7_0) — note: unlike the RSK board, MDC uses PFCE=1
   PORT7.PMCn.BIT.PMCn0 = 1; PORT7.PFCn.BIT.PFCn0 = 0; PORT7.PFCEn.BIT.PFCEn0 = 1; PORT7.PFCAEn.BIT.PFCAEn0 = 0; PORT7.PIPCn.BIT.PIPCn0 = 1;

//Stream it! RZ evaluation board?
#elif defined(USE_STREAM_IT_RZ)
   //Configure ET_TXD0 (P8_0)
   PORT8.PMCn.BIT.PMCn0 = 1; PORT8.PFCn.BIT.PFCn0 = 1; PORT8.PFCEn.BIT.PFCEn0 = 0; PORT8.PFCAEn.BIT.PFCAEn0 = 0; PORT8.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_TXD1 (P8_1)
   PORT8.PMCn.BIT.PMCn1 = 1; PORT8.PFCn.BIT.PFCn1 = 1; PORT8.PFCEn.BIT.PFCEn1 = 0; PORT8.PFCAEn.BIT.PFCAEn1 = 0; PORT8.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_TXD2 (P8_2)
   PORT8.PMCn.BIT.PMCn2 = 1; PORT8.PFCn.BIT.PFCn2 = 1; PORT8.PFCEn.BIT.PFCEn2 = 0; PORT8.PFCAEn.BIT.PFCAEn2 = 0; PORT8.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_TXD3 (P8_3)
   PORT8.PMCn.BIT.PMCn3 = 1; PORT8.PFCn.BIT.PFCn3 = 1; PORT8.PFCEn.BIT.PFCEn3 = 0; PORT8.PFCAEn.BIT.PFCAEn3 = 0; PORT8.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_TXCLK (P8_4)
   PORT8.PMCn.BIT.PMCn4 = 1; PORT8.PFCn.BIT.PFCn4 = 1; PORT8.PFCEn.BIT.PFCEn4 = 0; PORT8.PFCAEn.BIT.PFCAEn4 = 0; PORT8.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_TXER (P8_5)
   PORT8.PMCn.BIT.PMCn5 = 1; PORT8.PFCn.BIT.PFCn5 = 1; PORT8.PFCEn.BIT.PFCEn5 = 0; PORT8.PFCAEn.BIT.PFCAEn5 = 0; PORT8.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_TXEN (P8_6)
   PORT8.PMCn.BIT.PMCn6 = 1; PORT8.PFCn.BIT.PFCn6 = 1; PORT8.PFCEn.BIT.PFCEn6 = 0; PORT8.PFCAEn.BIT.PFCAEn6 = 0; PORT8.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_RXD0 (P8_7)
   PORT8.PMCn.BIT.PMCn7 = 1; PORT8.PFCn.BIT.PFCn7 = 1; PORT8.PFCEn.BIT.PFCEn7 = 0; PORT8.PFCAEn.BIT.PFCAEn7 = 0; PORT8.PIPCn.BIT.PIPCn7 = 1;
   //Configure ET_RXD1 (P8_8)
   PORT8.PMCn.BIT.PMCn8 = 1; PORT8.PFCn.BIT.PFCn8 = 1; PORT8.PFCEn.BIT.PFCEn8 = 0; PORT8.PFCAEn.BIT.PFCAEn8 = 0; PORT8.PIPCn.BIT.PIPCn8 = 1;
   //Configure ET_RXD2 (P8_9)
   PORT8.PMCn.BIT.PMCn9 = 1; PORT8.PFCn.BIT.PFCn9 = 1; PORT8.PFCEn.BIT.PFCEn9 = 0; PORT8.PFCAEn.BIT.PFCAEn9 = 0; PORT8.PIPCn.BIT.PIPCn9 = 1;
   //Configure ET_RXD3 (P8_10)
   PORT8.PMCn.BIT.PMCn10 = 1; PORT8.PFCn.BIT.PFCn10 = 1; PORT8.PFCEn.BIT.PFCEn10 = 0; PORT8.PFCAEn.BIT.PFCAEn10 = 0; PORT8.PIPCn.BIT.PIPCn10 = 1;
   //Configure ET_COL (P8_14)
   PORT8.PMCn.BIT.PMCn14 = 1; PORT8.PFCn.BIT.PFCn14 = 1; PORT8.PFCEn.BIT.PFCEn14 = 0; PORT8.PFCAEn.BIT.PFCAEn14 = 0; PORT8.PIPCn.BIT.PIPCn14 = 1;
   //Configure ET_CRS (P8_15)
   PORT8.PMCn.BIT.PMCn15 = 1; PORT8.PFCn.BIT.PFCn15 = 1; PORT8.PFCEn.BIT.PFCEn15 = 0; PORT8.PFCAEn.BIT.PFCAEn15 = 0; PORT8.PIPCn.BIT.PIPCn15 = 1;
   //Configure ET_MDC (P9_0)
   PORT9.PMCn.BIT.PMCn0 = 1; PORT9.PFCn.BIT.PFCn0 = 1; PORT9.PFCEn.BIT.PFCEn0 = 0; PORT9.PFCAEn.BIT.PFCAEn0 = 0; PORT9.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_MDIO (P9_1)
   PORT9.PMCn.BIT.PMCn1 = 1; PORT9.PFCn.BIT.PFCn1 = 1; PORT9.PFCEn.BIT.PFCEn1 = 0; PORT9.PFCAEn.BIT.PFCAEn1 = 0; PORT9.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_RXCLK (P9_2)
   PORT9.PMCn.BIT.PMCn2 = 1; PORT9.PFCn.BIT.PFCn2 = 1; PORT9.PFCEn.BIT.PFCEn2 = 0; PORT9.PFCAEn.BIT.PFCAEn2 = 0; PORT9.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_RXER (P9_3)
   PORT9.PMCn.BIT.PMCn3 = 1; PORT9.PFCn.BIT.PFCn3 = 1; PORT9.PFCEn.BIT.PFCEn3 = 0; PORT9.PFCAEn.BIT.PFCAEn3 = 0; PORT9.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_RXDV (P9_4)
   PORT9.PMCn.BIT.PMCn4 = 1; PORT9.PFCn.BIT.PFCn4 = 1; PORT9.PFCEn.BIT.PFCEn4 = 0; PORT9.PFCAEn.BIT.PFCAEn4 = 0; PORT9.PIPCn.BIT.PIPCn4 = 1;

   //Configure PHY_RST (P2_7) as a general-purpose output
   PORT2.PMCn.BIT.PMCn7 = 0; PORT2.PIPCn.BIT.PIPCn7 = 0; PORT2.PMn.BIT.PMn7 = 0;

   //Reset the PHY transceiver (10 ms low pulse, then 10 ms settle time)
   PORT2.Pn.BIT.Pn7 = 0; sleep(10);
   PORT2.Pn.BIT.Pn7 = 1; sleep(10);
#endif
}

#endif


/**
 * @brief Initialize DMA descriptor lists
 * @param[in] interface Underlying network interface
 **/

void rza1EthInitDmaDesc(NetInterface *interface)
{
   uint_t i;

   //Initialize TX descriptors
   for(i = 0; i < RZA1_ETH_TX_BUFFER_COUNT; i++)
   {
      //The descriptor is initially owned by the application
      txDmaDesc[i].td0 = 0;
      //Transmit buffer length
      txDmaDesc[i].td1 = 0;
      //Transmit buffer address
      txDmaDesc[i].td2 = (uint32_t) txBuffer[i];
      //Clear padding field
      txDmaDesc[i].padding = 0;
   }

   //Mark the last descriptor entry with the TDLE flag
   txDmaDesc[i - 1].td0 |= ETHER_TD0_TDLE;
   //Initialize TX descriptor index
   txIndex = 0;

   //Initialize RX descriptors
   for(i = 0; i < RZA1_ETH_RX_BUFFER_COUNT; i++)
   {
      //The descriptor is initially owned by the DMA
      rxDmaDesc[i].rd0 =
ETHER_RD0_RACT;
      //Receive buffer length
      rxDmaDesc[i].rd1 = (RZA1_ETH_RX_BUFFER_SIZE << 16) & ETHER_RD1_RBL;
      //Receive buffer address
      rxDmaDesc[i].rd2 = (uint32_t) rxBuffer[i];
      //Clear padding field
      rxDmaDesc[i].padding = 0;
   }

   //Mark the last descriptor entry with the RDLE flag
   rxDmaDesc[i - 1].rd0 |= ETHER_RD0_RDLE;
   //Initialize RX descriptor index
   rxIndex = 0;

   //Address of the first TX descriptor
   ETHER.TDLAR0 = (uint32_t) &txDmaDesc[0];
   ETHER.TDFAR0 = (uint32_t) &txDmaDesc[0];
   //Address of the last TX descriptor
   ETHER.TDFXR0 = (uint32_t) &txDmaDesc[RZA1_ETH_TX_BUFFER_COUNT - 1];
   //Set TDLF flag
   ETHER.TDFFR0 = ETHER_TDFFR_TDLF;

   //Address of the first RX descriptor
   ETHER.RDLAR0 = (uint32_t) &rxDmaDesc[0];
   ETHER.RDFAR0 = (uint32_t) &rxDmaDesc[0];
   //Address of the last RX descriptor
   ETHER.RDFXR0 = (uint32_t) &rxDmaDesc[RZA1_ETH_RX_BUFFER_COUNT - 1];
   //Set RDLF flag
   ETHER.RDFFR0 = ETHER_RDFFR0_RDLF;
}


/**
 * @brief RZ/A1 Ethernet MAC timer handler
 *
 * This routine is periodically called by the TCP/IP stack to handle periodic
 * operations such as polling the link state
 *
 * @param[in] interface Underlying network interface
 **/

void rza1EthTick(NetInterface *interface)
{
   //Valid Ethernet PHY or switch driver?
   if(interface->phyDriver != NULL)
   {
      //Handle periodic operations
      interface->phyDriver->tick(interface);
   }
   else if(interface->switchDriver != NULL)
   {
      //Handle periodic operations
      interface->switchDriver->tick(interface);
   }
   else
   {
      //Just for sanity
   }
}


/**
 * @brief Enable interrupts
 * @param[in] interface Underlying network interface
 **/

void rza1EthEnableIrq(NetInterface *interface)
{
   //Enable Ethernet MAC interrupts
   R_INTC_Enable(INTC_ID_ETHERI);

   //Valid Ethernet PHY or switch driver?
   if(interface->phyDriver != NULL)
   {
      //Enable Ethernet PHY interrupts
      interface->phyDriver->enableIrq(interface);
   }
   else if(interface->switchDriver != NULL)
   {
      //Enable Ethernet switch interrupts
      interface->switchDriver->enableIrq(interface);
   }
   else
   {
      //Just for sanity
   }
}


/**
 * @brief Disable interrupts
 * @param[in] interface Underlying network interface
 **/

void rza1EthDisableIrq(NetInterface *interface)
{
   //Disable Ethernet MAC interrupts
   R_INTC_Disable(INTC_ID_ETHERI);

   //Valid Ethernet PHY or switch driver?
   if(interface->phyDriver != NULL)
   {
      //Disable Ethernet PHY interrupts
      interface->phyDriver->disableIrq(interface);
   }
   else if(interface->switchDriver != NULL)
   {
      //Disable Ethernet switch interrupts
      interface->switchDriver->disableIrq(interface);
   }
   else
   {
      //Just for sanity
   }
}


/**
 * @brief RZ/A1 Ethernet MAC interrupt service routine
 * @param[in] intSense Unused parameter
 **/

void rza1EthIrqHandler(uint32_t intSense)
{
   bool_t flag;
   uint32_t status;

   //Interrupt service routine prologue
   osEnterIsr();

   //This flag will be set if a higher priority task must be woken
   flag = FALSE;

   //Read interrupt status register
   status = ETHER.EESR0;

   //Packet transmitted?
   if((status & ETHER_EESR0_TWB) != 0)
   {
      //Clear TWB interrupt flag
      ETHER.EESR0 = ETHER_EESR0_TWB;

      //Check whether the TX buffer is available for writing
      if((txDmaDesc[txIndex].td0 & ETHER_TD0_TACT) == 0)
      {
         //Notify the TCP/IP stack that the transmitter is ready to send
         flag |= osSetEventFromIsr(&nicDriverInterface->nicTxEvent);
      }
   }

   //Packet received?
   if((status & ETHER_EESR0_FR) != 0)
   {
      //Disable FR interrupts (re-enabled by the event handler once all
      //pending frames have been processed)
      ETHER.EESIPR0 &= ~ETHER_EESIPR0_FRIP;

      //Set event flag
      nicDriverInterface->nicEvent = TRUE;
      //Notify the TCP/IP stack of the event
      flag |= osSetEventFromIsr(&netEvent);
   }

   //Interrupt service routine epilogue
   osExitIsr(flag);
}


/**
 * @brief RZ/A1 Ethernet MAC event handler
 * @param[in] interface Underlying network interface
 **/

void rza1EthEventHandler(NetInterface *interface)
{
   error_t error;

   //Packet received?
   if((ETHER.EESR0 & ETHER_EESR0_FR) != 0)
   {
      //Clear FR interrupt flag
      ETHER.EESR0 = ETHER_EESR0_FR;

      //Process all pending packets
      do
      {
         //Read incoming packet
         error = rza1EthReceivePacket(interface);

         //No more data in the receive buffer?
      } while(error != ERROR_BUFFER_EMPTY);
   }

   //Re-enable EDMAC interrupts
   ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP;
}


/**
 * @brief Send a packet
 * @param[in] interface Underlying network interface
 * @param[in] buffer Multi-part buffer containing the data to send
 * @param[in] offset Offset to the first data byte
 * @param[in] ancillary Additional options passed to the stack along with
 *   the packet
 * @return Error code
 **/

error_t rza1EthSendPacket(NetInterface *interface,
   const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary)
{
   //Retrieve the length of the packet
   size_t length = netBufferGetLength(buffer) - offset;

   //Check the frame length
   if(length > RZA1_ETH_TX_BUFFER_SIZE)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
      //Report an error
      return ERROR_INVALID_LENGTH;
   }

   //Make sure the current buffer is available for writing
   if((txDmaDesc[txIndex].td0 & ETHER_TD0_TACT) != 0)
   {
      return ERROR_FAILURE;
   }

   //Copy user data to the transmit buffer
   netBufferRead(txBuffer[txIndex], buffer, offset, length);

   //Write the number of bytes to send (length is held in the upper half of TD1)
   txDmaDesc[txIndex].td1 = (length << 16) & ETHER_TD1_TDL;

   //Check current index
   if(txIndex < (RZA1_ETH_TX_BUFFER_COUNT - 1))
   {
      //Give the ownership of the descriptor to the DMA engine
      txDmaDesc[txIndex].td0 = ETHER_TD0_TACT | ETHER_TD0_TFP_SOF |
         ETHER_TD0_TFP_EOF | ETHER_TD0_TWBI;

      //Point to the next descriptor
      txIndex++;
   }
   else
   {
      //Give the ownership of the descriptor to the DMA engine
      txDmaDesc[txIndex].td0 = ETHER_TD0_TACT | ETHER_TD0_TDLE |
         ETHER_TD0_TFP_SOF | ETHER_TD0_TFP_EOF | ETHER_TD0_TWBI;

      //Wrap around
      txIndex = 0;
   }

   //Instruct the DMA to poll the transmit descriptor list
   ETHER.EDTRR0 = ETHER_EDTRR0_TR;

   //Check whether the next buffer is available for writing
   if((txDmaDesc[txIndex].td0 & ETHER_TD0_TACT) == 0)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
   }

   //Successful write operation
   return NO_ERROR;
}


/**
 * @brief Receive a packet
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t rza1EthReceivePacket(NetInterface *interface)
{
   static uint8_t temp[RZA1_ETH_RX_BUFFER_SIZE];
   error_t error;
   size_t n;
   NetRxAncillary ancillary;

   //The current buffer is available for reading?
   if((rxDmaDesc[rxIndex].rd0 & ETHER_RD0_RACT) == 0)
   {
      //SOF and EOF flags should be set
      if((rxDmaDesc[rxIndex].rd0 & ETHER_RD0_RFP_SOF) != 0 &&
         (rxDmaDesc[rxIndex].rd0 & ETHER_RD0_RFP_EOF) != 0)
      {
         //Make sure no error occurred (RMAF, the multicast-address flag,
         //is not treated as an error)
         if(!(rxDmaDesc[rxIndex].rd0 & (ETHER_RD0_RFS_MASK & ~ETHER_RD0_RFS_RMAF)))
         {
            //Retrieve the length of the frame
            n = rxDmaDesc[rxIndex].rd1 & ETHER_RD1_RDL;
            //Limit the number of data to read
            n = MIN(n, RZA1_ETH_RX_BUFFER_SIZE);

            //Copy data from the receive buffer
            osMemcpy(temp, rxBuffer[rxIndex], n);

            //Additional options can be passed to the stack along with the packet
            ancillary = NET_DEFAULT_RX_ANCILLARY;

            //Pass the packet to the upper layer
            nicProcessPacket(interface, temp, n, &ancillary);

            //Valid packet received
            error = NO_ERROR;
         }
         else
         {
            //The received packet contains an error
            error = ERROR_INVALID_PACKET;
         }
      }
      else
      {
         //The packet is not valid
         error = ERROR_INVALID_PACKET;
      }

      //Check current index
      if(rxIndex < (RZA1_ETH_RX_BUFFER_COUNT - 1))
      {
         //Give the ownership of the descriptor back to the DMA
         rxDmaDesc[rxIndex].rd0 = ETHER_RD0_RACT;
         //Point to the next descriptor
         rxIndex++;
      }
      else
      {
         //Give the ownership of the descriptor back to the DMA
         rxDmaDesc[rxIndex].rd0 = ETHER_RD0_RACT | ETHER_RD0_RDLE;
         //Wrap around
         rxIndex = 0;
      }

      //Instruct the DMA to poll the receive descriptor list
      ETHER.EDRRR0 = ETHER_EDRRR0_RR;
   }
   else
   {
      //No more data in the receive buffer
      error = ERROR_BUFFER_EMPTY;
   }

   //Return status code
   return error;
}


/**
 * @brief
Configure MAC address filtering
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t rza1EthUpdateMacAddrFilter(NetInterface *interface)
{
   uint_t i;
   volatile uint32_t *addrHigh;
   volatile uint32_t *addrLow;
   MacFilterEntry *entry;

   //Debug message
   TRACE_DEBUG("Updating MAC filter...\r\n");

   //Set the upper 32 bits of the MAC address
   ETHER.MAHR0 = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) |
      (interface->macAddr.b[2] << 8) | interface->macAddr.b[3];

   //Set the lower 16 bits of the MAC address
   ETHER.MALR0 = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5];

   //The MAC address filter contains the list of MAC addresses to accept
   //when receiving an Ethernet frame (one CAM entry per address, 32 max)
   for(i = 0; i < MAC_ADDR_FILTER_SIZE && i < 32; i++)
   {
      //Point to the current entry
      entry = &interface->macAddrFilter[i];

      //Valid entry?
      if(entry->refCount > 0)
      {
         //Debug message
         TRACE_DEBUG(" %s\r\n", macAddrToString(&entry->addr, NULL));

         //Point to the CAM entry registers (each entry is a high/low pair)
         addrHigh = &ETHER.TSU_ADRH0 + 2 * i;
         addrLow = &ETHER.TSU_ADRL0 + 2 * i;

         //The contents of the CAM entry table registers cannot be
         //modified while the ADSBSY flag is set
         while((ETHER.TSU_ADSBSY & ETHER_TSU_ADSBSY_ADSBSY) != 0)
         {
         }

         //Set the upper 32 bits of the MAC address
         *addrHigh = (entry->addr.b[0] << 24) | (entry->addr.b[1] << 16) |
            (entry->addr.b[2] << 8) | entry->addr.b[3];

         //Wait for the ADSBSY flag to be cleared
         while((ETHER.TSU_ADSBSY & ETHER_TSU_ADSBSY_ADSBSY) != 0)
         {
         }

         //Set the lower 16 bits of the MAC address
         *addrLow = (entry->addr.b[4] << 8) | entry->addr.b[5];

         //Enable the CAM entry
         ETHER.TSU_TEN |= 1 << (31 - i);
      }
      else
      {
         //Disable the CAM entry
         ETHER.TSU_TEN &= ~(1 << (31 - i));
      }
   }

   //Successful processing
   return NO_ERROR;
}


/**
 * @brief Adjust MAC configuration parameters for proper operation
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t rza1EthUpdateMacConfig(NetInterface *interface)
{
   //Half-duplex or full-duplex mode?
   if(interface->duplexMode == NIC_FULL_DUPLEX_MODE)
   {
      ETHER.ECMR0 |= ETH_ECMR0_DM;
   }
   else
   {
      ETHER.ECMR0 &= ~ETH_ECMR0_DM;
   }

   //Successful processing
   return NO_ERROR;
}


/**
 * @brief Write PHY register
 * @param[in] opcode Access type (2 bits)
 * @param[in] phyAddr PHY address (5 bits)
 * @param[in] regAddr Register address (5 bits)
 * @param[in] data Register value
 **/

void rza1EthWritePhyReg(uint8_t opcode, uint8_t phyAddr,
   uint8_t regAddr, uint16_t data)
{
   //Synchronization pattern
   rza1EthWriteSmi(SMI_SYNC, 32);
   //Start of frame
   rza1EthWriteSmi(SMI_START, 2);
   //Set up a write operation
   rza1EthWriteSmi(opcode, 2);
   //Write PHY address
   rza1EthWriteSmi(phyAddr, 5);
   //Write register address
   rza1EthWriteSmi(regAddr, 5);
   //Turnaround
   rza1EthWriteSmi(SMI_TA, 2);
   //Write register value
   rza1EthWriteSmi(data, 16);
   //Release MDIO
   rza1EthReadSmi(1);
}


/**
 * @brief Read PHY register
 * @param[in] opcode Access type (2 bits)
 * @param[in] phyAddr PHY address (5 bits)
 * @param[in] regAddr Register address (5 bits)
 * @return Register value
 **/

uint16_t rza1EthReadPhyReg(uint8_t opcode, uint8_t phyAddr,
   uint8_t regAddr)
{
   uint16_t data;

   //Synchronization pattern
   rza1EthWriteSmi(SMI_SYNC, 32);
   //Start of frame
   rza1EthWriteSmi(SMI_START, 2);
   //Set up a read operation
   rza1EthWriteSmi(opcode, 2);
   //Write PHY address
   rza1EthWriteSmi(phyAddr, 5);
   //Write register address
   rza1EthWriteSmi(regAddr, 5);

   //Turnaround to avoid contention
   rza1EthReadSmi(1);
   //Read register value
   data = rza1EthReadSmi(16);
   //Force the PHY to release the MDIO pin
   rza1EthReadSmi(1);

   //Return PHY register contents
   return data;
}


/**
 * @brief SMI write operation (software bit-banged MDIO, MSB first)
 * @param[in] data Raw data to be written
 * @param[in] length Number of bits to be written
 **/

void rza1EthWriteSmi(uint32_t data, uint_t length)
{
   //Skip the most significant bits since they are meaningless
   data <<= 32 - length;

   //Configure MDIO as an output
   ETHER.PIR0 |= ETHER_PIR0_MMD;

   //Write the specified number of bits
   while(length--)
   {
      //Write MDIO
      if((data & 0x80000000) != 0)
      {
         ETHER.PIR0 |= ETHER_PIR0_MDO;
      }
      else
      {
         ETHER.PIR0 &= ~ETHER_PIR0_MDO;
      }

      //Assert MDC
      usleep(1);
      ETHER.PIR0 |= ETHER_PIR0_MDC;
      //Deassert MDC
      usleep(1);
      ETHER.PIR0 &= ~ETHER_PIR0_MDC;

      //Rotate data
      data <<= 1;
   }
}


/**
 * @brief SMI read operation (software bit-banged MDIO, MSB first)
 * @param[in] length Number of bits to be read
 * @return Data resulting from the MDIO read operation
 **/

uint32_t rza1EthReadSmi(uint_t length)
{
   uint32_t data = 0;

   //Configure MDIO as an input
   ETHER.PIR0 &= ~ETHER_PIR0_MMD;

   //Read the specified number of bits
   while(length--)
   {
      //Rotate data
      data <<= 1;

      //Assert MDC
      ETHER.PIR0 |= ETHER_PIR0_MDC;
      usleep(1);
      //Deassert MDC
      ETHER.PIR0 &= ~ETHER_PIR0_MDC;
      usleep(1);

      //Check MDIO state
      if((ETHER.PIR0 & ETHER_PIR0_MDI) != 0)
      {
         data |= 0x01;
      }
   }

   //Return the received data
   return data;
}
/**
 * @file rza1_eth_driver.c
 * @brief Renesas RZ/A1 Ethernet MAC driver
 *
 * @section License
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.
 *
 * This file is part of CycloneTCP Open.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * @author Oryx Embedded SARL (www.oryx-embedded.com)
 * @version 2.0.2
 **/

//Switch to the appropriate trace level
#define TRACE_LEVEL NIC_TRACE_LEVEL

//Dependencies
#include "iodefine.h"
#include "cpg_iobitmask.h"
#include "intc.h"
#include "core/net.h"
#include "drivers/mac/rza1_eth_driver.h"
#include "debug.h"

//Underlying network interface
static NetInterface *nicDriverInterface;

//IAR EWARM compiler?
#if defined(__ICCARM__)

//Transmit buffer
#pragma data_alignment = 32
#pragma location = RZA1_ETH_RAM_SECTION
static uint8_t txBuffer[RZA1_ETH_TX_BUFFER_COUNT][RZA1_ETH_TX_BUFFER_SIZE];
//Receive buffer
#pragma data_alignment = 32
#pragma location = RZA1_ETH_RAM_SECTION
static uint8_t rxBuffer[RZA1_ETH_RX_BUFFER_COUNT][RZA1_ETH_RX_BUFFER_SIZE];
//Transmit DMA descriptors
#pragma data_alignment = 32
#pragma location = RZA1_ETH_RAM_SECTION
static Rza1TxDmaDesc txDmaDesc[RZA1_ETH_TX_BUFFER_COUNT];
//Receive DMA descriptors
#pragma data_alignment = 32
#pragma location = RZA1_ETH_RAM_SECTION
static Rza1RxDmaDesc rxDmaDesc[RZA1_ETH_RX_BUFFER_COUNT];

//ARM or GCC compiler?
#else

//Transmit buffer
static uint8_t txBuffer[RZA1_ETH_TX_BUFFER_COUNT][RZA1_ETH_TX_BUFFER_SIZE]
   __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));
//Receive buffer
static uint8_t rxBuffer[RZA1_ETH_RX_BUFFER_COUNT][RZA1_ETH_RX_BUFFER_SIZE]
   __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));
//Transmit DMA descriptors
static Rza1TxDmaDesc txDmaDesc[RZA1_ETH_TX_BUFFER_COUNT]
   __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));
//Receive DMA descriptors
static Rza1RxDmaDesc rxDmaDesc[RZA1_ETH_RX_BUFFER_COUNT]
   __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));

#endif

//Current transmit descriptor
static uint_t txIndex;
//Current receive descriptor
static uint_t rxIndex;


/**
 * @brief RZ/A1 Ethernet MAC driver
 **/

const NicDriver rza1EthDriver =
{
   NIC_TYPE_ETHERNET,
   ETH_MTU,
   rza1EthInit,
   rza1EthTick,
   rza1EthEnableIrq,
   rza1EthDisableIrq,
   rza1EthEventHandler,
   rza1EthSendPacket,
   rza1EthUpdateMacAddrFilter,
   rza1EthUpdateMacConfig,
   rza1EthWritePhyReg,
   rza1EthReadPhyReg,
   TRUE,
   TRUE,
   TRUE,
   TRUE
};


/**
 * @brief RZ/A1 Ethernet MAC initialization
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t rza1EthInit(NetInterface *interface)
{
   error_t error;

   //Debug message
   TRACE_INFO("Initializing RZ/A1 Ethernet MAC...\r\n");

   //Save underlying network interface
   nicDriverInterface = interface;

   //Enable Ethernet peripheral clock
   CPG.STBCR7 &= ~CPG_STBCR7_MSTP74;

   //GPIO configuration
   rza1EthInitGpio(interface);

   //Perform software reset
   ETHER.ARSTR = ETHER_ARSTR_ARST;
   //Wait for the reset to complete
   sleep(10);

   //Start EDMAC transmitting and receiving units
   ETHER.EDSR0 = ETHER_EDSR0_ENT | ETHER_EDSR0_ENR;

   //To execute a software reset with this register, 1 must be
   //written to both the SWRT and SWRR bits simultaneously
   ETHER.EDMR0 = ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR;

   //Wait for the reset to complete
   while(ETHER.EDMR0 & (ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR))
   {
   }

   //Valid Ethernet PHY or switch driver?
   if(interface->phyDriver != NULL)
   {
      //Ethernet PHY initialization
      error = interface->phyDriver->init(interface);
   }
   else if(interface->switchDriver != NULL)
   {
      //Ethernet switch initialization
      error = interface->switchDriver->init(interface);
   }
   else
   {
      //The interface is not properly configured
      error = ERROR_FAILURE;
   }

   //Any error to report?
   if(error)
   {
      return error;
   }

   //Initialize DMA descriptor lists
   rza1EthInitDmaDesc(interface);

   //Select little endian mode and set descriptor length (16 bytes)
   ETHER.EDMR0 = ETHER_EDMR0_DE | ETHER_EDMR0_DL_16;

   //Error masks
   ETHER.TRSCER0 = 0;
   //Use store and forward mode
   ETHER.TFTR0 = 0;
   //Set transmit FIFO size and receive FIFO size (2048 bytes)
   ETHER.FDR0 = ETHER_FDR0_TFD_2048 | ETHER_FDR0_RFD_2048;
   //Enable continuous reception of multiple frames
   ETHER.RMCR0 = ETHER_RMCR0_RNC;
   //No padding insertion into receive data
   ETHER.RPADIR0 = 0;
   //Receive FIFO threshold (8 frames or 2048-64 bytes)
   ETHER.FCFTR0 = ETHER_FCFTR0_RFF_8 | ETHER_FCFTR0_RFD_2048;
   //Intelligent checksum operation mode
   ETHER.CSMR = 0;

   //Enable multicast address filtering
   ETHER.ECMR0 |= ETH_ECMR0_MCT;

   //Set the upper 32 bits of the MAC address
   ETHER.MAHR0 = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) |
      (interface->macAddr.b[2] << 8) | interface->macAddr.b[3];

   //Set the lower 16 bits of the MAC address
   ETHER.MALR0 = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5];

   //Disable all CAM entries
   ETHER.TSU_TEN = 0;

   //Maximum frame length that can be accepted
   ETHER.RFLR0 = RZA1_ETH_RX_BUFFER_SIZE;
   //Automatic pause frame
   ETHER.APR0 = 0;
   //Manual pause frame
   ETHER.MPR0 = 0;
   //Automatic pause frame retransmit count
   ETHER.TPAUSER0 = 0;

   //Disable all EMAC interrupts
   ETHER.ECSIPR0 = 0;
   //Enable the desired EDMAC interrupts
   ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP;

   //Register interrupt handler
   R_INTC_Regist_Int_Func(INTC_ID_ETHERI, rza1EthIrqHandler);
   //Configure interrupt priority
   R_INTC_Set_Priority(INTC_ID_ETHERI, RZA1_ETH_IRQ_PRIORITY);

   //Enable EDMAC transmission and reception
   ETHER.ECMR0 |= ETH_ECMR0_RE | ETH_ECMR0_TE;

   //Instruct the DMA to poll the receive descriptor list
   ETHER.EDRRR0 = ETHER_EDRRR0_RR;

   //Accept any packets from the upper layer
   osSetEvent(&interface->nicTxEvent);

   //Successful initialization
   return NO_ERROR;
}


//RSK-RZ/A1H, Stream it! RZ, Hachiko or VK-RZ/A1H evaluation board?
#if defined(USE_RSK_RZA1H) || defined(USE_STREAM_IT_RZ) || \
   defined(USE_HACHIKO) || defined(USE_VK_RZA1H)

/**
 * @brief GPIO configuration
 * @param[in] interface Underlying network interface
 **/

void rza1EthInitGpio(NetInterface *interface)
{
//RSK RZ/A1H or Hachiko evaluation board?
#if defined(USE_RSK_RZA1H) || defined(USE_HACHIKO)
   //Configure ET_COL (P1_14)
   PORT1.PMCn.BIT.PMCn14 = 1; PORT1.PFCn.BIT.PFCn14 = 1; PORT1.PFCEn.BIT.PFCEn14 = 1; PORT1.PFCAEn.BIT.PFCAEn14 = 0; PORT1.PIPCn.BIT.PIPCn14 = 1;
   //Configure ET_TXCLK (P2_0)
   PORT2.PMCn.BIT.PMCn0 = 1; PORT2.PFCn.BIT.PFCn0 = 1; PORT2.PFCEn.BIT.PFCEn0 = 0; PORT2.PFCAEn.BIT.PFCAEn0 = 0; PORT2.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_TXER (P2_1)
   PORT2.PMCn.BIT.PMCn1 = 1; PORT2.PFCn.BIT.PFCn1 = 1; PORT2.PFCEn.BIT.PFCEn1 = 0; PORT2.PFCAEn.BIT.PFCAEn1 = 0; PORT2.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_TXEN (P2_2)
   PORT2.PMCn.BIT.PMCn2 = 1; PORT2.PFCn.BIT.PFCn2 = 1; PORT2.PFCEn.BIT.PFCEn2 = 0; PORT2.PFCAEn.BIT.PFCAEn2 = 0; PORT2.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_CRS (P2_3)
   PORT2.PMCn.BIT.PMCn3 = 1; PORT2.PFCn.BIT.PFCn3 = 1; PORT2.PFCEn.BIT.PFCEn3 = 0; PORT2.PFCAEn.BIT.PFCAEn3 = 0; PORT2.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_TXD0 (P2_4)
   PORT2.PMCn.BIT.PMCn4 = 1; PORT2.PFCn.BIT.PFCn4 = 1; PORT2.PFCEn.BIT.PFCEn4 = 0; PORT2.PFCAEn.BIT.PFCAEn4 = 0; PORT2.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_TXD1 (P2_5)
   PORT2.PMCn.BIT.PMCn5 = 1; PORT2.PFCn.BIT.PFCn5 = 1; PORT2.PFCEn.BIT.PFCEn5 = 0; PORT2.PFCAEn.BIT.PFCAEn5 = 0; PORT2.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_TXD2 (P2_6)
   PORT2.PMCn.BIT.PMCn6 = 1; PORT2.PFCn.BIT.PFCn6 = 1; PORT2.PFCEn.BIT.PFCEn6 = 0; PORT2.PFCAEn.BIT.PFCAEn6 = 0; PORT2.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_TXD3 (P2_7)
   PORT2.PMCn.BIT.PMCn7 = 1; PORT2.PFCn.BIT.PFCn7 = 1; PORT2.PFCEn.BIT.PFCEn7 = 0; PORT2.PFCAEn.BIT.PFCAEn7 = 0; PORT2.PIPCn.BIT.PIPCn7 = 1;
   //Configure ET_RXD0 (P2_8)
   PORT2.PMCn.BIT.PMCn8 = 1; PORT2.PFCn.BIT.PFCn8 = 1; PORT2.PFCEn.BIT.PFCEn8 = 0; PORT2.PFCAEn.BIT.PFCAEn8 = 0; PORT2.PIPCn.BIT.PIPCn8 = 1;
   //Configure ET_RXD1 (P2_9)
   PORT2.PMCn.BIT.PMCn9 = 1; PORT2.PFCn.BIT.PFCn9 = 1; PORT2.PFCEn.BIT.PFCEn9 = 0; PORT2.PFCAEn.BIT.PFCAEn9 = 0; PORT2.PIPCn.BIT.PIPCn9 = 1;
   //Configure ET_RXD2 (P2_10)
   PORT2.PMCn.BIT.PMCn10 = 1; PORT2.PFCn.BIT.PFCn10 = 1; PORT2.PFCEn.BIT.PFCEn10 = 0; PORT2.PFCAEn.BIT.PFCAEn10 = 0; PORT2.PIPCn.BIT.PIPCn10 = 1;
   //Configure ET_RXD3 (P2_11)
   PORT2.PMCn.BIT.PMCn11 = 1; PORT2.PFCn.BIT.PFCn11 = 1; PORT2.PFCEn.BIT.PFCEn11 = 0; PORT2.PFCAEn.BIT.PFCAEn11 = 0; PORT2.PIPCn.BIT.PIPCn11 = 1;
   //Configure ET_MDIO (P3_3)
   PORT3.PMCn.BIT.PMCn3 = 1; PORT3.PFCn.BIT.PFCn3 = 1; PORT3.PFCEn.BIT.PFCEn3 = 0; PORT3.PFCAEn.BIT.PFCAEn3 = 0; PORT3.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_RXCLK (P3_4)
   PORT3.PMCn.BIT.PMCn4 = 1; PORT3.PFCn.BIT.PFCn4 = 1; PORT3.PFCEn.BIT.PFCEn4 = 0; PORT3.PFCAEn.BIT.PFCAEn4 = 0; PORT3.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_RXER (P3_5)
   PORT3.PMCn.BIT.PMCn5 = 1; PORT3.PFCn.BIT.PFCn5 = 1; PORT3.PFCEn.BIT.PFCEn5 = 0; PORT3.PFCAEn.BIT.PFCAEn5 = 0; PORT3.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_RXDV (P3_6)
   PORT3.PMCn.BIT.PMCn6 = 1; PORT3.PFCn.BIT.PFCn6 = 1; PORT3.PFCEn.BIT.PFCEn6 = 0; PORT3.PFCAEn.BIT.PFCAEn6 = 0; PORT3.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_MDC (P5_9)
   PORT5.PMCn.BIT.PMCn9 = 1; PORT5.PFCn.BIT.PFCn9 = 1; PORT5.PFCEn.BIT.PFCEn9 = 0; PORT5.PFCAEn.BIT.PFCAEn9 = 0; PORT5.PIPCn.BIT.PIPCn9 = 1;

//VK-RZ/A1H evaluation board?
#elif defined(USE_VK_RZA1H)
   //Configure ET_COL (P1_14)
   PORT1.PMCn.BIT.PMCn14 = 1; PORT1.PFCn.BIT.PFCn14 = 1; PORT1.PFCEn.BIT.PFCEn14 = 1; PORT1.PFCAEn.BIT.PFCAEn14 = 0; PORT1.PIPCn.BIT.PIPCn14 = 1;
   //Configure ET_TXCLK (P2_0)
   PORT2.PMCn.BIT.PMCn0 = 1; PORT2.PFCn.BIT.PFCn0 = 1; PORT2.PFCEn.BIT.PFCEn0 = 0; PORT2.PFCAEn.BIT.PFCAEn0 = 0; PORT2.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_TXER (P2_1)
   PORT2.PMCn.BIT.PMCn1 = 1; PORT2.PFCn.BIT.PFCn1 = 1; PORT2.PFCEn.BIT.PFCEn1 = 0; PORT2.PFCAEn.BIT.PFCAEn1 = 0; PORT2.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_TXEN (P2_2)
   PORT2.PMCn.BIT.PMCn2 = 1; PORT2.PFCn.BIT.PFCn2 = 1; PORT2.PFCEn.BIT.PFCEn2 = 0; PORT2.PFCAEn.BIT.PFCAEn2 = 0; PORT2.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_CRS (P2_3)
   PORT2.PMCn.BIT.PMCn3 = 1; PORT2.PFCn.BIT.PFCn3 = 1; PORT2.PFCEn.BIT.PFCEn3 = 0; PORT2.PFCAEn.BIT.PFCAEn3 = 0; PORT2.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_TXD0 (P2_4)
   PORT2.PMCn.BIT.PMCn4 = 1; PORT2.PFCn.BIT.PFCn4 = 1; PORT2.PFCEn.BIT.PFCEn4 = 0; PORT2.PFCAEn.BIT.PFCAEn4 = 0; PORT2.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_TXD1 (P2_5)
   PORT2.PMCn.BIT.PMCn5 = 1; PORT2.PFCn.BIT.PFCn5 = 1; PORT2.PFCEn.BIT.PFCEn5 = 0; PORT2.PFCAEn.BIT.PFCAEn5 = 0; PORT2.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_TXD2 (P2_6)
   PORT2.PMCn.BIT.PMCn6 = 1; PORT2.PFCn.BIT.PFCn6 = 1; PORT2.PFCEn.BIT.PFCEn6 = 0; PORT2.PFCAEn.BIT.PFCAEn6 = 0; PORT2.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_TXD3 (P2_7)
   PORT2.PMCn.BIT.PMCn7 = 1; PORT2.PFCn.BIT.PFCn7 = 1; PORT2.PFCEn.BIT.PFCEn7 = 0; PORT2.PFCAEn.BIT.PFCAEn7 = 0; PORT2.PIPCn.BIT.PIPCn7 = 1;
   //Configure ET_RXD0 (P2_8)
   PORT2.PMCn.BIT.PMCn8 = 1; PORT2.PFCn.BIT.PFCn8 = 1; PORT2.PFCEn.BIT.PFCEn8 = 0; PORT2.PFCAEn.BIT.PFCAEn8 = 0; PORT2.PIPCn.BIT.PIPCn8 = 1;
   //Configure ET_RXD1 (P2_9)
   PORT2.PMCn.BIT.PMCn9 = 1; PORT2.PFCn.BIT.PFCn9 = 1; PORT2.PFCEn.BIT.PFCEn9 = 0; PORT2.PFCAEn.BIT.PFCAEn9 = 0; PORT2.PIPCn.BIT.PIPCn9 = 1;
   //Configure ET_RXD2 (P2_10)
   PORT2.PMCn.BIT.PMCn10 = 1; PORT2.PFCn.BIT.PFCn10 = 1; PORT2.PFCEn.BIT.PFCEn10 = 0; PORT2.PFCAEn.BIT.PFCAEn10 = 0; PORT2.PIPCn.BIT.PIPCn10 = 1;
   //Configure ET_RXD3 (P2_11)
   PORT2.PMCn.BIT.PMCn11 = 1; PORT2.PFCn.BIT.PFCn11 = 1; PORT2.PFCEn.BIT.PFCEn11 = 0; PORT2.PFCAEn.BIT.PFCAEn11 = 0; PORT2.PIPCn.BIT.PIPCn11 = 1;
   //Configure ET_MDIO (P3_3)
   PORT3.PMCn.BIT.PMCn3 = 1; PORT3.PFCn.BIT.PFCn3 = 1; PORT3.PFCEn.BIT.PFCEn3 = 0; PORT3.PFCAEn.BIT.PFCAEn3 = 0; PORT3.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_RXCLK (P3_4)
   PORT3.PMCn.BIT.PMCn4 = 1; PORT3.PFCn.BIT.PFCn4 = 1; PORT3.PFCEn.BIT.PFCEn4 = 0; PORT3.PFCAEn.BIT.PFCAEn4 = 0; PORT3.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_RXER (P3_5)
   PORT3.PMCn.BIT.PMCn5 = 1; PORT3.PFCn.BIT.PFCn5 = 1; PORT3.PFCEn.BIT.PFCEn5 = 0; PORT3.PFCAEn.BIT.PFCAEn5 = 0; PORT3.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_RXDV (P3_6)
   PORT3.PMCn.BIT.PMCn6 = 1; PORT3.PFCn.BIT.PFCn6 = 1; PORT3.PFCEn.BIT.PFCEn6 = 0; PORT3.PFCAEn.BIT.PFCAEn6 = 0; PORT3.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_MDC (P7_0) — note: unlike the RSK board, MDC uses PFCE=1
   PORT7.PMCn.BIT.PMCn0 = 1; PORT7.PFCn.BIT.PFCn0 = 0; PORT7.PFCEn.BIT.PFCEn0 = 1; PORT7.PFCAEn.BIT.PFCAEn0 = 0; PORT7.PIPCn.BIT.PIPCn0 = 1;

//Stream it! RZ evaluation board?
#elif defined(USE_STREAM_IT_RZ)
   //Configure ET_TXD0 (P8_0)
   PORT8.PMCn.BIT.PMCn0 = 1; PORT8.PFCn.BIT.PFCn0 = 1; PORT8.PFCEn.BIT.PFCEn0 = 0; PORT8.PFCAEn.BIT.PFCAEn0 = 0; PORT8.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_TXD1 (P8_1)
   PORT8.PMCn.BIT.PMCn1 = 1; PORT8.PFCn.BIT.PFCn1 = 1; PORT8.PFCEn.BIT.PFCEn1 = 0; PORT8.PFCAEn.BIT.PFCAEn1 = 0; PORT8.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_TXD2 (P8_2)
   PORT8.PMCn.BIT.PMCn2 = 1; PORT8.PFCn.BIT.PFCn2 = 1; PORT8.PFCEn.BIT.PFCEn2 = 0; PORT8.PFCAEn.BIT.PFCAEn2 = 0; PORT8.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_TXD3 (P8_3)
   PORT8.PMCn.BIT.PMCn3 = 1; PORT8.PFCn.BIT.PFCn3 = 1; PORT8.PFCEn.BIT.PFCEn3 = 0; PORT8.PFCAEn.BIT.PFCAEn3 = 0; PORT8.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_TXCLK (P8_4)
   PORT8.PMCn.BIT.PMCn4 = 1; PORT8.PFCn.BIT.PFCn4 = 1; PORT8.PFCEn.BIT.PFCEn4 = 0; PORT8.PFCAEn.BIT.PFCAEn4 = 0; PORT8.PIPCn.BIT.PIPCn4 = 1;
   //Configure ET_TXER (P8_5)
   PORT8.PMCn.BIT.PMCn5 = 1; PORT8.PFCn.BIT.PFCn5 = 1; PORT8.PFCEn.BIT.PFCEn5 = 0; PORT8.PFCAEn.BIT.PFCAEn5 = 0; PORT8.PIPCn.BIT.PIPCn5 = 1;
   //Configure ET_TXEN (P8_6)
   PORT8.PMCn.BIT.PMCn6 = 1; PORT8.PFCn.BIT.PFCn6 = 1; PORT8.PFCEn.BIT.PFCEn6 = 0; PORT8.PFCAEn.BIT.PFCAEn6 = 0; PORT8.PIPCn.BIT.PIPCn6 = 1;
   //Configure ET_RXD0 (P8_7)
   PORT8.PMCn.BIT.PMCn7 = 1; PORT8.PFCn.BIT.PFCn7 = 1; PORT8.PFCEn.BIT.PFCEn7 = 0; PORT8.PFCAEn.BIT.PFCAEn7 = 0; PORT8.PIPCn.BIT.PIPCn7 = 1;
   //Configure ET_RXD1 (P8_8)
   PORT8.PMCn.BIT.PMCn8 = 1; PORT8.PFCn.BIT.PFCn8 = 1; PORT8.PFCEn.BIT.PFCEn8 = 0; PORT8.PFCAEn.BIT.PFCAEn8 = 0; PORT8.PIPCn.BIT.PIPCn8 = 1;
   //Configure ET_RXD2 (P8_9)
   PORT8.PMCn.BIT.PMCn9 = 1; PORT8.PFCn.BIT.PFCn9 = 1; PORT8.PFCEn.BIT.PFCEn9 = 0; PORT8.PFCAEn.BIT.PFCAEn9 = 0; PORT8.PIPCn.BIT.PIPCn9 = 1;
   //Configure ET_RXD3 (P8_10)
   PORT8.PMCn.BIT.PMCn10 = 1; PORT8.PFCn.BIT.PFCn10 = 1; PORT8.PFCEn.BIT.PFCEn10 = 0; PORT8.PFCAEn.BIT.PFCAEn10 = 0; PORT8.PIPCn.BIT.PIPCn10 = 1;
   //Configure ET_COL (P8_14)
   PORT8.PMCn.BIT.PMCn14 = 1; PORT8.PFCn.BIT.PFCn14 = 1; PORT8.PFCEn.BIT.PFCEn14 = 0; PORT8.PFCAEn.BIT.PFCAEn14 = 0; PORT8.PIPCn.BIT.PIPCn14 = 1;
   //Configure ET_CRS (P8_15)
   PORT8.PMCn.BIT.PMCn15 = 1; PORT8.PFCn.BIT.PFCn15 = 1; PORT8.PFCEn.BIT.PFCEn15 = 0; PORT8.PFCAEn.BIT.PFCAEn15 = 0; PORT8.PIPCn.BIT.PIPCn15 = 1;
   //Configure ET_MDC (P9_0)
   PORT9.PMCn.BIT.PMCn0 = 1; PORT9.PFCn.BIT.PFCn0 = 1; PORT9.PFCEn.BIT.PFCEn0 = 0; PORT9.PFCAEn.BIT.PFCAEn0 = 0; PORT9.PIPCn.BIT.PIPCn0 = 1;
   //Configure ET_MDIO (P9_1)
   PORT9.PMCn.BIT.PMCn1 = 1; PORT9.PFCn.BIT.PFCn1 = 1; PORT9.PFCEn.BIT.PFCEn1 = 0; PORT9.PFCAEn.BIT.PFCAEn1 = 0; PORT9.PIPCn.BIT.PIPCn1 = 1;
   //Configure ET_RXCLK (P9_2)
   PORT9.PMCn.BIT.PMCn2 = 1; PORT9.PFCn.BIT.PFCn2 = 1; PORT9.PFCEn.BIT.PFCEn2 = 0; PORT9.PFCAEn.BIT.PFCAEn2 = 0; PORT9.PIPCn.BIT.PIPCn2 = 1;
   //Configure ET_RXER (P9_3)
   PORT9.PMCn.BIT.PMCn3 = 1; PORT9.PFCn.BIT.PFCn3 = 1; PORT9.PFCEn.BIT.PFCEn3 = 0; PORT9.PFCAEn.BIT.PFCAEn3 = 0; PORT9.PIPCn.BIT.PIPCn3 = 1;
   //Configure ET_RXDV (P9_4)
   PORT9.PMCn.BIT.PMCn4 = 1; PORT9.PFCn.BIT.PFCn4 = 1; PORT9.PFCEn.BIT.PFCEn4 = 0; PORT9.PFCAEn.BIT.PFCAEn4 = 0; PORT9.PIPCn.BIT.PIPCn4 = 1;

   //Configure PHY_RST (P2_7) as a general-purpose output
   PORT2.PMCn.BIT.PMCn7 = 0; PORT2.PIPCn.BIT.PIPCn7 = 0; PORT2.PMn.BIT.PMn7 = 0;

   //Reset the PHY transceiver (10 ms low pulse, then 10 ms settle time)
   PORT2.Pn.BIT.Pn7 = 0; sleep(10);
   PORT2.Pn.BIT.Pn7 = 1; sleep(10);
#endif
}

#endif


/**
 * @brief Initialize DMA descriptor lists
 * @param[in] interface Underlying network interface
 **/

void rza1EthInitDmaDesc(NetInterface *interface)
{
   uint_t i;

   //Initialize TX descriptors
   for(i = 0; i < RZA1_ETH_TX_BUFFER_COUNT; i++)
   {
      //The descriptor is initially owned by the application
      txDmaDesc[i].td0 = 0;
      //Transmit buffer length
      txDmaDesc[i].td1 = 0;
      //Transmit buffer address
      txDmaDesc[i].td2 = (uint32_t) txBuffer[i];
      //Clear padding field
      txDmaDesc[i].padding = 0;
   }

   //Mark the last descriptor entry with the TDLE flag
   txDmaDesc[i - 1].td0 |= ETHER_TD0_TDLE;
   //Initialize TX descriptor index
   txIndex = 0;

   //Initialize RX descriptors
   for(i = 0; i < RZA1_ETH_RX_BUFFER_COUNT; i++)
   {
      //The descriptor is initially owned by the DMA
      rxDmaDesc[i].rd0 =
ETHER_RD0_RACT; //Receive buffer length rxDmaDesc[i].rd1 = (RZA1_ETH_RX_BUFFER_SIZE << 16) & ETHER_RD1_RBL; //Receive buffer address rxDmaDesc[i].rd2 = (uint32_t) rxBuffer[i]; //Clear padding field rxDmaDesc[i].padding = 0; } //Mark the last descriptor entry with the RDLE flag rxDmaDesc[i - 1].rd0 |= ETHER_RD0_RDLE; //Initialize RX descriptor index rxIndex = 0; //Address of the first TX descriptor ETHER.TDLAR0 = (uint32_t) &txDmaDesc[0]; ETHER.TDFAR0 = (uint32_t) &txDmaDesc[0]; //Address of the last TX descriptor ETHER.TDFXR0 = (uint32_t) &txDmaDesc[RZA1_ETH_TX_BUFFER_COUNT - 1]; //Set TDLF flag ETHER.TDFFR0 = ETHER_TDFFR_TDLF; //Address of the first RX descriptor ETHER.RDLAR0 = (uint32_t) &rxDmaDesc[0]; ETHER.RDFAR0 = (uint32_t) &rxDmaDesc[0]; //Address of the last RX descriptor ETHER.RDFXR0 = (uint32_t) &rxDmaDesc[RZA1_ETH_RX_BUFFER_COUNT - 1]; //Set RDLF flag ETHER.RDFFR0 = ETHER_RDFFR0_RDLF; } /** * @brief RZ/A1 Ethernet MAC timer handler * * This routine is periodically called by the TCP/IP stack to handle periodic * operations such as polling the link state * * @param[in] interface Underlying network interface **/ void rza1EthTick(NetInterface *interface) { //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Handle periodic operations interface->phyDriver->tick(interface); } else if(interface->switchDriver != NULL) { //Handle periodic operations interface->switchDriver->tick(interface); } else { //Just for sanity } } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void rza1EthEnableIrq(NetInterface *interface) { //Enable Ethernet MAC interrupts R_INTC_Enable(INTC_ID_ETHERI); //Valid Ethernet PHY or switch driver? 
if(interface->phyDriver != NULL) { //Enable Ethernet PHY interrupts interface->phyDriver->enableIrq(interface); } else if(interface->switchDriver != NULL) { //Enable Ethernet switch interrupts interface->switchDriver->enableIrq(interface); } else { //Just for sanity } } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void rza1EthDisableIrq(NetInterface *interface) { //Disable Ethernet MAC interrupts R_INTC_Disable(INTC_ID_ETHERI); //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Disable Ethernet PHY interrupts interface->phyDriver->disableIrq(interface); } else if(interface->switchDriver != NULL) { //Disable Ethernet switch interrupts interface->switchDriver->disableIrq(interface); } else { //Just for sanity } } /** * @brief RZ/A1 Ethernet MAC interrupt service routine * @param[in] intSense Unused parameter **/ void rza1EthIrqHandler(uint32_t intSense) { bool_t flag; uint32_t status; //Interrupt service routine prologue osEnterIsr(); //This flag will be set if a higher priority task must be woken flag = FALSE; //Read interrupt status register status = ETHER.EESR0; //Packet transmitted? if((status & ETHER_EESR0_TWB) != 0) { //Clear TWB interrupt flag ETHER.EESR0 = ETHER_EESR0_TWB; //Check whether the TX buffer is available for writing if((txDmaDesc[txIndex].td0 & ETHER_TD0_TACT) == 0) { //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&nicDriverInterface->nicTxEvent); } } //Packet received? if((status & ETHER_EESR0_FR) != 0) { //Disable FR interrupts ETHER.EESIPR0 &= ~ETHER_EESIPR0_FRIP; //Set event flag nicDriverInterface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Interrupt service routine epilogue osExitIsr(flag); } /** * @brief RZ/A1 Ethernet MAC event handler * @param[in] interface Underlying network interface **/ void rza1EthEventHandler(NetInterface *interface) { error_t error; //Packet received? 
if((ETHER.EESR0 & ETHER_EESR0_FR) != 0) { //Clear FR interrupt flag ETHER.EESR0 = ETHER_EESR0_FR; //Process all pending packets do { //Read incoming packet error = rza1EthReceivePacket(interface); //No more data in the receive buffer? } while(error != ERROR_BUFFER_EMPTY); } //Re-enable EDMAC interrupts ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP; } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t rza1EthSendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { //Retrieve the length of the packet size_t length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > RZA1_ETH_TX_BUFFER_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the current buffer is available for writing if((txDmaDesc[txIndex].td0 & ETHER_TD0_TACT) != 0) { return ERROR_FAILURE; } //Copy user data to the transmit buffer netBufferRead(txBuffer[txIndex], buffer, offset, length); //Write the number of bytes to send txDmaDesc[txIndex].td1 = (length << 16) & ETHER_TD1_TDL; //Check current index if(txIndex < (RZA1_ETH_TX_BUFFER_COUNT - 1)) { //Give the ownership of the descriptor to the DMA engine txDmaDesc[txIndex].td0 = ETHER_TD0_TACT | ETHER_TD0_TFP_SOF | ETHER_TD0_TFP_EOF | ETHER_TD0_TWBI; //Point to the next descriptor txIndex++; } else { //Give the ownership of the descriptor to the DMA engine txDmaDesc[txIndex].td0 = ETHER_TD0_TACT | ETHER_TD0_TDLE | ETHER_TD0_TFP_SOF | ETHER_TD0_TFP_EOF | ETHER_TD0_TWBI; //Wrap around txIndex = 0; } //Instruct the DMA to poll the transmit descriptor list ETHER.EDTRR0 = ETHER_EDTRR0_TR; //Check whether the next buffer is 
available for writing if((txDmaDesc[txIndex].td0 & ETHER_TD0_TACT) == 0) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); } //Successful write operation return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t rza1EthReceivePacket(NetInterface *interface) { static uint8_t temp[RZA1_ETH_RX_BUFFER_SIZE]; error_t error; size_t n; NetRxAncillary ancillary; //The current buffer is available for reading? if((rxDmaDesc[rxIndex].rd0 & ETHER_RD0_RACT) == 0) { //SOF and EOF flags should be set if((rxDmaDesc[rxIndex].rd0 & ETHER_RD0_RFP_SOF) != 0 && (rxDmaDesc[rxIndex].rd0 & ETHER_RD0_RFP_EOF) != 0) { //Make sure no error occurred if(!(rxDmaDesc[rxIndex].rd0 & (ETHER_RD0_RFS_MASK & ~ETHER_RD0_RFS_RMAF))) { //Retrieve the length of the frame n = rxDmaDesc[rxIndex].rd1 & ETHER_RD1_RDL; //Limit the number of data to read n = MIN(n, RZA1_ETH_RX_BUFFER_SIZE); //Copy data from the receive buffer osMemcpy(temp, rxBuffer[rxIndex], n); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, temp, n, &ancillary); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } } else { //The packet is not valid error = ERROR_INVALID_PACKET; } //Check current index if(rxIndex < (RZA1_ETH_RX_BUFFER_COUNT - 1)) { //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rd0 = ETHER_RD0_RACT; //Point to the next descriptor rxIndex++; } else { //Give the ownership of the descriptor back to the DMA rxDmaDesc[rxIndex].rd0 = ETHER_RD0_RACT | ETHER_RD0_RDLE; //Wrap around rxIndex = 0; } //Instruct the DMA to poll the receive descriptor list ETHER.EDRRR0 = ETHER_EDRRR0_RR; } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Return status code return error; } /** * @brief 
Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t rza1EthUpdateMacAddrFilter(NetInterface *interface) { uint_t i; volatile uint32_t *addrHigh; volatile uint32_t *addrLow; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Set the upper 32 bits of the MAC address ETHER.MAHR0 = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHER.MALR0 = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE && i < 32; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? if(entry->refCount > 0) { //Debug message TRACE_DEBUG(" %s\r\n", macAddrToString(&entry->addr, NULL)); //Point to the CAM entry registers addrHigh = &ETHER.TSU_ADRH0 + 2 * i; addrLow = &ETHER.TSU_ADRL0 + 2 * i; //The contents of the CAM entry table registers cannot be //modified while the ADSBSY flag is set while((ETHER.TSU_ADSBSY & ETHER_TSU_ADSBSY_ADSBSY) != 0) { } //Set the upper 32 bits of the MAC address *addrHigh = (entry->addr.b[0] << 24) | (entry->addr.b[1] << 16) | (entry->addr.b[2] << 8) | entry->addr.b[3]; //Wait for the ADSBSY flag to be cleared while((ETHER.TSU_ADSBSY & ETHER_TSU_ADSBSY_ADSBSY) != 0) { } //Set the lower 16 bits of the MAC address *addrLow = (entry->addr.b[4] << 8) | entry->addr.b[5]; //Enable the CAM entry ETHER.TSU_TEN |= 1 << (31 - i); } else { //Disable the CAM entry ETHER.TSU_TEN &= ~(1 << (31 - i)); } } //Successful processing return NO_ERROR; } /** * @brief Adjust MAC configuration parameters for proper operation * @param[in] interface Underlying network interface * @return Error code **/ error_t rza1EthUpdateMacConfig(NetInterface *interface) { //Half-duplex or full-duplex mode? 
if(interface->duplexMode == NIC_FULL_DUPLEX_MODE) { ETHER.ECMR0 |= ETH_ECMR0_DM; } else { ETHER.ECMR0 &= ~ETH_ECMR0_DM; } //Successful processing return NO_ERROR; } /** * @brief Write PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @param[in] data Register value **/ void rza1EthWritePhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr, uint16_t data) { //Synchronization pattern rza1EthWriteSmi(SMI_SYNC, 32); //Start of frame rza1EthWriteSmi(SMI_START, 2); //Set up a write operation rza1EthWriteSmi(opcode, 2); //Write PHY address rza1EthWriteSmi(phyAddr, 5); //Write register address rza1EthWriteSmi(regAddr, 5); //Turnaround rza1EthWriteSmi(SMI_TA, 2); //Write register value rza1EthWriteSmi(data, 16); //Release MDIO rza1EthReadSmi(1); } /** * @brief Read PHY register * @param[in] opcode Access type (2 bits) * @param[in] phyAddr PHY address (5 bits) * @param[in] regAddr Register address (5 bits) * @return Register value **/ uint16_t rza1EthReadPhyReg(uint8_t opcode, uint8_t phyAddr, uint8_t regAddr) { uint16_t data; //Synchronization pattern rza1EthWriteSmi(SMI_SYNC, 32); //Start of frame rza1EthWriteSmi(SMI_START, 2); //Set up a read operation rza1EthWriteSmi(opcode, 2); //Write PHY address rza1EthWriteSmi(phyAddr, 5); //Write register address rza1EthWriteSmi(regAddr, 5); //Turnaround to avoid contention rza1EthReadSmi(1); //Read register value data = rza1EthReadSmi(16); //Force the PHY to release the MDIO pin rza1EthReadSmi(1); //Return PHY register contents return data; } /** * @brief SMI write operation * @param[in] data Raw data to be written * @param[in] length Number of bits to be written **/ void rza1EthWriteSmi(uint32_t data, uint_t length) { //Skip the most significant bits since they are meaningless data <<= 32 - length; //Configure MDIO as an output ETHER.PIR0 |= ETHER_PIR0_MMD; //Write the specified number of bits while(length--) { //Write MDIO if((data & 
0x80000000) != 0) { ETHER.PIR0 |= ETHER_PIR0_MDO; } else { ETHER.PIR0 &= ~ETHER_PIR0_MDO; } //Assert MDC usleep(1); ETHER.PIR0 |= ETHER_PIR0_MDC; //Deassert MDC usleep(1); ETHER.PIR0 &= ~ETHER_PIR0_MDC; //Rotate data data <<= 1; } } /** * @brief SMI read operation * @param[in] length Number of bits to be read * @return Data resulting from the MDIO read operation **/ uint32_t rza1EthReadSmi(uint_t length) { uint32_t data = 0; //Configure MDIO as an input ETHER.PIR0 &= ~ETHER_PIR0_MMD; //Read the specified number of bits while(length--) { //Rotate data data <<= 1; //Assert MDC ETHER.PIR0 |= ETHER_PIR0_MDC; usleep(1); //Deassert MDC ETHER.PIR0 &= ~ETHER_PIR0_MDC; usleep(1); //Check MDIO state if((ETHER.PIR0 & ETHER_PIR0_MDI) != 0) { data |= 0x01; } } //Return the received data return data; }
error_t rza1EthInit(NetInterface *interface) { error_t error; //Debug message TRACE_INFO("Initializing RZ/A1 Ethernet MAC...\r\n"); //Save underlying network interface nicDriverInterface = interface; //Enable Ethernet peripheral clock CPG.STBCR7 &= ~CPG_STBCR7_MSTP74; //GPIO configuration rza1EthInitGpio(interface); //Perform software reset ETHER.ARSTR = ETHER_ARSTR_ARST; //Wait for the reset to complete sleep(10); //Start EDMAC transmitting and receiving units ETHER.EDSR0 = ETHER_EDSR0_ENT | ETHER_EDSR0_ENR; //To execute a software reset with this register, 1 must be //written to both the SWRT and SWRR bits simultaneously ETHER.EDMR0 = ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR; //Wait for the reset to complete while(ETHER.EDMR0 & (ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR)) { } //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? 
if(error) { return error; } //Initialize DMA descriptor lists rza1EthInitDmaDesc(interface); //Select little endian mode and set descriptor length (16 bytes) ETHER.EDMR0 = ETHER_EDMR0_DE | ETHER_EDMR0_DL_16; //Error masks ETHER.TRSCER0 = 0; //Use store and forward mode ETHER.TFTR0 = 0; //Set transmit FIFO size and receive FIFO size (2048 bytes) ETHER.FDR0 = ETHER_FDR0_TFD_2048 | ETHER_FDR0_RFD_2048; //Enable continuous reception of multiple frames ETHER.RMCR0 = ETHER_RMCR0_RNC; //No padding insertion into receive data ETHER.RPADIR0 = 0; //Receive FIFO threshold (8 frames or 2048-64 bytes) ETHER.FCFTR0 = ETHER_FCFTR0_RFF_8 | ETHER_FCFTR0_RFD_2048; //Intelligent checksum operation mode ETHER.CSMR = 0; //Enable multicast address filtering ETHER.ECMR0 |= ETH_ECMR0_MCT; //Set the upper 32 bits of the MAC address ETHER.MAHR0 = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHER.MALR0 = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //Disable all CAM entries ETHER.TSU_TEN = 0; //Maximum frame length that can be accepted ETHER.RFLR0 = RZA1_ETH_RX_BUFFER_SIZE; //Automatic pause frame ETHER.APR0 = 0; //Manual pause frame ETHER.MPR0 = 0; //Automatic pause frame retransmit count ETHER.TPAUSER0 = 0; //Disable all EMAC interrupts ETHER.ECSIPR0 = 0; //Enable the desired EDMAC interrupts ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP; //Register interrupt handler R_INTC_Regist_Int_Func(INTC_ID_ETHERI, rza1EthIrqHandler); //Configure interrupt priority R_INTC_Set_Priority(INTC_ID_ETHERI, RZA1_ETH_IRQ_PRIORITY); //Enable EDMAC transmission and reception ETHER.ECMR0 |= ETH_ECMR0_RE | ETH_ECMR0_TE; //Instruct the DMA to poll the receive descriptor list ETHER.EDRRR0 = ETHER_EDRRR0_RR; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; }
error_t rza1EthInit(NetInterface *interface) { error_t error; //Debug message TRACE_INFO("Initializing RZ/A1 Ethernet MAC...\r\n"); //Save underlying network interface nicDriverInterface = interface; //Enable Ethernet peripheral clock CPG.STBCR7 &= ~CPG_STBCR7_MSTP74; //GPIO configuration rza1EthInitGpio(interface); //Perform software reset ETHER.ARSTR = ETHER_ARSTR_ARST; //Wait for the reset to complete sleep(10); //Start EDMAC transmitting and receiving units ETHER.EDSR0 = ETHER_EDSR0_ENT | ETHER_EDSR0_ENR; //To execute a software reset with this register, 1 must be //written to both the SWRT and SWRR bits simultaneously ETHER.EDMR0 = ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR; //Wait for the reset to complete while(ETHER.EDMR0 & (ETHER_EDMR0_SWRT | ETHER_EDMR0_SWRR)) { } //Valid Ethernet PHY or switch driver? if(interface->phyDriver != NULL) { //Ethernet PHY initialization error = interface->phyDriver->init(interface); } else if(interface->switchDriver != NULL) { //Ethernet switch initialization error = interface->switchDriver->init(interface); } else { //The interface is not properly configured error = ERROR_FAILURE; } //Any error to report? 
if(error) { return error; } //Initialize DMA descriptor lists rza1EthInitDmaDesc(interface); //Select little endian mode and set descriptor length (16 bytes) ETHER.EDMR0 = ETHER_EDMR0_DE | ETHER_EDMR0_DL_16; //Error masks ETHER.TRSCER0 = 0; //Use store and forward mode ETHER.TFTR0 = 0; //Set transmit FIFO size and receive FIFO size (2048 bytes) ETHER.FDR0 = ETHER_FDR0_TFD_2048 | ETHER_FDR0_RFD_2048; //Enable continuous reception of multiple frames ETHER.RMCR0 = ETHER_RMCR0_RNC; //No padding insertion into receive data ETHER.RPADIR0 = 0; //Receive FIFO threshold (8 frames or 2048-64 bytes) ETHER.FCFTR0 = ETHER_FCFTR0_RFF_8 | ETHER_FCFTR0_RFD_2048; //Intelligent checksum operation mode ETHER.CSMR = 0; //Enable multicast address filtering ETHER.ECMR0 |= ETH_ECMR0_MCT; //Set the upper 32 bits of the MAC address ETHER.MAHR0 = (interface->macAddr.b[0] << 24) | (interface->macAddr.b[1] << 16) | (interface->macAddr.b[2] << 8) | interface->macAddr.b[3]; //Set the lower 16 bits of the MAC address ETHER.MALR0 = (interface->macAddr.b[4] << 8) | interface->macAddr.b[5]; //Disable all CAM entries ETHER.TSU_TEN = 0; //Maximum frame length that can be accepted ETHER.RFLR0 = RZA1_ETH_RX_BUFFER_SIZE; //Automatic pause frame ETHER.APR0 = 0; //Manual pause frame ETHER.MPR0 = 0; //Automatic pause frame retransmit count ETHER.TPAUSER0 = 0; //Disable all EMAC interrupts ETHER.ECSIPR0 = 0; //Enable the desired EDMAC interrupts ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP; //Register interrupt handler R_INTC_Regist_Int_Func(INTC_ID_ETHERI, rza1EthIrqHandler); //Configure interrupt priority R_INTC_Set_Priority(INTC_ID_ETHERI, RZA1_ETH_IRQ_PRIORITY); //Enable EDMAC transmission and reception ETHER.ECMR0 |= ETH_ECMR0_RE | ETH_ECMR0_TE; //Instruct the DMA to poll the receive descriptor list ETHER.EDRRR0 = ETHER_EDRRR0_RR; //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Successful initialization return NO_ERROR; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (50, '#pragma location = RZA1_ETH_RAM_SECTION'), (54, '#pragma location = RZA1_ETH_RAM_SECTION'), (58, '#pragma location = RZA1_ETH_RAM_SECTION'), (62, '#pragma location = RZA1_ETH_RAM_SECTION'), (70, ' __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));'), (73, ' __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));'), (76, ' __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));'), (79, ' __attribute__((aligned(32), section(RZA1_ETH_RAM_SECTION)));'), (226, ' ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP;'), (247, '//RSK-RZ/A1H, Stream it! RZ, Hachiko or VK-RZ/A1H evaluation board?'), (882, ' ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP;')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (66, ' __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32)));'), (69, ' __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32)));'), (72, ' __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32)));'), (75, ' __attribute__((section(".BSS_DMAC_SAMPLE_INTERNAL_RAM"), aligned(32)));'), (222, ' ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP;'), (243, '//RSK RZ/A1H, Stream it! RZ, Hachiko or VK-RZ/A1H evaluation board?'), (878, ' ETHER.EESIPR0 = ETHER_EESIPR0_TWBIP | ETHER_EESIPR0_FRIP;')]}
13
9
681
4,976
57
361
5
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
CWE-20
2,922
session_ops.cc
C++
tensorflow::GetSessionHandleOp::Compute
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/data_flow_ops.cc. #include <limits.h> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { class GetSessionHandleOp : public OpKernel { public: explicit GetSessionHandleOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); int64 id = ctx->session_state()->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk)); Tensor* handle = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle)); if (ctx->expected_output_dtype(0) == DT_RESOURCE) { ResourceHandle 
resource_handle = MakeResourceHandle<Tensor>( ctx, SessionState::kTensorHandleResourceTypeName, tk.GetHandle(name())); resource_handle.set_maybe_type_name( SessionState::kTensorHandleResourceTypeName); handle->scalar<ResourceHandle>()() = resource_handle; } else { // Legacy behavior in V1. handle->flat<tstring>().setConstant(tk.GetHandle(name())); } } TF_DISALLOW_COPY_AND_ASSIGN(GetSessionHandleOp); }; REGISTER_KERNEL_BUILDER(Name("GetSessionHandle").Device(DEVICE_CPU), GetSessionHandleOp); REGISTER_KERNEL_BUILDER(Name("GetSessionHandleV2").Device(DEVICE_CPU), GetSessionHandleOp); #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("GetSessionHandle") \ .Device(DEVICE_GPU) \ .HostMemory("handle") \ .TypeConstraint<type>("T"), \ GetSessionHandleOp) \ REGISTER_KERNEL_BUILDER(Name("GetSessionHandleV2") \ .Device(DEVICE_GPU) \ .HostMemory("handle") \ .TypeConstraint<type>("T"), \ GetSessionHandleOp) TF_CALL_NUMBER_TYPES(REGISTER_GPU_KERNEL); REGISTER_GPU_KERNEL(bool); #undef REGISTER_GPU_KERNEL class GetSessionTensorOp : public OpKernel { public: explicit GetSessionTensorOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar<tstring>()(); Tensor val; OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val)); ctx->set_output(0, val); } TF_DISALLOW_COPY_AND_ASSIGN(GetSessionTensorOp); }; REGISTER_KERNEL_BUILDER(Name("GetSessionTensor").Device(DEVICE_CPU), GetSessionTensorOp); #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("GetSessionTensor") \ .Device(DEVICE_GPU) \ .HostMemory("handle") \ .TypeConstraint<type>("dtype"), \ GetSessionTensorOp) TF_CALL_NUMBER_TYPES(REGISTER_GPU_KERNEL); REGISTER_GPU_KERNEL(bool); #undef REGISTER_GPU_KERNEL class DeleteSessionTensorOp : public OpKernel { public: explicit DeleteSessionTensorOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) 
override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar<tstring>()(); OP_REQUIRES_OK(ctx, ctx->session_state()->DeleteTensor(name)); } TF_DISALLOW_COPY_AND_ASSIGN(DeleteSessionTensorOp); }; REGISTER_KERNEL_BUILDER(Name("DeleteSessionTensor").Device(DEVICE_CPU), DeleteSessionTensorOp); REGISTER_KERNEL_BUILDER( Name("DeleteSessionTensor").Device(DEVICE_GPU).HostMemory("handle"), DeleteSessionTensorOp); } // namespace tensorflow
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/data_flow_ops.cc. #include <limits.h> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { class GetSessionHandleOp : public OpKernel { public: explicit GetSessionHandleOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); auto session_state = ctx->session_state(); OP_REQUIRES(ctx, session_state != nullptr, errors::FailedPrecondition( "GetSessionHandle called on null session state")); int64 id = session_state->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, 
ctx->tensor_store()->AddTensor(name(), tk)); Tensor* handle = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle)); if (ctx->expected_output_dtype(0) == DT_RESOURCE) { ResourceHandle resource_handle = MakeResourceHandle<Tensor>( ctx, SessionState::kTensorHandleResourceTypeName, tk.GetHandle(name())); resource_handle.set_maybe_type_name( SessionState::kTensorHandleResourceTypeName); handle->scalar<ResourceHandle>()() = resource_handle; } else { // Legacy behavior in V1. handle->flat<tstring>().setConstant(tk.GetHandle(name())); } } TF_DISALLOW_COPY_AND_ASSIGN(GetSessionHandleOp); }; REGISTER_KERNEL_BUILDER(Name("GetSessionHandle").Device(DEVICE_CPU), GetSessionHandleOp); REGISTER_KERNEL_BUILDER(Name("GetSessionHandleV2").Device(DEVICE_CPU), GetSessionHandleOp); #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("GetSessionHandle") \ .Device(DEVICE_GPU) \ .HostMemory("handle") \ .TypeConstraint<type>("T"), \ GetSessionHandleOp) \ REGISTER_KERNEL_BUILDER(Name("GetSessionHandleV2") \ .Device(DEVICE_GPU) \ .HostMemory("handle") \ .TypeConstraint<type>("T"), \ GetSessionHandleOp) TF_CALL_NUMBER_TYPES(REGISTER_GPU_KERNEL); REGISTER_GPU_KERNEL(bool); #undef REGISTER_GPU_KERNEL class GetSessionTensorOp : public OpKernel { public: explicit GetSessionTensorOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar<tstring>()(); Tensor val; OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val)); ctx->set_output(0, val); } TF_DISALLOW_COPY_AND_ASSIGN(GetSessionTensorOp); }; REGISTER_KERNEL_BUILDER(Name("GetSessionTensor").Device(DEVICE_CPU), GetSessionTensorOp); #define REGISTER_GPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("GetSessionTensor") \ .Device(DEVICE_GPU) \ .HostMemory("handle") \ .TypeConstraint<type>("dtype"), \ GetSessionTensorOp) TF_CALL_NUMBER_TYPES(REGISTER_GPU_KERNEL); 
REGISTER_GPU_KERNEL(bool); #undef REGISTER_GPU_KERNEL class DeleteSessionTensorOp : public OpKernel { public: explicit DeleteSessionTensorOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar<tstring>()(); OP_REQUIRES_OK(ctx, ctx->session_state()->DeleteTensor(name)); } TF_DISALLOW_COPY_AND_ASSIGN(DeleteSessionTensorOp); }; REGISTER_KERNEL_BUILDER(Name("DeleteSessionTensor").Device(DEVICE_CPU), DeleteSessionTensorOp); REGISTER_KERNEL_BUILDER( Name("DeleteSessionTensor").Device(DEVICE_GPU).HostMemory("handle"), DeleteSessionTensorOp); } // namespace tensorflow
void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); int64 id = ctx->session_state()->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk)); Tensor* handle = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle)); if (ctx->expected_output_dtype(0) == DT_RESOURCE) { ResourceHandle resource_handle = MakeResourceHandle<Tensor>( ctx, SessionState::kTensorHandleResourceTypeName, tk.GetHandle(name())); resource_handle.set_maybe_type_name( SessionState::kTensorHandleResourceTypeName); handle->scalar<ResourceHandle>()() = resource_handle; } else { // Legacy behavior in V1. handle->flat<tstring>().setConstant(tk.GetHandle(name())); } }
void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); auto session_state = ctx->session_state(); OP_REQUIRES(ctx, session_state != nullptr, errors::FailedPrecondition( "GetSessionHandle called on null session state")); int64 id = session_state->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk)); Tensor* handle = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle)); if (ctx->expected_output_dtype(0) == DT_RESOURCE) { ResourceHandle resource_handle = MakeResourceHandle<Tensor>( ctx, SessionState::kTensorHandleResourceTypeName, tk.GetHandle(name())); resource_handle.set_maybe_type_name( SessionState::kTensorHandleResourceTypeName); handle->scalar<ResourceHandle>()() = resource_handle; } else { // Legacy behavior in V1. handle->flat<tstring>().setConstant(tk.GetHandle(name())); } }
{'added': [(19, ''), (31, '#include "tensorflow/core/platform/errors.h"'), (47, ' auto session_state = ctx->session_state();'), (48, ' OP_REQUIRES(ctx, session_state != nullptr,'), (49, ' errors::FailedPrecondition('), (50, ' "GetSessionHandle called on null session state"));'), (51, ' int64 id = session_state->GetNewId();')], 'deleted': [(45, ' int64 id = ctx->session_state()->GetNewId();')]}
7
1
86
548
18
178
2
https://github.com/tensorflow/tensorflow
CVE-2020-15204
CWE-476
2,968
tee_svc_cryp.c
C
syscall_cryp_obj_populate
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. */ #include <assert.h> #include <crypto/crypto.h> #include <kernel/tee_ta_manager.h> #include <mm/tee_mmu.h> #include <string_ext.h> #include <string.h> #include <sys/queue.h> #include <tee_api_types.h> #include <tee/tee_cryp_utl.h> #include <tee/tee_obj.h> #include <tee/tee_svc_cryp.h> #include <tee/tee_svc.h> #include <trace.h> #include <utee_defines.h> #include <util.h> #include <tee_api_defines_extensions.h> #if defined(CFG_CRYPTO_HKDF) #include <tee/tee_cryp_hkdf.h> #endif #if defined(CFG_CRYPTO_CONCAT_KDF) #include <tee/tee_cryp_concat_kdf.h> #endif #if defined(CFG_CRYPTO_PBKDF2) #include <tee/tee_cryp_pbkdf2.h> #endif typedef void (*tee_cryp_ctx_finalize_func_t) (void *ctx, uint32_t algo); struct tee_cryp_state { TAILQ_ENTRY(tee_cryp_state) link; uint32_t algo; uint32_t mode; vaddr_t key1; vaddr_t key2; void *ctx; tee_cryp_ctx_finalize_func_t ctx_finalize; }; struct tee_cryp_obj_secret { uint32_t key_size; uint32_t alloc_size; /* * Pseudo code visualize layout of structure * Next follows data, such as: * uint8_t data[alloc_size] * key_size must never exceed alloc_size */ }; #define TEE_TYPE_ATTR_OPTIONAL 0x0 #define TEE_TYPE_ATTR_REQUIRED 0x1 #define TEE_TYPE_ATTR_OPTIONAL_GROUP 0x2 #define TEE_TYPE_ATTR_SIZE_INDICATOR 0x4 #define TEE_TYPE_ATTR_GEN_KEY_OPT 0x8 #define TEE_TYPE_ATTR_GEN_KEY_REQ 0x10 /* Handle storing of generic secret keys of varying lengths */ #define ATTR_OPS_INDEX_SECRET 0 /* Convert to/from big-endian byte array and provider-specific bignum */ #define ATTR_OPS_INDEX_BIGNUM 1 /* Convert to/from value attribute depending on direction */ #define ATTR_OPS_INDEX_VALUE 2 struct tee_cryp_obj_type_attrs { uint32_t attr_id; uint16_t flags; uint16_t ops_index; uint16_t raw_offs; uint16_t raw_size; }; #define RAW_DATA(_x, _y) \ .raw_offs = offsetof(_x, _y), .raw_size = MEMBER_SIZE(_x, _y) static const struct tee_cryp_obj_type_attrs 
tee_cryp_obj_secret_value_attrs[] = { { .attr_id = TEE_ATTR_SECRET_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, e) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, e) }, { .attr_id = TEE_ATTR_RSA_PRIVATE_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, d) }, { .attr_id = TEE_ATTR_RSA_PRIME1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, p) }, { .attr_id = TEE_ATTR_RSA_PRIME2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, q) }, { .attr_id = TEE_ATTR_RSA_EXPONENT1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dp) }, { .attr_id = TEE_ATTR_RSA_EXPONENT2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dq) }, { .attr_id = TEE_ATTR_RSA_COEFFICIENT, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, qp) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_pub_key_attrs[] = { { .attr_id = 
TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, g) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, g) }, { .attr_id = TEE_ATTR_DSA_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, x) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dh_keypair_attrs[] = { { .attr_id = TEE_ATTR_DH_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, p) }, { .attr_id = TEE_ATTR_DH_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, g) }, { .attr_id = TEE_ATTR_DH_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, 
.ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, y) }, { .attr_id = TEE_ATTR_DH_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, x) }, { .attr_id = TEE_ATTR_DH_SUBPRIME, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP | TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, q) }, { .attr_id = TEE_ATTR_DH_X_BITS, .flags = TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct dh_keypair, xbits) }, }; #if defined(CFG_CRYPTO_HKDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_hkdf_ikm_attrs[] = { { .attr_id = TEE_ATTR_HKDF_IKM, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_concat_kdf_z_attrs[] = { { .attr_id = TEE_ATTR_CONCAT_KDF_Z, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif #if defined(CFG_CRYPTO_PBKDF2) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_pbkdf2_passwd_attrs[] = { { .attr_id = TEE_ATTR_PBKDF2_PASSWORD, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_pub_key_attrs[] = { { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_public_key, x) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_public_key, y) }, { .attr_id = TEE_ATTR_ECC_CURVE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct ecc_public_key, curve) }, }; static const struct tee_cryp_obj_type_attrs 
tee_cryp_obj_ecc_keypair_attrs[] = { { .attr_id = TEE_ATTR_ECC_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, d) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, x) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, y) }, { .attr_id = TEE_ATTR_ECC_CURVE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct ecc_keypair, curve) }, }; struct tee_cryp_obj_type_props { TEE_ObjectType obj_type; uint16_t min_size; /* may not be smaller than this */ uint16_t max_size; /* may not be larger than this */ uint16_t alloc_size; /* this many bytes are allocated to hold data */ uint8_t quanta; /* may only be an multiple of this */ uint8_t num_type_attrs; const struct tee_cryp_obj_type_attrs *type_attrs; }; #define PROP(obj_type, quanta, min_size, max_size, alloc_size, type_attrs) \ { (obj_type), (min_size), (max_size), (alloc_size), (quanta), \ ARRAY_SIZE(type_attrs), (type_attrs) } static const struct tee_cryp_obj_type_props tee_cryp_obj_props[] = { PROP(TEE_TYPE_AES, 64, 128, 256, /* valid sizes 128, 192, 256 */ 256 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_DES, 56, 56, 56, /* * Valid size 56 without parity, note that we still allocate * for 64 bits since the key is supplied with parity. */ 64 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_DES3, 56, 112, 168, /* * Valid sizes 112, 168 without parity, note that we still * allocate for with space for the parity since the key is * supplied with parity. 
*/ 192 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_MD5, 8, 64, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA1, 8, 80, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA224, 8, 112, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA256, 8, 192, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA384, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA512, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_GENERIC_SECRET, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), #if defined(CFG_CRYPTO_HKDF) PROP(TEE_TYPE_HKDF_IKM, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_hkdf_ikm_attrs), #endif #if defined(CFG_CRYPTO_CONCAT_KDF) PROP(TEE_TYPE_CONCAT_KDF_Z, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_concat_kdf_z_attrs), #endif #if defined(CFG_CRYPTO_PBKDF2) PROP(TEE_TYPE_PBKDF2_PASSWORD, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_pbkdf2_passwd_attrs), #endif PROP(TEE_TYPE_RSA_PUBLIC_KEY, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_public_key), tee_cryp_obj_rsa_pub_key_attrs), PROP(TEE_TYPE_RSA_KEYPAIR, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_keypair), tee_cryp_obj_rsa_keypair_attrs), PROP(TEE_TYPE_DSA_PUBLIC_KEY, 64, 512, 3072, sizeof(struct dsa_public_key), tee_cryp_obj_dsa_pub_key_attrs), PROP(TEE_TYPE_DSA_KEYPAIR, 64, 512, 3072, sizeof(struct dsa_keypair), tee_cryp_obj_dsa_keypair_attrs), PROP(TEE_TYPE_DH_KEYPAIR, 1, 256, 2048, sizeof(struct dh_keypair), tee_cryp_obj_dh_keypair_attrs), PROP(TEE_TYPE_ECDSA_PUBLIC_KEY, 1, 
192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDSA_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), PROP(TEE_TYPE_ECDH_PUBLIC_KEY, 1, 192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDH_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), }; struct attr_ops { TEE_Result (*from_user)(void *attr, const void *buffer, size_t size); TEE_Result (*to_user)(void *attr, struct tee_ta_session *sess, void *buffer, uint64_t *size); TEE_Result (*to_binary)(void *attr, void *data, size_t data_len, size_t *offs); bool (*from_binary)(void *attr, const void *data, size_t data_len, size_t *offs); TEE_Result (*from_obj)(void *attr, void *src_attr); void (*free)(void *attr); void (*clear)(void *attr); }; static TEE_Result op_u32_to_binary_helper(uint32_t v, uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; size_t next_offs; if (ADD_OVERFLOW(*offs, sizeof(field), &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) { field = TEE_U32_TO_BIG_ENDIAN(v); memcpy(data + *offs, &field, sizeof(field)); } (*offs) = next_offs; return TEE_SUCCESS; } static bool op_u32_from_binary_helper(uint32_t *v, const uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; if (!data || (*offs + sizeof(field)) > data_len) return false; memcpy(&field, data + *offs, sizeof(field)); *v = TEE_U32_FROM_BIG_ENDIAN(field); (*offs) += sizeof(field); return true; } static TEE_Result op_attr_secret_value_from_user(void *attr, const void *buffer, size_t size) { struct tee_cryp_obj_secret *key = attr; /* Data size has to fit in allocated buffer */ if (size > key->alloc_size) return TEE_ERROR_SECURITY; memcpy(key + 1, buffer, size); key->key_size = size; return TEE_SUCCESS; } static TEE_Result op_attr_secret_value_to_user(void *attr, struct tee_ta_session *sess __unused, void *buffer, uint64_t *size) { TEE_Result res; struct 
tee_cryp_obj_secret *key = attr; uint64_t s; uint64_t key_size; res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; key_size = key->key_size; res = tee_svc_copy_to_user(size, &key_size, sizeof(key_size)); if (res != TEE_SUCCESS) return res; if (s < key->key_size || !buffer) return TEE_ERROR_SHORT_BUFFER; return tee_svc_copy_to_user(buffer, key + 1, key->key_size); } static TEE_Result op_attr_secret_value_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { TEE_Result res; struct tee_cryp_obj_secret *key = attr; size_t next_offs; res = op_u32_to_binary_helper(key->key_size, data, data_len, offs); if (res != TEE_SUCCESS) return res; if (ADD_OVERFLOW(*offs, key->key_size, &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) memcpy((uint8_t *)data + *offs, key + 1, key->key_size); (*offs) = next_offs; return TEE_SUCCESS; } static bool op_attr_secret_value_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { struct tee_cryp_obj_secret *key = attr; uint32_t s; if (!op_u32_from_binary_helper(&s, data, data_len, offs)) return false; if ((*offs + s) > data_len) return false; /* Data size has to fit in allocated buffer */ if (s > key->alloc_size) return false; key->key_size = s; memcpy(key + 1, (const uint8_t *)data + *offs, s); (*offs) += s; return true; } static TEE_Result op_attr_secret_value_from_obj(void *attr, void *src_attr) { struct tee_cryp_obj_secret *key = attr; struct tee_cryp_obj_secret *src_key = src_attr; if (src_key->key_size > key->alloc_size) return TEE_ERROR_BAD_STATE; memcpy(key + 1, src_key + 1, src_key->key_size); key->key_size = src_key->key_size; return TEE_SUCCESS; } static void op_attr_secret_value_clear(void *attr) { struct tee_cryp_obj_secret *key = attr; key->key_size = 0; memset(key + 1, 0, key->alloc_size); } static TEE_Result op_attr_bignum_from_user(void *attr, const void *buffer, size_t size) { struct bignum **bn = attr; return 
crypto_bignum_bin2bn(buffer, size, *bn); } static TEE_Result op_attr_bignum_to_user(void *attr, struct tee_ta_session *sess, void *buffer, uint64_t *size) { TEE_Result res; struct bignum **bn = attr; uint64_t req_size; uint64_t s; res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; req_size = crypto_bignum_num_bytes(*bn); res = tee_svc_copy_to_user(size, &req_size, sizeof(req_size)); if (res != TEE_SUCCESS) return res; if (!req_size) return TEE_SUCCESS; if (s < req_size || !buffer) return TEE_ERROR_SHORT_BUFFER; /* Check we can access data using supplied user mode pointer */ res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buffer, req_size); if (res != TEE_SUCCESS) return res; /* * Write the bignum (wich raw data points to) into an array of * bytes (stored in buffer) */ crypto_bignum_bn2bin(*bn, buffer); return TEE_SUCCESS; } static TEE_Result op_attr_bignum_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { TEE_Result res; struct bignum **bn = attr; uint32_t n = crypto_bignum_num_bytes(*bn); size_t next_offs; res = op_u32_to_binary_helper(n, data, data_len, offs); if (res != TEE_SUCCESS) return res; if (ADD_OVERFLOW(*offs, n, &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) crypto_bignum_bn2bin(*bn, (uint8_t *)data + *offs); (*offs) = next_offs; return TEE_SUCCESS; } static bool op_attr_bignum_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { struct bignum **bn = attr; uint32_t n; if (!op_u32_from_binary_helper(&n, data, data_len, offs)) return false; if ((*offs + n) > data_len) return false; if (crypto_bignum_bin2bn((const uint8_t *)data + *offs, n, *bn)) return false; (*offs) += n; return true; } static TEE_Result op_attr_bignum_from_obj(void *attr, void *src_attr) { struct bignum **bn = attr; struct bignum **src_bn = src_attr; crypto_bignum_copy(*bn, *src_bn); 
return TEE_SUCCESS; } static void op_attr_bignum_clear(void *attr) { struct bignum **bn = attr; crypto_bignum_clear(*bn); } static void op_attr_bignum_free(void *attr) { struct bignum **bn = attr; crypto_bignum_free(*bn); *bn = NULL; } static TEE_Result op_attr_value_from_user(void *attr, const void *buffer, size_t size) { uint32_t *v = attr; if (size != sizeof(uint32_t) * 2) return TEE_ERROR_GENERIC; /* "can't happen */ /* Note that only the first value is copied */ memcpy(v, buffer, sizeof(uint32_t)); return TEE_SUCCESS; } static TEE_Result op_attr_value_to_user(void *attr, struct tee_ta_session *sess __unused, void *buffer, uint64_t *size) { TEE_Result res; uint32_t *v = attr; uint64_t s; uint32_t value[2] = { *v }; uint64_t req_size = sizeof(value); res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; if (s < req_size || !buffer) return TEE_ERROR_SHORT_BUFFER; return tee_svc_copy_to_user(buffer, value, req_size); } static TEE_Result op_attr_value_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_to_binary_helper(*v, data, data_len, offs); } static bool op_attr_value_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_from_binary_helper(v, data, data_len, offs); } static TEE_Result op_attr_value_from_obj(void *attr, void *src_attr) { uint32_t *v = attr; uint32_t *src_v = src_attr; *v = *src_v; return TEE_SUCCESS; } static void op_attr_value_clear(void *attr) { uint32_t *v = attr; *v = 0; } static const struct attr_ops attr_ops[] = { [ATTR_OPS_INDEX_SECRET] = { .from_user = op_attr_secret_value_from_user, .to_user = op_attr_secret_value_to_user, .to_binary = op_attr_secret_value_to_binary, .from_binary = op_attr_secret_value_from_binary, .from_obj = op_attr_secret_value_from_obj, .free = op_attr_secret_value_clear, /* not a typo */ .clear = op_attr_secret_value_clear, }, [ATTR_OPS_INDEX_BIGNUM] = { .from_user = 
op_attr_bignum_from_user, .to_user = op_attr_bignum_to_user, .to_binary = op_attr_bignum_to_binary, .from_binary = op_attr_bignum_from_binary, .from_obj = op_attr_bignum_from_obj, .free = op_attr_bignum_free, .clear = op_attr_bignum_clear, }, [ATTR_OPS_INDEX_VALUE] = { .from_user = op_attr_value_from_user, .to_user = op_attr_value_to_user, .to_binary = op_attr_value_to_binary, .from_binary = op_attr_value_from_binary, .from_obj = op_attr_value_from_obj, .free = op_attr_value_clear, /* not a typo */ .clear = op_attr_value_clear, }, }; TEE_Result syscall_cryp_obj_get_info(unsigned long obj, TEE_ObjectInfo *info) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; res = tee_svc_copy_to_user(info, &o->info, sizeof(o->info)); exit: return res; } TEE_Result syscall_cryp_obj_restrict_usage(unsigned long obj, unsigned long usage) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; o->info.objectUsage &= usage; exit: return res; } static int tee_svc_cryp_obj_find_type_attr_idx( uint32_t attr_id, const struct tee_cryp_obj_type_props *type_props) { size_t n; for (n = 0; n < type_props->num_type_attrs; n++) { if (attr_id == type_props->type_attrs[n].attr_id) return n; } return -1; } static const struct tee_cryp_obj_type_props *tee_svc_find_type_props( TEE_ObjectType obj_type) { size_t n; for (n = 0; n < ARRAY_SIZE(tee_cryp_obj_props); n++) { if (tee_cryp_obj_props[n].obj_type == obj_type) return tee_cryp_obj_props + n; } return NULL; } /* Set an attribute on an object */ static void set_attribute(struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { 
int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return; o->have_attrs |= BIT(idx); } /* Get an attribute on an object */ static uint32_t get_attribute(const struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return 0; return o->have_attrs & BIT(idx); } TEE_Result syscall_cryp_obj_get_attr(unsigned long obj, unsigned long attr_id, void *buffer, uint64_t *size) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; int idx; const struct attr_ops *ops; void *attr; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return TEE_ERROR_ITEM_NOT_FOUND; /* Check that the object is initialized */ if (!(o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED)) return TEE_ERROR_BAD_PARAMETERS; /* Check that getting the attribute is allowed */ if (!(attr_id & TEE_ATTR_BIT_PROTECTED) && !(o->info.objectUsage & TEE_USAGE_EXTRACTABLE)) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) { /* Unknown object type, "can't happen" */ return TEE_ERROR_BAD_STATE; } idx = tee_svc_cryp_obj_find_type_attr_idx(attr_id, type_props); if ((idx < 0) || ((o->have_attrs & (1 << idx)) == 0)) return TEE_ERROR_ITEM_NOT_FOUND; ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; return ops->to_user(attr, sess, buffer, size); } void tee_obj_attr_free(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].free((uint8_t *)o->attr + 
ta->raw_offs); } } void tee_obj_attr_clear(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].clear((uint8_t *)o->attr + ta->raw_offs); } } TEE_Result tee_obj_attr_to_binary(struct tee_obj *o, void *data, size_t *data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; size_t len = data ? *data_len : 0; TEE_Result res; if (o->info.objectType == TEE_TYPE_DATA) { *data_len = 0; return TEE_SUCCESS; /* pure data object */ } if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; res = attr_ops[ta->ops_index].to_binary(attr, data, len, &offs); if (res != TEE_SUCCESS) return res; } *data_len = offs; if (data && offs > len) return TEE_ERROR_SHORT_BUFFER; return TEE_SUCCESS; } TEE_Result tee_obj_attr_from_binary(struct tee_obj *o, const void *data, size_t data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; if (!attr_ops[ta->ops_index].from_binary(attr, data, data_len, &offs)) return TEE_ERROR_CORRUPT_OBJECT; } return TEE_SUCCESS; } TEE_Result tee_obj_attr_copy_from(struct tee_obj *o, const struct tee_obj *src) { TEE_Result res; const struct tee_cryp_obj_type_props *tp; const struct 
tee_cryp_obj_type_attrs *ta; size_t n; uint32_t have_attrs = 0; void *attr; void *src_attr; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; if (o->info.objectType == src->info.objectType) { have_attrs = src->have_attrs; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + ta->raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } else { const struct tee_cryp_obj_type_props *tp_src; int idx; if (o->info.objectType == TEE_TYPE_RSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_RSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_DSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_DSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDH_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDH_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else { return TEE_ERROR_BAD_PARAMETERS; } tp_src = tee_svc_find_type_props(src->info.objectType); if (!tp_src) return TEE_ERROR_BAD_STATE; have_attrs = BIT32(tp->num_type_attrs) - 1; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; idx = tee_svc_cryp_obj_find_type_attr_idx(ta->attr_id, tp_src); if (idx < 0) return TEE_ERROR_BAD_STATE; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + tp_src->type_attrs[idx].raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } o->have_attrs = have_attrs; return TEE_SUCCESS; } TEE_Result tee_obj_set_type(struct tee_obj *o, uint32_t obj_type, size_t max_key_size) { TEE_Result res = 
TEE_SUCCESS; const struct tee_cryp_obj_type_props *type_props; /* Can only set type for newly allocated objs */ if (o->attr) return TEE_ERROR_BAD_STATE; /* * Verify that maxKeySize is supported and find out how * much should be allocated. */ if (obj_type == TEE_TYPE_DATA) { if (max_key_size) return TEE_ERROR_NOT_SUPPORTED; } else { /* Find description of object */ type_props = tee_svc_find_type_props(obj_type); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (max_key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; o->attr = calloc(1, type_props->alloc_size); if (!o->attr) return TEE_ERROR_OUT_OF_MEMORY; } /* If we have a key structure, pre-allocate the bignums inside */ switch (obj_type) { case TEE_TYPE_RSA_PUBLIC_KEY: res = crypto_acipher_alloc_rsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_RSA_KEYPAIR: res = crypto_acipher_alloc_rsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DSA_PUBLIC_KEY: res = crypto_acipher_alloc_dsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_DSA_KEYPAIR: res = crypto_acipher_alloc_dsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DH_KEYPAIR: res = crypto_acipher_alloc_dh_keypair(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_PUBLIC_KEY: case TEE_TYPE_ECDH_PUBLIC_KEY: res = crypto_acipher_alloc_ecc_public_key(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = crypto_acipher_alloc_ecc_keypair(o->attr, max_key_size); break; default: if (obj_type != TEE_TYPE_DATA) { struct tee_cryp_obj_secret *key = o->attr; key->alloc_size = type_props->alloc_size - sizeof(*key); } break; } if (res != TEE_SUCCESS) return res; o->info.objectType = obj_type; o->info.maxKeySize = max_key_size; o->info.objectUsage = TEE_USAGE_DEFAULT; return TEE_SUCCESS; } 
TEE_Result syscall_cryp_obj_alloc(unsigned long obj_type, unsigned long max_key_size, uint32_t *obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; if (obj_type == TEE_TYPE_DATA) return TEE_ERROR_NOT_SUPPORTED; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; o = tee_obj_alloc(); if (!o) return TEE_ERROR_OUT_OF_MEMORY; res = tee_obj_set_type(o, obj_type, max_key_size); if (res != TEE_SUCCESS) { tee_obj_free(o); return res; } tee_obj_add(to_user_ta_ctx(sess->ctx), o); res = tee_svc_copy_kaddr_to_uref(obj, o); if (res != TEE_SUCCESS) tee_obj_close(to_user_ta_ctx(sess->ctx), o); return res; } TEE_Result syscall_cryp_obj_close(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* * If it's busy it's used by an operation, a client should never have * this handle. 
*/ if (o->busy) return TEE_ERROR_ITEM_NOT_FOUND; tee_obj_close(to_user_ta_ctx(sess->ctx), o); return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_reset(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) == 0) { tee_obj_attr_clear(o); o->info.keySize = 0; o->info.objectUsage = TEE_USAGE_DEFAULT; } else { return TEE_ERROR_BAD_PARAMETERS; } /* the object is no more initialized */ o->info.handleFlags &= ~TEE_HANDLE_FLAG_INITIALIZED; return TEE_SUCCESS; } static TEE_Result copy_in_attrs(struct user_ta_ctx *utc, const struct utee_attribute *usr_attrs, uint32_t attr_count, TEE_Attribute *attrs) { TEE_Result res; uint32_t n; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)usr_attrs, attr_count * sizeof(struct utee_attribute)); if (res != TEE_SUCCESS) return res; for (n = 0; n < attr_count; n++) { attrs[n].attributeID = usr_attrs[n].attribute_id; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) { attrs[n].content.value.a = usr_attrs[n].a; attrs[n].content.value.b = usr_attrs[n].b; } else { uintptr_t buf = usr_attrs[n].a; size_t len = usr_attrs[n].b; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, buf, len); if (res != TEE_SUCCESS) return res; attrs[n].content.ref.buffer = (void *)buf; attrs[n].content.ref.length = len; } } return TEE_SUCCESS; } enum attr_usage { ATTR_USAGE_POPULATE, ATTR_USAGE_GENERATE_KEY }; static TEE_Result tee_svc_cryp_check_attr(enum attr_usage usage, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { uint32_t required_flag; uint32_t opt_flag; bool all_opt_needed; uint32_t req_attrs = 0; uint32_t opt_grp_attrs = 0; uint32_t attrs_found 
= 0; size_t n; uint32_t bit; uint32_t flags; int idx; if (usage == ATTR_USAGE_POPULATE) { required_flag = TEE_TYPE_ATTR_REQUIRED; opt_flag = TEE_TYPE_ATTR_OPTIONAL_GROUP; all_opt_needed = true; } else { required_flag = TEE_TYPE_ATTR_GEN_KEY_REQ; opt_flag = TEE_TYPE_ATTR_GEN_KEY_OPT; all_opt_needed = false; } /* * First find out which attributes are required and which belong to * the optional group */ for (n = 0; n < type_props->num_type_attrs; n++) { bit = 1 << n; flags = type_props->type_attrs[n].flags; if (flags & required_flag) req_attrs |= bit; else if (flags & opt_flag) opt_grp_attrs |= bit; } /* * Verify that all required attributes are in place and * that the same attribute isn't repeated. */ for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; bit = 1 << idx; /* attribute not repeated */ if ((attrs_found & bit) != 0) return TEE_ERROR_ITEM_NOT_FOUND; attrs_found |= bit; } /* Required attribute missing */ if ((attrs_found & req_attrs) != req_attrs) return TEE_ERROR_ITEM_NOT_FOUND; /* * If the flag says that "if one of the optional attributes are included * all of them has to be included" this must be checked. 
*/ if (all_opt_needed && (attrs_found & opt_grp_attrs) != 0 && (attrs_found & opt_grp_attrs) != opt_grp_attrs) return TEE_ERROR_ITEM_NOT_FOUND; return TEE_SUCCESS; } static TEE_Result get_ec_key_size(uint32_t curve, size_t *key_size) { switch (curve) { case TEE_ECC_CURVE_NIST_P192: *key_size = 192; break; case TEE_ECC_CURVE_NIST_P224: *key_size = 224; break; case TEE_ECC_CURVE_NIST_P256: *key_size = 256; break; case TEE_ECC_CURVE_NIST_P384: *key_size = 384; break; case TEE_ECC_CURVE_NIST_P521: *key_size = 521; break; default: return TEE_ERROR_NOT_SUPPORTED; } return TEE_SUCCESS; } static TEE_Result tee_svc_cryp_obj_populate_type( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { TEE_Result res; uint32_t have_attrs = 0; size_t obj_size = 0; size_t n; int idx; const struct attr_ops *ops; void *attr; for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; have_attrs |= BIT32(idx); ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) res = ops->from_user(attr, &attrs[n].content.value, sizeof(attrs[n].content.value)); else res = ops->from_user(attr, attrs[n].content.ref.buffer, attrs[n].content.ref.length); if (res != TEE_SUCCESS) return res; /* * First attr_idx signifies the attribute that gives the size * of the object */ if (type_props->type_attrs[idx].flags & TEE_TYPE_ATTR_SIZE_INDICATOR) { /* * For ECDSA/ECDH we need to translate curve into * object size */ if (attrs[n].attributeID == TEE_ATTR_ECC_CURVE) { res = get_ec_key_size(attrs[n].content.value.a, &obj_size); if (res != TEE_SUCCESS) return res; } else { obj_size += (attrs[n].content.ref.length * 8); } } } /* * We have to do it like this because the parity bits aren't counted 
* when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) obj_size -= obj_size / 8; /* Exclude parity in size of key */ o->have_attrs = have_attrs; o->info.keySize = obj_size; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; attrs = malloc(sizeof(TEE_Attribute) * attr_count); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; } TEE_Result syscall_cryp_obj_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *dst_o; struct tee_obj *src_o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(dst), &dst_o); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), 
tee_svc_uref_to_vaddr(src), &src_o); if (res != TEE_SUCCESS) return res; if ((src_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; res = tee_obj_attr_copy_from(dst_o, src_o); if (res != TEE_SUCCESS) return res; dst_o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; dst_o->info.keySize = src_o->info.keySize; dst_o->info.objectUsage = src_o->info.objectUsage; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_rsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct rsa_keypair *key = o->attr; uint32_t e = TEE_U32_TO_BIG_ENDIAN(65537); /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; if (!get_attribute(o, type_props, TEE_ATTR_RSA_PUBLIC_EXPONENT)) crypto_bignum_bin2bn((const uint8_t *)&e, sizeof(e), key->e); res = crypto_acipher_gen_rsa_key(key, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size) { TEE_Result res; res = crypto_acipher_gen_dsa_key(o->attr, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dh( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t 
param_count) { TEE_Result res; struct dh_keypair *tee_dh_key; struct bignum *dh_q = NULL; uint32_t dh_xbits = 0; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_dh_key = (struct dh_keypair *)o->attr; if (get_attribute(o, type_props, TEE_ATTR_DH_SUBPRIME)) dh_q = tee_dh_key->q; if (get_attribute(o, type_props, TEE_ATTR_DH_X_BITS)) dh_xbits = tee_dh_key->xbits; res = crypto_acipher_gen_dh_key(tee_dh_key, dh_q, dh_xbits); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_DH_PUBLIC_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_X_BITS); return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_ecc( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct ecc_keypair *tee_ecc_key; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_ecc_key = (struct ecc_keypair *)o->attr; res = crypto_acipher_gen_ecc_key(tee_ecc_key); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_ECC_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_X); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_Y); set_attribute(o, type_props, TEE_ATTR_ECC_CURVE); return TEE_SUCCESS; } TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; 
TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. 
*/ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; } static TEE_Result tee_svc_cryp_get_state(struct tee_ta_session *sess, uint32_t state_id, struct tee_cryp_state **state) { struct tee_cryp_state *s; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); TAILQ_FOREACH(s, &utc->cryp_states, link) { if (state_id == (vaddr_t)s) { *state = s; return TEE_SUCCESS; } } return TEE_ERROR_BAD_PARAMETERS; } static void cryp_state_free(struct user_ta_ctx *utc, struct tee_cryp_state *cs) { struct tee_obj *o; if (tee_obj_get(utc, cs->key1, &o) == TEE_SUCCESS) tee_obj_close(utc, o); if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) tee_obj_close(utc, o); TAILQ_REMOVE(&utc->cryp_states, cs, link); if (cs->ctx_finalize != NULL) cs->ctx_finalize(cs->ctx, cs->algo); switch 
(TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_AE: crypto_authenc_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_MAC: crypto_mac_free_ctx(cs->ctx, cs->algo); break; default: assert(!cs->ctx); } free(cs); } static TEE_Result tee_svc_cryp_check_key_type(const struct tee_obj *o, uint32_t algo, TEE_OperationMode mode) { uint32_t req_key_type; uint32_t req_key_type2 = 0; switch (TEE_ALG_GET_MAIN_ALG(algo)) { case TEE_MAIN_ALGO_MD5: req_key_type = TEE_TYPE_HMAC_MD5; break; case TEE_MAIN_ALGO_SHA1: req_key_type = TEE_TYPE_HMAC_SHA1; break; case TEE_MAIN_ALGO_SHA224: req_key_type = TEE_TYPE_HMAC_SHA224; break; case TEE_MAIN_ALGO_SHA256: req_key_type = TEE_TYPE_HMAC_SHA256; break; case TEE_MAIN_ALGO_SHA384: req_key_type = TEE_TYPE_HMAC_SHA384; break; case TEE_MAIN_ALGO_SHA512: req_key_type = TEE_TYPE_HMAC_SHA512; break; case TEE_MAIN_ALGO_AES: req_key_type = TEE_TYPE_AES; break; case TEE_MAIN_ALGO_DES: req_key_type = TEE_TYPE_DES; break; case TEE_MAIN_ALGO_DES3: req_key_type = TEE_TYPE_DES3; break; case TEE_MAIN_ALGO_RSA: req_key_type = TEE_TYPE_RSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_RSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DSA: req_key_type = TEE_TYPE_DSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_DSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DH: req_key_type = TEE_TYPE_DH_KEYPAIR; break; case TEE_MAIN_ALGO_ECDSA: req_key_type = TEE_TYPE_ECDSA_KEYPAIR; if (mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_ECDSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_ECDH: req_key_type = TEE_TYPE_ECDH_KEYPAIR; break; #if defined(CFG_CRYPTO_HKDF) case TEE_MAIN_ALGO_HKDF: req_key_type = TEE_TYPE_HKDF_IKM; break; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) case TEE_MAIN_ALGO_CONCAT_KDF: req_key_type = TEE_TYPE_CONCAT_KDF_Z; break; 
#endif #if defined(CFG_CRYPTO_PBKDF2) case TEE_MAIN_ALGO_PBKDF2: req_key_type = TEE_TYPE_PBKDF2_PASSWORD; break; #endif default: return TEE_ERROR_BAD_PARAMETERS; } if (req_key_type != o->info.objectType && req_key_type2 != o->info.objectType) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } TEE_Result syscall_cryp_state_alloc(unsigned long algo, unsigned long mode, unsigned long key1, unsigned long key2, uint32_t *state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o1 = NULL; struct tee_obj *o2 = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); if (key1 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key1), &o1); if (res != TEE_SUCCESS) return res; if (o1->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o1, algo, mode); if (res != TEE_SUCCESS) return res; } if (key2 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key2), &o2); if (res != TEE_SUCCESS) return res; if (o2->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o2, algo, mode); if (res != TEE_SUCCESS) return res; } cs = calloc(1, sizeof(struct tee_cryp_state)); if (!cs) return TEE_ERROR_OUT_OF_MEMORY; TAILQ_INSERT_TAIL(&utc->cryp_states, cs, link); cs->algo = algo; cs->mode = mode; switch (TEE_ALG_GET_CLASS(algo)) { case TEE_OPERATION_EXTENSION: #ifdef CFG_CRYPTO_RSASSA_NA1 if (algo == TEE_ALG_RSASSA_PKCS1_V1_5) goto rsassa_na1; #endif res = TEE_ERROR_NOT_SUPPORTED; break; case TEE_OPERATION_CIPHER: if ((algo == TEE_ALG_AES_XTS && (key1 == 0 || key2 == 0)) || (algo != TEE_ALG_AES_XTS && (key1 == 0 || key2 != 0))) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_cipher_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_AE: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_authenc_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; 
} break; case TEE_OPERATION_MAC: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_mac_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_DIGEST: if (key1 != 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_hash_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_ASYMMETRIC_CIPHER: case TEE_OPERATION_ASYMMETRIC_SIGNATURE: rsassa_na1: __maybe_unused if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; case TEE_OPERATION_KEY_DERIVATION: if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; default: res = TEE_ERROR_NOT_SUPPORTED; break; } if (res != TEE_SUCCESS) goto out; res = tee_svc_copy_kaddr_to_uref(state, cs); if (res != TEE_SUCCESS) goto out; /* Register keys */ if (o1 != NULL) { o1->busy = true; cs->key1 = (vaddr_t)o1; } if (o2 != NULL) { o2->busy = true; cs->key2 = (vaddr_t)o2; } out: if (res != TEE_SUCCESS) cryp_state_free(utc, cs); return res; } TEE_Result syscall_cryp_state_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_cryp_state *cs_dst; struct tee_cryp_state *cs_src; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(dst), &cs_dst); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(src), &cs_src); if (res != TEE_SUCCESS) return res; if (cs_dst->algo != cs_src->algo || cs_dst->mode != cs_src->mode) return TEE_ERROR_BAD_PARAMETERS; switch (TEE_ALG_GET_CLASS(cs_src->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_AE: crypto_authenc_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_MAC: crypto_mac_copy_state(cs_dst->ctx, 
cs_src->ctx, cs_src->algo); break; default: return TEE_ERROR_BAD_STATE; } return TEE_SUCCESS; } void tee_svc_cryp_free_states(struct user_ta_ctx *utc) { struct tee_cryp_state_head *states = &utc->cryp_states; while (!TAILQ_EMPTY(states)) cryp_state_free(utc, TAILQ_FIRST(states)); } TEE_Result syscall_cryp_state_free(unsigned long state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; cryp_state_free(to_user_ta_ctx(sess->ctx), cs); return TEE_SUCCESS; } TEE_Result syscall_hash_init(unsigned long state, const void *iv __maybe_unused, size_t iv_len __maybe_unused) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_init(cs->ctx, cs->algo); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: { struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = (struct tee_cryp_obj_secret *)o->attr; res = crypto_mac_init(cs->ctx, cs->algo, (void *)(key + 1), key->key_size); if (res != TEE_SUCCESS) return res; break; } default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_update(unsigned long state, const void *chunk, size_t chunk_size) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. 
*/ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; /* Zero length hash is valid, but nothing we need to do. */ if (!chunk_size) return TEE_SUCCESS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_final(unsigned long state, const void *chunk, size_t chunk_size, void *hash, uint64_t *hash_len) { TEE_Result res, res2; size_t hash_size; uint64_t hlen; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. 
*/ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&hlen, hash_len, sizeof(hlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)hash, hlen); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = tee_hash_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_hash_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = tee_mac_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_mac_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } out: hlen = hash_size; res2 = tee_svc_copy_to_user(hash_len, &hlen, sizeof(*hash_len)); if (res2 != TEE_SUCCESS) return res2; return res; } TEE_Result syscall_cipher_init(unsigned long state, const void *iv, size_t iv_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key1; struct user_ta_ctx *utc; 
res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) iv, iv_len); if (res != TEE_SUCCESS) return res; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key1 = o->attr; if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) { struct tee_cryp_obj_secret *key2 = o->attr; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, (uint8_t *)(key2 + 1), key2->key_size, iv, iv_len); } else { res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, NULL, 0, iv, iv_len); } if (res != TEE_SUCCESS) return res; cs->ctx_finalize = crypto_cipher_final; return TEE_SUCCESS; } static TEE_Result tee_svc_cipher_update_helper(unsigned long state, bool last_block, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst, 
dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (src_len > 0) { /* Permit src_len == 0 to finalize the operation */ res = tee_do_cipher_update(cs->ctx, cs->algo, cs->mode, last_block, src, src_len, dst); } if (last_block && cs->ctx_finalize != NULL) { cs->ctx_finalize(cs->ctx, cs->algo); cs->ctx_finalize = NULL; } out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; dlen = src_len; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_cipher_update(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, false /* last_block */, src, src_len, dst, dst_len); } TEE_Result syscall_cipher_final(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, true /* last_block */, src, src_len, dst, dst_len); } #if defined(CFG_CRYPTO_HKDF) static TEE_Result get_hkdf_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, void **info, size_t *info_len, size_t *okm_len) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, INFO = 0x4 }; uint8_t found = 0; *salt = *info = NULL; *salt_len = *info_len = *okm_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_HKDF_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_HKDF_OKM_LENGTH: if (!(found & LENGTH)) { *okm_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_HKDF_INFO: if (!(found & INFO)) { *info = params[n].content.ref.buffer; *info_len = params[n].content.ref.length; found |= INFO; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; 
	return TEE_SUCCESS;
}
#endif

#if defined(CFG_CRYPTO_CONCAT_KDF)
/*
 * Extract Concat-KDF inputs (OtherInfo and derived-key length) from a
 * user-supplied attribute list. Duplicate attributes are ignored after
 * the first; unknown attributes and a missing DKM length are rejected.
 */
static TEE_Result get_concat_kdf_params(const TEE_Attribute *params,
					uint32_t param_count,
					void **other_info,
					size_t *other_info_len,
					size_t *derived_key_len)
{
	size_t n;
	enum { LENGTH = 0x1, INFO = 0x2 };
	uint8_t found = 0;

	*other_info = NULL;
	*other_info_len = *derived_key_len = 0;

	for (n = 0; n < param_count; n++) {
		switch (params[n].attributeID) {
		case TEE_ATTR_CONCAT_KDF_OTHER_INFO:
			if (!(found & INFO)) {
				*other_info = params[n].content.ref.buffer;
				*other_info_len =
					params[n].content.ref.length;
				found |= INFO;
			}
			break;
		case TEE_ATTR_CONCAT_KDF_DKM_LENGTH:
			if (!(found & LENGTH)) {
				*derived_key_len = params[n].content.value.a;
				found |= LENGTH;
			}
			break;
		default:
			/* Unexpected attribute */
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (!(found & LENGTH))
		return TEE_ERROR_BAD_PARAMETERS;

	return TEE_SUCCESS;
}
#endif

#if defined(CFG_CRYPTO_PBKDF2)
/*
 * Extract PBKDF2 inputs (salt, derived-key length, iteration count) from
 * a user-supplied attribute list. Both the DKM length and the iteration
 * count are mandatory; unknown attributes are rejected.
 */
static TEE_Result get_pbkdf2_params(const TEE_Attribute *params,
				    uint32_t param_count, void **salt,
				    size_t *salt_len, size_t *derived_key_len,
				    size_t *iteration_count)
{
	size_t n;
	enum { SALT = 0x1, LENGTH = 0x2, COUNT = 0x4 };
	uint8_t found = 0;

	*salt = NULL;
	*salt_len = *derived_key_len = *iteration_count = 0;

	for (n = 0; n < param_count; n++) {
		switch (params[n].attributeID) {
		case TEE_ATTR_PBKDF2_SALT:
			if (!(found & SALT)) {
				*salt = params[n].content.ref.buffer;
				*salt_len = params[n].content.ref.length;
				found |= SALT;
			}
			break;
		case TEE_ATTR_PBKDF2_DKM_LENGTH:
			if (!(found & LENGTH)) {
				*derived_key_len = params[n].content.value.a;
				found |= LENGTH;
			}
			break;
		case TEE_ATTR_PBKDF2_ITERATION_COUNT:
			if (!(found & COUNT)) {
				*iteration_count = params[n].content.value.a;
				found |= COUNT;
			}
			break;
		default:
			/* Unexpected attribute */
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if ((found & (LENGTH|COUNT)) != (LENGTH|COUNT))
		return TEE_ERROR_BAD_PARAMETERS;

	return TEE_SUCCESS;
}
#endif

/*
 * Derive a key into the transient object 'derived_key' using the
 * algorithm of crypto state 'state' and the user-supplied attributes.
 * Supports DH and ECDH shared-secret derivation plus (when configured)
 * HKDF, Concat-KDF and PBKDF2. On success the destination object is
 * marked initialized with a TEE_ATTR_SECRET_VALUE.
 */
TEE_Result syscall_cryp_derive_key(unsigned long state,
				   const struct utee_attribute *usr_params,
				   unsigned long param_count,
				   unsigned long derived_key)
{
	TEE_Result res = TEE_ERROR_NOT_SUPPORTED;
	struct tee_ta_session *sess;
	struct tee_obj *ko;
	struct tee_obj *so;
	struct tee_cryp_state *cs;
	struct tee_cryp_obj_secret *sk;
	const struct tee_cryp_obj_type_props *type_props;
	TEE_Attribute *params = NULL;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	/*
	 * NOTE(review): param_count comes from user space and the
	 * multiplication below is not checked for overflow — on 32-bit
	 * targets a huge count could wrap the allocation size. Worth
	 * confirming against upstream, which bounds/overflow-checks this.
	 */
	params = malloc(sizeof(TEE_Attribute) * param_count);
	if (!params)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = copy_in_attrs(utc, usr_params, param_count, params);
	if (res != TEE_SUCCESS)
		goto out;

	/* Get key set in operation */
	res = tee_obj_get(utc, cs->key1, &ko);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_obj_get(utc, tee_svc_uref_to_vaddr(derived_key), &so);
	if (res != TEE_SUCCESS)
		goto out;

	/* Find information needed about the object to initialize */
	sk = so->attr;

	/* Find description of object */
	type_props = tee_svc_find_type_props(so->info.objectType);
	if (!type_props) {
		res = TEE_ERROR_NOT_SUPPORTED;
		goto out;
	}

	if (cs->algo == TEE_ALG_DH_DERIVE_SHARED_SECRET) {
		size_t alloc_size;
		struct bignum *pub;
		struct bignum *ss;

		/* DH needs exactly one attribute: the peer public value */
		if (param_count != 1 ||
		    params[0].attributeID != TEE_ATTR_DH_PUBLIC_VALUE) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto out;
		}

		/* Bignum allocation size is in bits */
		alloc_size = params[0].content.ref.length * 8;
		pub = crypto_bignum_allocate(alloc_size);
		ss = crypto_bignum_allocate(alloc_size);
		if (pub && ss) {
			crypto_bignum_bin2bn(params[0].content.ref.buffer,
					     params[0].content.ref.length,
					     pub);
			res = crypto_acipher_dh_shared_secret(ko->attr,
							      pub, ss);
			if (res == TEE_SUCCESS) {
				sk->key_size = crypto_bignum_num_bytes(ss);
				crypto_bignum_bn2bin(ss,
						     (uint8_t *)(sk + 1));
				so->info.handleFlags |=
						TEE_HANDLE_FLAG_INITIALIZED;
				set_attribute(so, type_props,
					      TEE_ATTR_SECRET_VALUE);
			}
		} else {
			res = TEE_ERROR_OUT_OF_MEMORY;
		}
		crypto_bignum_free(pub);
		crypto_bignum_free(ss);
	} else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_ECDH) {
		size_t alloc_size;
		struct ecc_public_key key_public;
		uint8_t *pt_secret;
		unsigned long pt_secret_len;

		/* ECDH needs exactly the peer public point (X then Y) */
		if (param_count != 2 ||
		    params[0].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_X ||
		    params[1].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_Y) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto out;
		}

		/* Key size in bits follows the curve of the algorithm */
		switch (cs->algo) {
		case TEE_ALG_ECDH_P192:
			alloc_size = 192;
			break;
		case TEE_ALG_ECDH_P224:
			alloc_size = 224;
			break;
		case TEE_ALG_ECDH_P256:
			alloc_size = 256;
			break;
		case TEE_ALG_ECDH_P384:
			alloc_size = 384;
			break;
		case TEE_ALG_ECDH_P521:
			alloc_size = 521;
			break;
		default:
			res = TEE_ERROR_NOT_IMPLEMENTED;
			goto out;
		}

		/* Create the public key */
		res = crypto_acipher_alloc_ecc_public_key(&key_public,
							  alloc_size);
		if (res != TEE_SUCCESS)
			goto out;
		/* The peer key must be on the same curve as our keypair */
		key_public.curve = ((struct ecc_keypair *)ko->attr)->curve;
		crypto_bignum_bin2bn(params[0].content.ref.buffer,
				     params[0].content.ref.length,
				     key_public.x);
		crypto_bignum_bin2bn(params[1].content.ref.buffer,
				     params[1].content.ref.length,
				     key_public.y);

		pt_secret = (uint8_t *)(sk + 1);
		pt_secret_len = sk->alloc_size;
		res = crypto_acipher_ecc_shared_secret(ko->attr, &key_public,
						       pt_secret,
						       &pt_secret_len);

		if (res == TEE_SUCCESS) {
			sk->key_size = pt_secret_len;
			so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;
			set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE);
		}

		/* free the public key */
		crypto_acipher_free_ecc_public_key(&key_public);
	}
#if defined(CFG_CRYPTO_HKDF)
	else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_HKDF) {
		void *salt, *info;
		size_t salt_len, info_len, okm_len;
		uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo);
		struct tee_cryp_obj_secret *ik = ko->attr;
		const uint8_t *ikm = (const uint8_t *)(ik + 1);

		res = get_hkdf_params(params, param_count, &salt, &salt_len,
				      &info, &info_len, &okm_len);
		if (res != TEE_SUCCESS)
			goto out;

		/* Requested size must fit into the output object's buffer */
		if (okm_len > ik->alloc_size) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto out;
		}

		res = tee_cryp_hkdf(hash_id, ikm, ik->key_size, salt,
				    salt_len, info, info_len,
				    (uint8_t *)(sk + 1), okm_len);
		if (res == TEE_SUCCESS) {
			sk->key_size = okm_len;
			so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;
			set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE);
		}
	}
#endif
#if defined(CFG_CRYPTO_CONCAT_KDF)
	else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_CONCAT_KDF) {
		void *info;
		size_t info_len, derived_key_len;
		uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo);
		struct tee_cryp_obj_secret *ss = ko->attr;
		const uint8_t *shared_secret = (const uint8_t *)(ss + 1);

		res = get_concat_kdf_params(params, param_count, &info,
					    &info_len, &derived_key_len);
		if (res != TEE_SUCCESS)
			goto out;

		/* Requested size must fit into the output object's buffer */
		if (derived_key_len > ss->alloc_size) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto out;
		}

		res = tee_cryp_concat_kdf(hash_id, shared_secret,
					  ss->key_size, info, info_len,
					  (uint8_t *)(sk + 1),
					  derived_key_len);
		if (res == TEE_SUCCESS) {
			sk->key_size = derived_key_len;
			so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;
			set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE);
		}
	}
#endif
#if defined(CFG_CRYPTO_PBKDF2)
	else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_PBKDF2) {
		void *salt;
		size_t salt_len, iteration_count, derived_key_len;
		uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo);
		struct tee_cryp_obj_secret *ss = ko->attr;
		const uint8_t *password = (const uint8_t *)(ss + 1);

		res = get_pbkdf2_params(params, param_count, &salt, &salt_len,
					&derived_key_len, &iteration_count);
		if (res != TEE_SUCCESS)
			goto out;

		/* Requested size must fit into the output object's buffer */
		if (derived_key_len > ss->alloc_size) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto out;
		}

		res = tee_cryp_pbkdf2(hash_id, password, ss->key_size, salt,
				      salt_len, iteration_count,
				      (uint8_t *)(sk + 1), derived_key_len);
		if (res == TEE_SUCCESS) {
			sk->key_size = derived_key_len;
			so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;
			set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE);
		}
	}
#endif
	else
		res = TEE_ERROR_NOT_SUPPORTED;

out:
	free(params);
	return res;
}

/*
 * Fill a user-space buffer with cryptographically secure random bytes
 * after validating that the buffer is writable by the calling TA.
 */
TEE_Result syscall_cryp_random_number_generate(void *buf, size_t blen)
{
	TEE_Result res;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)buf, blen);
	if (res != TEE_SUCCESS)
		return res;

	res = crypto_rng_read(buf, blen);
	if (res != TEE_SUCCESS)
		return res;

	return res;
}

/*
 * Initialize an authenticated-encryption (AE) operation with the key
 * attached to the crypto state, the nonce and the expected tag/AAD/
 * payload lengths. Registers crypto_authenc_final as the finalizer.
 */
TEE_Result syscall_authenc_init(unsigned long state, const void *nonce,
				size_t nonce_len, size_t tag_len,
				size_t aad_len, size_t payload_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	struct tee_obj *o;
	struct tee_cryp_obj_secret *key;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o);
	if (res != TEE_SUCCESS)
		return res;
	/* The key object must have been populated/generated first */
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0)
		return TEE_ERROR_BAD_PARAMETERS;

	key = o->attr;
	res = crypto_authenc_init(cs->ctx, cs->algo, cs->mode,
				  (uint8_t *)(key + 1), key->key_size,
				  nonce, nonce_len, tag_len, aad_len,
				  payload_len);
	if (res != TEE_SUCCESS)
		return res;

	cs->ctx_finalize = (tee_cryp_ctx_finalize_func_t)crypto_authenc_final;
	return TEE_SUCCESS;
}

/*
 * Feed additional authenticated data (AAD) into an initialized AE
 * operation after validating the user-space source range.
 */
TEE_Result syscall_authenc_update_aad(unsigned long state,
				      const void *aad_data,
				      size_t aad_data_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t) aad_data,
					  aad_data_len);
	if (res != TEE_SUCCESS)
		return res;

	res =
	    tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = crypto_authenc_update_aad(cs->ctx, cs->algo, cs->mode,
					aad_data, aad_data_len);
	if (res != TEE_SUCCESS)
		return res;

	return TEE_SUCCESS;
}

/*
 * Encrypt/decrypt a payload chunk of an AE operation. Validates both
 * user-space ranges, requires the destination to be at least src_len
 * bytes and reports the produced size back through *dst_len (also on
 * TEE_ERROR_SHORT_BUFFER so the caller can resize and retry).
 */
TEE_Result syscall_authenc_update_payload(unsigned long state,
					  const void *src_data,
					  size_t src_len, void *dst_data,
					  uint64_t *dst_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen;
	size_t tmp_dlen;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t) src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	/* Copy the caller's buffer capacity in before validating it */
	res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen));
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)dst_data, dlen);
	if (res != TEE_SUCCESS)
		return res;

	if (dlen < src_len) {
		res = TEE_ERROR_SHORT_BUFFER;
		goto out;
	}

	tmp_dlen = dlen;
	res = crypto_authenc_update_payload(cs->ctx, cs->algo, cs->mode,
					    src_data, src_len, dst_data,
					    &tmp_dlen);
	dlen = tmp_dlen;

out:
	if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) {
		TEE_Result res2 = tee_svc_copy_to_user(dst_len, &dlen,
						       sizeof(*dst_len));
		if (res2 != TEE_SUCCESS)
			res = res2;
	}

	return res;
}

/*
 * Finalize an AE encryption: process the last payload chunk and emit
 * the authentication tag. dst_len may be NULL when no ciphertext output
 * is expected; produced sizes are copied back on success and on
 * TEE_ERROR_SHORT_BUFFER.
 */
TEE_Result syscall_authenc_enc_final(unsigned long state,
				     const void *src_data, size_t src_len,
				     void *dst_data, uint64_t *dst_len,
				     void *tag, uint64_t *tag_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen;
	uint64_t tlen = 0;
	size_t tmp_dlen;
	size_t tmp_tlen;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	/* This entry point is only valid for encryption states */
	if (cs->mode != TEE_MODE_ENCRYPT)
		return TEE_ERROR_BAD_PARAMETERS;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	if (!dst_len) {
		dlen = 0;
	} else {
		res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen));
		if (res != TEE_SUCCESS)
			return res;

		res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)dst_data, dlen);
		if (res != TEE_SUCCESS)
			return res;
	}

	if (dlen < src_len) {
		res = TEE_ERROR_SHORT_BUFFER;
		goto out;
	}

	res = tee_svc_copy_from_user(&tlen, tag_len, sizeof(tlen));
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)tag, tlen);
	if (res != TEE_SUCCESS)
		return res;

	tmp_dlen = dlen;
	tmp_tlen = tlen;
	res = crypto_authenc_enc_final(cs->ctx, cs->algo, src_data, src_len,
				       dst_data, &tmp_dlen, tag, &tmp_tlen);
	dlen = tmp_dlen;
	tlen = tmp_tlen;

out:
	if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) {
		TEE_Result res2;

		if (dst_len != NULL) {
			res2 = tee_svc_copy_to_user(dst_len, &dlen,
						    sizeof(*dst_len));
			if (res2 != TEE_SUCCESS)
				return res2;
		}

		res2 = tee_svc_copy_to_user(tag_len, &tlen,
					    sizeof(*tag_len));
		if (res2 != TEE_SUCCESS)
			return res2;
	}

	return res;
}

/*
 * Finalize an AE decryption: process the last payload chunk and verify
 * the caller-supplied authentication tag. Mirrors
 * syscall_authenc_enc_final() but the tag is an input here.
 */
TEE_Result syscall_authenc_dec_final(unsigned long state,
				     const void *src_data, size_t src_len,
				     void *dst_data, uint64_t *dst_len,
				     const void *tag, size_t tag_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen;
	size_t tmp_dlen;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	/* This entry point is only valid for decryption states */
	if (cs->mode != TEE_MODE_DECRYPT)
		return TEE_ERROR_BAD_PARAMETERS;

	res =
	    tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					TEE_MEMORY_ACCESS_READ |
					TEE_MEMORY_ACCESS_ANY_OWNER,
					(uaddr_t)src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	if (!dst_len) {
		dlen = 0;
	} else {
		res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen));
		if (res != TEE_SUCCESS)
			return res;

		res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)dst_data, dlen);
		if (res != TEE_SUCCESS)
			return res;
	}

	if (dlen < src_len) {
		res = TEE_ERROR_SHORT_BUFFER;
		goto out;
	}

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)tag, tag_len);
	if (res != TEE_SUCCESS)
		return res;

	tmp_dlen = dlen;
	res = crypto_authenc_dec_final(cs->ctx, cs->algo, src_data, src_len,
				       dst_data, &tmp_dlen, tag, tag_len);
	dlen = tmp_dlen;

out:
	if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) &&
	    dst_len != NULL) {
		TEE_Result res2;

		res2 = tee_svc_copy_to_user(dst_len, &dlen,
					    sizeof(*dst_len));
		if (res2 != TEE_SUCCESS)
			return res2;
	}

	return res;
}

/*
 * Return the RSA-PSS salt length from the attribute list if the caller
 * supplied TEE_ATTR_RSA_PSS_SALT_LENGTH (and it fits in an int),
 * otherwise fall back to default_len (the digest length).
 */
static int pkcs1_get_salt_len(const TEE_Attribute *params,
			      uint32_t num_params, size_t default_len)
{
	size_t n;

	assert(default_len < INT_MAX);

	for (n = 0; n < num_params; n++) {
		if (params[n].attributeID == TEE_ATTR_RSA_PSS_SALT_LENGTH) {
			if (params[n].content.value.a < INT_MAX)
				return params[n].content.value.a;
			break;
		}
	}
	/*
	 * If salt length isn't provided use the default value which is
	 * the length of the digest.
	 */
	return default_len;
}

/*
 * Perform a single-shot asymmetric operation (RSA encrypt/decrypt,
 * RSA/DSA/ECDSA sign) with the key attached to the crypto state.
 * Validates both user buffers, copies the attribute list in, dispatches
 * on cs->algo and writes the produced length back through *dst_len
 * (also on TEE_ERROR_SHORT_BUFFER).
 */
TEE_Result syscall_asymm_operate(unsigned long state,
				 const struct utee_attribute *usr_params,
				 size_t num_params, const void *src_data,
				 size_t src_len, void *dst_data,
				 uint64_t *dst_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen64;
	size_t dlen;
	struct tee_obj *o;
	void *label = NULL;
	size_t label_len = 0;
	size_t n;
	int salt_len;
	TEE_Attribute *params = NULL;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(
		utc,
		TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER,
		(uaddr_t) src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_copy_from_user(&dlen64, dst_len, sizeof(dlen64));
	if (res != TEE_SUCCESS)
		return res;
	dlen = dlen64;

	res = tee_mmu_check_access_rights(
		utc,
		TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE |
			TEE_MEMORY_ACCESS_ANY_OWNER,
		(uaddr_t) dst_data, dlen);
	if (res != TEE_SUCCESS)
		return res;

	params = malloc(sizeof(TEE_Attribute) * num_params);
	if (!params)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = copy_in_attrs(utc, usr_params, num_params, params);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_obj_get(utc, cs->key1, &o);
	if (res != TEE_SUCCESS)
		goto out;
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) {
		res = TEE_ERROR_GENERIC;
		goto out;
	}

	switch (cs->algo) {
	case TEE_ALG_RSA_NOPAD:
		if (cs->mode == TEE_MODE_ENCRYPT) {
			res = crypto_acipher_rsanopad_encrypt(o->attr,
							      src_data,
							      src_len,
							      dst_data,
							      &dlen);
		} else if (cs->mode == TEE_MODE_DECRYPT) {
			res = crypto_acipher_rsanopad_decrypt(o->attr,
							      src_data,
							      src_len,
							      dst_data,
							      &dlen);
		} else {
			/*
			 * We will panic because "the mode is not compatible
			 * with the function"
			 */
			res = TEE_ERROR_GENERIC;
		}
		break;

	case TEE_ALG_RSAES_PKCS1_V1_5:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA1:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA224:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA256:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA384:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA512:
		/* OAEP may carry an optional label attribute */
		for (n = 0; n < num_params; n++) {
			if (params[n].attributeID ==
			    TEE_ATTR_RSA_OAEP_LABEL) {
				label = params[n].content.ref.buffer;
				label_len = params[n].content.ref.length;
				break;
			}
		}

		if (cs->mode == TEE_MODE_ENCRYPT) {
			res = crypto_acipher_rsaes_encrypt(cs->algo, o->attr,
							   label, label_len,
							   src_data, src_len,
							   dst_data, &dlen);
		} else if (cs->mode == TEE_MODE_DECRYPT) {
			res = crypto_acipher_rsaes_decrypt(
					cs->algo, o->attr, label, label_len,
					src_data, src_len, dst_data, &dlen);
		} else {
			res = TEE_ERROR_BAD_PARAMETERS;
		}
		break;

#if defined(CFG_CRYPTO_RSASSA_NA1)
	case TEE_ALG_RSASSA_PKCS1_V1_5:
#endif
	case TEE_ALG_RSASSA_PKCS1_V1_5_MD5:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA1:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA224:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA256:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA384:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA512:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA1:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA224:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA384:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA512:
		if (cs->mode != TEE_MODE_SIGN) {
			res = TEE_ERROR_BAD_PARAMETERS;
			break;
		}
		/* src_len is the digest length here (single-shot sign) */
		salt_len = pkcs1_get_salt_len(params, num_params, src_len);
		res = crypto_acipher_rsassa_sign(cs->algo, o->attr, salt_len,
						 src_data, src_len, dst_data,
						 &dlen);
		break;

	case TEE_ALG_DSA_SHA1:
	case TEE_ALG_DSA_SHA224:
	case TEE_ALG_DSA_SHA256:
		res = crypto_acipher_dsa_sign(cs->algo, o->attr, src_data,
					      src_len, dst_data, &dlen);
		break;

	case TEE_ALG_ECDSA_P192:
	case TEE_ALG_ECDSA_P224:
	case TEE_ALG_ECDSA_P256:
	case TEE_ALG_ECDSA_P384:
	case TEE_ALG_ECDSA_P521:
		res = crypto_acipher_ecc_sign(cs->algo, o->attr, src_data,
					      src_len, dst_data, &dlen);
		break;

	default:
		res = TEE_ERROR_BAD_PARAMETERS;
		break;
	}

out:
	free(params);

	if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) {
		TEE_Result res2;

		dlen64 = dlen;
		res2 = tee_svc_copy_to_user(dst_len, &dlen64,
					    sizeof(*dst_len));
		if (res2 != TEE_SUCCESS)
			return res2;
	}

	return res;
}

/*
 * Verify an asymmetric signature (RSA, DSA or ECDSA) over a caller
 * supplied digest. For RSA-PSS and DSA the digest length is checked
 * against the hash algorithm of cs->algo before verification.
 */
TEE_Result syscall_asymm_verify(unsigned long state,
				const struct utee_attribute *usr_params,
				size_t num_params, const void *data,
				size_t data_len, const void *sig,
				size_t sig_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	struct tee_obj *o;
	size_t hash_size;
	int salt_len = 0;
	TEE_Attribute *params = NULL;
	uint32_t hash_algo;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	if (cs->mode != TEE_MODE_VERIFY)
		return TEE_ERROR_BAD_PARAMETERS;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)data, data_len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)sig, sig_len);
	if (res != TEE_SUCCESS)
		return res;

	params = malloc(sizeof(TEE_Attribute) * num_params);
	if (!params)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = copy_in_attrs(utc, usr_params, num_params, params);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_obj_get(utc, cs->key1, &o);
	if (res != TEE_SUCCESS)
		goto out;
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) {
	case TEE_MAIN_ALGO_RSA:
		/* Raw PKCS#1 v1.5 (no hash) skips the digest-size check */
		if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) {
			hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
			res = tee_hash_get_digest_size(hash_algo,
						       &hash_size);
			if (res != TEE_SUCCESS)
				break;
			if (data_len != hash_size) {
				res = TEE_ERROR_BAD_PARAMETERS;
				break;
			}
			salt_len = pkcs1_get_salt_len(params, num_params,
						      hash_size);
		}
		res = crypto_acipher_rsassa_verify(cs->algo, o->attr,
						   salt_len, data, data_len,
						   sig, sig_len);
		break;

	case TEE_MAIN_ALGO_DSA:
		hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
		res = tee_hash_get_digest_size(hash_algo, &hash_size);
		if (res != TEE_SUCCESS)
			break;
		/*
		 * Depending on the DSA algorithm (NIST), the digital
		 * signature output size may be truncated to the size of
		 * a key pair (Q prime size). Q prime size must be less
		 * or equal than the hash output length of the hash
		 * algorithm involved.
		 */
		if (data_len > hash_size) {
			res = TEE_ERROR_BAD_PARAMETERS;
			break;
		}
		res = crypto_acipher_dsa_verify(cs->algo, o->attr, data,
						data_len, sig, sig_len);
		break;

	case TEE_MAIN_ALGO_ECDSA:
		res = crypto_acipher_ecc_verify(cs->algo, o->attr, data,
						data_len, sig, sig_len);
		break;

	default:
		res = TEE_ERROR_NOT_SUPPORTED;
	}

out:
	free(params);
	return res;
}
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
/*
 * Crypto syscall layer: bridges user-space TA crypto API calls to the
 * core crypto abstraction. This top section declares the per-operation
 * state, the per-object-type attribute descriptor tables and the
 * serialization helpers used to marshal attributes between user space,
 * the in-core representation and the persistent binary format.
 */
#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <kernel/tee_ta_manager.h>
#include <mm/tee_mmu.h>
#include <string_ext.h>
#include <string.h>
#include <sys/queue.h>
#include <tee_api_types.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>
#include <tee_api_defines_extensions.h>
#if defined(CFG_CRYPTO_HKDF)
#include <tee/tee_cryp_hkdf.h>
#endif
#if defined(CFG_CRYPTO_CONCAT_KDF)
#include <tee/tee_cryp_concat_kdf.h>
#endif
#if defined(CFG_CRYPTO_PBKDF2)
#include <tee/tee_cryp_pbkdf2.h>
#endif

/* Optional per-algorithm finalize hook stored in a crypto state */
typedef void (*tee_cryp_ctx_finalize_func_t) (void *ctx, uint32_t algo);

/* One active crypto operation owned by a TA session */
struct tee_cryp_state {
	TAILQ_ENTRY(tee_cryp_state) link;
	uint32_t algo;
	uint32_t mode;
	vaddr_t key1;
	vaddr_t key2;
	void *ctx;
	tee_cryp_ctx_finalize_func_t ctx_finalize;
};

struct tee_cryp_obj_secret {
	uint32_t key_size;
	uint32_t alloc_size;

	/*
	 * Pseudo code visualize layout of structure
	 * Next follows data, such as:
	 *	uint8_t data[alloc_size]
	 * key_size must never exceed alloc_size
	 */
};

/* Flags describing how each attribute of an object type is handled */
#define TEE_TYPE_ATTR_OPTIONAL       0x0
#define TEE_TYPE_ATTR_REQUIRED       0x1
#define TEE_TYPE_ATTR_OPTIONAL_GROUP 0x2
#define TEE_TYPE_ATTR_SIZE_INDICATOR 0x4
#define TEE_TYPE_ATTR_GEN_KEY_OPT    0x8
#define TEE_TYPE_ATTR_GEN_KEY_REQ    0x10

    /* Handle storing of generic secret keys of varying lengths */
#define ATTR_OPS_INDEX_SECRET  0
    /* Convert to/from big-endian byte array and provider-specific bignum */
#define ATTR_OPS_INDEX_BIGNUM  1
    /* Convert to/from value attribute depending on direction */
#define ATTR_OPS_INDEX_VALUE   2

/* Descriptor for one attribute of an object type */
struct tee_cryp_obj_type_attrs {
	uint32_t attr_id;
	uint16_t flags;
	uint16_t ops_index;
	uint16_t raw_offs;
	uint16_t raw_size;
};

/* Bind an attribute to a member of the in-core key struct */
#define RAW_DATA(_x, _y)	\
	.raw_offs = offsetof(_x, _y), .raw_size = MEMBER_SIZE(_x, _y)

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_secret_value_attrs[] = {
	{
	.attr_id = TEE_ATTR_SECRET_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_SECRET,
	.raw_offs = 0,
	.raw_size = 0
	},
};

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_rsa_pub_key_attrs[] = {
	{
	.attr_id = TEE_ATTR_RSA_MODULUS,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_public_key, n)
	},

	{
	.attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_public_key, e)
	},
};

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_rsa_keypair_attrs[] = {
	{
	.attr_id = TEE_ATTR_RSA_MODULUS,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, n)
	},

	{
	.attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, e)
	},

	{
	.attr_id = TEE_ATTR_RSA_PRIVATE_EXPONENT,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, d)
	},

	/* CRT components are an optional group: all or none */
	{
	.attr_id = TEE_ATTR_RSA_PRIME1,
	.flags = TEE_TYPE_ATTR_OPTIONAL_GROUP,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, p)
	},

	{
	.attr_id = TEE_ATTR_RSA_PRIME2,
	.flags = TEE_TYPE_ATTR_OPTIONAL_GROUP,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, q)
	},

	{
	.attr_id = TEE_ATTR_RSA_EXPONENT1,
	.flags = TEE_TYPE_ATTR_OPTIONAL_GROUP,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, dp)
	},

	{
	.attr_id = TEE_ATTR_RSA_EXPONENT2,
	.flags = TEE_TYPE_ATTR_OPTIONAL_GROUP,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, dq)
	},

	{
	.attr_id = TEE_ATTR_RSA_COEFFICIENT,
	.flags = TEE_TYPE_ATTR_OPTIONAL_GROUP,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct rsa_keypair, qp)
	},
};

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_dsa_pub_key_attrs[] = {
	{
	.attr_id = TEE_ATTR_DSA_PRIME,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_public_key, p)
	},

	{
	.attr_id = TEE_ATTR_DSA_SUBPRIME,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_public_key, q)
	},

	{
	.attr_id = TEE_ATTR_DSA_BASE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_public_key, g)
	},

	{
	.attr_id = TEE_ATTR_DSA_PUBLIC_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_public_key, y)
	},
};

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_dsa_keypair_attrs[] = {
	{
	.attr_id = TEE_ATTR_DSA_PRIME,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_keypair, p)
	},

	{
	.attr_id = TEE_ATTR_DSA_SUBPRIME,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR |
		 TEE_TYPE_ATTR_GEN_KEY_REQ,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_keypair, q)
	},

	{
	.attr_id = TEE_ATTR_DSA_BASE,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_keypair, g)
	},

	{
	.attr_id = TEE_ATTR_DSA_PRIVATE_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_keypair, x)
	},

	{
	.attr_id = TEE_ATTR_DSA_PUBLIC_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dsa_keypair, y)
	},
};

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_dh_keypair_attrs[] = {
	{
	.attr_id = TEE_ATTR_DH_PRIME,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR |
		 TEE_TYPE_ATTR_GEN_KEY_REQ,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dh_keypair, p)
	},

	{
	.attr_id = TEE_ATTR_DH_BASE,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dh_keypair, g)
	},

	{
	.attr_id = TEE_ATTR_DH_PUBLIC_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dh_keypair, y)
	},

	{
	.attr_id = TEE_ATTR_DH_PRIVATE_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dh_keypair, x)
	},

	{
	.attr_id = TEE_ATTR_DH_SUBPRIME,
	.flags = TEE_TYPE_ATTR_OPTIONAL_GROUP | TEE_TYPE_ATTR_GEN_KEY_OPT,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct dh_keypair, q)
	},

	{
	.attr_id = TEE_ATTR_DH_X_BITS,
	.flags = TEE_TYPE_ATTR_GEN_KEY_OPT,
	.ops_index = ATTR_OPS_INDEX_VALUE,
	RAW_DATA(struct dh_keypair, xbits)
	},
};

#if defined(CFG_CRYPTO_HKDF)
static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_hkdf_ikm_attrs[] = {
	{
	.attr_id = TEE_ATTR_HKDF_IKM,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_SECRET,
	.raw_offs = 0,
	.raw_size = 0
	},
};
#endif

#if defined(CFG_CRYPTO_CONCAT_KDF)
static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_concat_kdf_z_attrs[] = {
	{
	.attr_id = TEE_ATTR_CONCAT_KDF_Z,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_SECRET,
	.raw_offs = 0,
	.raw_size = 0
	},
};
#endif

#if defined(CFG_CRYPTO_PBKDF2)
static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_pbkdf2_passwd_attrs[] = {
	{
	.attr_id = TEE_ATTR_PBKDF2_PASSWORD,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_SECRET,
	.raw_offs = 0,
	.raw_size = 0
	},
};
#endif

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_ecc_pub_key_attrs[] = {
	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_public_key, x)
	},

	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_public_key, y)
	},

	{
	.attr_id = TEE_ATTR_ECC_CURVE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_VALUE,
	RAW_DATA(struct ecc_public_key, curve)
	},
};

static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_ecc_keypair_attrs[] = {
	{
	.attr_id = TEE_ATTR_ECC_PRIVATE_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, d)
	},

	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, x)
	},

	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, y)
	},

	{
	.attr_id = TEE_ATTR_ECC_CURVE,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_VALUE,
	RAW_DATA(struct ecc_keypair, curve)
	},
};

/* Size constraints and attribute table for one TEE object type */
struct tee_cryp_obj_type_props {
	TEE_ObjectType obj_type;
	uint16_t min_size;	/* may not be smaller than this */
	uint16_t max_size;	/* may not be larger than this */
	uint16_t alloc_size;	/* this many bytes are allocated to hold data */
	uint8_t quanta;		/* may only be an multiple of this */

	uint8_t num_type_attrs;
	const struct tee_cryp_obj_type_attrs *type_attrs;
};

#define PROP(obj_type, quanta, min_size, max_size, alloc_size, type_attrs) \
		{ (obj_type), (min_size), (max_size), (alloc_size), (quanta), \
		  ARRAY_SIZE(type_attrs), (type_attrs) }

static const struct tee_cryp_obj_type_props tee_cryp_obj_props[] = {
	PROP(TEE_TYPE_AES, 64, 128, 256,	/* valid sizes 128, 192, 256 */
		256 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_DES, 56, 56, 56,
		/*
		 * Valid size 56 without parity, note that we still allocate
		 * for 64 bits since the key is supplied with parity.
		 */
		64 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_DES3, 56, 112, 168,
		/*
		 * Valid sizes 112, 168 without parity, note that we still
		 * allocate for with space for the parity since the key is
		 * supplied with parity.
		 */
		192 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_MD5, 8, 64, 512,
		512 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA1, 8, 80, 512,
		512 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA224, 8, 112, 512,
		512 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA256, 8, 192, 1024,
		1024 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA384, 8, 256, 1024,
		1024 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_HMAC_SHA512, 8, 256, 1024,
		1024 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_GENERIC_SECRET, 8, 0, 4096,
		4096 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_secret_value_attrs),
#if defined(CFG_CRYPTO_HKDF)
	PROP(TEE_TYPE_HKDF_IKM, 8, 0, 4096,
		4096 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_hkdf_ikm_attrs),
#endif
#if defined(CFG_CRYPTO_CONCAT_KDF)
	PROP(TEE_TYPE_CONCAT_KDF_Z, 8, 0, 4096,
		4096 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_concat_kdf_z_attrs),
#endif
#if defined(CFG_CRYPTO_PBKDF2)
	PROP(TEE_TYPE_PBKDF2_PASSWORD, 8, 0, 4096,
		4096 / 8 + sizeof(struct tee_cryp_obj_secret),
		tee_cryp_obj_pbkdf2_passwd_attrs),
#endif
	PROP(TEE_TYPE_RSA_PUBLIC_KEY, 1, 256, CFG_CORE_BIGNUM_MAX_BITS,
		sizeof(struct rsa_public_key),
		tee_cryp_obj_rsa_pub_key_attrs),
	PROP(TEE_TYPE_RSA_KEYPAIR, 1, 256, CFG_CORE_BIGNUM_MAX_BITS,
		sizeof(struct rsa_keypair),
		tee_cryp_obj_rsa_keypair_attrs),
	PROP(TEE_TYPE_DSA_PUBLIC_KEY, 64, 512, 3072,
		sizeof(struct dsa_public_key),
		tee_cryp_obj_dsa_pub_key_attrs),
	PROP(TEE_TYPE_DSA_KEYPAIR, 64, 512, 3072,
		sizeof(struct dsa_keypair),
		tee_cryp_obj_dsa_keypair_attrs),
	PROP(TEE_TYPE_DH_KEYPAIR, 1, 256, 2048,
		sizeof(struct dh_keypair),
		tee_cryp_obj_dh_keypair_attrs),
	PROP(TEE_TYPE_ECDSA_PUBLIC_KEY, 1, 192, 521,
		sizeof(struct ecc_public_key),
		tee_cryp_obj_ecc_pub_key_attrs),
	PROP(TEE_TYPE_ECDSA_KEYPAIR, 1, 192, 521,
		sizeof(struct ecc_keypair),
		tee_cryp_obj_ecc_keypair_attrs),
	PROP(TEE_TYPE_ECDH_PUBLIC_KEY, 1, 192, 521,
		sizeof(struct ecc_public_key),
		tee_cryp_obj_ecc_pub_key_attrs),
	PROP(TEE_TYPE_ECDH_KEYPAIR, 1, 192, 521,
		sizeof(struct ecc_keypair),
		tee_cryp_obj_ecc_keypair_attrs),
};

/*
 * Per-representation operations used to move attribute data between
 * user space, the in-core attribute struct and the serialized (binary)
 * object format.
 */
struct attr_ops {
	TEE_Result (*from_user)(void *attr, const void *buffer, size_t size);
	TEE_Result (*to_user)(void *attr, struct tee_ta_session *sess,
			      void *buffer, uint64_t *size);
	TEE_Result (*to_binary)(void *attr, void *data, size_t data_len,
			    size_t *offs);
	bool (*from_binary)(void *attr, const void *data, size_t data_len,
			    size_t *offs);
	TEE_Result (*from_obj)(void *attr, void *src_attr);
	void (*free)(void *attr);
	void (*clear)(void *attr);
};

/*
 * Append a big-endian uint32 at *offs in 'data'. Only overflow of the
 * offset itself is an error; a too-small buffer just skips the write so
 * callers can do a sizing pass with data == NULL.
 */
static TEE_Result op_u32_to_binary_helper(uint32_t v, uint8_t *data,
				    size_t data_len, size_t *offs)
{
	uint32_t field;
	size_t next_offs;

	if (ADD_OVERFLOW(*offs, sizeof(field), &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len) {
		field = TEE_U32_TO_BIG_ENDIAN(v);
		memcpy(data + *offs, &field, sizeof(field));
	}
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

/* Read a big-endian uint32 at *offs from 'data'; false on short input */
static bool op_u32_from_binary_helper(uint32_t *v, const uint8_t *data,
				      size_t data_len, size_t *offs)
{
	uint32_t field;

	if (!data || (*offs + sizeof(field)) > data_len)
		return false;

	memcpy(&field, data + *offs, sizeof(field));
	*v = TEE_U32_FROM_BIG_ENDIAN(field);
	(*offs) += sizeof(field);
	return true;
}

/* Copy a secret value supplied by user space into the object buffer */
static TEE_Result op_attr_secret_value_from_user(void *attr,
						 const void *buffer,
						 size_t size)
{
	struct tee_cryp_obj_secret *key = attr;

	/* Data size has to fit in allocated buffer */
	if (size > key->alloc_size)
		return TEE_ERROR_SECURITY;
	memcpy(key + 1, buffer, size);
	key->key_size = size;
	return TEE_SUCCESS;
}

/*
 * Copy a secret value out to user space. Always reports the required
 * size through *size; returns TEE_ERROR_SHORT_BUFFER if the caller's
 * buffer is too small or NULL.
 */
static TEE_Result op_attr_secret_value_to_user(void *attr,
					       struct tee_ta_session *sess
					       __unused,
					       void *buffer, uint64_t *size)
{
	TEE_Result res;
	struct tee_cryp_obj_secret *key = attr;
	uint64_t s;
	uint64_t key_size;

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	key_size = key->key_size;
	res = tee_svc_copy_to_user(size, &key_size, sizeof(key_size));
	if (res != TEE_SUCCESS)
		return res;

	if (s < key->key_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	return tee_svc_copy_to_user(buffer, key + 1, key->key_size);
}

/* Serialize a secret value as [u32 length][raw bytes] */
static TEE_Result op_attr_secret_value_to_binary(void *attr, void *data,
						 size_t data_len,
						 size_t *offs)
{
	TEE_Result res;
	struct tee_cryp_obj_secret *key = attr;
	size_t next_offs;

	res = op_u32_to_binary_helper(key->key_size, data, data_len, offs);
	if (res != TEE_SUCCESS)
		return res;

	if (ADD_OVERFLOW(*offs, key->key_size, &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len)
		memcpy((uint8_t *)data + *offs, key + 1, key->key_size);
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

/* Deserialize a secret value; length must fit both input and object */
static bool op_attr_secret_value_from_binary(void *attr, const void *data,
					     size_t data_len, size_t *offs)
{
	struct tee_cryp_obj_secret *key = attr;
	uint32_t s;

	if (!op_u32_from_binary_helper(&s, data, data_len, offs))
		return false;

	if ((*offs + s) > data_len)
		return false;

	/* Data size has to fit in allocated buffer */
	if (s > key->alloc_size)
		return false;
	key->key_size = s;
	memcpy(key + 1, (const uint8_t *)data + *offs, s);
	(*offs) += s;
	return true;
}

/* Copy a secret value from another object's attribute */
static TEE_Result op_attr_secret_value_from_obj(void *attr, void *src_attr)
{
	struct tee_cryp_obj_secret *key = attr;
	struct tee_cryp_obj_secret *src_key = src_attr;

	if (src_key->key_size > key->alloc_size)
		return TEE_ERROR_BAD_STATE;
	memcpy(key + 1, src_key + 1, src_key->key_size);
	key->key_size = src_key->key_size;
	return TEE_SUCCESS;
}

/* Wipe the secret value (zero-fill the whole allocated area) */
static void op_attr_secret_value_clear(void *attr)
{
	struct tee_cryp_obj_secret *key = attr;

	key->key_size = 0;
	memset(key + 1, 0, key->alloc_size);
}

/* Import a big-endian byte array from user space into a bignum */
static TEE_Result op_attr_bignum_from_user(void *attr, const void *buffer,
					   size_t size)
{
	struct bignum **bn = attr;

	return crypto_bignum_bin2bn(buffer, size, *bn);
}

/*
 * Export a bignum to user space as a big-endian byte array. Always
 * reports the required size; validates the destination range before
 * writing.
 */
static TEE_Result op_attr_bignum_to_user(void *attr,
					 struct tee_ta_session *sess,
					 void *buffer, uint64_t *size)
{
	TEE_Result res;
	struct bignum **bn = attr;
	uint64_t req_size;
	uint64_t s;

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	req_size = crypto_bignum_num_bytes(*bn);
	res = tee_svc_copy_to_user(size, &req_size, sizeof(req_size));
	if (res != TEE_SUCCESS)
		return res;
	if (!req_size)
		return TEE_SUCCESS;
	if (s < req_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	/* Check we can access data using supplied user mode pointer */
	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)buffer, req_size);
	if (res != TEE_SUCCESS)
		return res;
	/*
	 * Write the bignum (wich raw data points to) into an array of
	 * bytes (stored in buffer)
	 */
	crypto_bignum_bn2bin(*bn, buffer);
	return TEE_SUCCESS;
}

/* Serialize a bignum as [u32 length][big-endian bytes] */
static TEE_Result op_attr_bignum_to_binary(void *attr, void *data,
					   size_t data_len, size_t *offs)
{
	TEE_Result res;
	struct bignum **bn = attr;
	uint32_t n = crypto_bignum_num_bytes(*bn);
	size_t next_offs;

	res = op_u32_to_binary_helper(n, data, data_len, offs);
	if (res != TEE_SUCCESS)
		return res;

	if (ADD_OVERFLOW(*offs, n, &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len)
		crypto_bignum_bn2bin(*bn, (uint8_t *)data + *offs);
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

/* Deserialize a bignum from [u32 length][big-endian bytes] */
static bool op_attr_bignum_from_binary(void *attr, const void *data,
				       size_t data_len, size_t *offs)
{
	struct bignum **bn = attr;
	uint32_t n;

	if (!op_u32_from_binary_helper(&n, data, data_len, offs))
		return false;

	if ((*offs + n) > data_len)
		return false;
	if (crypto_bignum_bin2bn((const uint8_t *)data + *offs, n, *bn))
		return false;
	(*offs) += n;
	return true;
}

/* Copy a bignum from another object's attribute */
static TEE_Result op_attr_bignum_from_obj(void *attr, void *src_attr)
{
	struct bignum **bn = attr;
	struct bignum **src_bn = src_attr;

	crypto_bignum_copy(*bn, *src_bn);
return TEE_SUCCESS; } static void op_attr_bignum_clear(void *attr) { struct bignum **bn = attr; crypto_bignum_clear(*bn); } static void op_attr_bignum_free(void *attr) { struct bignum **bn = attr; crypto_bignum_free(*bn); *bn = NULL; } static TEE_Result op_attr_value_from_user(void *attr, const void *buffer, size_t size) { uint32_t *v = attr; if (size != sizeof(uint32_t) * 2) return TEE_ERROR_GENERIC; /* "can't happen */ /* Note that only the first value is copied */ memcpy(v, buffer, sizeof(uint32_t)); return TEE_SUCCESS; } static TEE_Result op_attr_value_to_user(void *attr, struct tee_ta_session *sess __unused, void *buffer, uint64_t *size) { TEE_Result res; uint32_t *v = attr; uint64_t s; uint32_t value[2] = { *v }; uint64_t req_size = sizeof(value); res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; if (s < req_size || !buffer) return TEE_ERROR_SHORT_BUFFER; return tee_svc_copy_to_user(buffer, value, req_size); } static TEE_Result op_attr_value_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_to_binary_helper(*v, data, data_len, offs); } static bool op_attr_value_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_from_binary_helper(v, data, data_len, offs); } static TEE_Result op_attr_value_from_obj(void *attr, void *src_attr) { uint32_t *v = attr; uint32_t *src_v = src_attr; *v = *src_v; return TEE_SUCCESS; } static void op_attr_value_clear(void *attr) { uint32_t *v = attr; *v = 0; } static const struct attr_ops attr_ops[] = { [ATTR_OPS_INDEX_SECRET] = { .from_user = op_attr_secret_value_from_user, .to_user = op_attr_secret_value_to_user, .to_binary = op_attr_secret_value_to_binary, .from_binary = op_attr_secret_value_from_binary, .from_obj = op_attr_secret_value_from_obj, .free = op_attr_secret_value_clear, /* not a typo */ .clear = op_attr_secret_value_clear, }, [ATTR_OPS_INDEX_BIGNUM] = { .from_user = 
op_attr_bignum_from_user, .to_user = op_attr_bignum_to_user, .to_binary = op_attr_bignum_to_binary, .from_binary = op_attr_bignum_from_binary, .from_obj = op_attr_bignum_from_obj, .free = op_attr_bignum_free, .clear = op_attr_bignum_clear, }, [ATTR_OPS_INDEX_VALUE] = { .from_user = op_attr_value_from_user, .to_user = op_attr_value_to_user, .to_binary = op_attr_value_to_binary, .from_binary = op_attr_value_from_binary, .from_obj = op_attr_value_from_obj, .free = op_attr_value_clear, /* not a typo */ .clear = op_attr_value_clear, }, }; TEE_Result syscall_cryp_obj_get_info(unsigned long obj, TEE_ObjectInfo *info) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; res = tee_svc_copy_to_user(info, &o->info, sizeof(o->info)); exit: return res; } TEE_Result syscall_cryp_obj_restrict_usage(unsigned long obj, unsigned long usage) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; o->info.objectUsage &= usage; exit: return res; } static int tee_svc_cryp_obj_find_type_attr_idx( uint32_t attr_id, const struct tee_cryp_obj_type_props *type_props) { size_t n; for (n = 0; n < type_props->num_type_attrs; n++) { if (attr_id == type_props->type_attrs[n].attr_id) return n; } return -1; } static const struct tee_cryp_obj_type_props *tee_svc_find_type_props( TEE_ObjectType obj_type) { size_t n; for (n = 0; n < ARRAY_SIZE(tee_cryp_obj_props); n++) { if (tee_cryp_obj_props[n].obj_type == obj_type) return tee_cryp_obj_props + n; } return NULL; } /* Set an attribute on an object */ static void set_attribute(struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { 
int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return; o->have_attrs |= BIT(idx); } /* Get an attribute on an object */ static uint32_t get_attribute(const struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return 0; return o->have_attrs & BIT(idx); } TEE_Result syscall_cryp_obj_get_attr(unsigned long obj, unsigned long attr_id, void *buffer, uint64_t *size) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; int idx; const struct attr_ops *ops; void *attr; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return TEE_ERROR_ITEM_NOT_FOUND; /* Check that the object is initialized */ if (!(o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED)) return TEE_ERROR_BAD_PARAMETERS; /* Check that getting the attribute is allowed */ if (!(attr_id & TEE_ATTR_BIT_PROTECTED) && !(o->info.objectUsage & TEE_USAGE_EXTRACTABLE)) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) { /* Unknown object type, "can't happen" */ return TEE_ERROR_BAD_STATE; } idx = tee_svc_cryp_obj_find_type_attr_idx(attr_id, type_props); if ((idx < 0) || ((o->have_attrs & (1 << idx)) == 0)) return TEE_ERROR_ITEM_NOT_FOUND; ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; return ops->to_user(attr, sess, buffer, size); } void tee_obj_attr_free(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].free((uint8_t *)o->attr + 
ta->raw_offs); } } void tee_obj_attr_clear(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].clear((uint8_t *)o->attr + ta->raw_offs); } } TEE_Result tee_obj_attr_to_binary(struct tee_obj *o, void *data, size_t *data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; size_t len = data ? *data_len : 0; TEE_Result res; if (o->info.objectType == TEE_TYPE_DATA) { *data_len = 0; return TEE_SUCCESS; /* pure data object */ } if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; res = attr_ops[ta->ops_index].to_binary(attr, data, len, &offs); if (res != TEE_SUCCESS) return res; } *data_len = offs; if (data && offs > len) return TEE_ERROR_SHORT_BUFFER; return TEE_SUCCESS; } TEE_Result tee_obj_attr_from_binary(struct tee_obj *o, const void *data, size_t data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; if (!attr_ops[ta->ops_index].from_binary(attr, data, data_len, &offs)) return TEE_ERROR_CORRUPT_OBJECT; } return TEE_SUCCESS; } TEE_Result tee_obj_attr_copy_from(struct tee_obj *o, const struct tee_obj *src) { TEE_Result res; const struct tee_cryp_obj_type_props *tp; const struct 
tee_cryp_obj_type_attrs *ta; size_t n; uint32_t have_attrs = 0; void *attr; void *src_attr; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; if (o->info.objectType == src->info.objectType) { have_attrs = src->have_attrs; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + ta->raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } else { const struct tee_cryp_obj_type_props *tp_src; int idx; if (o->info.objectType == TEE_TYPE_RSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_RSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_DSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_DSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDH_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDH_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else { return TEE_ERROR_BAD_PARAMETERS; } tp_src = tee_svc_find_type_props(src->info.objectType); if (!tp_src) return TEE_ERROR_BAD_STATE; have_attrs = BIT32(tp->num_type_attrs) - 1; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; idx = tee_svc_cryp_obj_find_type_attr_idx(ta->attr_id, tp_src); if (idx < 0) return TEE_ERROR_BAD_STATE; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + tp_src->type_attrs[idx].raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } o->have_attrs = have_attrs; return TEE_SUCCESS; } TEE_Result tee_obj_set_type(struct tee_obj *o, uint32_t obj_type, size_t max_key_size) { TEE_Result res = 
TEE_SUCCESS; const struct tee_cryp_obj_type_props *type_props; /* Can only set type for newly allocated objs */ if (o->attr) return TEE_ERROR_BAD_STATE; /* * Verify that maxKeySize is supported and find out how * much should be allocated. */ if (obj_type == TEE_TYPE_DATA) { if (max_key_size) return TEE_ERROR_NOT_SUPPORTED; } else { /* Find description of object */ type_props = tee_svc_find_type_props(obj_type); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (max_key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; o->attr = calloc(1, type_props->alloc_size); if (!o->attr) return TEE_ERROR_OUT_OF_MEMORY; } /* If we have a key structure, pre-allocate the bignums inside */ switch (obj_type) { case TEE_TYPE_RSA_PUBLIC_KEY: res = crypto_acipher_alloc_rsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_RSA_KEYPAIR: res = crypto_acipher_alloc_rsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DSA_PUBLIC_KEY: res = crypto_acipher_alloc_dsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_DSA_KEYPAIR: res = crypto_acipher_alloc_dsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DH_KEYPAIR: res = crypto_acipher_alloc_dh_keypair(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_PUBLIC_KEY: case TEE_TYPE_ECDH_PUBLIC_KEY: res = crypto_acipher_alloc_ecc_public_key(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = crypto_acipher_alloc_ecc_keypair(o->attr, max_key_size); break; default: if (obj_type != TEE_TYPE_DATA) { struct tee_cryp_obj_secret *key = o->attr; key->alloc_size = type_props->alloc_size - sizeof(*key); } break; } if (res != TEE_SUCCESS) return res; o->info.objectType = obj_type; o->info.maxKeySize = max_key_size; o->info.objectUsage = TEE_USAGE_DEFAULT; return TEE_SUCCESS; } 
TEE_Result syscall_cryp_obj_alloc(unsigned long obj_type, unsigned long max_key_size, uint32_t *obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; if (obj_type == TEE_TYPE_DATA) return TEE_ERROR_NOT_SUPPORTED; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; o = tee_obj_alloc(); if (!o) return TEE_ERROR_OUT_OF_MEMORY; res = tee_obj_set_type(o, obj_type, max_key_size); if (res != TEE_SUCCESS) { tee_obj_free(o); return res; } tee_obj_add(to_user_ta_ctx(sess->ctx), o); res = tee_svc_copy_kaddr_to_uref(obj, o); if (res != TEE_SUCCESS) tee_obj_close(to_user_ta_ctx(sess->ctx), o); return res; } TEE_Result syscall_cryp_obj_close(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* * If it's busy it's used by an operation, a client should never have * this handle. 
*/ if (o->busy) return TEE_ERROR_ITEM_NOT_FOUND; tee_obj_close(to_user_ta_ctx(sess->ctx), o); return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_reset(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) == 0) { tee_obj_attr_clear(o); o->info.keySize = 0; o->info.objectUsage = TEE_USAGE_DEFAULT; } else { return TEE_ERROR_BAD_PARAMETERS; } /* the object is no more initialized */ o->info.handleFlags &= ~TEE_HANDLE_FLAG_INITIALIZED; return TEE_SUCCESS; } static TEE_Result copy_in_attrs(struct user_ta_ctx *utc, const struct utee_attribute *usr_attrs, uint32_t attr_count, TEE_Attribute *attrs) { TEE_Result res; uint32_t n; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)usr_attrs, attr_count * sizeof(struct utee_attribute)); if (res != TEE_SUCCESS) return res; for (n = 0; n < attr_count; n++) { attrs[n].attributeID = usr_attrs[n].attribute_id; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) { attrs[n].content.value.a = usr_attrs[n].a; attrs[n].content.value.b = usr_attrs[n].b; } else { uintptr_t buf = usr_attrs[n].a; size_t len = usr_attrs[n].b; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, buf, len); if (res != TEE_SUCCESS) return res; attrs[n].content.ref.buffer = (void *)buf; attrs[n].content.ref.length = len; } } return TEE_SUCCESS; } enum attr_usage { ATTR_USAGE_POPULATE, ATTR_USAGE_GENERATE_KEY }; static TEE_Result tee_svc_cryp_check_attr(enum attr_usage usage, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { uint32_t required_flag; uint32_t opt_flag; bool all_opt_needed; uint32_t req_attrs = 0; uint32_t opt_grp_attrs = 0; uint32_t attrs_found 
= 0; size_t n; uint32_t bit; uint32_t flags; int idx; if (usage == ATTR_USAGE_POPULATE) { required_flag = TEE_TYPE_ATTR_REQUIRED; opt_flag = TEE_TYPE_ATTR_OPTIONAL_GROUP; all_opt_needed = true; } else { required_flag = TEE_TYPE_ATTR_GEN_KEY_REQ; opt_flag = TEE_TYPE_ATTR_GEN_KEY_OPT; all_opt_needed = false; } /* * First find out which attributes are required and which belong to * the optional group */ for (n = 0; n < type_props->num_type_attrs; n++) { bit = 1 << n; flags = type_props->type_attrs[n].flags; if (flags & required_flag) req_attrs |= bit; else if (flags & opt_flag) opt_grp_attrs |= bit; } /* * Verify that all required attributes are in place and * that the same attribute isn't repeated. */ for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; bit = 1 << idx; /* attribute not repeated */ if ((attrs_found & bit) != 0) return TEE_ERROR_ITEM_NOT_FOUND; attrs_found |= bit; } /* Required attribute missing */ if ((attrs_found & req_attrs) != req_attrs) return TEE_ERROR_ITEM_NOT_FOUND; /* * If the flag says that "if one of the optional attributes are included * all of them has to be included" this must be checked. 
*/ if (all_opt_needed && (attrs_found & opt_grp_attrs) != 0 && (attrs_found & opt_grp_attrs) != opt_grp_attrs) return TEE_ERROR_ITEM_NOT_FOUND; return TEE_SUCCESS; } static TEE_Result get_ec_key_size(uint32_t curve, size_t *key_size) { switch (curve) { case TEE_ECC_CURVE_NIST_P192: *key_size = 192; break; case TEE_ECC_CURVE_NIST_P224: *key_size = 224; break; case TEE_ECC_CURVE_NIST_P256: *key_size = 256; break; case TEE_ECC_CURVE_NIST_P384: *key_size = 384; break; case TEE_ECC_CURVE_NIST_P521: *key_size = 521; break; default: return TEE_ERROR_NOT_SUPPORTED; } return TEE_SUCCESS; } static TEE_Result tee_svc_cryp_obj_populate_type( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { TEE_Result res; uint32_t have_attrs = 0; size_t obj_size = 0; size_t n; int idx; const struct attr_ops *ops; void *attr; for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; have_attrs |= BIT32(idx); ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) res = ops->from_user(attr, &attrs[n].content.value, sizeof(attrs[n].content.value)); else res = ops->from_user(attr, attrs[n].content.ref.buffer, attrs[n].content.ref.length); if (res != TEE_SUCCESS) return res; /* * First attr_idx signifies the attribute that gives the size * of the object */ if (type_props->type_attrs[idx].flags & TEE_TYPE_ATTR_SIZE_INDICATOR) { /* * For ECDSA/ECDH we need to translate curve into * object size */ if (attrs[n].attributeID == TEE_ATTR_ECC_CURVE) { res = get_ec_key_size(attrs[n].content.value.a, &obj_size); if (res != TEE_SUCCESS) return res; } else { obj_size += (attrs[n].content.ref.length * 8); } } } /* * We have to do it like this because the parity bits aren't counted 
* when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) obj_size -= obj_size / 8; /* Exclude parity in size of key */ o->have_attrs = have_attrs; o->info.keySize = obj_size; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size)) return TEE_ERROR_OVERFLOW; attrs = malloc(alloc_size); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; } TEE_Result syscall_cryp_obj_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *dst_o; struct tee_obj *src_o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(dst), 
&dst_o); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(src), &src_o); if (res != TEE_SUCCESS) return res; if ((src_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; res = tee_obj_attr_copy_from(dst_o, src_o); if (res != TEE_SUCCESS) return res; dst_o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; dst_o->info.keySize = src_o->info.keySize; dst_o->info.objectUsage = src_o->info.objectUsage; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_rsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct rsa_keypair *key = o->attr; uint32_t e = TEE_U32_TO_BIG_ENDIAN(65537); /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; if (!get_attribute(o, type_props, TEE_ATTR_RSA_PUBLIC_EXPONENT)) crypto_bignum_bin2bn((const uint8_t *)&e, sizeof(e), key->e); res = crypto_acipher_gen_rsa_key(key, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size) { TEE_Result res; res = crypto_acipher_gen_dsa_key(o->attr, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dh( struct tee_obj *o, const struct 
tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct dh_keypair *tee_dh_key; struct bignum *dh_q = NULL; uint32_t dh_xbits = 0; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_dh_key = (struct dh_keypair *)o->attr; if (get_attribute(o, type_props, TEE_ATTR_DH_SUBPRIME)) dh_q = tee_dh_key->q; if (get_attribute(o, type_props, TEE_ATTR_DH_X_BITS)) dh_xbits = tee_dh_key->xbits; res = crypto_acipher_gen_dh_key(tee_dh_key, dh_q, dh_xbits); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_DH_PUBLIC_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_X_BITS); return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_ecc( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct ecc_keypair *tee_ecc_key; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_ecc_key = (struct ecc_keypair *)o->attr; res = crypto_acipher_gen_ecc_key(tee_ecc_key); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_ECC_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_X); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_Y); set_attribute(o, type_props, TEE_ATTR_ECC_CURVE); return TEE_SUCCESS; } TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct 
tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. 
*/ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; } static TEE_Result tee_svc_cryp_get_state(struct tee_ta_session *sess, uint32_t state_id, struct tee_cryp_state **state) { struct tee_cryp_state *s; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); TAILQ_FOREACH(s, &utc->cryp_states, link) { if (state_id == (vaddr_t)s) { *state = s; return TEE_SUCCESS; } } return TEE_ERROR_BAD_PARAMETERS; } static void cryp_state_free(struct user_ta_ctx *utc, struct tee_cryp_state *cs) { struct tee_obj *o; if (tee_obj_get(utc, cs->key1, &o) == TEE_SUCCESS) tee_obj_close(utc, o); if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) tee_obj_close(utc, o); TAILQ_REMOVE(&utc->cryp_states, cs, link); if (cs->ctx_finalize != NULL) cs->ctx_finalize(cs->ctx, cs->algo); switch 
(TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_CIPHER:
		crypto_cipher_free_ctx(cs->ctx, cs->algo);
		break;
	case TEE_OPERATION_AE:
		crypto_authenc_free_ctx(cs->ctx, cs->algo);
		break;
	case TEE_OPERATION_DIGEST:
		crypto_hash_free_ctx(cs->ctx, cs->algo);
		break;
	case TEE_OPERATION_MAC:
		crypto_mac_free_ctx(cs->ctx, cs->algo);
		break;
	default:
		/* Other classes never allocate a ctx in state_alloc */
		assert(!cs->ctx);
	}
	free(cs);
}

/*
 * Check that the object type of @o is a valid key type for @algo in
 * @mode. For asymmetric algorithms the public-key-only object type is
 * additionally accepted for the operations that only need the public
 * part (encrypt/verify).
 */
static TEE_Result tee_svc_cryp_check_key_type(const struct tee_obj *o,
					      uint32_t algo,
					      TEE_OperationMode mode)
{
	uint32_t req_key_type;
	uint32_t req_key_type2 = 0;

	switch (TEE_ALG_GET_MAIN_ALG(algo)) {
	case TEE_MAIN_ALGO_MD5:
		req_key_type = TEE_TYPE_HMAC_MD5;
		break;
	case TEE_MAIN_ALGO_SHA1:
		req_key_type = TEE_TYPE_HMAC_SHA1;
		break;
	case TEE_MAIN_ALGO_SHA224:
		req_key_type = TEE_TYPE_HMAC_SHA224;
		break;
	case TEE_MAIN_ALGO_SHA256:
		req_key_type = TEE_TYPE_HMAC_SHA256;
		break;
	case TEE_MAIN_ALGO_SHA384:
		req_key_type = TEE_TYPE_HMAC_SHA384;
		break;
	case TEE_MAIN_ALGO_SHA512:
		req_key_type = TEE_TYPE_HMAC_SHA512;
		break;
	case TEE_MAIN_ALGO_AES:
		req_key_type = TEE_TYPE_AES;
		break;
	case TEE_MAIN_ALGO_DES:
		req_key_type = TEE_TYPE_DES;
		break;
	case TEE_MAIN_ALGO_DES3:
		req_key_type = TEE_TYPE_DES3;
		break;
	case TEE_MAIN_ALGO_RSA:
		req_key_type = TEE_TYPE_RSA_KEYPAIR;
		if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY)
			req_key_type2 = TEE_TYPE_RSA_PUBLIC_KEY;
		break;
	case TEE_MAIN_ALGO_DSA:
		req_key_type = TEE_TYPE_DSA_KEYPAIR;
		if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY)
			req_key_type2 = TEE_TYPE_DSA_PUBLIC_KEY;
		break;
	case TEE_MAIN_ALGO_DH:
		req_key_type = TEE_TYPE_DH_KEYPAIR;
		break;
	case TEE_MAIN_ALGO_ECDSA:
		req_key_type = TEE_TYPE_ECDSA_KEYPAIR;
		if (mode == TEE_MODE_VERIFY)
			req_key_type2 = TEE_TYPE_ECDSA_PUBLIC_KEY;
		break;
	case TEE_MAIN_ALGO_ECDH:
		req_key_type = TEE_TYPE_ECDH_KEYPAIR;
		break;
#if defined(CFG_CRYPTO_HKDF)
	case TEE_MAIN_ALGO_HKDF:
		req_key_type = TEE_TYPE_HKDF_IKM;
		break;
#endif
#if defined(CFG_CRYPTO_CONCAT_KDF)
	case TEE_MAIN_ALGO_CONCAT_KDF:
		req_key_type = TEE_TYPE_CONCAT_KDF_Z;
		break;
#endif
#if defined(CFG_CRYPTO_PBKDF2)
	case TEE_MAIN_ALGO_PBKDF2:
		req_key_type = TEE_TYPE_PBKDF2_PASSWORD;
		break;
#endif
	default:
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (req_key_type != o->info.objectType &&
	    req_key_type2 != o->info.objectType)
		return TEE_ERROR_BAD_PARAMETERS;
	return TEE_SUCCESS;
}

/*
 * Allocate a crypto state for (algo, mode) bound to key object handles
 * key1/key2 (0 = no key). Validates key types, enforces the expected
 * key count per operation class (XTS is the only cipher taking two
 * keys), allocates the class-specific context, then publishes the state
 * handle to user space and marks the key objects busy. On any failure
 * the partially built state is torn down via cryp_state_free().
 */
TEE_Result syscall_cryp_state_alloc(unsigned long algo, unsigned long mode,
				    unsigned long key1, unsigned long key2,
				    uint32_t *state)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	struct tee_obj *o1 = NULL;
	struct tee_obj *o2 = NULL;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	if (key1 != 0) {
		res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key1), &o1);
		if (res != TEE_SUCCESS)
			return res;
		if (o1->busy)
			return TEE_ERROR_BAD_PARAMETERS;
		res = tee_svc_cryp_check_key_type(o1, algo, mode);
		if (res != TEE_SUCCESS)
			return res;
	}
	if (key2 != 0) {
		res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key2), &o2);
		if (res != TEE_SUCCESS)
			return res;
		if (o2->busy)
			return TEE_ERROR_BAD_PARAMETERS;
		res = tee_svc_cryp_check_key_type(o2, algo, mode);
		if (res != TEE_SUCCESS)
			return res;
	}

	cs = calloc(1, sizeof(struct tee_cryp_state));
	if (!cs)
		return TEE_ERROR_OUT_OF_MEMORY;
	TAILQ_INSERT_TAIL(&utc->cryp_states, cs, link);
	cs->algo = algo;
	cs->mode = mode;

	switch (TEE_ALG_GET_CLASS(algo)) {
	case TEE_OPERATION_EXTENSION:
#ifdef CFG_CRYPTO_RSASSA_NA1
		if (algo == TEE_ALG_RSASSA_PKCS1_V1_5)
			goto rsassa_na1;
#endif
		res = TEE_ERROR_NOT_SUPPORTED;
		break;
	case TEE_OPERATION_CIPHER:
		/* AES-XTS needs exactly two keys, other ciphers exactly one */
		if ((algo == TEE_ALG_AES_XTS && (key1 == 0 || key2 == 0)) ||
		    (algo != TEE_ALG_AES_XTS && (key1 == 0 || key2 != 0))) {
			res = TEE_ERROR_BAD_PARAMETERS;
		} else {
			res = crypto_cipher_alloc_ctx(&cs->ctx, algo);
			if (res != TEE_SUCCESS)
				break;
		}
		break;
	case TEE_OPERATION_MAC:
		if (key1 == 0 || key2 != 0) {
			res = TEE_ERROR_BAD_PARAMETERS;
		} else {
			res = crypto_mac_alloc_ctx(&cs->ctx, algo);
			if (res != TEE_SUCCESS)
				break;
		}
		break;
	case TEE_OPERATION_DIGEST:
		/* Digests are keyless */
		if (key1 != 0 || key2 != 0) {
			res = TEE_ERROR_BAD_PARAMETERS;
		} else {
			res = crypto_hash_alloc_ctx(&cs->ctx, algo);
			if (res != TEE_SUCCESS)
				break;
		}
		break;
	case TEE_OPERATION_ASYMMETRIC_CIPHER:
	case TEE_OPERATION_ASYMMETRIC_SIGNATURE:
rsassa_na1: __maybe_unused
		if (key1 == 0 || key2 != 0)
			res = TEE_ERROR_BAD_PARAMETERS;
		break;
	case TEE_OPERATION_KEY_DERIVATION:
		if (key1 == 0 || key2 != 0)
			res = TEE_ERROR_BAD_PARAMETERS;
		break;
	default:
		res = TEE_ERROR_NOT_SUPPORTED;
		break;
	}
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_svc_copy_kaddr_to_uref(state, cs);
	if (res != TEE_SUCCESS)
		goto out;

	/* Register keys */
	if (o1 != NULL) {
		o1->busy = true;
		cs->key1 = (vaddr_t)o1;
	}
	if (o2 != NULL) {
		o2->busy = true;
		cs->key2 = (vaddr_t)o2;
	}

out:
	if (res != TEE_SUCCESS)
		cryp_state_free(utc, cs);
	return res;
}

/*
 * Copy the internal context of crypto state @src into @dst. Both states
 * must exist in the current session and use the same algorithm and mode.
 */
TEE_Result syscall_cryp_state_copy(unsigned long dst, unsigned long src)
{
	TEE_Result res;
	struct tee_cryp_state *cs_dst;
	struct tee_cryp_state *cs_src;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(dst), &cs_dst);
	if (res != TEE_SUCCESS)
		return res;
	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(src), &cs_src);
	if (res != TEE_SUCCESS)
		return res;
	if (cs_dst->algo != cs_src->algo || cs_dst->mode != cs_src->mode)
		return TEE_ERROR_BAD_PARAMETERS;

	switch (TEE_ALG_GET_CLASS(cs_src->algo)) {
	case TEE_OPERATION_CIPHER:
		crypto_cipher_copy_state(cs_dst->ctx, cs_src->ctx,
					 cs_src->algo);
		break;
	case TEE_OPERATION_AE:
		crypto_authenc_copy_state(cs_dst->ctx, cs_src->ctx,
					  cs_src->algo);
		break;
	case TEE_OPERATION_DIGEST:
		crypto_hash_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo);
		break;
	case TEE_OPERATION_MAC:
		crypto_mac_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo);
		break;
	default:
		return TEE_ERROR_BAD_STATE;
	}

	return TEE_SUCCESS;
}

/* Free all crypto states still registered on a TA context (TA teardown). */
void tee_svc_cryp_free_states(struct user_ta_ctx *utc)
{
	struct tee_cryp_state_head *states = &utc->cryp_states;

	while (!TAILQ_EMPTY(states))
		cryp_state_free(utc, TAILQ_FIRST(states));
}

/* Free one crypto state identified by its user-space handle. */
TEE_Result syscall_cryp_state_free(unsigned long state)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;
	cryp_state_free(to_user_ta_ctx(sess->ctx), cs);
	return TEE_SUCCESS;
}

/*
 * Initialize a digest or MAC operation. For a MAC, the key comes from
 * the secret object registered as cs->key1, which must already be
 * initialized. The iv parameters are currently unused.
 */
TEE_Result syscall_hash_init(unsigned long state,
			     const void *iv __maybe_unused,
			     size_t iv_len __maybe_unused)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	switch (TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_DIGEST:
		res = crypto_hash_init(cs->ctx, cs->algo);
		if (res != TEE_SUCCESS)
			return res;
		break;
	case TEE_OPERATION_MAC:
		{
			struct tee_obj *o;
			struct tee_cryp_obj_secret *key;

			res = tee_obj_get(to_user_ta_ctx(sess->ctx),
					  cs->key1, &o);
			if (res != TEE_SUCCESS)
				return res;
			if ((o->info.handleFlags &
			     TEE_HANDLE_FLAG_INITIALIZED) == 0)
				return TEE_ERROR_BAD_PARAMETERS;

			key = (struct tee_cryp_obj_secret *)o->attr;
			res = crypto_mac_init(cs->ctx, cs->algo,
					      (void *)(key + 1),
					      key->key_size);
			if (res != TEE_SUCCESS)
				return res;
			break;
		}
	default:
		return TEE_ERROR_BAD_PARAMETERS;
	}

	return TEE_SUCCESS;
}

/*
 * Feed a chunk of user memory into a running digest/MAC operation.
 * The chunk buffer is validated against the TA's memory map first.
 */
TEE_Result syscall_hash_update(unsigned long state, const void *chunk,
			       size_t chunk_size)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	/* No data, but size provided isn't valid parameters.
*/ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; /* Zero length hash is valid, but nothing we need to do. */ if (!chunk_size) return TEE_SUCCESS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_final(unsigned long state, const void *chunk, size_t chunk_size, void *hash, uint64_t *hash_len) { TEE_Result res, res2; size_t hash_size; uint64_t hlen; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. 
*/ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&hlen, hash_len, sizeof(hlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)hash, hlen); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = tee_hash_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_hash_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = tee_mac_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_mac_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } out: hlen = hash_size; res2 = tee_svc_copy_to_user(hash_len, &hlen, sizeof(*hash_len)); if (res2 != TEE_SUCCESS) return res2; return res; } TEE_Result syscall_cipher_init(unsigned long state, const void *iv, size_t iv_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key1; struct user_ta_ctx *utc; 
	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t) iv, iv_len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_obj_get(utc, cs->key1, &o);
	if (res != TEE_SUCCESS)
		return res;
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0)
		return TEE_ERROR_BAD_PARAMETERS;
	key1 = o->attr;

	/* A second key is only registered for XTS-style two-key ciphers */
	if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) {
		struct tee_cryp_obj_secret *key2 = o->attr;

		if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0)
			return TEE_ERROR_BAD_PARAMETERS;

		res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode,
					 (uint8_t *)(key1 + 1),
					 key1->key_size,
					 (uint8_t *)(key2 + 1),
					 key2->key_size,
					 iv, iv_len);
	} else {
		res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode,
					 (uint8_t *)(key1 + 1),
					 key1->key_size,
					 NULL, 0, iv, iv_len);
	}
	if (res != TEE_SUCCESS)
		return res;

	cs->ctx_finalize = crypto_cipher_final;
	return TEE_SUCCESS;
}

/*
 * Common implementation of cipher update/final: validates src and
 * (optionally) dst buffers, runs the cipher over src into dst, and
 * writes the produced size back through dst_len. When last_block is
 * true any registered finalize callback is run and cleared.
 */
static TEE_Result tee_svc_cipher_update_helper(unsigned long state,
					       bool last_block,
					       const void *src, size_t src_len,
					       void *dst, uint64_t *dst_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)src, src_len);
	if (res != TEE_SUCCESS)
		return res;

	if (!dst_len) {
		dlen = 0;
	} else {
		res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen));
		if (res != TEE_SUCCESS)
			return res;

		res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)dst, dlen);
		if (res != TEE_SUCCESS)
			return res;
	}

	if (dlen < src_len) {
		res = TEE_ERROR_SHORT_BUFFER;
		goto out;
	}

	if (src_len > 0) {
		/* Permit src_len == 0 to finalize the operation */
		res = tee_do_cipher_update(cs->ctx, cs->algo, cs->mode,
					   last_block, src, src_len, dst);
	}

	if (last_block && cs->ctx_finalize != NULL) {
		cs->ctx_finalize(cs->ctx, cs->algo);
		cs->ctx_finalize = NULL;
	}

out:
	if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) &&
	    dst_len != NULL) {
		TEE_Result res2;

		dlen = src_len;
		res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len));
		if (res2 != TEE_SUCCESS)
			res = res2;
	}

	return res;
}

/* Cipher update: not the last block. */
TEE_Result syscall_cipher_update(unsigned long state, const void *src,
				 size_t src_len, void *dst, uint64_t *dst_len)
{
	return tee_svc_cipher_update_helper(state, false /* last_block */,
					    src, src_len, dst, dst_len);
}

/* Cipher final: processes the last block and finalizes the context. */
TEE_Result syscall_cipher_final(unsigned long state, const void *src,
				size_t src_len, void *dst, uint64_t *dst_len)
{
	return tee_svc_cipher_update_helper(state, true /* last_block */,
					    src, src_len, dst, dst_len);
}

#if defined(CFG_CRYPTO_HKDF)
/*
 * Extract the HKDF attributes (optional salt and info, mandatory OKM
 * length) from the attribute list; duplicates keep the first occurrence.
 */
static TEE_Result get_hkdf_params(const TEE_Attribute *params,
				  uint32_t param_count,
				  void **salt, size_t *salt_len,
				  void **info, size_t *info_len,
				  size_t *okm_len)
{
	size_t n;
	enum { SALT = 0x1, LENGTH = 0x2, INFO = 0x4 };
	uint8_t found = 0;

	*salt = *info = NULL;
	*salt_len = *info_len = *okm_len = 0;

	for (n = 0; n < param_count; n++) {
		switch (params[n].attributeID) {
		case TEE_ATTR_HKDF_SALT:
			if (!(found & SALT)) {
				*salt = params[n].content.ref.buffer;
				*salt_len = params[n].content.ref.length;
				found |= SALT;
			}
			break;
		case TEE_ATTR_HKDF_OKM_LENGTH:
			if (!(found & LENGTH)) {
				*okm_len = params[n].content.value.a;
				found |= LENGTH;
			}
			break;
		case TEE_ATTR_HKDF_INFO:
			if (!(found & INFO)) {
				*info = params[n].content.ref.buffer;
				*info_len = params[n].content.ref.length;
				found |= INFO;
			}
			break;
		default:
			/* Unexpected attribute */
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (!(found & LENGTH))
		return TEE_ERROR_BAD_PARAMETERS;
	return TEE_SUCCESS;
}
#endif

#if defined(CFG_CRYPTO_CONCAT_KDF)
/*
 * Extract the Concat-KDF attributes (optional OtherInfo, mandatory
 * derived key length) from the attribute list.
 */
static TEE_Result get_concat_kdf_params(const TEE_Attribute *params,
					uint32_t param_count,
					void **other_info,
					size_t *other_info_len,
					size_t *derived_key_len)
{
	size_t n;
	enum { LENGTH = 0x1, INFO = 0x2 };
	uint8_t found = 0;

	*other_info = NULL;
	*other_info_len = *derived_key_len = 0;

	for (n = 0; n < param_count; n++) {
		switch (params[n].attributeID) {
		case TEE_ATTR_CONCAT_KDF_OTHER_INFO:
			if (!(found & INFO)) {
				*other_info = params[n].content.ref.buffer;
				*other_info_len =
					params[n].content.ref.length;
				found |= INFO;
			}
			break;
		case TEE_ATTR_CONCAT_KDF_DKM_LENGTH:
			if (!(found & LENGTH)) {
				*derived_key_len =
					params[n].content.value.a;
				found |= LENGTH;
			}
			break;
		default:
			/* Unexpected attribute */
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (!(found & LENGTH))
		return TEE_ERROR_BAD_PARAMETERS;

	return TEE_SUCCESS;
}
#endif

#if defined(CFG_CRYPTO_PBKDF2)
/*
 * Extract the PBKDF2 attributes (optional salt, mandatory derived key
 * length and iteration count) from the attribute list.
 */
static TEE_Result get_pbkdf2_params(const TEE_Attribute *params,
				    uint32_t param_count,
				    void **salt, size_t *salt_len,
				    size_t *derived_key_len,
				    size_t *iteration_count)
{
	size_t n;
	enum { SALT = 0x1, LENGTH = 0x2, COUNT = 0x4 };
	uint8_t found = 0;

	*salt = NULL;
	*salt_len = *derived_key_len = *iteration_count = 0;

	for (n = 0; n < param_count; n++) {
		switch (params[n].attributeID) {
		case TEE_ATTR_PBKDF2_SALT:
			if (!(found & SALT)) {
				*salt = params[n].content.ref.buffer;
				*salt_len = params[n].content.ref.length;
				found |= SALT;
			}
			break;
		case TEE_ATTR_PBKDF2_DKM_LENGTH:
			if (!(found & LENGTH)) {
				*derived_key_len =
					params[n].content.value.a;
				found |= LENGTH;
			}
			break;
		case TEE_ATTR_PBKDF2_ITERATION_COUNT:
			if (!(found & COUNT)) {
				*iteration_count =
					params[n].content.value.a;
				found |= COUNT;
			}
			break;
		default:
			/* Unexpected attribute */
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if ((found & (LENGTH|COUNT)) != (LENGTH|COUNT))
		return TEE_ERROR_BAD_PARAMETERS;

	return TEE_SUCCESS;
}
#endif

/*
 * Derive a key (DH/ECDH shared secret, or HKDF/Concat-KDF/PBKDF2 when
 * enabled) from the key bound to the state and the supplied attributes,
 * storing the result in the secret object @derived_key.
 */
TEE_Result syscall_cryp_derive_key(unsigned long state,
				   const struct utee_attribute *usr_params,
unsigned long param_count, unsigned long derived_key) { TEE_Result res = TEE_ERROR_NOT_SUPPORTED; struct tee_ta_session *sess; struct tee_obj *ko; struct tee_obj *so; struct tee_cryp_state *cs; struct tee_cryp_obj_secret *sk; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; /* Get key set in operation */ res = tee_obj_get(utc, cs->key1, &ko); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, tee_svc_uref_to_vaddr(derived_key), &so); if (res != TEE_SUCCESS) goto out; /* Find information needed about the object to initialize */ sk = so->attr; /* Find description of object */ type_props = tee_svc_find_type_props(so->info.objectType); if (!type_props) { res = TEE_ERROR_NOT_SUPPORTED; goto out; } if (cs->algo == TEE_ALG_DH_DERIVE_SHARED_SECRET) { size_t alloc_size; struct bignum *pub; struct bignum *ss; if (param_count != 1 || params[0].attributeID != TEE_ATTR_DH_PUBLIC_VALUE) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } alloc_size = params[0].content.ref.length * 8; pub = crypto_bignum_allocate(alloc_size); ss = crypto_bignum_allocate(alloc_size); if (pub && ss) { crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, pub); res = crypto_acipher_dh_shared_secret(ko->attr, pub, ss); if (res == TEE_SUCCESS) { sk->key_size = crypto_bignum_num_bytes(ss); crypto_bignum_bn2bin(ss, (uint8_t *)(sk + 1)); so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } else { res = TEE_ERROR_OUT_OF_MEMORY; } crypto_bignum_free(pub); 
crypto_bignum_free(ss); } else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_ECDH) { size_t alloc_size; struct ecc_public_key key_public; uint8_t *pt_secret; unsigned long pt_secret_len; if (param_count != 2 || params[0].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_X || params[1].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_Y) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (cs->algo) { case TEE_ALG_ECDH_P192: alloc_size = 192; break; case TEE_ALG_ECDH_P224: alloc_size = 224; break; case TEE_ALG_ECDH_P256: alloc_size = 256; break; case TEE_ALG_ECDH_P384: alloc_size = 384; break; case TEE_ALG_ECDH_P521: alloc_size = 521; break; default: res = TEE_ERROR_NOT_IMPLEMENTED; goto out; } /* Create the public key */ res = crypto_acipher_alloc_ecc_public_key(&key_public, alloc_size); if (res != TEE_SUCCESS) goto out; key_public.curve = ((struct ecc_keypair *)ko->attr)->curve; crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, key_public.x); crypto_bignum_bin2bn(params[1].content.ref.buffer, params[1].content.ref.length, key_public.y); pt_secret = (uint8_t *)(sk + 1); pt_secret_len = sk->alloc_size; res = crypto_acipher_ecc_shared_secret(ko->attr, &key_public, pt_secret, &pt_secret_len); if (res == TEE_SUCCESS) { sk->key_size = pt_secret_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } /* free the public key */ crypto_acipher_free_ecc_public_key(&key_public); } #if defined(CFG_CRYPTO_HKDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_HKDF) { void *salt, *info; size_t salt_len, info_len, okm_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ik = ko->attr; const uint8_t *ikm = (const uint8_t *)(ik + 1); res = get_hkdf_params(params, param_count, &salt, &salt_len, &info, &info_len, &okm_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (okm_len > ik->alloc_size) { res = 
TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_hkdf(hash_id, ikm, ik->key_size, salt, salt_len, info, info_len, (uint8_t *)(sk + 1), okm_len); if (res == TEE_SUCCESS) { sk->key_size = okm_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_CONCAT_KDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_CONCAT_KDF) { void *info; size_t info_len, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *shared_secret = (const uint8_t *)(ss + 1); res = get_concat_kdf_params(params, param_count, &info, &info_len, &derived_key_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_concat_kdf(hash_id, shared_secret, ss->key_size, info, info_len, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_PBKDF2) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_PBKDF2) { void *salt; size_t salt_len, iteration_count, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *password = (const uint8_t *)(ss + 1); res = get_pbkdf2_params(params, param_count, &salt, &salt_len, &derived_key_len, &iteration_count); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_pbkdf2(hash_id, password, ss->key_size, salt, salt_len, iteration_count, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= 
TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif else res = TEE_ERROR_NOT_SUPPORTED; out: free(params); return res; } TEE_Result syscall_cryp_random_number_generate(void *buf, size_t blen) { TEE_Result res; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buf, blen); if (res != TEE_SUCCESS) return res; res = crypto_rng_read(buf, blen); if (res != TEE_SUCCESS) return res; return res; } TEE_Result syscall_authenc_init(unsigned long state, const void *nonce, size_t nonce_len, size_t tag_len, size_t aad_len, size_t payload_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = o->attr; res = crypto_authenc_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key + 1), key->key_size, nonce, nonce_len, tag_len, aad_len, payload_len); if (res != TEE_SUCCESS) return res; cs->ctx_finalize = (tee_cryp_ctx_finalize_func_t)crypto_authenc_final; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_aad(unsigned long state, const void *aad_data, size_t aad_data_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; res = 
	    tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = crypto_authenc_update_aad(cs->ctx, cs->algo, cs->mode,
					aad_data, aad_data_len);
	if (res != TEE_SUCCESS)
		return res;

	return TEE_SUCCESS;
}

/*
 * Encrypt/decrypt a payload chunk of an AE operation. dst_len is an
 * in/out user pointer; the produced size is copied back on success or
 * short-buffer.
 */
TEE_Result syscall_authenc_update_payload(unsigned long state,
					  const void *src_data,
					  size_t src_len, void *dst_data,
					  uint64_t *dst_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen;
	size_t tmp_dlen;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t) src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen));
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)dst_data, dlen);
	if (res != TEE_SUCCESS)
		return res;

	if (dlen < src_len) {
		res = TEE_ERROR_SHORT_BUFFER;
		goto out;
	}

	tmp_dlen = dlen;
	res = crypto_authenc_update_payload(cs->ctx, cs->algo, cs->mode,
					    src_data, src_len, dst_data,
					    &tmp_dlen);
	dlen = tmp_dlen;

out:
	if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) {
		TEE_Result res2 = tee_svc_copy_to_user(dst_len, &dlen,
						       sizeof(*dst_len));
		if (res2 != TEE_SUCCESS)
			res = res2;
	}
	return res;
}

/*
 * Finalize an AE encryption: process the last payload chunk and emit
 * the authentication tag. Both dst_len (optional) and tag_len are
 * in/out user pointers.
 */
TEE_Result syscall_authenc_enc_final(unsigned long state,
				     const void *src_data, size_t src_len,
				     void *dst_data, uint64_t *dst_len,
				     void *tag, uint64_t *tag_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen;
	uint64_t tlen = 0;
	size_t tmp_dlen;
	size_t tmp_tlen;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	if (cs->mode != TEE_MODE_ENCRYPT)
		return TEE_ERROR_BAD_PARAMETERS;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	if (!dst_len) {
		dlen = 0;
	} else {
		res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen));
		if (res != TEE_SUCCESS)
			return res;

		res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)dst_data, dlen);
		if (res != TEE_SUCCESS)
			return res;
	}

	if (dlen < src_len) {
		res = TEE_ERROR_SHORT_BUFFER;
		goto out;
	}

	res = tee_svc_copy_from_user(&tlen, tag_len, sizeof(tlen));
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)tag, tlen);
	if (res != TEE_SUCCESS)
		return res;

	tmp_dlen = dlen;
	tmp_tlen = tlen;
	res = crypto_authenc_enc_final(cs->ctx, cs->algo, src_data, src_len,
				       dst_data, &tmp_dlen, tag, &tmp_tlen);
	dlen = tmp_dlen;
	tlen = tmp_tlen;

out:
	if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) {
		TEE_Result res2;

		if (dst_len != NULL) {
			res2 = tee_svc_copy_to_user(dst_len, &dlen,
						    sizeof(*dst_len));
			if (res2 != TEE_SUCCESS)
				return res2;
		}

		res2 = tee_svc_copy_to_user(tag_len, &tlen,
					    sizeof(*tag_len));
		if (res2 != TEE_SUCCESS)
			return res2;
	}

	return res;
}

/*
 * Finalize an AE decryption: process the last payload chunk and verify
 * the caller-supplied tag.
 */
TEE_Result syscall_authenc_dec_final(unsigned long state,
				     const void *src_data, size_t src_len,
				     void *dst_data, uint64_t *dst_len,
				     const void *tag, size_t tag_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen;
	size_t tmp_dlen;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	if (cs->mode != TEE_MODE_DECRYPT)
		return TEE_ERROR_BAD_PARAMETERS;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	if (!dst_len) {
		dlen = 0;
	} else {
		res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen));
		if (res != TEE_SUCCESS)
			return res;

		res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)dst_data, dlen);
		if (res != TEE_SUCCESS)
			return res;
	}

	if (dlen < src_len) {
		res = TEE_ERROR_SHORT_BUFFER;
		goto out;
	}

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)tag, tag_len);
	if (res != TEE_SUCCESS)
		return res;

	tmp_dlen = dlen;
	res = crypto_authenc_dec_final(cs->ctx, cs->algo, src_data, src_len,
				       dst_data, &tmp_dlen, tag, tag_len);
	dlen = tmp_dlen;

out:
	if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) &&
	    dst_len != NULL) {
		TEE_Result res2;

		res2 = tee_svc_copy_to_user(dst_len, &dlen,
					    sizeof(*dst_len));
		if (res2 != TEE_SUCCESS)
			return res2;
	}

	return res;
}

/*
 * Return the PSS salt length from the attribute list, or @default_len
 * when none is supplied. Values >= INT_MAX are ignored since the
 * return type is int.
 */
static int pkcs1_get_salt_len(const TEE_Attribute *params,
			      uint32_t num_params, size_t default_len)
{
	size_t n;

	assert(default_len < INT_MAX);

	for (n = 0; n < num_params; n++) {
		if (params[n].attributeID == TEE_ATTR_RSA_PSS_SALT_LENGTH) {
			if (params[n].content.value.a < INT_MAX)
				return params[n].content.value.a;
			break;
		}
	}
	/*
	 * If salt length isn't provided use the default value which is
	 * the length of the digest.
	 */
	return default_len;
}

/*
 * Perform an asymmetric operation (RSA encrypt/decrypt, RSA/DSA/ECDSA
 * sign) with the key bound to the state. dst_len is an in/out user
 * pointer; the produced size is copied back on success or short-buffer.
 */
TEE_Result syscall_asymm_operate(unsigned long state,
				 const struct utee_attribute *usr_params,
				 size_t num_params, const void *src_data,
				 size_t src_len, void *dst_data,
				 uint64_t *dst_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	uint64_t dlen64;
	size_t dlen;
	struct tee_obj *o;
	void *label = NULL;
	size_t label_len = 0;
	size_t n;
	int salt_len;
	TEE_Attribute *params = NULL;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(
		utc,
		TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER,
		(uaddr_t) src_data, src_len);
	if (res != TEE_SUCCESS)
		return res;

	/* Snapshot the user-provided capacity into kernel memory */
	res = tee_svc_copy_from_user(&dlen64, dst_len, sizeof(dlen64));
	if (res != TEE_SUCCESS)
		return res;
	dlen = dlen64;

	res = tee_mmu_check_access_rights(
		utc,
		TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE |
			TEE_MEMORY_ACCESS_ANY_OWNER,
		(uaddr_t) dst_data, dlen);
	if (res != TEE_SUCCESS)
		return res;

	params = malloc(sizeof(TEE_Attribute) * num_params);
	if (!params)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = copy_in_attrs(utc, usr_params, num_params, params);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_obj_get(utc, cs->key1, &o);
	if (res != TEE_SUCCESS)
		goto out;
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) {
		res = TEE_ERROR_GENERIC;
		goto out;
	}

	switch (cs->algo) {
	case TEE_ALG_RSA_NOPAD:
		if (cs->mode == TEE_MODE_ENCRYPT) {
			res = crypto_acipher_rsanopad_encrypt(o->attr,
							      src_data,
							      src_len,
							      dst_data,
							      &dlen);
		} else if (cs->mode == TEE_MODE_DECRYPT) {
			res = crypto_acipher_rsanopad_decrypt(o->attr,
							      src_data,
							      src_len,
							      dst_data,
							      &dlen);
		} else {
			/*
			 * We will panic because "the mode is not compatible
			 * with the function"
			 */
			res = TEE_ERROR_GENERIC;
		}
		break;

	case TEE_ALG_RSAES_PKCS1_V1_5:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA1:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA224:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA256:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA384:
	case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA512:
		/* An optional OAEP label may be passed as an attribute */
		for (n = 0; n < num_params; n++) {
			if (params[n].attributeID == TEE_ATTR_RSA_OAEP_LABEL) {
				label = params[n].content.ref.buffer;
				label_len = params[n].content.ref.length;
				break;
			}
		}

		if (cs->mode == TEE_MODE_ENCRYPT) {
			res = crypto_acipher_rsaes_encrypt(cs->algo, o->attr,
							   label, label_len,
							   src_data, src_len,
							   dst_data, &dlen);
		} else if (cs->mode == TEE_MODE_DECRYPT) {
			res = crypto_acipher_rsaes_decrypt(
					cs->algo, o->attr, label, label_len,
					src_data, src_len, dst_data, &dlen);
		} else {
			res = TEE_ERROR_BAD_PARAMETERS;
		}
		break;

#if defined(CFG_CRYPTO_RSASSA_NA1)
	case TEE_ALG_RSASSA_PKCS1_V1_5:
#endif
	case TEE_ALG_RSASSA_PKCS1_V1_5_MD5:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA1:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA224:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA256:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA384:
	case TEE_ALG_RSASSA_PKCS1_V1_5_SHA512:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA1:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA224:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA384:
	case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA512:
		if (cs->mode != TEE_MODE_SIGN) {
			res = TEE_ERROR_BAD_PARAMETERS;
			break;
		}
		salt_len = pkcs1_get_salt_len(params, num_params, src_len);
		res = crypto_acipher_rsassa_sign(cs->algo, o->attr, salt_len,
						 src_data, src_len, dst_data,
						 &dlen);
		break;

	case TEE_ALG_DSA_SHA1:
	case TEE_ALG_DSA_SHA224:
	case TEE_ALG_DSA_SHA256:
		res = crypto_acipher_dsa_sign(cs->algo, o->attr, src_data,
					      src_len, dst_data, &dlen);
		break;

	case TEE_ALG_ECDSA_P192:
	case TEE_ALG_ECDSA_P224:
	case TEE_ALG_ECDSA_P256:
	case TEE_ALG_ECDSA_P384:
	case TEE_ALG_ECDSA_P521:
		res = crypto_acipher_ecc_sign(cs->algo, o->attr, src_data,
					      src_len, dst_data, &dlen);
		break;

	default:
		res = TEE_ERROR_BAD_PARAMETERS;
		break;
	}

out:
	free(params);

	if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) {
		TEE_Result res2;

		dlen64 = dlen;
		res2 = tee_svc_copy_to_user(dst_len, &dlen64,
					    sizeof(*dst_len));
		if (res2 != TEE_SUCCESS)
			return res2;
	}

	return res;
}

/*
 * Verify an asymmetric signature (RSA/DSA/ECDSA) against @data with the
 * key bound to the state. For hash-based RSA/DSA schemes, @data must be
 * the message digest and its length is checked against the digest size.
 */
TEE_Result syscall_asymm_verify(unsigned long state,
				const struct utee_attribute *usr_params,
				size_t num_params, const void *data,
				size_t data_len, const void *sig,
				size_t sig_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	struct tee_obj *o;
	size_t hash_size;
	int salt_len = 0;
	TEE_Attribute *params = NULL;
	uint32_t hash_algo;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	if (cs->mode != TEE_MODE_VERIFY)
		return TEE_ERROR_BAD_PARAMETERS;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)data, data_len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)sig, sig_len);
	if (res != TEE_SUCCESS)
		return res;

	params = malloc(sizeof(TEE_Attribute) * num_params);
	if (!params)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = copy_in_attrs(utc, usr_params, num_params, params);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_obj_get(utc, cs->key1, &o);
	if (res != TEE_SUCCESS)
		goto out;
	if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) {
	case TEE_MAIN_ALGO_RSA:
		/* Raw PKCS1 v1.5 (NA1) has no associated digest to check */
		if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) {
			hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
			res = tee_hash_get_digest_size(hash_algo,
						       &hash_size);
			if (res != TEE_SUCCESS)
				break;
			if (data_len != hash_size) {
				res = TEE_ERROR_BAD_PARAMETERS;
				break;
			}
			salt_len = pkcs1_get_salt_len(params, num_params,
						      hash_size);
		}
		res = crypto_acipher_rsassa_verify(cs->algo, o->attr,
						   salt_len, data,
						   data_len, sig, sig_len);
		break;

	case TEE_MAIN_ALGO_DSA:
		hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
		res = tee_hash_get_digest_size(hash_algo, &hash_size);
		if (res != TEE_SUCCESS)
			break;
		/*
		 * Depending on the DSA algorithm (NIST), the digital signature
		 * output size may be truncated to the size of a key pair
		 * (Q prime size). Q prime size must be less or equal than the
		 * hash output length of the hash algorithm involved.
		 */
		if (data_len > hash_size) {
			res = TEE_ERROR_BAD_PARAMETERS;
			break;
		}
		res = crypto_acipher_dsa_verify(cs->algo, o->attr, data,
						data_len, sig, sig_len);
		break;

	case TEE_MAIN_ALGO_ECDSA:
		res = crypto_acipher_ecc_verify(cs->algo, o->attr, data,
						data_len, sig, sig_len);
		break;

	default:
		res = TEE_ERROR_NOT_SUPPORTED;
	}

out:
	free(params);
	return res;
}
TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; attrs = malloc(sizeof(TEE_Attribute) * attr_count); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; }
TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size)) return TEE_ERROR_OVERFLOW; attrs = malloc(alloc_size); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; }
{'added': [(7, '#include <compiler.h>'), (1551, '\tsize_t alloc_size = 0;'), (1552, ''), (1553, '\tif (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size))'), (1554, '\t\treturn TEE_ERROR_OVERFLOW;'), (1555, ''), (1556, '\tattrs = malloc(alloc_size);'), (1559, '')], 'deleted': [(1550, '\tattrs = malloc(sizeof(TEE_Attribute) * attr_count);')]}
8
1
2,813
15,893
41
247
10
https://github.com/OP-TEE/optee_os
CVE-2019-1010296
CWE-787
1,489
lua_struct.c
C
b_unpack
/* ** {====================================================== ** Library for packing/unpacking structures. ** $Id: struct.c,v 1.4 2012/07/04 18:54:29 roberto Exp $ ** See Copyright Notice at the end of this file ** ======================================================= */ /* ** Valid formats: ** > - big endian ** < - little endian ** ![num] - alignment ** x - pading ** b/B - signed/unsigned byte ** h/H - signed/unsigned short ** l/L - signed/unsigned long ** T - size_t ** i/In - signed/unsigned integer with size `n' (default is size of int) ** cn - sequence of `n' chars (from/to a string); when packing, n==0 means the whole string; when unpacking, n==0 means use the previous read number as the string length ** s - zero-terminated string ** f - float ** d - double ** ' ' - ignored */ #include <assert.h> #include <ctype.h> #include <limits.h> #include <stddef.h> #include <string.h> #include "lua.h" #include "lauxlib.h" #if (LUA_VERSION_NUM >= 502) #define luaL_register(L,n,f) luaL_newlib(L,f) #endif /* basic integer type */ #if !defined(STRUCT_INT) #define STRUCT_INT long #endif typedef STRUCT_INT Inttype; /* corresponding unsigned version */ typedef unsigned STRUCT_INT Uinttype; /* maximum size (in bytes) for integral types */ #define MAXINTSIZE 32 /* is 'x' a power of 2? */ #define isp2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0) /* dummy structure to get alignment requirements */ struct cD { char c; double d; }; #define PADDING (sizeof(struct cD) - sizeof(double)) #define MAXALIGN (PADDING > sizeof(int) ? PADDING : sizeof(int)) /* endian options */ #define BIG 0 #define LITTLE 1 static union { int dummy; char endian; } const native = {1}; typedef struct Header { int endian; int align; } Header; static int getnum (lua_State *L, const char **fmt, int df) { if (!isdigit(**fmt)) /* no number? 
*/ return df; /* return default value */ else { int a = 0; do { if (a > (INT_MAX / 10) || a * 10 > (INT_MAX - (**fmt - '0'))) luaL_error(L, "integral size overflow"); a = a*10 + *((*fmt)++) - '0'; } while (isdigit(**fmt)); return a; } } #define defaultoptions(h) ((h)->endian = native.endian, (h)->align = 1) static size_t optsize (lua_State *L, char opt, const char **fmt) { switch (opt) { case 'B': case 'b': return sizeof(char); case 'H': case 'h': return sizeof(short); case 'L': case 'l': return sizeof(long); case 'T': return sizeof(size_t); case 'f': return sizeof(float); case 'd': return sizeof(double); case 'x': return 1; case 'c': return getnum(L, fmt, 1); case 'i': case 'I': { int sz = getnum(L, fmt, sizeof(int)); if (sz > MAXINTSIZE) luaL_error(L, "integral size %d is larger than limit of %d", sz, MAXINTSIZE); return sz; } default: return 0; /* other cases do not need alignment */ } } /* ** return number of bytes needed to align an element of size 'size' ** at current position 'len' */ static int gettoalign (size_t len, Header *h, int opt, size_t size) { if (size == 0 || opt == 'c') return 0; if (size > (size_t)h->align) size = h->align; /* respect max. 
alignment */ return (size - (len & (size - 1))) & (size - 1); } /* ** options to control endianess and alignment */ static void controloptions (lua_State *L, int opt, const char **fmt, Header *h) { switch (opt) { case ' ': return; /* ignore white spaces */ case '>': h->endian = BIG; return; case '<': h->endian = LITTLE; return; case '!': { int a = getnum(L, fmt, MAXALIGN); if (!isp2(a)) luaL_error(L, "alignment %d is not a power of 2", a); h->align = a; return; } default: { const char *msg = lua_pushfstring(L, "invalid format option '%c'", opt); luaL_argerror(L, 1, msg); } } } static void putinteger (lua_State *L, luaL_Buffer *b, int arg, int endian, int size) { lua_Number n = luaL_checknumber(L, arg); Uinttype value; char buff[MAXINTSIZE]; if (n < 0) value = (Uinttype)(Inttype)n; else value = (Uinttype)n; if (endian == LITTLE) { int i; for (i = 0; i < size; i++) { buff[i] = (value & 0xff); value >>= 8; } } else { int i; for (i = size - 1; i >= 0; i--) { buff[i] = (value & 0xff); value >>= 8; } } luaL_addlstring(b, buff, size); } static void correctbytes (char *b, int size, int endian) { if (endian != native.endian) { int i = 0; while (i < --size) { char temp = b[i]; b[i++] = b[size]; b[size] = temp; } } } static int b_pack (lua_State *L) { luaL_Buffer b; const char *fmt = luaL_checkstring(L, 1); Header h; int arg = 2; size_t totalsize = 0; defaultoptions(&h); lua_pushnil(L); /* mark to separate arguments from string buffer */ luaL_buffinit(L, &b); while (*fmt != '\0') { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); int toalign = gettoalign(totalsize, &h, opt, size); totalsize += toalign; while (toalign-- > 0) luaL_addchar(&b, '\0'); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ putinteger(L, &b, arg++, h.endian, size); break; } case 'x': { luaL_addchar(&b, '\0'); break; } case 'f': { float f = (float)luaL_checknumber(L, arg++); correctbytes((char *)&f, size, h.endian); 
luaL_addlstring(&b, (char *)&f, size); break; } case 'd': { double d = luaL_checknumber(L, arg++); correctbytes((char *)&d, size, h.endian); luaL_addlstring(&b, (char *)&d, size); break; } case 'c': case 's': { size_t l; const char *s = luaL_checklstring(L, arg++, &l); if (size == 0) size = l; luaL_argcheck(L, l >= (size_t)size, arg, "string too short"); luaL_addlstring(&b, s, size); if (opt == 's') { luaL_addchar(&b, '\0'); /* add zero at the end */ size++; } break; } default: controloptions(L, opt, &fmt, &h); } totalsize += size; } luaL_pushresult(&b); return 1; } static lua_Number getinteger (const char *buff, int endian, int issigned, int size) { Uinttype l = 0; int i; if (endian == BIG) { for (i = 0; i < size; i++) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } else { for (i = size - 1; i >= 0; i--) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } if (!issigned) return (lua_Number)l; else { /* signed format */ Uinttype mask = (Uinttype)(~((Uinttype)0)) << (size*8 - 1); if (l & mask) /* negative value? 
*/ l |= mask; /* signal extension */ return (lua_Number)(Inttype)l; } } static int b_unpack (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t ld; const char *data = luaL_checklstring(L, 2, &ld); size_t pos = luaL_optinteger(L, 3, 1) - 1; defaultoptions(&h); lua_settop(L, 2); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); luaL_checkstack(L, 1, "too many results"); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ int issigned = islower(opt); lua_Number res = getinteger(data+pos, h.endian, issigned, size); lua_pushnumber(L, res); break; } case 'x': { break; } case 'f': { float f; memcpy(&f, data+pos, size); correctbytes((char *)&f, sizeof(f), h.endian); lua_pushnumber(L, f); break; } case 'd': { double d; memcpy(&d, data+pos, size); correctbytes((char *)&d, sizeof(d), h.endian); lua_pushnumber(L, d); break; } case 'c': { if (size == 0) { if (!lua_isnumber(L, -1)) luaL_error(L, "format `c0' needs a previous size"); size = lua_tonumber(L, -1); lua_pop(L, 1); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); } lua_pushlstring(L, data+pos, size); break; } case 's': { const char *e = (const char *)memchr(data+pos, '\0', ld - pos); if (e == NULL) luaL_error(L, "unfinished string in data"); size = (e - (data+pos)) + 1; lua_pushlstring(L, data+pos, size - 1); break; } default: controloptions(L, opt, &fmt, &h); } pos += size; } lua_pushinteger(L, pos + 1); return lua_gettop(L) - 2; } static int b_size (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t pos = 0; defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); if (opt == 's') luaL_argerror(L, 1, "option 's' has no fixed size"); else if (opt == 'c' && size == 0) luaL_argerror(L, 1, "option 
'c0' has no fixed size"); if (!isalnum(opt)) controloptions(L, opt, &fmt, &h); pos += size; } lua_pushinteger(L, pos); return 1; } /* }====================================================== */ static const struct luaL_Reg thislib[] = { {"pack", b_pack}, {"unpack", b_unpack}, {"size", b_size}, {NULL, NULL} }; LUALIB_API int luaopen_struct (lua_State *L); LUALIB_API int luaopen_struct (lua_State *L) { luaL_register(L, "struct", thislib); return 1; } /****************************************************************************** * Copyright (C) 2010-2012 Lua.org, PUC-Rio. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ******************************************************************************/
/* ** {====================================================== ** Library for packing/unpacking structures. ** $Id: struct.c,v 1.7 2018/05/11 22:04:31 roberto Exp $ ** See Copyright Notice at the end of this file ** ======================================================= */ /* ** Valid formats: ** > - big endian ** < - little endian ** ![num] - alignment ** x - pading ** b/B - signed/unsigned byte ** h/H - signed/unsigned short ** l/L - signed/unsigned long ** T - size_t ** i/In - signed/unsigned integer with size 'n' (default is size of int) ** cn - sequence of 'n' chars (from/to a string); when packing, n==0 means the whole string; when unpacking, n==0 means use the previous read number as the string length ** s - zero-terminated string ** f - float ** d - double ** ' ' - ignored */ #include <assert.h> #include <ctype.h> #include <limits.h> #include <stddef.h> #include <string.h> #include "lua.h" #include "lauxlib.h" #if (LUA_VERSION_NUM >= 502) #define luaL_register(L,n,f) luaL_newlib(L,f) #endif /* basic integer type */ #if !defined(STRUCT_INT) #define STRUCT_INT long #endif typedef STRUCT_INT Inttype; /* corresponding unsigned version */ typedef unsigned STRUCT_INT Uinttype; /* maximum size (in bytes) for integral types */ #define MAXINTSIZE 32 /* is 'x' a power of 2? */ #define isp2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0) /* dummy structure to get alignment requirements */ struct cD { char c; double d; }; #define PADDING (sizeof(struct cD) - sizeof(double)) #define MAXALIGN (PADDING > sizeof(int) ? PADDING : sizeof(int)) /* endian options */ #define BIG 0 #define LITTLE 1 static union { int dummy; char endian; } const native = {1}; typedef struct Header { int endian; int align; } Header; static int getnum (const char **fmt, int df) { if (!isdigit(**fmt)) /* no number? 
*/ return df; /* return default value */ else { int a = 0; do { a = a*10 + *((*fmt)++) - '0'; } while (isdigit(**fmt)); return a; } } #define defaultoptions(h) ((h)->endian = native.endian, (h)->align = 1) static size_t optsize (lua_State *L, char opt, const char **fmt) { switch (opt) { case 'B': case 'b': return sizeof(char); case 'H': case 'h': return sizeof(short); case 'L': case 'l': return sizeof(long); case 'T': return sizeof(size_t); case 'f': return sizeof(float); case 'd': return sizeof(double); case 'x': return 1; case 'c': return getnum(fmt, 1); case 'i': case 'I': { int sz = getnum(fmt, sizeof(int)); if (sz > MAXINTSIZE) luaL_error(L, "integral size %d is larger than limit of %d", sz, MAXINTSIZE); return sz; } default: return 0; /* other cases do not need alignment */ } } /* ** return number of bytes needed to align an element of size 'size' ** at current position 'len' */ static int gettoalign (size_t len, Header *h, int opt, size_t size) { if (size == 0 || opt == 'c') return 0; if (size > (size_t)h->align) size = h->align; /* respect max. 
alignment */ return (size - (len & (size - 1))) & (size - 1); } /* ** options to control endianess and alignment */ static void controloptions (lua_State *L, int opt, const char **fmt, Header *h) { switch (opt) { case ' ': return; /* ignore white spaces */ case '>': h->endian = BIG; return; case '<': h->endian = LITTLE; return; case '!': { int a = getnum(fmt, MAXALIGN); if (!isp2(a)) luaL_error(L, "alignment %d is not a power of 2", a); h->align = a; return; } default: { const char *msg = lua_pushfstring(L, "invalid format option '%c'", opt); luaL_argerror(L, 1, msg); } } } static void putinteger (lua_State *L, luaL_Buffer *b, int arg, int endian, int size) { lua_Number n = luaL_checknumber(L, arg); Uinttype value; char buff[MAXINTSIZE]; if (n < 0) value = (Uinttype)(Inttype)n; else value = (Uinttype)n; if (endian == LITTLE) { int i; for (i = 0; i < size; i++) { buff[i] = (value & 0xff); value >>= 8; } } else { int i; for (i = size - 1; i >= 0; i--) { buff[i] = (value & 0xff); value >>= 8; } } luaL_addlstring(b, buff, size); } static void correctbytes (char *b, int size, int endian) { if (endian != native.endian) { int i = 0; while (i < --size) { char temp = b[i]; b[i++] = b[size]; b[size] = temp; } } } static int b_pack (lua_State *L) { luaL_Buffer b; const char *fmt = luaL_checkstring(L, 1); Header h; int arg = 2; size_t totalsize = 0; defaultoptions(&h); lua_pushnil(L); /* mark to separate arguments from string buffer */ luaL_buffinit(L, &b); while (*fmt != '\0') { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); int toalign = gettoalign(totalsize, &h, opt, size); totalsize += toalign; while (toalign-- > 0) luaL_addchar(&b, '\0'); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ putinteger(L, &b, arg++, h.endian, size); break; } case 'x': { luaL_addchar(&b, '\0'); break; } case 'f': { float f = (float)luaL_checknumber(L, arg++); correctbytes((char *)&f, size, h.endian); 
luaL_addlstring(&b, (char *)&f, size); break; } case 'd': { double d = luaL_checknumber(L, arg++); correctbytes((char *)&d, size, h.endian); luaL_addlstring(&b, (char *)&d, size); break; } case 'c': case 's': { size_t l; const char *s = luaL_checklstring(L, arg++, &l); if (size == 0) size = l; luaL_argcheck(L, l >= (size_t)size, arg, "string too short"); luaL_addlstring(&b, s, size); if (opt == 's') { luaL_addchar(&b, '\0'); /* add zero at the end */ size++; } break; } default: controloptions(L, opt, &fmt, &h); } totalsize += size; } luaL_pushresult(&b); return 1; } static lua_Number getinteger (const char *buff, int endian, int issigned, int size) { Uinttype l = 0; int i; if (endian == BIG) { for (i = 0; i < size; i++) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } else { for (i = size - 1; i >= 0; i--) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } if (!issigned) return (lua_Number)l; else { /* signed format */ Uinttype mask = (Uinttype)(~((Uinttype)0)) << (size*8 - 1); if (l & mask) /* negative value? 
*/ l |= mask; /* signal extension */ return (lua_Number)(Inttype)l; } } static int b_unpack (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t ld; const char *data = luaL_checklstring(L, 2, &ld); size_t pos = luaL_optinteger(L, 3, 1) - 1; int n = 0; /* number of results */ defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); /* stack space for item + next position */ luaL_checkstack(L, 2, "too many results"); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ int issigned = islower(opt); lua_Number res = getinteger(data+pos, h.endian, issigned, size); lua_pushnumber(L, res); n++; break; } case 'x': { break; } case 'f': { float f; memcpy(&f, data+pos, size); correctbytes((char *)&f, sizeof(f), h.endian); lua_pushnumber(L, f); n++; break; } case 'd': { double d; memcpy(&d, data+pos, size); correctbytes((char *)&d, sizeof(d), h.endian); lua_pushnumber(L, d); n++; break; } case 'c': { if (size == 0) { if (n == 0 || !lua_isnumber(L, -1)) luaL_error(L, "format 'c0' needs a previous size"); size = lua_tonumber(L, -1); lua_pop(L, 1); n--; luaL_argcheck(L, size <= ld && pos <= ld - size, 2, "data string too short"); } lua_pushlstring(L, data+pos, size); n++; break; } case 's': { const char *e = (const char *)memchr(data+pos, '\0', ld - pos); if (e == NULL) luaL_error(L, "unfinished string in data"); size = (e - (data+pos)) + 1; lua_pushlstring(L, data+pos, size - 1); n++; break; } default: controloptions(L, opt, &fmt, &h); } pos += size; } lua_pushinteger(L, pos + 1); /* next position */ return n + 1; } static int b_size (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t pos = 0; defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); if (opt 
== 's') luaL_argerror(L, 1, "option 's' has no fixed size"); else if (opt == 'c' && size == 0) luaL_argerror(L, 1, "option 'c0' has no fixed size"); if (!isalnum(opt)) controloptions(L, opt, &fmt, &h); pos += size; } lua_pushinteger(L, pos); return 1; } /* }====================================================== */ static const struct luaL_Reg thislib[] = { {"pack", b_pack}, {"unpack", b_unpack}, {"size", b_size}, {NULL, NULL} }; LUALIB_API int luaopen_struct (lua_State *L); LUALIB_API int luaopen_struct (lua_State *L) { luaL_register(L, "struct", thislib); return 1; } /****************************************************************************** * Copyright (C) 2010-2018 Lua.org, PUC-Rio. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ******************************************************************************/
static int b_unpack (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t ld; const char *data = luaL_checklstring(L, 2, &ld); size_t pos = luaL_optinteger(L, 3, 1) - 1; defaultoptions(&h); lua_settop(L, 2); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); luaL_checkstack(L, 1, "too many results"); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ int issigned = islower(opt); lua_Number res = getinteger(data+pos, h.endian, issigned, size); lua_pushnumber(L, res); break; } case 'x': { break; } case 'f': { float f; memcpy(&f, data+pos, size); correctbytes((char *)&f, sizeof(f), h.endian); lua_pushnumber(L, f); break; } case 'd': { double d; memcpy(&d, data+pos, size); correctbytes((char *)&d, sizeof(d), h.endian); lua_pushnumber(L, d); break; } case 'c': { if (size == 0) { if (!lua_isnumber(L, -1)) luaL_error(L, "format `c0' needs a previous size"); size = lua_tonumber(L, -1); lua_pop(L, 1); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); } lua_pushlstring(L, data+pos, size); break; } case 's': { const char *e = (const char *)memchr(data+pos, '\0', ld - pos); if (e == NULL) luaL_error(L, "unfinished string in data"); size = (e - (data+pos)) + 1; lua_pushlstring(L, data+pos, size - 1); break; } default: controloptions(L, opt, &fmt, &h); } pos += size; } lua_pushinteger(L, pos + 1); return lua_gettop(L) - 2; }
static int b_unpack (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t ld; const char *data = luaL_checklstring(L, 2, &ld); size_t pos = luaL_optinteger(L, 3, 1) - 1; int n = 0; /* number of results */ defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); luaL_argcheck(L, pos+size <= ld, 2, "data string too short"); /* stack space for item + next position */ luaL_checkstack(L, 2, "too many results"); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ int issigned = islower(opt); lua_Number res = getinteger(data+pos, h.endian, issigned, size); lua_pushnumber(L, res); n++; break; } case 'x': { break; } case 'f': { float f; memcpy(&f, data+pos, size); correctbytes((char *)&f, sizeof(f), h.endian); lua_pushnumber(L, f); n++; break; } case 'd': { double d; memcpy(&d, data+pos, size); correctbytes((char *)&d, sizeof(d), h.endian); lua_pushnumber(L, d); n++; break; } case 'c': { if (size == 0) { if (n == 0 || !lua_isnumber(L, -1)) luaL_error(L, "format 'c0' needs a previous size"); size = lua_tonumber(L, -1); lua_pop(L, 1); n--; luaL_argcheck(L, size <= ld && pos <= ld - size, 2, "data string too short"); } lua_pushlstring(L, data+pos, size); n++; break; } case 's': { const char *e = (const char *)memchr(data+pos, '\0', ld - pos); if (e == NULL) luaL_error(L, "unfinished string in data"); size = (e - (data+pos)) + 1; lua_pushlstring(L, data+pos, size - 1); n++; break; } default: controloptions(L, opt, &fmt, &h); } pos += size; } lua_pushinteger(L, pos + 1); /* next position */ return n + 1; }
{'added': [(4, '** $Id: struct.c,v 1.7 2018/05/11 22:04:31 roberto Exp $'), (18, "** i/In - signed/unsigned integer with size 'n' (default is size of int)"), (19, "** cn - sequence of 'n' chars (from/to a string); when packing, n==0 means"), (92, 'static int getnum (const char **fmt, int df) {'), (118, " case 'c': return getnum(fmt, 1);"), (120, ' int sz = getnum(fmt, sizeof(int));'), (153, ' int a = getnum(fmt, MAXALIGN);'), (297, ' int n = 0; /* number of results */'), (304, ' /* stack space for item + next position */'), (305, ' luaL_checkstack(L, 2, "too many results");'), (311, ' lua_pushnumber(L, res); n++;'), (321, ' lua_pushnumber(L, f); n++;'), (328, ' lua_pushnumber(L, d); n++;'), (333, ' if (n == 0 || !lua_isnumber(L, -1))'), (334, ' luaL_error(L, "format \'c0\' needs a previous size");'), (336, ' lua_pop(L, 1); n--;'), (337, ' luaL_argcheck(L, size <= ld && pos <= ld - size,'), (338, ' 2, "data string too short");'), (340, ' lua_pushlstring(L, data+pos, size); n++;'), (348, ' lua_pushlstring(L, data+pos, size - 1); n++;'), (355, ' lua_pushinteger(L, pos + 1); /* next position */'), (356, ' return n + 1;'), (402, '* Copyright (C) 2010-2018 Lua.org, PUC-Rio. 
All rights reserved.')], 'deleted': [(4, '** $Id: struct.c,v 1.4 2012/07/04 18:54:29 roberto Exp $'), (18, "** i/In - signed/unsigned integer with size `n' (default is size of int)"), (19, "** cn - sequence of `n' chars (from/to a string); when packing, n==0 means"), (92, 'static int getnum (lua_State *L, const char **fmt, int df) {'), (98, " if (a > (INT_MAX / 10) || a * 10 > (INT_MAX - (**fmt - '0')))"), (99, ' luaL_error(L, "integral size overflow");'), (120, " case 'c': return getnum(L, fmt, 1);"), (122, ' int sz = getnum(L, fmt, sizeof(int));'), (155, ' int a = getnum(L, fmt, MAXALIGN);'), (300, ' lua_settop(L, 2);'), (306, ' luaL_checkstack(L, 1, "too many results");'), (312, ' lua_pushnumber(L, res);'), (322, ' lua_pushnumber(L, f);'), (329, ' lua_pushnumber(L, d);'), (334, ' if (!lua_isnumber(L, -1))'), (335, ' luaL_error(L, "format `c0\' needs a previous size");'), (337, ' lua_pop(L, 1);'), (338, ' luaL_argcheck(L, pos+size <= ld, 2, "data string too short");'), (340, ' lua_pushlstring(L, data+pos, size);'), (348, ' lua_pushlstring(L, data+pos, size - 1);'), (355, ' lua_pushinteger(L, pos + 1);'), (356, ' return lua_gettop(L) - 2;'), (402, '* Copyright (C) 2010-2012 Lua.org, PUC-Rio. All rights reserved.')]}
23
23
290
1,981
65
488
19
https://github.com/antirez/redis
CVE-2018-11219
CWE-190
1,536
af_rose.c
C
rose_recvmsg
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net) * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/net_namespace.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/rose.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/tcp_states.h> #include <net/ip.h> #include <net/arp.h> static int rose_ndevs = 10; int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0; int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1; int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2; int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3; int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE; int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB; int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING; int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT; int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC; int sysctl_rose_window_size = 
ROSE_DEFAULT_WINDOW_SIZE; static HLIST_HEAD(rose_list); static DEFINE_SPINLOCK(rose_list_lock); static const struct proto_ops rose_proto_ops; ax25_address rose_callsign; /* * ROSE network devices are virtual network devices encapsulating ROSE * frames into AX.25 which will be sent through an AX.25 device, so form a * special "super class" of normal net devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key rose_netdev_xmit_lock_key; static struct lock_class_key rose_netdev_addr_lock_key; static void rose_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); } static void rose_set_lockdep_key(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); } /* * Convert a ROSE address into text. */ char *rose2asc(char *buf, const rose_address *addr) { if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 && addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 && addr->rose_addr[4] == 0x00) { strcpy(buf, "*"); } else { sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF, addr->rose_addr[1] & 0xFF, addr->rose_addr[2] & 0xFF, addr->rose_addr[3] & 0xFF, addr->rose_addr[4] & 0xFF); } return buf; } /* * Compare two ROSE addresses, 0 == equal. */ int rosecmp(rose_address *addr1, rose_address *addr2) { int i; for (i = 0; i < 5; i++) if (addr1->rose_addr[i] != addr2->rose_addr[i]) return 1; return 0; } /* * Compare two ROSE addresses for only mask digits, 0 == equal. 
*/ int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask) { unsigned int i, j; if (mask > 10) return 1; for (i = 0; i < mask; i++) { j = i / 2; if ((i % 2) != 0) { if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F)) return 1; } else { if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0)) return 1; } } return 0; } /* * Socket removal during an interrupt is now safe. */ static void rose_remove_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_del_node_init(sk); spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a broken link layer connection to a * particular neighbour. */ void rose_kill_by_neigh(struct rose_neigh *neigh) { struct sock *s; spin_lock_bh(&rose_list_lock); sk_for_each(s, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->neighbour == neigh) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->neighbour = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void rose_kill_by_device(struct net_device *dev) { struct sock *s; spin_lock_bh(&rose_list_lock); sk_for_each(s, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->device == dev) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->device = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Handle device status changes. */ static int rose_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; switch (dev->type) { case ARPHRD_ROSE: rose_kill_by_device(dev); break; case ARPHRD_AX25: rose_link_device_down(dev); rose_rt_device_down(dev); break; } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. 
*/ static void rose_insert_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_add_node(sk, &rose_list); spin_unlock_bh(&rose_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. */ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) { struct sock *s; spin_lock_bh(&rose_list_lock); sk_for_each(s, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, call) && !rose->source_ndigis && s->sk_state == TCP_LISTEN) goto found; } sk_for_each(s, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, &null_ax25_address) && s->sk_state == TCP_LISTEN) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a connected ROSE socket given my LCI and device. */ struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) { struct sock *s; spin_lock_bh(&rose_list_lock); sk_for_each(s, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->lci == lci && rose->neighbour == neigh) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a unique LCI for a given device. */ unsigned int rose_new_lci(struct rose_neigh *neigh) { int lci; if (neigh->dce_mode) { for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } else { for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } return 0; } /* * Deferred destroy. */ void rose_destroy_socket(struct sock *); /* * Handler for deferred kills. */ static void rose_destroy_timer(unsigned long data) { rose_destroy_socket((struct sock *)data); } /* * This is called from user mode and the timers. 
Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. */ void rose_destroy_socket(struct sock *sk) { struct sk_buff *skb; rose_remove_socket(sk); rose_stop_heartbeat(sk); rose_stop_idletimer(sk); rose_stop_timer(sk); rose_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* Queue the unaccepted socket for death */ sock_set_flag(skb->sk, SOCK_DEAD); rose_start_heartbeat(skb->sk); rose_sk(skb->sk)->state = ROSE_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ setup_timer(&sk->sk_timer, rose_destroy_timer, (unsigned long)sk); sk->sk_timer.expires = jiffies + 10 * HZ; add_timer(&sk->sk_timer); } else sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * ROSE socket object. */ static int rose_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int opt; if (level != SOL_ROSE) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case ROSE_DEFER: rose->defer = opt ? 1 : 0; return 0; case ROSE_T1: if (opt < 1) return -EINVAL; rose->t1 = opt * HZ; return 0; case ROSE_T2: if (opt < 1) return -EINVAL; rose->t2 = opt * HZ; return 0; case ROSE_T3: if (opt < 1) return -EINVAL; rose->t3 = opt * HZ; return 0; case ROSE_HOLDBACK: if (opt < 1) return -EINVAL; rose->hb = opt * HZ; return 0; case ROSE_IDLE: if (opt < 0) return -EINVAL; rose->idle = opt * 60 * HZ; return 0; case ROSE_QBITINCL: rose->qbitincl = opt ? 
1 : 0; return 0; default: return -ENOPROTOOPT; } } static int rose_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int val = 0; int len; if (level != SOL_ROSE) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case ROSE_DEFER: val = rose->defer; break; case ROSE_T1: val = rose->t1 / HZ; break; case ROSE_T2: val = rose->t2 / HZ; break; case ROSE_T3: val = rose->t3 / HZ; break; case ROSE_HOLDBACK: val = rose->hb / HZ; break; case ROSE_IDLE: val = rose->idle / (60 * HZ); break; case ROSE_QBITINCL: val = rose->qbitincl; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? -EFAULT : 0; } static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); rose->dest_ndigis = 0; memset(&rose->dest_addr, 0, ROSE_ADDR_LEN); memset(&rose->dest_call, 0, AX25_ADDR_LEN); memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; return 0; } return -EOPNOTSUPP; } static struct proto rose_proto = { .name = "ROSE", .owner = THIS_MODULE, .obj_size = sizeof(struct rose_sock), }; static int rose_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct rose_sock *rose; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return -ENOMEM; rose = rose_sk(sk); sock_init_data(sock, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sock->ops = &rose_proto_ops; sk->sk_protocol = protocol; 
init_timer(&rose->timer); init_timer(&rose->idletimer); rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout); rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout); rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout); rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout); rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout); rose->state = ROSE_STATE_0; return 0; } static struct sock *rose_make_new(struct sock *osk) { struct sock *sk; struct rose_sock *rose, *orose; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return NULL; rose = rose_sk(sk); sock_init_data(NULL, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); init_timer(&rose->timer); init_timer(&rose->idletimer); orose = rose_sk(osk); rose->t1 = orose->t1; rose->t2 = orose->t2; rose->t3 = orose->t3; rose->hb = orose->hb; rose->idle = orose->idle; rose->defer = orose->defer; rose->device = orose->device; rose->qbitincl = orose->qbitincl; return sk; } static int rose_release(struct socket *sock) { struct sock *sk = sock->sk; struct rose_sock *rose; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); rose = rose_sk(sk); switch (rose->state) { case ROSE_STATE_0: release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_2: rose->neighbour->use--; release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_1: case ROSE_STATE_3: case ROSE_STATE_4: case ROSE_STATE_5: rose_clear_queues(sk); rose_stop_idletimer(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); 
rose_start_t3timer(sk); rose->state = ROSE_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; struct net_device *dev; ax25_address *source; ax25_uid_assoc *user; int n; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) return -EADDRNOTAVAIL; source = &addr->srose_call; user = ax25_findbyuid(current_euid()); if (user) { rose->source_call = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; rose->source_call = *source; } rose->source_addr = addr->srose_addr; rose->device = dev; rose->source_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->source_digis[n] = full_addr->srose_digis[n]; } else { if (rose->source_ndigis == 1) { rose->source_digis[0] = addr->srose_digi; } } rose_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); return 0; } static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; unsigned char cause, diagnostic; struct 
net_device *dev; ax25_uid_assoc *user; int n, err = 0; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS) return -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { /* Connect completed during a ERESTARTSYS event */ sock->state = SS_CONNECTED; goto out_release; } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0); if (!rose->neighbour) { err = -ENETUNREACH; goto out_release; } rose->lci = rose_new_lci(rose->neighbour); if (!rose->lci) { err = -ENETUNREACH; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = rose_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } user = ax25_findbyuid(current_euid()); if (!user) { err = -EINVAL; goto out_release; } memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN); rose->source_call = user->call; rose->device = dev; ax25_uid_put(user); rose_insert_socket(sk); /* Finish the bind */ } rose->dest_addr = addr->srose_addr; rose->dest_call = addr->srose_call; rose->rand = ((long)rose & 0xFFFF) + rose->lci; rose->dest_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct 
full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->dest_digis[n] = full_addr->srose_digis[n]; } else { if (rose->dest_ndigis == 1) { rose->dest_digis[0] = addr->srose_digi; } } /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; rose->state = ROSE_STATE_1; rose->neighbour->use++; rose_write_internal(sk, ROSE_CALL_REQUEST); rose_start_heartbeat(sk); rose_start_t1timer(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int rose_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = 
-ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; out_release: release_sock(sk); return err; } static int rose_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr; struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int n; memset(srose, 0, sizeof(*srose)); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; for (n = 0; n < rose->dest_ndigis; n++) srose->srose_digis[n] = rose->dest_digis[n]; } else { srose->srose_family = AF_ROSE; srose->srose_addr = rose->source_addr; srose->srose_call = rose->source_call; srose->srose_ndigis = rose->source_ndigis; for (n = 0; n < rose->source_ndigis; n++) srose->srose_digis[n] = rose->source_digis[n]; } *uaddr_len = sizeof(struct full_sockaddr_rose); return 0; } int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, skb->len - ROSE_CALL_REQ_FACILITIES_OFF, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * We can't accept the Call Request. 
*/ if (sk == NULL || sk_acceptq_is_full(sk) || (make = rose_make_new(sk)) == NULL) { rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120); return 0; } skb->sk = make; make->sk_state = TCP_ESTABLISHED; make_rose = rose_sk(make); make_rose->lci = lci; make_rose->dest_addr = facilities.dest_addr; make_rose->dest_call = facilities.dest_call; make_rose->dest_ndigis = facilities.dest_ndigis; for (n = 0 ; n < facilities.dest_ndigis ; n++) make_rose->dest_digis[n] = facilities.dest_digis[n]; make_rose->source_addr = facilities.source_addr; make_rose->source_call = facilities.source_call; make_rose->source_ndigis = facilities.source_ndigis; for (n = 0 ; n < facilities.source_ndigis ; n++) make_rose->source_digis[n]= facilities.source_digis[n]; make_rose->neighbour = neigh; make_rose->device = dev; make_rose->facilities = facilities; make_rose->neighbour->use++; if (rose_sk(sk)->defer) { make_rose->state = ROSE_STATE_5; } else { rose_write_internal(make, ROSE_CALL_ACCEPTED); make_rose->state = ROSE_STATE_3; rose_start_idletimer(make); } make_rose->condition = 0x00; make_rose->vs = 0; make_rose->va = 0; make_rose->vr = 0; make_rose->vl = 0; sk->sk_ack_backlog++; rose_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); rose_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 1; } static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name; int err; struct full_sockaddr_rose srose; struct sk_buff *skb; unsigned char *asmptr; int n, size, qbit = 0; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) return -EADDRNOTAVAIL; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); return -EPIPE; } if (rose->neighbour == NULL || rose->device == NULL) return -ENETUNREACH; 
if (usrose != NULL) { if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose)) return -EINVAL; memset(&srose, 0, sizeof(struct full_sockaddr_rose)); memcpy(&srose, usrose, msg->msg_namelen); if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 || ax25cmp(&rose->dest_call, &srose.srose_call) != 0) return -EISCONN; if (srose.srose_ndigis != rose->dest_ndigis) return -EISCONN; if (srose.srose_ndigis == rose->dest_ndigis) { for (n = 0 ; n < srose.srose_ndigis ; n++) if (ax25cmp(&rose->dest_digis[n], &srose.srose_digis[n])) return -EISCONN; } if (srose.srose_family != AF_ROSE) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose.srose_family = AF_ROSE; srose.srose_addr = rose->dest_addr; srose.srose_call = rose->dest_call; srose.srose_ndigis = rose->dest_ndigis; for (n = 0 ; n < rose->dest_ndigis ; n++) srose.srose_digis[n] = rose->dest_digis[n]; } /* Build a packet */ /* Sanity check the packet size */ if (len > 65535) return -EMSGSIZE; size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) return err; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN); /* * Put the data on the end */ skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); return err; } /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (rose->qbitincl) { qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the ROSE header */ asmptr = skb_push(skb, ROSE_MIN_LEN); /* Build a ROSE Network header */ asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; asmptr[1] = (rose->lci >> 0) & 0xFF; asmptr[2] = ROSE_DATA; if (qbit) asmptr[0] |= ROSE_Q_BIT; if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); return -ENOTCONN; } #ifdef M_BIT #define ROSE_PACLEN (256-ROSE_MIN_LEN) if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) { unsigned char header[ROSE_MIN_LEN]; struct sk_buff *skbn; int frontlen; int lg; /* Save a copy of the Header */ skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN); skb_pull(skb, ROSE_MIN_LEN); frontlen = skb_headroom(skb); while (skb->len > 0) { if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) { kfree_skb(skb); return err; } skbn->sk = sk; skbn->free = 1; skbn->arp = 1; skb_reserve(skbn, frontlen); lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN; /* Copy the user data */ skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); skb_pull(skb, lg); /* Duplicate the Header */ skb_push(skbn, ROSE_MIN_LEN); skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); if (skb->len > 0) skbn->data[2] |= M_BIT; skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ } skb->free = 1; kfree_skb(skb); } else { skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ } #else skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ #endif rose_kick(sk); return len; } static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name; size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! 
We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { memset(srose, 0, msg->msg_namelen); srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; } static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); void __user *argp = (void __user *)arg; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; return put_user(amount, (unsigned int __user *) argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; return put_user(amount, (unsigned int __user *) argp); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *) argp); case SIOCGSTAMPNS: return 
sock_get_timestampns(sk, (struct timespec __user *) argp); case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCRSCLRRT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return rose_rt_ioctl(cmd, argp); case SIOCRSGCAUSE: { struct rose_cause_struct rose_cause; rose_cause.cause = rose->cause; rose_cause.diagnostic = rose->diagnostic; return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0; } case SIOCRSSCAUSE: { struct rose_cause_struct rose_cause; if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct))) return -EFAULT; rose->cause = rose_cause.cause; rose->diagnostic = rose_cause.diagnostic; return 0; } case SIOCRSSL2CALL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address))) return -EFAULT; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) return ax25_listen_register(&rose_callsign, NULL); return 0; case SIOCRSGL2CALL: return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? 
-EFAULT : 0; case SIOCRSACCEPT: if (rose->state == ROSE_STATE_5) { rose_write_internal(sk, ROSE_CALL_ACCEPTED); rose_start_idletimer(sk); rose->condition = 0x00; rose->vs = 0; rose->va = 0; rose->vr = 0; rose->vl = 0; rose->state = ROSE_STATE_3; } return 0; default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *rose_info_start(struct seq_file *seq, loff_t *pos) __acquires(rose_list_lock) { spin_lock_bh(&rose_list_lock); return seq_hlist_start_head(&rose_list, *pos); } static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &rose_list, pos); } static void rose_info_stop(struct seq_file *seq, void *v) __releases(rose_list_lock) { spin_unlock_bh(&rose_list_lock); } static int rose_info_show(struct seq_file *seq, void *v) { char buf[11], rsbuf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); else { struct sock *s = sk_entry(v); struct rose_sock *rose = rose_sk(s); const char *devname, *callsign; const struct net_device *dev = rose->device; if (!dev) devname = "???"; else devname = dev->name; seq_printf(seq, "%-10s %-9s ", rose2asc(rsbuf, &rose->dest_addr), ax2asc(buf, &rose->dest_call)); if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) callsign = "??????-?"; else callsign = ax2asc(buf, &rose->source_call); seq_printf(seq, "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", rose2asc(rsbuf, &rose->source_addr), callsign, devname, rose->lci & 0x0FFF, (rose->neighbour) ? rose->neighbour->number : 0, rose->state, rose->vs, rose->vr, rose->va, ax25_display_timer(&rose->timer) / HZ, rose->t1 / HZ, rose->t2 / HZ, rose->t3 / HZ, rose->hb / HZ, ax25_display_timer(&rose->idletimer) / (60 * HZ), rose->idle / (60 * HZ), sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); } return 0; } static const struct seq_operations rose_info_seqops = { .start = rose_info_start, .next = rose_info_next, .stop = rose_info_stop, .show = rose_info_show, }; static int rose_info_open(struct inode *inode, struct file *file) { return seq_open(file, &rose_info_seqops); } static const struct file_operations rose_info_fops = { .owner = THIS_MODULE, .open = rose_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static const struct net_proto_family rose_family_ops = { .family = PF_ROSE, .create = rose_create, .owner = THIS_MODULE, }; static const struct proto_ops rose_proto_ops = { .family = PF_ROSE, .owner = THIS_MODULE, .release = rose_release, .bind = rose_bind, .connect = rose_connect, .socketpair = sock_no_socketpair, .accept = rose_accept, .getname = rose_getname, .poll = datagram_poll, .ioctl = rose_ioctl, .listen = rose_listen, .shutdown = sock_no_shutdown, .setsockopt = rose_setsockopt, .getsockopt = rose_getsockopt, .sendmsg = rose_sendmsg, .recvmsg = rose_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct notifier_block rose_dev_notifier = { .notifier_call = rose_device_event, }; static struct net_device **dev_rose; static struct ax25_protocol rose_pid = { .pid = AX25_P_ROSE, .func = rose_route_frame }; static struct ax25_linkfail rose_linkfail_notifier = { .func = rose_link_failed }; static int __init rose_proto_init(void) { int i; int rc; if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); rc = -EINVAL; goto out; } rc = proto_register(&rose_proto, 0); if (rc != 0) goto out; rose_callsign = null_ax25_address; dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } for (i = 0; i < 
rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; sprintf(name, "rose%d", i); dev = alloc_netdev(0, name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); rc = -ENOMEM; goto fail; } rc = register_netdev(dev); if (rc) { printk(KERN_ERR "ROSE: netdevice registration failed\n"); free_netdev(dev); goto fail; } rose_set_lockdep_key(dev); dev_rose[i] = dev; } sock_register(&rose_family_ops); register_netdevice_notifier(&rose_dev_notifier); ax25_register_pid(&rose_pid); ax25_linkfail_register(&rose_linkfail_notifier); #ifdef CONFIG_SYSCTL rose_register_sysctl(); #endif rose_loopback_init(); rose_add_loopback_neigh(); proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops); proc_create("rose_neigh", S_IRUGO, init_net.proc_net, &rose_neigh_fops); proc_create("rose_nodes", S_IRUGO, init_net.proc_net, &rose_nodes_fops); proc_create("rose_routes", S_IRUGO, init_net.proc_net, &rose_routes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_rose[i]); free_netdev(dev_rose[i]); } kfree(dev_rose); out_proto_unregister: proto_unregister(&rose_proto); goto out; } module_init(rose_proto_init); module_param(rose_ndevs, int, 0); MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_ROSE); static void __exit rose_exit(void) { int i; remove_proc_entry("rose", init_net.proc_net); remove_proc_entry("rose_neigh", init_net.proc_net); remove_proc_entry("rose_nodes", init_net.proc_net); remove_proc_entry("rose_routes", init_net.proc_net); rose_loopback_clear(); rose_rt_free(); ax25_protocol_release(AX25_P_ROSE); ax25_linkfail_release(&rose_linkfail_notifier); if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); #ifdef CONFIG_SYSCTL rose_unregister_sysctl(); #endif 
unregister_netdevice_notifier(&rose_dev_notifier); sock_unregister(PF_ROSE); for (i = 0; i < rose_ndevs; i++) { struct net_device *dev = dev_rose[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_rose); proto_unregister(&rose_proto); } module_exit(rose_exit);
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net) * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/net_namespace.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/rose.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/tcp_states.h> #include <net/ip.h> #include <net/arp.h> static int rose_ndevs = 10; int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0; int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1; int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2; int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3; int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE; int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB; int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING; int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT; int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC; int sysctl_rose_window_size = 
ROSE_DEFAULT_WINDOW_SIZE;

/* All bound/connected ROSE sockets; every traversal holds rose_list_lock. */
static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static const struct proto_ops rose_proto_ops;

/* Callsign registered with the AX.25 layer for ROSE traffic (set via ioctl). */
ax25_address rose_callsign;

/*
 * ROSE network devices are virtual network devices encapsulating ROSE
 * frames into AX.25 which will be sent through an AX.25 device, so form a
 * special "super class" of normal net devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;

/* Per-TX-queue callback: put _xmit_lock into the ROSE lockdep class. */
static void rose_set_lockdep_one(struct net_device *dev,
				 struct netdev_queue *txq,
				 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}

/* Reclassify all of @dev's locks so lockdep accepts the AX.25 nesting. */
static void rose_set_lockdep_key(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}

/*
 * Convert a ROSE address into text.  @buf must hold at least 11 bytes
 * (ten hex digits plus the NUL); the all-zero address prints as "*".
 */
char *rose2asc(char *buf, const rose_address *addr)
{
	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buf, "*");
	} else {
		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
						     addr->rose_addr[1] & 0xFF,
						     addr->rose_addr[2] & 0xFF,
						     addr->rose_addr[3] & 0xFF,
						     addr->rose_addr[4] & 0xFF);
	}

	return buf;
}

/*
 * Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 * Compare two ROSE addresses for only mask digits, 0 == equal.
*/ int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask) { unsigned int i, j; if (mask > 10) return 1; for (i = 0; i < mask; i++) { j = i / 2; if ((i % 2) != 0) { if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F)) return 1; } else { if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0)) return 1; } } return 0; } /* * Socket removal during an interrupt is now safe. */ static void rose_remove_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_del_node_init(sk); spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a broken link layer connection to a * particular neighbour. */ void rose_kill_by_neigh(struct rose_neigh *neigh) { struct sock *s; spin_lock_bh(&rose_list_lock); sk_for_each(s, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->neighbour == neigh) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->neighbour = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void rose_kill_by_device(struct net_device *dev) { struct sock *s; spin_lock_bh(&rose_list_lock); sk_for_each(s, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->device == dev) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->device = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Handle device status changes. */ static int rose_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; switch (dev->type) { case ARPHRD_ROSE: rose_kill_by_device(dev); break; case ARPHRD_AX25: rose_link_device_down(dev); rose_rt_device_down(dev); break; } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. 
*/
static void rose_insert_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}

/*
 *	Find a socket that wants to accept the Call Request we just
 *	received.
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;

	spin_lock_bh(&rose_list_lock);

	/* First pass: a listener bound to this exact address and callsign. */
	sk_for_each(s, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	/* Second pass: fall back to a wildcard (null-callsign) listener. */
	sk_for_each(s, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 *	Find a connected ROSE socket given my LCI and device.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 *	Find a unique LCI for a given device.  Returns 0 when every logical
 *	channel on the neighbour is in use.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

	/* DCE scans upward, DTE downward, to avoid call-collision on an LCI. */
	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 *	Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 *	Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 *	This is called from user mode and the timers.
Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work.  Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers — retry destruction in 10s. */
		setup_timer(&sk->sk_timer, rose_destroy_timer,
				(unsigned long)sk);
		sk->sk_timer.expires  = jiffies + 10 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 *	Handling for system calls applied via the various interfaces to a
 *	ROSE socket object.
 */

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

	/* Timer options are supplied in seconds and stored in jiffies. */
	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		/* 0 disables the idle timer; supplied in minutes. */
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ?
1 : 0; return 0; default: return -ENOPROTOOPT; } } static int rose_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int val = 0; int len; if (level != SOL_ROSE) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case ROSE_DEFER: val = rose->defer; break; case ROSE_T1: val = rose->t1 / HZ; break; case ROSE_T2: val = rose->t2 / HZ; break; case ROSE_T3: val = rose->t3 / HZ; break; case ROSE_HOLDBACK: val = rose->hb / HZ; break; case ROSE_IDLE: val = rose->idle / (60 * HZ); break; case ROSE_QBITINCL: val = rose->qbitincl; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? -EFAULT : 0; } static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); rose->dest_ndigis = 0; memset(&rose->dest_addr, 0, ROSE_ADDR_LEN); memset(&rose->dest_call, 0, AX25_ADDR_LEN); memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; return 0; } return -EOPNOTSUPP; } static struct proto rose_proto = { .name = "ROSE", .owner = THIS_MODULE, .obj_size = sizeof(struct rose_sock), }; static int rose_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct rose_sock *rose; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return -ENOMEM; rose = rose_sk(sk); sock_init_data(sock, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sock->ops = &rose_proto_ops; sk->sk_protocol = protocol; 
init_timer(&rose->timer); init_timer(&rose->idletimer); rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout); rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout); rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout); rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout); rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout); rose->state = ROSE_STATE_0; return 0; } static struct sock *rose_make_new(struct sock *osk) { struct sock *sk; struct rose_sock *rose, *orose; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return NULL; rose = rose_sk(sk); sock_init_data(NULL, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); init_timer(&rose->timer); init_timer(&rose->idletimer); orose = rose_sk(osk); rose->t1 = orose->t1; rose->t2 = orose->t2; rose->t3 = orose->t3; rose->hb = orose->hb; rose->idle = orose->idle; rose->defer = orose->defer; rose->device = orose->device; rose->qbitincl = orose->qbitincl; return sk; } static int rose_release(struct socket *sock) { struct sock *sk = sock->sk; struct rose_sock *rose; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); rose = rose_sk(sk); switch (rose->state) { case ROSE_STATE_0: release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_2: rose->neighbour->use--; release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_1: case ROSE_STATE_3: case ROSE_STATE_4: case ROSE_STATE_5: rose_clear_queues(sk); rose_stop_idletimer(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); 
rose_start_t3timer(sk); rose->state = ROSE_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; struct net_device *dev; ax25_address *source; ax25_uid_assoc *user; int n; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) return -EADDRNOTAVAIL; source = &addr->srose_call; user = ax25_findbyuid(current_euid()); if (user) { rose->source_call = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; rose->source_call = *source; } rose->source_addr = addr->srose_addr; rose->device = dev; rose->source_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->source_digis[n] = full_addr->srose_digis[n]; } else { if (rose->source_ndigis == 1) { rose->source_digis[0] = addr->srose_digi; } } rose_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); return 0; } static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; unsigned char cause, diagnostic; struct 
net_device *dev; ax25_uid_assoc *user; int n, err = 0; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS) return -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { /* Connect completed during a ERESTARTSYS event */ sock->state = SS_CONNECTED; goto out_release; } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0); if (!rose->neighbour) { err = -ENETUNREACH; goto out_release; } rose->lci = rose_new_lci(rose->neighbour); if (!rose->lci) { err = -ENETUNREACH; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = rose_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } user = ax25_findbyuid(current_euid()); if (!user) { err = -EINVAL; goto out_release; } memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN); rose->source_call = user->call; rose->device = dev; ax25_uid_put(user); rose_insert_socket(sk); /* Finish the bind */ } rose->dest_addr = addr->srose_addr; rose->dest_call = addr->srose_call; rose->rand = ((long)rose & 0xFFFF) + rose->lci; rose->dest_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct 
full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->dest_digis[n] = full_addr->srose_digis[n]; } else { if (rose->dest_ndigis == 1) { rose->dest_digis[0] = addr->srose_digi; } } /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; rose->state = ROSE_STATE_1; rose->neighbour->use++; rose_write_internal(sk, ROSE_CALL_REQUEST); rose_start_heartbeat(sk); rose_start_t1timer(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int rose_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = 
-ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; out_release: release_sock(sk); return err; } static int rose_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr; struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int n; memset(srose, 0, sizeof(*srose)); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; for (n = 0; n < rose->dest_ndigis; n++) srose->srose_digis[n] = rose->dest_digis[n]; } else { srose->srose_family = AF_ROSE; srose->srose_addr = rose->source_addr; srose->srose_call = rose->source_call; srose->srose_ndigis = rose->source_ndigis; for (n = 0; n < rose->source_ndigis; n++) srose->srose_digis[n] = rose->source_digis[n]; } *uaddr_len = sizeof(struct full_sockaddr_rose); return 0; } int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, skb->len - ROSE_CALL_REQ_FACILITIES_OFF, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * We can't accept the Call Request. 
*/ if (sk == NULL || sk_acceptq_is_full(sk) || (make = rose_make_new(sk)) == NULL) { rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120); return 0; } skb->sk = make; make->sk_state = TCP_ESTABLISHED; make_rose = rose_sk(make); make_rose->lci = lci; make_rose->dest_addr = facilities.dest_addr; make_rose->dest_call = facilities.dest_call; make_rose->dest_ndigis = facilities.dest_ndigis; for (n = 0 ; n < facilities.dest_ndigis ; n++) make_rose->dest_digis[n] = facilities.dest_digis[n]; make_rose->source_addr = facilities.source_addr; make_rose->source_call = facilities.source_call; make_rose->source_ndigis = facilities.source_ndigis; for (n = 0 ; n < facilities.source_ndigis ; n++) make_rose->source_digis[n]= facilities.source_digis[n]; make_rose->neighbour = neigh; make_rose->device = dev; make_rose->facilities = facilities; make_rose->neighbour->use++; if (rose_sk(sk)->defer) { make_rose->state = ROSE_STATE_5; } else { rose_write_internal(make, ROSE_CALL_ACCEPTED); make_rose->state = ROSE_STATE_3; rose_start_idletimer(make); } make_rose->condition = 0x00; make_rose->vs = 0; make_rose->va = 0; make_rose->vr = 0; make_rose->vl = 0; sk->sk_ack_backlog++; rose_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); rose_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); return 1; } static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name; int err; struct full_sockaddr_rose srose; struct sk_buff *skb; unsigned char *asmptr; int n, size, qbit = 0; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) return -EADDRNOTAVAIL; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); return -EPIPE; } if (rose->neighbour == NULL || rose->device == NULL) return -ENETUNREACH; 
if (usrose != NULL) { if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose)) return -EINVAL; memset(&srose, 0, sizeof(struct full_sockaddr_rose)); memcpy(&srose, usrose, msg->msg_namelen); if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 || ax25cmp(&rose->dest_call, &srose.srose_call) != 0) return -EISCONN; if (srose.srose_ndigis != rose->dest_ndigis) return -EISCONN; if (srose.srose_ndigis == rose->dest_ndigis) { for (n = 0 ; n < srose.srose_ndigis ; n++) if (ax25cmp(&rose->dest_digis[n], &srose.srose_digis[n])) return -EISCONN; } if (srose.srose_family != AF_ROSE) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose.srose_family = AF_ROSE; srose.srose_addr = rose->dest_addr; srose.srose_call = rose->dest_call; srose.srose_ndigis = rose->dest_ndigis; for (n = 0 ; n < rose->dest_ndigis ; n++) srose.srose_digis[n] = rose->dest_digis[n]; } /* Build a packet */ /* Sanity check the packet size */ if (len > 65535) return -EMSGSIZE; size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) return err; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN); /* * Put the data on the end */ skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); return err; } /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (rose->qbitincl) { qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the ROSE header */ asmptr = skb_push(skb, ROSE_MIN_LEN); /* Build a ROSE Network header */ asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; asmptr[1] = (rose->lci >> 0) & 0xFF; asmptr[2] = ROSE_DATA; if (qbit) asmptr[0] |= ROSE_Q_BIT; if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); return -ENOTCONN; } #ifdef M_BIT #define ROSE_PACLEN (256-ROSE_MIN_LEN) if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) { unsigned char header[ROSE_MIN_LEN]; struct sk_buff *skbn; int frontlen; int lg; /* Save a copy of the Header */ skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN); skb_pull(skb, ROSE_MIN_LEN); frontlen = skb_headroom(skb); while (skb->len > 0) { if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) { kfree_skb(skb); return err; } skbn->sk = sk; skbn->free = 1; skbn->arp = 1; skb_reserve(skbn, frontlen); lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN; /* Copy the user data */ skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); skb_pull(skb, lg); /* Duplicate the Header */ skb_push(skbn, ROSE_MIN_LEN); skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); if (skb->len > 0) skbn->data[2] |= M_BIT; skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ } skb->free = 1; kfree_skb(skb); } else { skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ } #else skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ #endif rose_kick(sk); return len; } static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! 
We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (msg->msg_name) { struct sockaddr_rose *srose; memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); srose = msg->msg_name; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; } static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); void __user *argp = (void __user *)arg; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; return put_user(amount, (unsigned int __user *) argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; return put_user(amount, (unsigned int __user *) argp); } case SIOCGSTAMP: return 
sock_get_timestamp(sk, (struct timeval __user *) argp); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *) argp); case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCRSCLRRT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return rose_rt_ioctl(cmd, argp); case SIOCRSGCAUSE: { struct rose_cause_struct rose_cause; rose_cause.cause = rose->cause; rose_cause.diagnostic = rose->diagnostic; return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0; } case SIOCRSSCAUSE: { struct rose_cause_struct rose_cause; if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct))) return -EFAULT; rose->cause = rose_cause.cause; rose->diagnostic = rose_cause.diagnostic; return 0; } case SIOCRSSL2CALL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address))) return -EFAULT; if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) return ax25_listen_register(&rose_callsign, NULL); return 0; case SIOCRSGL2CALL: return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? 
-EFAULT : 0; case SIOCRSACCEPT: if (rose->state == ROSE_STATE_5) { rose_write_internal(sk, ROSE_CALL_ACCEPTED); rose_start_idletimer(sk); rose->condition = 0x00; rose->vs = 0; rose->va = 0; rose->vr = 0; rose->vl = 0; rose->state = ROSE_STATE_3; } return 0; default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *rose_info_start(struct seq_file *seq, loff_t *pos) __acquires(rose_list_lock) { spin_lock_bh(&rose_list_lock); return seq_hlist_start_head(&rose_list, *pos); } static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &rose_list, pos); } static void rose_info_stop(struct seq_file *seq, void *v) __releases(rose_list_lock) { spin_unlock_bh(&rose_list_lock); } static int rose_info_show(struct seq_file *seq, void *v) { char buf[11], rsbuf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); else { struct sock *s = sk_entry(v); struct rose_sock *rose = rose_sk(s); const char *devname, *callsign; const struct net_device *dev = rose->device; if (!dev) devname = "???"; else devname = dev->name; seq_printf(seq, "%-10s %-9s ", rose2asc(rsbuf, &rose->dest_addr), ax2asc(buf, &rose->dest_call)); if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) callsign = "??????-?"; else callsign = ax2asc(buf, &rose->source_call); seq_printf(seq, "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", rose2asc(rsbuf, &rose->source_addr), callsign, devname, rose->lci & 0x0FFF, (rose->neighbour) ? rose->neighbour->number : 0, rose->state, rose->vs, rose->vr, rose->va, ax25_display_timer(&rose->timer) / HZ, rose->t1 / HZ, rose->t2 / HZ, rose->t3 / HZ, rose->hb / HZ, ax25_display_timer(&rose->idletimer) / (60 * HZ), rose->idle / (60 * HZ), sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); } return 0; } static const struct seq_operations rose_info_seqops = { .start = rose_info_start, .next = rose_info_next, .stop = rose_info_stop, .show = rose_info_show, }; static int rose_info_open(struct inode *inode, struct file *file) { return seq_open(file, &rose_info_seqops); } static const struct file_operations rose_info_fops = { .owner = THIS_MODULE, .open = rose_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static const struct net_proto_family rose_family_ops = { .family = PF_ROSE, .create = rose_create, .owner = THIS_MODULE, }; static const struct proto_ops rose_proto_ops = { .family = PF_ROSE, .owner = THIS_MODULE, .release = rose_release, .bind = rose_bind, .connect = rose_connect, .socketpair = sock_no_socketpair, .accept = rose_accept, .getname = rose_getname, .poll = datagram_poll, .ioctl = rose_ioctl, .listen = rose_listen, .shutdown = sock_no_shutdown, .setsockopt = rose_setsockopt, .getsockopt = rose_getsockopt, .sendmsg = rose_sendmsg, .recvmsg = rose_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct notifier_block rose_dev_notifier = { .notifier_call = rose_device_event, }; static struct net_device **dev_rose; static struct ax25_protocol rose_pid = { .pid = AX25_P_ROSE, .func = rose_route_frame }; static struct ax25_linkfail rose_linkfail_notifier = { .func = rose_link_failed }; static int __init rose_proto_init(void) { int i; int rc; if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); rc = -EINVAL; goto out; } rc = proto_register(&rose_proto, 0); if (rc != 0) goto out; rose_callsign = null_ax25_address; dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } for (i = 0; i < 
rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; sprintf(name, "rose%d", i); dev = alloc_netdev(0, name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); rc = -ENOMEM; goto fail; } rc = register_netdev(dev); if (rc) { printk(KERN_ERR "ROSE: netdevice registration failed\n"); free_netdev(dev); goto fail; } rose_set_lockdep_key(dev); dev_rose[i] = dev; } sock_register(&rose_family_ops); register_netdevice_notifier(&rose_dev_notifier); ax25_register_pid(&rose_pid); ax25_linkfail_register(&rose_linkfail_notifier); #ifdef CONFIG_SYSCTL rose_register_sysctl(); #endif rose_loopback_init(); rose_add_loopback_neigh(); proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops); proc_create("rose_neigh", S_IRUGO, init_net.proc_net, &rose_neigh_fops); proc_create("rose_nodes", S_IRUGO, init_net.proc_net, &rose_nodes_fops); proc_create("rose_routes", S_IRUGO, init_net.proc_net, &rose_routes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_rose[i]); free_netdev(dev_rose[i]); } kfree(dev_rose); out_proto_unregister: proto_unregister(&rose_proto); goto out; } module_init(rose_proto_init); module_param(rose_ndevs, int, 0); MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_ROSE); static void __exit rose_exit(void) { int i; remove_proc_entry("rose", init_net.proc_net); remove_proc_entry("rose_neigh", init_net.proc_net); remove_proc_entry("rose_nodes", init_net.proc_net); remove_proc_entry("rose_routes", init_net.proc_net); rose_loopback_clear(); rose_rt_free(); ax25_protocol_release(AX25_P_ROSE); ax25_linkfail_release(&rose_linkfail_notifier); if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); #ifdef CONFIG_SYSCTL rose_unregister_sysctl(); #endif 
unregister_netdevice_notifier(&rose_dev_notifier); sock_unregister(PF_ROSE); for (i = 0; i < rose_ndevs; i++) { struct net_device *dev = dev_rose[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_rose); proto_unregister(&rose_proto); } module_exit(rose_exit);
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name; size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { memset(srose, 0, msg->msg_namelen); srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; }
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (msg->msg_name) { struct sockaddr_rose *srose; memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); srose = msg->msg_name; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if (rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; }
{'added': [(1254, '\tif (msg->msg_name) {'), (1255, '\t\tstruct sockaddr_rose *srose;'), (1256, ''), (1257, '\t\tmemset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));'), (1258, '\t\tsrose = msg->msg_name;')], 'deleted': [(1219, '\tstruct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;'), (1255, '\tif (srose != NULL) {'), (1256, '\t\tmemset(srose, 0, msg->msg_namelen);')]}
5
3
1,218
7,568
49
369
9
https://github.com/torvalds/linux
CVE-2013-7266
CWE-20
215
spl_directory.c
C
spl_filesystem_object_free_storage
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2015 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/file.h" #include "ext/standard/php_string.h" #include "zend_compile.h" #include "zend_exceptions.h" #include "zend_interfaces.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_directory.h" #include "spl_exceptions.h" #include "php.h" #include "fopen_wrappers.h" #include "ext/standard/basic_functions.h" #include "ext/standard/php_filestat.h" #define SPL_HAS_FLAG(flags, test_flag) ((flags & test_flag) ? 
1 : 0) /* declare the class handlers */ static zend_object_handlers spl_filesystem_object_handlers; /* includes handler to validate object state when retrieving methods */ static zend_object_handlers spl_filesystem_object_check_handlers; /* decalre the class entry */ PHPAPI zend_class_entry *spl_ce_SplFileInfo; PHPAPI zend_class_entry *spl_ce_DirectoryIterator; PHPAPI zend_class_entry *spl_ce_FilesystemIterator; PHPAPI zend_class_entry *spl_ce_RecursiveDirectoryIterator; PHPAPI zend_class_entry *spl_ce_GlobIterator; PHPAPI zend_class_entry *spl_ce_SplFileObject; PHPAPI zend_class_entry *spl_ce_SplTempFileObject; static void spl_filesystem_file_free_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (intern->u.file.current_line) { efree(intern->u.file.current_line); intern->u.file.current_line = NULL; } if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); intern->u.file.current_zval = NULL; } } /* }}} */ static void spl_filesystem_object_free_storage(void *object TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)object; if (intern->oth_handler && intern->oth_handler->dtor) { intern->oth_handler->dtor(intern TSRMLS_CC); } zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->_path) { efree(intern->_path); } if (intern->file_name) { efree(intern->file_name); } switch(intern->type) { case SPL_FS_INFO: break; case SPL_FS_DIR: if (intern->u.dir.dirp) { php_stream_close(intern->u.dir.dirp); intern->u.dir.dirp = NULL; } if (intern->u.dir.sub_path) { efree(intern->u.dir.sub_path); } break; case SPL_FS_FILE: if (intern->u.file.stream) { if (intern->u.file.zcontext) { /* zend_list_delref(Z_RESVAL_P(intern->zcontext));*/ } if (!intern->u.file.stream->is_persistent) { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE); } else { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE_PERSISTENT); } if (intern->u.file.open_mode) { efree(intern->u.file.open_mode); } if (intern->orig_path) { 
efree(intern->orig_path); } } spl_filesystem_file_free_line(intern TSRMLS_CC); break; } { zend_object_iterator *iterator; iterator = (zend_object_iterator*) spl_filesystem_object_to_iterator(intern); if (iterator->data != NULL) { iterator->data = NULL; iterator->funcs->dtor(iterator TSRMLS_CC); } } efree(object); } /* }}} */ /* {{{ spl_ce_dir_object_new */ /* creates the object by - allocating memory - initializing the object members - storing the object - setting it's handlers called from - clone - new */ static zend_object_value spl_filesystem_object_new_ex(zend_class_entry *class_type, spl_filesystem_object **obj TSRMLS_DC) { zend_object_value retval; spl_filesystem_object *intern; intern = emalloc(sizeof(spl_filesystem_object)); memset(intern, 0, sizeof(spl_filesystem_object)); /* intern->type = SPL_FS_INFO; done by set 0 */ intern->file_class = spl_ce_SplFileObject; intern->info_class = spl_ce_SplFileInfo; if (obj) *obj = intern; zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_filesystem_object_free_storage, NULL TSRMLS_CC); retval.handlers = &spl_filesystem_object_handlers; return retval; } /* }}} */ /* {{{ spl_filesystem_object_new */ /* See spl_filesystem_object_new_ex */ static zend_object_value spl_filesystem_object_new(zend_class_entry *class_type TSRMLS_DC) { return spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC); } /* }}} */ /* {{{ spl_filesystem_object_new_ex */ static zend_object_value spl_filesystem_object_new_check(zend_class_entry *class_type TSRMLS_DC) { zend_object_value ret = spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC); ret.handlers = &spl_filesystem_object_check_handlers; return ret; } /* }}} */ PHPAPI char* spl_filesystem_object_get_path(spl_filesystem_object *intern, int *len TSRMLS_DC) /* {{{ */ { #ifdef HAVE_GLOB if 
(intern->type == SPL_FS_DIR) { if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { return php_glob_stream_get_path(intern->u.dir.dirp, 0, len); } } #endif if (len) { *len = intern->_path_len; } return intern->_path; } /* }}} */ static inline void spl_filesystem_object_get_file_name(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? '/' : DEFAULT_SLASH; switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: if (!intern->file_name) { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Object not initialized"); } break; case SPL_FS_DIR: if (intern->file_name) { efree(intern->file_name); } intern->file_name_len = spprintf(&intern->file_name, 0, "%s%c%s", spl_filesystem_object_get_path(intern, NULL TSRMLS_CC), slash, intern->u.dir.entry.d_name); break; } } /* }}} */ static int spl_filesystem_dir_read(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (!intern->u.dir.dirp || !php_stream_readdir(intern->u.dir.dirp, &intern->u.dir.entry)) { intern->u.dir.entry.d_name[0] = '\0'; return 0; } else { return 1; } } /* }}} */ #define IS_SLASH_AT(zs, pos) (IS_SLASH(zs[pos])) static inline int spl_filesystem_is_dot(const char * d_name) /* {{{ */ { return !strcmp(d_name, ".") || !strcmp(d_name, ".."); } /* }}} */ /* {{{ spl_filesystem_dir_open */ /* open a directory resource */ static void spl_filesystem_dir_open(spl_filesystem_object* intern, char *path TSRMLS_DC) { int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS); intern->type = SPL_FS_DIR; intern->_path_len = strlen(path); intern->u.dir.dirp = php_stream_opendir(path, REPORT_ERRORS, FG(default_context)); if (intern->_path_len > 1 && IS_SLASH_AT(path, intern->_path_len-1)) { intern->_path = estrndup(path, --intern->_path_len); } else { intern->_path = estrndup(path, intern->_path_len); } intern->u.dir.index = 0; if (EG(exception) || intern->u.dir.dirp == NULL) { intern->u.dir.entry.d_name[0] = '\0'; if (!EG(exception)) { /* open 
failed w/out notice (turned to exception due to EH_THROW) */ zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Failed to open directory \"%s\"", path); } } else { do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } } /* }}} */ static int spl_filesystem_file_open(spl_filesystem_object *intern, int use_include_path, int silent TSRMLS_DC) /* {{{ */ { zval tmp; intern->type = SPL_FS_FILE; php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, &tmp TSRMLS_CC); if (Z_LVAL(tmp)) { intern->u.file.open_mode = NULL; intern->file_name = NULL; zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Cannot use SplFileObject with directories"); return FAILURE; } intern->u.file.context = php_stream_context_from_zval(intern->u.file.zcontext, 0); intern->u.file.stream = php_stream_open_wrapper_ex(intern->file_name, intern->u.file.open_mode, (use_include_path ? USE_PATH : 0) | REPORT_ERRORS, NULL, intern->u.file.context); if (!intern->file_name_len || !intern->u.file.stream) { if (!EG(exception)) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot open file '%s'", intern->file_name_len ? 
intern->file_name : ""); } intern->file_name = NULL; /* until here it is not a copy */ intern->u.file.open_mode = NULL; return FAILURE; } if (intern->u.file.zcontext) { zend_list_addref(Z_RESVAL_P(intern->u.file.zcontext)); } if (intern->file_name_len > 1 && IS_SLASH_AT(intern->file_name, intern->file_name_len-1)) { intern->file_name_len--; } intern->orig_path = estrndup(intern->u.file.stream->orig_path, strlen(intern->u.file.stream->orig_path)); intern->file_name = estrndup(intern->file_name, intern->file_name_len); intern->u.file.open_mode = estrndup(intern->u.file.open_mode, intern->u.file.open_mode_len); /* avoid reference counting in debug mode, thus do it manually */ ZVAL_RESOURCE(&intern->u.file.zresource, php_stream_get_resource_id(intern->u.file.stream)); Z_SET_REFCOUNT(intern->u.file.zresource, 1); intern->u.file.delimiter = ','; intern->u.file.enclosure = '"'; intern->u.file.escape = '\\'; zend_hash_find(&intern->std.ce->function_table, "getcurrentline", sizeof("getcurrentline"), (void **) &intern->u.file.func_getCurr); return SUCCESS; } /* }}} */ /* {{{ spl_filesystem_object_clone */ /* Local zend_object_value creation (on stack) Load the 'other' object Create a new empty object (See spl_filesystem_object_new_ex) Open the directory Clone other members (properties) */ static zend_object_value spl_filesystem_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_filesystem_object *intern; spl_filesystem_object *source; int index, skip_dots; old_object = zend_objects_get_address(zobject TSRMLS_CC); source = (spl_filesystem_object*)old_object; new_obj_val = spl_filesystem_object_new_ex(old_object->ce, &intern TSRMLS_CC); new_object = &intern->std; intern->flags = source->flags; switch (source->type) { case SPL_FS_INFO: intern->_path_len = source->_path_len; intern->_path = estrndup(source->_path, source->_path_len); 
intern->file_name_len = source->file_name_len; intern->file_name = estrndup(source->file_name, intern->file_name_len); break; case SPL_FS_DIR: spl_filesystem_dir_open(intern, source->_path TSRMLS_CC); /* read until we hit the position in which we were before */ skip_dots = SPL_HAS_FLAG(source->flags, SPL_FILE_DIR_SKIPDOTS); for(index = 0; index < source->u.dir.index; ++index) { do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } intern->u.dir.index = index; break; case SPL_FS_FILE: php_error_docref(NULL TSRMLS_CC, E_ERROR, "An object of class %s cannot be cloned", old_object->ce->name); break; } intern->file_class = source->file_class; intern->info_class = source->info_class; intern->oth = source->oth; intern->oth_handler = source->oth_handler; zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC); if (intern->oth_handler && intern->oth_handler->clone) { intern->oth_handler->clone(source, intern TSRMLS_CC); } return new_obj_val; } /* }}} */ void spl_filesystem_info_set_filename(spl_filesystem_object *intern, char *path, int len, int use_copy TSRMLS_DC) /* {{{ */ { char *p1, *p2; if (intern->file_name) { efree(intern->file_name); } intern->file_name = use_copy ? estrndup(path, len) : path; intern->file_name_len = len; while(IS_SLASH_AT(intern->file_name, intern->file_name_len-1) && intern->file_name_len > 1) { intern->file_name[intern->file_name_len-1] = 0; intern->file_name_len--; } p1 = strrchr(intern->file_name, '/'); #if defined(PHP_WIN32) || defined(NETWARE) p2 = strrchr(intern->file_name, '\\'); #else p2 = 0; #endif if (p1 || p2) { intern->_path_len = (p1 > p2 ? 
p1 : p2) - intern->file_name; } else { intern->_path_len = 0; } if (intern->_path) { efree(intern->_path); } intern->_path = estrndup(path, intern->_path_len); } /* }}} */ static spl_filesystem_object * spl_filesystem_object_create_info(spl_filesystem_object *source, char *file_path, int file_path_len, int use_copy, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern; zval *arg1; zend_error_handling error_handling; if (!file_path || !file_path_len) { #if defined(PHP_WIN32) zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot create SplFileInfo for empty path"); if (file_path && !use_copy) { efree(file_path); } #else if (file_path && !use_copy) { efree(file_path); } file_path_len = 1; file_path = "/"; #endif return NULL; } zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); ce = ce ? ce : source->info_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; if (ce->constructor->common.scope != spl_ce_SplFileInfo) { MAKE_STD_ZVAL(arg1); ZVAL_STRINGL(arg1, file_path, file_path_len, use_copy); zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1); zval_ptr_dtor(&arg1); } else { spl_filesystem_info_set_filename(intern, file_path, file_path_len, use_copy TSRMLS_CC); } zend_restore_error_handling(&error_handling TSRMLS_CC); return intern; } /* }}} */ static spl_filesystem_object * spl_filesystem_object_create_type(int ht, spl_filesystem_object *source, int type, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern; zend_bool use_include_path = 0; zval *arg1, *arg2; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); switch (source->type) { case SPL_FS_INFO: case SPL_FS_FILE: break; case SPL_FS_DIR: if 
(!source->u.dir.entry.d_name[0]) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Could not open file"); zend_restore_error_handling(&error_handling TSRMLS_CC); return NULL; } } switch (type) { case SPL_FS_INFO: ce = ce ? ce : source->info_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; spl_filesystem_object_get_file_name(source TSRMLS_CC); if (ce->constructor->common.scope != spl_ce_SplFileInfo) { MAKE_STD_ZVAL(arg1); ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1); zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1); zval_ptr_dtor(&arg1); } else { intern->file_name = estrndup(source->file_name, source->file_name_len); intern->file_name_len = source->file_name_len; intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC); intern->_path = estrndup(intern->_path, intern->_path_len); } break; case SPL_FS_FILE: ce = ce ? 
ce : source->file_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; spl_filesystem_object_get_file_name(source TSRMLS_CC); if (ce->constructor->common.scope != spl_ce_SplFileObject) { MAKE_STD_ZVAL(arg1); MAKE_STD_ZVAL(arg2); ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1); ZVAL_STRINGL(arg2, "r", 1, 1); zend_call_method_with_2_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1, arg2); zval_ptr_dtor(&arg1); zval_ptr_dtor(&arg2); } else { intern->file_name = source->file_name; intern->file_name_len = source->file_name_len; intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC); intern->_path = estrndup(intern->_path, intern->_path_len); intern->u.file.open_mode = "r"; intern->u.file.open_mode_len = 1; if (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr", &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); intern->u.file.open_mode = NULL; intern->file_name = NULL; zval_dtor(return_value); Z_TYPE_P(return_value) = IS_NULL; return NULL; } if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); zval_dtor(return_value); Z_TYPE_P(return_value) = IS_NULL; return NULL; } } break; case SPL_FS_DIR: zend_restore_error_handling(&error_handling TSRMLS_CC); zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Operation not supported"); return NULL; } zend_restore_error_handling(&error_handling TSRMLS_CC); return NULL; } /* }}} */ static int spl_filesystem_is_invalid_or_dot(const char * d_name) /* {{{ */ { return d_name[0] == '\0' || spl_filesystem_is_dot(d_name); } /* }}} */ static char *spl_filesystem_object_get_pathname(spl_filesystem_object *intern, int *len TSRMLS_DC) { /* {{{ 
*/ switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: *len = intern->file_name_len; return intern->file_name; case SPL_FS_DIR: if (intern->u.dir.entry.d_name[0]) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); *len = intern->file_name_len; return intern->file_name; } } *len = 0; return NULL; } /* }}} */ static HashTable* spl_filesystem_object_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(obj TSRMLS_CC); HashTable *rv; zval *tmp, zrv; char *pnstr, *path; int pnlen, path_len; char stmp[2]; *is_temp = 1; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } ALLOC_HASHTABLE(rv); ZEND_INIT_SYMTABLE_EX(rv, zend_hash_num_elements(intern->std.properties) + 3, 0); INIT_PZVAL(&zrv); Z_ARRVAL(zrv) = rv; zend_hash_copy(rv, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "pathName", sizeof("pathName")-1, &pnlen TSRMLS_CC); path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, path, path_len, 1); efree(pnstr); if (intern->file_name) { pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "fileName", sizeof("fileName")-1, &pnlen TSRMLS_CC); spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name + path_len + 1, intern->file_name_len - (path_len + 1), 1); } else { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name, intern->file_name_len, 1); } efree(pnstr); } if (intern->type == SPL_FS_DIR) { #ifdef HAVE_GLOB pnstr = spl_gen_private_prop_name(spl_ce_DirectoryIterator, "glob", sizeof("glob")-1, &pnlen TSRMLS_CC); if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->_path, intern->_path_len, 1); } else { 
add_assoc_bool_ex(&zrv, pnstr, pnlen+1, 0); } efree(pnstr); #endif pnstr = spl_gen_private_prop_name(spl_ce_RecursiveDirectoryIterator, "subPathName", sizeof("subPathName")-1, &pnlen TSRMLS_CC); if (intern->u.dir.sub_path) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1); } else { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, "", 0, 1); } efree(pnstr); } if (intern->type == SPL_FS_FILE) { pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "openMode", sizeof("openMode")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.file.open_mode, intern->u.file.open_mode_len, 1); efree(pnstr); stmp[1] = '\0'; stmp[0] = intern->u.file.delimiter; pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "delimiter", sizeof("delimiter")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1); efree(pnstr); stmp[0] = intern->u.file.enclosure; pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "enclosure", sizeof("enclosure")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1); efree(pnstr); } return rv; } /* }}} */ zend_function *spl_filesystem_object_get_method_check(zval **object_ptr, char *method, int method_len, const struct _zend_literal *key TSRMLS_DC) /* {{{ */ { spl_filesystem_object *fsobj = zend_object_store_get_object(*object_ptr TSRMLS_CC); if (fsobj->u.dir.entry.d_name[0] == '\0' && fsobj->orig_path == NULL) { method = "_bad_state_ex"; method_len = sizeof("_bad_state_ex") - 1; key = NULL; } return zend_get_std_object_handlers()->get_method(object_ptr, method, method_len, key TSRMLS_CC); } /* }}} */ #define DIT_CTOR_FLAGS 0x00000001 #define DIT_CTOR_GLOB 0x00000002 void spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAMETERS, long ctor_flags) /* {{{ */ { spl_filesystem_object *intern; char *path; int parsed, len; long flags; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, 
&error_handling TSRMLS_CC); if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_FLAGS)) { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &path, &len, &flags); } else { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_SELF; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len); } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_SKIPDOTS)) { flags |= SPL_FILE_DIR_SKIPDOTS; } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_UNIXPATHS)) { flags |= SPL_FILE_DIR_UNIXPATHS; } if (parsed == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (!len) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Directory name must not be empty."); zend_restore_error_handling(&error_handling TSRMLS_CC); return; } intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (intern->_path) { /* object is alreay initialized */ zend_restore_error_handling(&error_handling TSRMLS_CC); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Directory object is already initialized"); return; } intern->flags = flags; #ifdef HAVE_GLOB if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_GLOB) && strstr(path, "glob://") != path) { spprintf(&path, 0, "glob://%s", path); spl_filesystem_dir_open(intern, path TSRMLS_CC); efree(path); } else #endif { spl_filesystem_dir_open(intern, path TSRMLS_CC); } intern->u.dir.is_recursive = instanceof_function(intern->std.ce, spl_ce_RecursiveDirectoryIterator TSRMLS_CC) ? 1 : 0; zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void DirectoryIterator::__construct(string path) Cronstructs a new dir iterator from a path. 
*/
SPL_METHOD(DirectoryIterator, __construct)
{
	/* Delegate to the shared constructor helper; ctor_flags == 0 selects
	 * the plain DirectoryIterator form (single path argument, no flags). */
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */

/* {{{ proto void DirectoryIterator::rewind()
   Rewind dir back to the start */
SPL_METHOD(DirectoryIterator, rewind)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	intern->u.dir.index = 0;
	if (intern->u.dir.dirp) {
		php_stream_rewinddir(intern->u.dir.dirp);
	}
	/* Pre-read the first entry so valid()/current() work immediately. */
	spl_filesystem_dir_read(intern TSRMLS_CC);
}
/* }}} */

/* {{{ proto string DirectoryIterator::key()
   Return current dir entry */
SPL_METHOD(DirectoryIterator, key)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* Key is the zero-based position in the directory stream; FALSE when
	 * no directory stream is open. */
	if (intern->u.dir.dirp) {
		RETURN_LONG(intern->u.dir.index);
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto DirectoryIterator DirectoryIterator::current()
   Return this (needed for Iterator interface) */
SPL_METHOD(DirectoryIterator, current)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	RETURN_ZVAL(getThis(), 1, 0);
}
/* }}} */

/* {{{ proto void DirectoryIterator::next()
   Move to next entry */
SPL_METHOD(DirectoryIterator, next)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	intern->u.dir.index++;
	/* Skip '.' and '..' entries when the SKIPDOTS flag is set. */
	do {
		spl_filesystem_dir_read(intern TSRMLS_CC);
	} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
	/* Invalidate the cached full pathname; it is rebuilt lazily. */
	if (intern->file_name) {
		efree(intern->file_name);
		intern->file_name = NULL;
	}
}
/* }}} */

/* {{{ proto void DirectoryIterator::seek(int position)
   Seek to the given position */
SPL_METHOD(DirectoryIterator, seek)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zval *retval = NULL;
	long pos;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &pos) == FAILURE) {
		return;
	}

	if (intern->u.dir.index > pos) {
		/* we first rewind */
		zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_rewind, "rewind", &retval);
		if (retval) {
			zval_ptr_dtor(&retval);
			retval = NULL;
		}
	}

	/* Step forward through the (possibly user-overridden) valid()/next()
	 * methods until the requested position is reached or iteration ends. */
	while (intern->u.dir.index < pos) {
		int valid = 0;
		zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_valid, "valid", &retval);
		if (retval) {
			valid = zend_is_true(retval);
			zval_ptr_dtor(&retval);
			retval = NULL;
		}
		if (!valid) {
			break;
		}
		zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_next, "next", &retval);
		if (retval) {
			zval_ptr_dtor(&retval);
		}
	}
}
/* }}} */

/* {{{ proto string DirectoryIterator::valid()
   Check whether dir contains more entries */
SPL_METHOD(DirectoryIterator, valid)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* An empty d_name marks the end of the directory stream. */
	RETURN_BOOL(intern->u.dir.entry.d_name[0] != '\0');
}
/* }}} */

/* {{{ proto string SplFileInfo::getPath()
   Return the path */
SPL_METHOD(SplFileInfo, getPath)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *path;
	int path_len;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	path = spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);
	RETURN_STRINGL(path, path_len, 1);
}
/* }}} */

/* {{{ proto string SplFileInfo::getFilename()
   Return filename only */
SPL_METHOD(SplFileInfo, getFilename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int path_len;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);

	/* Strip the leading directory component (and its separator) when the
	 * stored file_name contains one. */
	if (path_len && path_len < intern->file_name_len) {
		RETURN_STRINGL(intern->file_name + path_len + 1, intern->file_name_len - (path_len + 1), 1);
	} else {
		RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
	}
}
/* }}} */

/* {{{ proto string DirectoryIterator::getFilename()
   Return filename of current dir entry */
SPL_METHOD(DirectoryIterator, getFilename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_STRING(intern->u.dir.entry.d_name, 1);
}
/* }}} */

/* {{{ proto string SplFileInfo::getExtension()
   Returns file extension component of path */
SPL_METHOD(SplFileInfo, getExtension)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *fname = NULL;
	const char *p;
	size_t flen;
	int path_len, idx;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);

	if (path_len && path_len < intern->file_name_len) {
		fname = intern->file_name + path_len + 1;
		flen = intern->file_name_len - (path_len + 1);
	} else {
		fname = intern->file_name;
		flen = intern->file_name_len;
	}

	/* php_basename() allocates a fresh buffer; every exit path below must
	 * efree(fname). */
	php_basename(fname, flen, NULL, 0, &fname, &flen TSRMLS_CC);

	/* Extension = text after the last '.' of the basename, or "" if none. */
	p = zend_memrchr(fname, '.', flen);
	if (p) {
		idx = p - fname;
		RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1);
		efree(fname);
		return;
	} else {
		if (fname) {
			efree(fname);
		}
		RETURN_EMPTY_STRING();
	}
}
/* }}}*/

/* {{{ proto string DirectoryIterator::getExtension()
   Returns the file extension component of path */
SPL_METHOD(DirectoryIterator, getExtension)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *fname = NULL;
	const char *p;
	size_t flen;
	int idx;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), NULL, 0, &fname, &flen TSRMLS_CC);

	p = zend_memrchr(fname, '.', flen);
	if (p) {
		idx = p - fname;
		RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1);
		efree(fname);
		return;
	} else {
		if (fname) {
			efree(fname);
		}
		RETURN_EMPTY_STRING();
	}
}
/* }}} */

/* {{{ proto string SplFileInfo::getBasename([string $suffix]) U
   Returns filename component of path */
SPL_METHOD(SplFileInfo, getBasename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *fname, *suffix = 0;
	size_t flen;
	int slen = 0, path_len;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) {
		return;
	}

	spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);

	if (path_len && path_len < intern->file_name_len) {
		fname = intern->file_name + path_len + 1;
		flen = intern->file_name_len - (path_len + 1);
	} else {
		fname = intern->file_name;
		flen = intern->file_name_len;
	}

	php_basename(fname, flen, suffix, slen, &fname, &flen TSRMLS_CC);

	/* Ownership of the allocated basename transfers to the return value
	 * (duplicate flag 0). */
	RETURN_STRINGL(fname, flen, 0);
}
/* }}}*/

/* {{{ proto string DirectoryIterator::getBasename([string $suffix]) U
   Returns filename component of current dir entry */
SPL_METHOD(DirectoryIterator, getBasename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *suffix = 0, *fname;
	int slen = 0;
	size_t flen;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) {
		return;
	}

	php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), suffix, slen, &fname, &flen TSRMLS_CC);

	RETURN_STRINGL(fname, flen, 0);
}
/* }}} */

/* {{{ proto string SplFileInfo::getPathname()
   Return path and filename */
SPL_METHOD(SplFileInfo, getPathname)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *path;
	int path_len;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC);
	if (path != NULL) {
		RETURN_STRINGL(path, path_len, 1);
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto string FilesystemIterator::key()
   Return getPathname() or getFilename() depending on flags */
SPL_METHOD(FilesystemIterator, key)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (SPL_FILE_DIR_KEY(intern, SPL_FILE_DIR_KEY_AS_FILENAME)) {
		RETURN_STRING(intern->u.dir.entry.d_name, 1);
	} else {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
	}
}
/* }}} */

/* {{{ proto string FilesystemIterator::current()
   Return getFilename(), getFileInfo() or $this depending on flags */
SPL_METHOD(FilesystemIterator, current)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
	} else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value TSRMLS_CC);
	} else {
		RETURN_ZVAL(getThis(), 1, 0); /*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/
	}
}
/* }}} */

/* {{{ proto bool DirectoryIterator::isDot()
   Returns true if current entry is '.' or '..' */
SPL_METHOD(DirectoryIterator, isDot)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_BOOL(spl_filesystem_is_dot(intern->u.dir.entry.d_name));
}
/* }}} */

/* {{{ proto void SplFileInfo::__construct(string file_name)
   Constructs a new SplFileInfo from a path. */
/* zend_replace_error_handling() is used to throw exceptions in case
   the constructor fails. Here we use this to ensure the object
   has a valid directory resource.
   When the constructor gets called the object is already created
   by the engine, so we must only call 'additional' initializations.
 */
SPL_METHOD(SplFileInfo, __construct)
{
	spl_filesystem_object *intern;
	char *path;
	int len;
	zend_error_handling error_handling;

	/* Report failures as RuntimeException rather than warnings. */
	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len) == FAILURE) {
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	spl_filesystem_info_set_filename(intern, path, len, 1 TSRMLS_CC);

	zend_restore_error_handling(&error_handling TSRMLS_CC);

	/* intern->type = SPL_FS_INFO; already set */
}
/* }}} */

/* {{{ FileInfoFunction */
/* Generates a stat-style method that forwards to php_stat() with the
 * given FS_* operation on the object's file name; errors are raised as
 * RuntimeException while the handler is replaced. */
#define FileInfoFunction(func_name, func_num) \
SPL_METHOD(SplFileInfo, func_name) \
{ \
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \
	zend_error_handling error_handling; \
	if (zend_parse_parameters_none() == FAILURE) { \
		return; \
	} \
 \
	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);\
	spl_filesystem_object_get_file_name(intern TSRMLS_CC); \
	php_stat(intern->file_name, intern->file_name_len, func_num, return_value TSRMLS_CC); \
	zend_restore_error_handling(&error_handling TSRMLS_CC); \
}
/* }}} */

/* {{{ proto int SplFileInfo::getPerms()
   Get file permissions */
FileInfoFunction(getPerms, FS_PERMS)
/* }}} */

/* {{{ proto int SplFileInfo::getInode()
   Get file inode */
FileInfoFunction(getInode, FS_INODE)
/* }}} */

/* {{{ proto int SplFileInfo::getSize()
   Get file size */
FileInfoFunction(getSize, FS_SIZE)
/* }}} */

/* {{{ proto int SplFileInfo::getOwner()
   Get file owner */
FileInfoFunction(getOwner, FS_OWNER)
/* }}} */

/* {{{ proto int SplFileInfo::getGroup()
   Get file group */
FileInfoFunction(getGroup, FS_GROUP)
/* }}} */

/* {{{ proto int SplFileInfo::getATime()
   Get last access time of file */
FileInfoFunction(getATime, FS_ATIME)
/* }}} */

/* {{{ proto int SplFileInfo::getMTime()
   Get last modification time of file */
FileInfoFunction(getMTime, FS_MTIME)
/* }}} */

/* {{{ proto int SplFileInfo::getCTime()
   Get inode modification time of file */
FileInfoFunction(getCTime, FS_CTIME)
/* }}} */

/* {{{ proto string SplFileInfo::getType()
   Get file type */
FileInfoFunction(getType, FS_TYPE)
/* }}} */

/* {{{ proto bool SplFileInfo::isWritable()
   Returns true if file can be written */
FileInfoFunction(isWritable, FS_IS_W)
/* }}} */

/* {{{ proto bool SplFileInfo::isReadable()
   Returns true if file can be read */
FileInfoFunction(isReadable, FS_IS_R)
/* }}} */

/* {{{ proto bool SplFileInfo::isExecutable()
   Returns true if file is executable */
FileInfoFunction(isExecutable, FS_IS_X)
/* }}} */

/* {{{ proto bool SplFileInfo::isFile()
   Returns true if file is a regular file */
FileInfoFunction(isFile, FS_IS_FILE)
/* }}} */

/* {{{ proto bool SplFileInfo::isDir()
   Returns true if file is directory */
FileInfoFunction(isDir, FS_IS_DIR)
/* }}} */

/* {{{ proto bool SplFileInfo::isLink()
   Returns true if file is symbolic link */
FileInfoFunction(isLink, FS_IS_LINK)
/* }}} */

/* {{{ proto string SplFileInfo::getLinkTarget() U
   Return the target of a symbolic link */
SPL_METHOD(SplFileInfo, getLinkTarget)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int ret;
	char buff[MAXPATHLEN];
	zend_error_handling error_handling;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

#if defined(PHP_WIN32) || HAVE_SYMLINK
	if (intern->file_name == NULL) {
		/* NOTE(review): these two early RETURN_FALSE paths return without
		 * calling zend_restore_error_handling() — looks like the replaced
		 * handler leaks out of this method; confirm against upstream fix. */
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty filename");
		RETURN_FALSE;
	} else if (!IS_ABSOLUTE_PATH(intern->file_name, intern->file_name_len)) {
		char expanded_path[MAXPATHLEN];
		if (!expand_filepath_with_mode(intern->file_name, expanded_path, NULL, 0, CWD_EXPAND TSRMLS_CC)) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "No such file or directory");
			RETURN_FALSE;
		}
		ret = php_sys_readlink(expanded_path, buff, MAXPATHLEN - 1);
	} else {
		ret = php_sys_readlink(intern->file_name, buff, MAXPATHLEN-1);
	}
#else
	ret = -1; /* always fail if not implemented */
#endif

	if (ret == -1) {
		zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Unable to read link %s, error: %s", intern->file_name, strerror(errno));
		RETVAL_FALSE;
	} else {
		/* Append NULL to the end of the string */
		buff[ret] = '\0';
		RETVAL_STRINGL(buff, ret, 1);
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

#if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS)
/* {{{ proto string SplFileInfo::getRealPath()
   Return the resolved path */
SPL_METHOD(SplFileInfo, getRealPath)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char buff[MAXPATHLEN];
	char *filename;
	zend_error_handling error_handling;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

	/* For directory iterators, materialize the current entry's full name
	 * if it has not been built yet. */
	if (intern->type == SPL_FS_DIR && !intern->file_name && intern->u.dir.entry.d_name[0]) {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
	}

	/* Prefer the originally supplied path over the derived file name. */
	if (intern->orig_path) {
		filename = intern->orig_path;
	} else {
		filename = intern->file_name;
	}

	if (filename && VCWD_REALPATH(filename, buff)) {
#ifdef ZTS
		if (VCWD_ACCESS(buff, F_OK)) {
			RETVAL_FALSE;
		} else
#endif
		RETVAL_STRING(buff, 1);
	} else {
		RETVAL_FALSE;
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */
#endif

/* {{{ proto SplFileObject SplFileInfo::openFile([string mode = 'r' [, bool use_include_path [, resource context]]])
   Open the current file */
SPL_METHOD(SplFileInfo, openFile)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	spl_filesystem_object_create_type(ht, intern, SPL_FS_FILE, NULL, return_value TSRMLS_CC);
}
/* }}} */

/* {{{ proto void SplFileInfo::setFileClass([string class_name])
   Class to use in openFile() */
SPL_METHOD(SplFileInfo, setFileClass)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = spl_ce_SplFileObject;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	/* "C" requires the argument to be a class derived from the default. */
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		intern->file_class = ce;
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto void SplFileInfo::setInfoClass([string class_name])
   Class to use in getFileInfo(), getPathInfo() */
SPL_METHOD(SplFileInfo, setInfoClass)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = spl_ce_SplFileInfo;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		intern->info_class = ce;
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto SplFileInfo SplFileInfo::getFileInfo([string $class_name])
   Get/copy file info */
SPL_METHOD(SplFileInfo, getFileInfo)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = intern->info_class;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		spl_filesystem_object_create_type(ht, intern, SPL_FS_INFO, ce, return_value TSRMLS_CC);
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto SplFileInfo SplFileInfo::getPathInfo([string $class_name])
   Get/copy file info */
SPL_METHOD(SplFileInfo, getPathInfo)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = intern->info_class;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		int path_len;
		char *path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC);
		if (path) {
			/* Build an info object for the directory part of the path. */
			char *dpath = estrndup(path, path_len);
			path_len = php_dirname(dpath, path_len);
			spl_filesystem_object_create_info(intern, dpath, path_len, 1, ce, return_value TSRMLS_CC);
			efree(dpath);
		}
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ */
SPL_METHOD(SplFileInfo, _bad_state_ex)
{
	zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC,
		"The parent constructor was not called: the object is in an "
		"invalid state ");
}
/* }}} */

/* {{{ proto void FilesystemIterator::__construct(string path [, int flags])
   Constructs a new dir iterator from a path.
 */
SPL_METHOD(FilesystemIterator, __construct)
{
	/* FilesystemIterator accepts a flags argument and skips dot entries. */
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS | SPL_FILE_DIR_SKIPDOTS);
}
/* }}} */

/* {{{ proto void FilesystemIterator::rewind()
   Rewind dir back to the start */
SPL_METHOD(FilesystemIterator, rewind)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	intern->u.dir.index = 0;
	if (intern->u.dir.dirp) {
		php_stream_rewinddir(intern->u.dir.dirp);
	}
	do {
		spl_filesystem_dir_read(intern TSRMLS_CC);
	} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
}
/* }}} */

/* {{{ proto int FilesystemIterator::getFlags()
   Get handling flags */
SPL_METHOD(FilesystemIterator, getFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_LONG(intern->flags & (SPL_FILE_DIR_KEY_MODE_MASK | SPL_FILE_DIR_CURRENT_MODE_MASK | SPL_FILE_DIR_OTHERS_MASK));
}
/* }}} */

/* {{{ proto void FilesystemIterator::setFlags(long $flags)
   Set handling flags */
SPL_METHOD(FilesystemIterator, setFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long flags;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &flags) == FAILURE) {
		return;
	}

	/* Only the key/current/others mode bits are caller-settable; other
	 * internal flag bits are preserved. */
	intern->flags &= ~(SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK);
	intern->flags |= ((SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK) & flags);
}
/* }}} */

/* {{{ proto bool RecursiveDirectoryIterator::hasChildren([bool $allow_links = false])
   Returns whether current entry is a directory and not '.' or '..' */
SPL_METHOD(RecursiveDirectoryIterator, hasChildren)
{
	zend_bool allow_links = 0;
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &allow_links) == FAILURE) {
		return;
	}

	if (spl_filesystem_is_invalid_or_dot(intern->u.dir.entry.d_name)) {
		RETURN_FALSE;
	} else {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		/* Unless explicitly allowed, refuse to descend into symlinks. */
		if (!allow_links && !(intern->flags & SPL_FILE_DIR_FOLLOW_SYMLINKS)) {
			php_stat(intern->file_name, intern->file_name_len, FS_IS_LINK, return_value TSRMLS_CC);
			if (zend_is_true(return_value)) {
				RETURN_FALSE;
			}
		}
		php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, return_value TSRMLS_CC);
	}
}
/* }}} */

/* {{{ proto RecursiveDirectoryIterator DirectoryIterator::getChildren()
   Returns an iterator for the current entry if it is a directory */
SPL_METHOD(RecursiveDirectoryIterator, getChildren)
{
	zval *zpath, *zflags;
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	spl_filesystem_object *subdir;
	char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? '/' : DEFAULT_SLASH;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_object_get_file_name(intern TSRMLS_CC);

	/* Instantiate a sibling iterator of the same class for the sub-dir,
	 * passing the current path and flags to its constructor. */
	MAKE_STD_ZVAL(zflags);
	MAKE_STD_ZVAL(zpath);
	ZVAL_LONG(zflags, intern->flags);
	ZVAL_STRINGL(zpath, intern->file_name, intern->file_name_len, 1);
	spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, zpath, zflags TSRMLS_CC);
	zval_ptr_dtor(&zpath);
	zval_ptr_dtor(&zflags);

	subdir = (spl_filesystem_object*)zend_object_store_get_object(return_value TSRMLS_CC);
	if (subdir) {
		/* Propagate the accumulated sub-path and user-chosen classes. */
		if (intern->u.dir.sub_path && intern->u.dir.sub_path[0]) {
			subdir->u.dir.sub_path_len = spprintf(&subdir->u.dir.sub_path, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name);
		} else {
			subdir->u.dir.sub_path_len = strlen(intern->u.dir.entry.d_name);
			subdir->u.dir.sub_path = estrndup(intern->u.dir.entry.d_name, subdir->u.dir.sub_path_len);
		}
		subdir->info_class = intern->info_class;
		subdir->file_class = intern->file_class;
		subdir->oth = intern->oth;
	}
}
/* }}} */

/* {{{ proto void RecursiveDirectoryIterator::getSubPath()
   Get sub path */
SPL_METHOD(RecursiveDirectoryIterator, getSubPath)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (intern->u.dir.sub_path) {
		RETURN_STRINGL(intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1);
	} else {
		RETURN_STRINGL("", 0, 1);
	}
}
/* }}} */

/* {{{ proto void RecursiveDirectoryIterator::getSubPathname()
   Get sub path and file name */
SPL_METHOD(RecursiveDirectoryIterator, getSubPathname)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *sub_name;
	int len;
	char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? '/' : DEFAULT_SLASH;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (intern->u.dir.sub_path) {
		len = spprintf(&sub_name, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name);
		/* Ownership of the spprintf buffer transfers to the return value. */
		RETURN_STRINGL(sub_name, len, 0);
	} else {
		RETURN_STRING(intern->u.dir.entry.d_name, 1);
	}
}
/* }}} */

/* {{{ proto int RecursiveDirectoryIterator::__construct(string path [, int flags])
   Constructs a new dir iterator from a path. */
SPL_METHOD(RecursiveDirectoryIterator, __construct)
{
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS);
}
/* }}} */

#ifdef HAVE_GLOB
/* {{{ proto int GlobIterator::__construct(string path [, int flags])
   Constructs a new dir iterator from a glob expression (no glob:// needed). */
SPL_METHOD(GlobIterator, __construct)
{
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS|DIT_CTOR_GLOB);
}
/* }}} */

/* {{{ proto int GlobIterator::count()
   Return the number of directories and files found by globbing */
SPL_METHOD(GlobIterator, count)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) {
		RETURN_LONG(php_glob_stream_get_count(intern->u.dir.dirp, NULL));
	} else {
		/* should not happen */
		php_error_docref(NULL TSRMLS_CC, E_ERROR, "GlobIterator lost glob state");
	}
}
/* }}} */
#endif /* HAVE_GLOB */

/* {{{ forward declarations to the iterator handlers */
static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC);
static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC);
static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC);
static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC);
static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC);
static void
spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC);

/* iterator handler table */
zend_object_iterator_funcs spl_filesystem_dir_it_funcs = {
	spl_filesystem_dir_it_dtor,
	spl_filesystem_dir_it_valid,
	spl_filesystem_dir_it_current_data,
	spl_filesystem_dir_it_current_key,
	spl_filesystem_dir_it_move_forward,
	spl_filesystem_dir_it_rewind
};
/* }}} */

/* {{{ spl_ce_dir_get_iterator */
zend_object_iterator *spl_filesystem_dir_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC)
{
	spl_filesystem_iterator *iterator;
	spl_filesystem_object *dir_object;

	if (by_ref) {
		zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
	}
	dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC);
	iterator = spl_filesystem_object_to_iterator(dir_object);

	/* initialize iterator if it wasn't gotten before */
	if (iterator->intern.data == NULL) {
		iterator->intern.data = object;
		iterator->intern.funcs = &spl_filesystem_dir_it_funcs;
		/* ->current must be initialized; rewind doesn't set it and valid
		 * doesn't check whether it's set */
		iterator->current = object;
	}
	zval_add_ref(&object);

	return (zend_object_iterator*)iterator;
}
/* }}} */

/* {{{ spl_filesystem_dir_it_dtor */
static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;

	if (iterator->intern.data) {
		zval *object = iterator->intern.data;
		zval_ptr_dtor(&object);
	}
	/* Otherwise we were called from the owning object free storage handler as
	 * it sets
	 * iterator->intern.data to NULL.
	 * We don't even need to destroy iterator->current as we didn't add a
	 * reference to it in move_forward or get_iterator */
}
/* }}} */

/* {{{ spl_filesystem_dir_it_valid */
static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	/* An empty d_name means the directory stream is exhausted. */
	return object->u.dir.entry.d_name[0] != '\0' ? SUCCESS : FAILURE;
}
/* }}} */

/* {{{ spl_filesystem_dir_it_current_data */
static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;

	/* DirectoryIterator yields the iterator object itself as current. */
	*data = &iterator->current;
}
/* }}} */

/* {{{ spl_filesystem_dir_it_current_key */
static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	ZVAL_LONG(key, object->u.dir.index);
}
/* }}} */

/* {{{ spl_filesystem_dir_it_move_forward */
static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	object->u.dir.index++;
	spl_filesystem_dir_read(object TSRMLS_CC);
	/* Drop the cached full pathname; it is rebuilt lazily on demand. */
	if (object->file_name) {
		efree(object->file_name);
		object->file_name = NULL;
	}
}
/* }}} */

/* {{{ spl_filesystem_dir_it_rewind */
static void spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	object->u.dir.index = 0;
	if (object->u.dir.dirp) {
		php_stream_rewinddir(object->u.dir.dirp);
	}
	spl_filesystem_dir_read(object TSRMLS_CC);
}
/* }}} */

/* {{{ spl_filesystem_tree_it_dtor */
static void spl_filesystem_tree_it_dtor(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;

	if (iterator->intern.data) {
		zval *object = iterator->intern.data;
		zval_ptr_dtor(&object);
	} else {
		/* Tree iterators own a separately-built current value. */
		if (iterator->current) {
			zval_ptr_dtor(&iterator->current);
		}
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_current_data */
static void spl_filesystem_tree_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;
	spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator);

	if (SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
		/* Lazily build and cache the current value as a pathname string. */
		if (!iterator->current) {
			ALLOC_INIT_ZVAL(iterator->current);
			spl_filesystem_object_get_file_name(object TSRMLS_CC);
			ZVAL_STRINGL(iterator->current, object->file_name, object->file_name_len, 1);
		}
		*data = &iterator->current;
	} else if (SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
		/* Lazily build and cache the current value as an SplFileInfo. */
		if (!iterator->current) {
			ALLOC_INIT_ZVAL(iterator->current);
			spl_filesystem_object_get_file_name(object TSRMLS_CC);
			spl_filesystem_object_create_type(0, object, SPL_FS_INFO, NULL, iterator->current TSRMLS_CC);
		}
		*data = &iterator->current;
	} else {
		/* CURRENT_AS_SELF: hand back the iterator object itself. */
		*data = (zval**)&iterator->intern.data;
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_current_key */
static void spl_filesystem_tree_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	if (SPL_FILE_DIR_KEY(object, SPL_FILE_DIR_KEY_AS_FILENAME)) {
		ZVAL_STRING(key, object->u.dir.entry.d_name, 1);
	} else {
		spl_filesystem_object_get_file_name(object TSRMLS_CC);
		ZVAL_STRINGL(key, object->file_name, object->file_name_len, 1);
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_move_forward */
static void spl_filesystem_tree_it_move_forward(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;
	spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator);

	object->u.dir.index++;
	/* Tree iteration always skips '.' and '..'. */
	do {
		spl_filesystem_dir_read(object TSRMLS_CC);
	} while (spl_filesystem_is_dot(object->u.dir.entry.d_name));
	if (object->file_name) {
		efree(object->file_name);
		object->file_name = NULL;
	}
	/* Drop the cached current value; it is rebuilt on next access. */
	if (iterator->current) {
		zval_ptr_dtor(&iterator->current);
		iterator->current = NULL;
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_rewind */
static void spl_filesystem_tree_it_rewind(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;
	spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator);

	object->u.dir.index = 0;
	if (object->u.dir.dirp) {
		php_stream_rewinddir(object->u.dir.dirp);
	}
	do {
		spl_filesystem_dir_read(object TSRMLS_CC);
	} while (spl_filesystem_is_dot(object->u.dir.entry.d_name));
	if (iterator->current) {
		zval_ptr_dtor(&iterator->current);
		iterator->current = NULL;
	}
}
/* }}} */

/* {{{ iterator handler table */
zend_object_iterator_funcs spl_filesystem_tree_it_funcs = {
	spl_filesystem_tree_it_dtor,
	spl_filesystem_dir_it_valid,
	spl_filesystem_tree_it_current_data,
	spl_filesystem_tree_it_current_key,
	spl_filesystem_tree_it_move_forward,
	spl_filesystem_tree_it_rewind
};
/* }}} */

/* {{{ spl_ce_dir_get_iterator */
zend_object_iterator *spl_filesystem_tree_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC)
{
	spl_filesystem_iterator *iterator;
	spl_filesystem_object *dir_object;

	if (by_ref) {
		zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
	}
	dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC);
	iterator = spl_filesystem_object_to_iterator(dir_object);

	/* initialize iterator if wasn't gotten before */
	if (iterator->intern.data == NULL) {
		iterator->intern.data = object;
		iterator->intern.funcs = &spl_filesystem_tree_it_funcs;
	}
	zval_add_ref(&object);

	return (zend_object_iterator*)iterator;
}
/* }}} */

/* {{{ spl_filesystem_object_cast */
static int spl_filesystem_object_cast(zval *readobj, zval *writeobj, int type TSRMLS_DC)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(readobj TSRMLS_CC);

	if (type == IS_STRING) {
		/* A user-defined __toString() takes precedence. */
		if (Z_OBJCE_P(readobj)->__tostring) {
			return std_object_handlers.cast_object(readobj, writeobj, type TSRMLS_CC);
		}

		switch (intern->type) {
		case SPL_FS_INFO:
		case SPL_FS_FILE:
			if (readobj == writeobj) {
				/* In-place cast: build the result first, then destroy the
				 * source zval before overwriting it. */
				zval retval;
				zval *retval_ptr = &retval;

				ZVAL_STRINGL(retval_ptr, intern->file_name, intern->file_name_len, 1);
				zval_dtor(readobj);
				ZVAL_ZVAL(writeobj, retval_ptr, 0, 0);
			} else {
				ZVAL_STRINGL(writeobj, intern->file_name, intern->file_name_len, 1);
			}
			return SUCCESS;
		case SPL_FS_DIR:
			if (readobj == writeobj) {
				zval retval;
				zval *retval_ptr = &retval;

				ZVAL_STRING(retval_ptr, intern->u.dir.entry.d_name, 1);
				zval_dtor(readobj);
				ZVAL_ZVAL(writeobj, retval_ptr, 0, 0);
			} else {
				ZVAL_STRING(writeobj, intern->u.dir.entry.d_name, 1);
			}
			return SUCCESS;
		}
	} else if (type == IS_BOOL) {
		/* Filesystem objects always cast to TRUE. */
		ZVAL_BOOL(writeobj, 1);
		return SUCCESS;
	}
	if (readobj == writeobj) {
		zval_dtor(readobj);
	}
	ZVAL_NULL(writeobj);
	return FAILURE;
}
/* }}} */

/* {{{ declare method parameters */
/* supply a name and default to call by parameter */
ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0)
	ZEND_ARG_INFO(0, file_name)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_info_openFile, 0, 0, 0)
	ZEND_ARG_INFO(0, open_mode)
	ZEND_ARG_INFO(0, use_include_path)
	ZEND_ARG_INFO(0, context)
ZEND_END_ARG_INFO()

/* NOTE(review): "optinal" is a long-standing misspelling in these arginfo
 * identifiers; renaming would touch every reference, so it is kept as-is. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_info_optinalFileClass, 0, 0, 0)
	ZEND_ARG_INFO(0, class_name)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_optinalSuffix, 0, 0, 0)
	ZEND_ARG_INFO(0, suffix)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_splfileinfo_void, 0)
ZEND_END_ARG_INFO()

/* the method table */
/* each method can have its own parameters and visibility */
static const zend_function_entry spl_SplFileInfo_functions[] = {
	SPL_ME(SplFileInfo, __construct, arginfo_info___construct, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getPerms, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getInode, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getSize, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getOwner, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getGroup, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getATime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getMTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getCTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getType, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isWritable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isReadable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isExecutable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isFile, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isDir, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isLink, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getLinkTarget, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
#if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS)
	SPL_ME(SplFileInfo, getRealPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
#endif
	SPL_ME(SplFileInfo, getFileInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getPathInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, openFile, arginfo_info_openFile, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, setFileClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, setInfoClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, _bad_state_ex, NULL, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	SPL_MA(SplFileInfo, __toString, SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};

ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0)
	ZEND_ARG_INFO(0, path)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0)
	ZEND_ARG_INFO(0, position)
ZEND_END_ARG_INFO();

/* the method table */
/* each method can have its own parameters and visibility */
static const zend_function_entry
spl_DirectoryIterator_functions[] = { SPL_ME(DirectoryIterator, __construct, arginfo_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, isDot, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, valid, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, seek, arginfo_dir_it_seek, ZEND_ACC_PUBLIC) SPL_MA(DirectoryIterator, __toString, DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1) ZEND_ARG_INFO(0, path) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_hasChildren, 0, 0, 0) ZEND_ARG_INFO(0, allow_links) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_setFlags, 0, 0, 0) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() static const zend_function_entry spl_FilesystemIterator_functions[] = { SPL_ME(FilesystemIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, getFlags, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, setFlags, arginfo_r_dir_setFlags, ZEND_ACC_PUBLIC) PHP_FE_END }; static const 
zend_function_entry spl_RecursiveDirectoryIterator_functions[] = { SPL_ME(RecursiveDirectoryIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, hasChildren, arginfo_r_dir_hasChildren, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getSubPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getSubPathname,arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; #ifdef HAVE_GLOB static const zend_function_entry spl_GlobIterator_functions[] = { SPL_ME(GlobIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(GlobIterator, count, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; #endif /* }}} */ static int spl_filesystem_file_read(spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { char *buf; size_t line_len = 0; long line_add = (intern->u.file.current_line || intern->u.file.current_zval) ? 
1 : 0; spl_filesystem_file_free_line(intern TSRMLS_CC); if (php_stream_eof(intern->u.file.stream)) { if (!silent) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name); } return FAILURE; } if (intern->u.file.max_line_len > 0) { buf = safe_emalloc((intern->u.file.max_line_len + 1), sizeof(char), 0); if (php_stream_get_line(intern->u.file.stream, buf, intern->u.file.max_line_len + 1, &line_len) == NULL) { efree(buf); buf = NULL; } else { buf[line_len] = '\0'; } } else { buf = php_stream_get_line(intern->u.file.stream, NULL, 0, &line_len); } if (!buf) { intern->u.file.current_line = estrdup(""); intern->u.file.current_line_len = 0; } else { if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_DROP_NEW_LINE)) { line_len = strcspn(buf, "\r\n"); buf[line_len] = '\0'; } intern->u.file.current_line = buf; intern->u.file.current_line_len = line_len; } intern->u.file.current_line_num += line_add; return SUCCESS; } /* }}} */ static int spl_filesystem_file_call(spl_filesystem_object *intern, zend_function *func_ptr, int pass_num_args, zval *return_value, zval *arg2 TSRMLS_DC) /* {{{ */ { zend_fcall_info fci; zend_fcall_info_cache fcic; zval z_fname; zval * zresource_ptr = &intern->u.file.zresource, *retval; int result; int num_args = pass_num_args + (arg2 ? 2 : 1); zval ***params = (zval***)safe_emalloc(num_args, sizeof(zval**), 0); params[0] = &zresource_ptr; if (arg2) { params[1] = &arg2; } zend_get_parameters_array_ex(pass_num_args, params+(arg2 ? 
2 : 1)); ZVAL_STRING(&z_fname, func_ptr->common.function_name, 0); fci.size = sizeof(fci); fci.function_table = EG(function_table); fci.object_ptr = NULL; fci.function_name = &z_fname; fci.retval_ptr_ptr = &retval; fci.param_count = num_args; fci.params = params; fci.no_separation = 1; fci.symbol_table = NULL; fcic.initialized = 1; fcic.function_handler = func_ptr; fcic.calling_scope = NULL; fcic.called_scope = NULL; fcic.object_ptr = NULL; result = zend_call_function(&fci, &fcic TSRMLS_CC); if (result == FAILURE) { RETVAL_FALSE; } else { ZVAL_ZVAL(return_value, retval, 1, 1); } efree(params); return result; } /* }}} */ #define FileFunctionCall(func_name, pass_num_args, arg2) /* {{{ */ \ { \ zend_function *func_ptr; \ int ret; \ ret = zend_hash_find(EG(function_table), #func_name, sizeof(#func_name), (void **) &func_ptr); \ if (ret != SUCCESS) { \ zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Internal error, function '%s' not found. Please report", #func_name); \ return; \ } \ spl_filesystem_file_call(intern, func_ptr, pass_num_args, return_value, arg2 TSRMLS_CC); \ } /* }}} */ static int spl_filesystem_file_read_csv(spl_filesystem_object *intern, char delimiter, char enclosure, char escape, zval *return_value TSRMLS_DC) /* {{{ */ { int ret = SUCCESS; do { ret = spl_filesystem_file_read(intern, 1 TSRMLS_CC); } while (ret == SUCCESS && !intern->u.file.current_line_len && SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY)); if (ret == SUCCESS) { size_t buf_len = intern->u.file.current_line_len; char *buf = estrndup(intern->u.file.current_line, buf_len); if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); } ALLOC_INIT_ZVAL(intern->u.file.current_zval); php_fgetcsv(intern->u.file.stream, delimiter, enclosure, escape, buf_len, buf, intern->u.file.current_zval TSRMLS_CC); if (return_value) { if (Z_TYPE_P(return_value) != IS_NULL) { zval_dtor(return_value); ZVAL_NULL(return_value); } ZVAL_ZVAL(return_value, 
intern->u.file.current_zval, 1, 0); } } return ret; } /* }}} */ static int spl_filesystem_file_read_line_ex(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { zval *retval = NULL; /* 1) use fgetcsv? 2) overloaded call the function, 3) do it directly */ if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || intern->u.file.func_getCurr->common.scope != spl_ce_SplFileObject) { if (php_stream_eof(intern->u.file.stream)) { if (!silent) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name); } return FAILURE; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV)) { return spl_filesystem_file_read_csv(intern, intern->u.file.delimiter, intern->u.file.enclosure, intern->u.file.escape, NULL TSRMLS_CC); } else { zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.file.func_getCurr, "getCurrentLine", &retval); } if (retval) { if (intern->u.file.current_line || intern->u.file.current_zval) { intern->u.file.current_line_num++; } spl_filesystem_file_free_line(intern TSRMLS_CC); if (Z_TYPE_P(retval) == IS_STRING) { intern->u.file.current_line = estrndup(Z_STRVAL_P(retval), Z_STRLEN_P(retval)); intern->u.file.current_line_len = Z_STRLEN_P(retval); } else { MAKE_STD_ZVAL(intern->u.file.current_zval); ZVAL_ZVAL(intern->u.file.current_zval, retval, 1, 0); } zval_ptr_dtor(&retval); return SUCCESS; } else { return FAILURE; } } else { return spl_filesystem_file_read(intern, silent TSRMLS_CC); } } /* }}} */ static int spl_filesystem_file_is_empty_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (intern->u.file.current_line) { return intern->u.file.current_line_len == 0; } else if (intern->u.file.current_zval) { switch(Z_TYPE_P(intern->u.file.current_zval)) { case IS_STRING: return Z_STRLEN_P(intern->u.file.current_zval) == 0; case IS_ARRAY: if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) && 
zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 1) { zval ** first = Z_ARRVAL_P(intern->u.file.current_zval)->pListHead->pData; return Z_TYPE_PP(first) == IS_STRING && Z_STRLEN_PP(first) == 0; } return zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 0; case IS_NULL: return 1; default: return 0; } } else { return 1; } } /* }}} */ static int spl_filesystem_file_read_line(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { int ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC); while (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY) && ret == SUCCESS && spl_filesystem_file_is_empty_line(intern TSRMLS_CC)) { spl_filesystem_file_free_line(intern TSRMLS_CC); ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC); } return ret; } /* }}} */ static void spl_filesystem_file_rewind(zval * this_ptr, spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (-1 == php_stream_rewind(intern->u.file.stream)) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot rewind file %s", intern->file_name); } else { spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num = 0; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { spl_filesystem_file_read_line(this_ptr, intern, 1 TSRMLS_CC); } } /* }}} */ /* {{{ proto void SplFileObject::__construct(string filename [, string mode = 'r' [, bool use_include_path [, resource context]]]]) Construct a new file object */ SPL_METHOD(SplFileObject, __construct) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_bool use_include_path = 0; char *p1, *p2; char *tmp_path; int tmp_path_len; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); intern->u.file.open_mode = NULL; intern->u.file.open_mode_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() 
TSRMLS_CC, "p|sbr!", &intern->file_name, &intern->file_name_len, &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) { intern->u.file.open_mode = NULL; intern->file_name = NULL; zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (intern->u.file.open_mode == NULL) { intern->u.file.open_mode = "r"; intern->u.file.open_mode_len = 1; } if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == SUCCESS) { tmp_path_len = strlen(intern->u.file.stream->orig_path); if (tmp_path_len > 1 && IS_SLASH_AT(intern->u.file.stream->orig_path, tmp_path_len-1)) { tmp_path_len--; } tmp_path = estrndup(intern->u.file.stream->orig_path, tmp_path_len); p1 = strrchr(tmp_path, '/'); #if defined(PHP_WIN32) || defined(NETWARE) p2 = strrchr(tmp_path, '\\'); #else p2 = 0; #endif if (p1 || p2) { intern->_path_len = (p1 > p2 ? p1 : p2) - tmp_path; } else { intern->_path_len = 0; } efree(tmp_path); intern->_path = estrndup(intern->u.file.stream->orig_path, intern->_path_len); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplTempFileObject::__construct([int max_memory]) Construct a new temp file object */ SPL_METHOD(SplTempFileObject, __construct) { long max_memory = PHP_STREAM_MAX_MEM; char tmp_fname[48]; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &max_memory) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (max_memory < 0) { intern->file_name = "php://memory"; intern->file_name_len = 12; } else if (ZEND_NUM_ARGS()) { intern->file_name_len = slprintf(tmp_fname, sizeof(tmp_fname), "php://temp/maxmemory:%ld", max_memory); intern->file_name = tmp_fname; } else { intern->file_name = 
"php://temp"; intern->file_name_len = 10; } intern->u.file.open_mode = "wb"; intern->u.file.open_mode_len = 1; intern->u.file.zcontext = NULL; if (spl_filesystem_file_open(intern, 0, 0 TSRMLS_CC) == SUCCESS) { intern->_path_len = 0; intern->_path = estrndup("", 0); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::rewind() Rewind the file and read the first line */ SPL_METHOD(SplFileObject, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::eof() Return whether end of file is reached */ SPL_METHOD(SplFileObject, eof) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(php_stream_eof(intern->u.file.stream)); } /* }}} */ /* {{{ proto void SplFileObject::valid() Return !eof() */ SPL_METHOD(SplFileObject, valid) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { RETURN_BOOL(intern->u.file.current_line || intern->u.file.current_zval); } else { RETVAL_BOOL(!php_stream_eof(intern->u.file.stream)); } } /* }}} */ /* {{{ proto string SplFileObject::fgets() Rturn next line from file */ SPL_METHOD(SplFileObject, fgets) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_filesystem_file_read(intern, 0 TSRMLS_CC) == FAILURE) { RETURN_FALSE; } RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } /* }}} */ /* {{{ proto string SplFileObject::current() Return 
current line from file */ SPL_METHOD(SplFileObject, current) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (!intern->u.file.current_line && !intern->u.file.current_zval) { spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC); } if (intern->u.file.current_line && (!SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || !intern->u.file.current_zval)) { RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } else if (intern->u.file.current_zval) { RETURN_ZVAL(intern->u.file.current_zval, 1, 0); } RETURN_FALSE; } /* }}} */ /* {{{ proto int SplFileObject::key() Return line number */ SPL_METHOD(SplFileObject, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } /* Do not read the next line to support correct counting with fgetc() if (!intern->current_line) { spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC); } */ RETURN_LONG(intern->u.file.current_line_num); } /* }}} */ /* {{{ proto void SplFileObject::next() Read next line */ SPL_METHOD(SplFileObject, next) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_file_free_line(intern TSRMLS_CC); if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC); } intern->u.file.current_line_num++; } /* }}} */ /* {{{ proto void SplFileObject::setFlags(int flags) Set file handling flags */ SPL_METHOD(SplFileObject, setFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &intern->flags) == FAILURE) { return; } } /* }}} */ /* {{{ proto int 
SplFileObject::getFlags() Get file handling flags */ SPL_METHOD(SplFileObject, getFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->flags & SPL_FILE_OBJECT_MASK); } /* }}} */ /* {{{ proto void SplFileObject::setMaxLineLen(int max_len) Set maximum line length */ SPL_METHOD(SplFileObject, setMaxLineLen) { long max_len; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &max_len) == FAILURE) { return; } if (max_len < 0) { zend_throw_exception_ex(spl_ce_DomainException, 0 TSRMLS_CC, "Maximum line length must be greater than or equal zero"); return; } intern->u.file.max_line_len = max_len; } /* }}} */ /* {{{ proto int SplFileObject::getMaxLineLen() Get maximum line length */ SPL_METHOD(SplFileObject, getMaxLineLen) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG((long)intern->u.file.max_line_len); } /* }}} */ /* {{{ proto bool SplFileObject::hasChildren() Return false */ SPL_METHOD(SplFileObject, hasChildren) { if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_FALSE; } /* }}} */ /* {{{ proto bool SplFileObject::getChildren() Read NULL */ SPL_METHOD(SplFileObject, getChildren) { if (zend_parse_parameters_none() == FAILURE) { return; } /* return NULL */ } /* }}} */ /* {{{ FileFunction */ #define FileFunction(func_name) \ SPL_METHOD(SplFileObject, func_name) \ { \ spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \ FileFunctionCall(func_name, ZEND_NUM_ARGS(), NULL); \ } /* }}} */ /* {{{ proto array SplFileObject::fgetcsv([string delimiter [, string enclosure [, escape = '\\']]]) Return current line as csv */ 
SPL_METHOD(SplFileObject, fgetcsv) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 3: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 2: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 1: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 0: break; } spl_filesystem_file_read_csv(intern, delimiter, enclosure, escape, return_value TSRMLS_CC); } } /* }}} */ /* {{{ proto int SplFileObject::fputcsv(array fields, [string delimiter [, string enclosure [, string escape]]]) Output a field array as a CSV line */ SPL_METHOD(SplFileObject, fputcsv) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0, ret; zval *fields = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a|sss", &fields, &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 4: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 3: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must 
be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 2: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 1: case 0: break; } ret = php_fputcsv(intern->u.file.stream, fields, delimiter, enclosure, escape TSRMLS_CC); RETURN_LONG(ret); } } /* }}} */ /* {{{ proto void SplFileObject::setCsvControl([string delimiter = ',' [, string enclosure = '"' [, string escape = '\\']]]) Set the delimiter and enclosure character used in fgetcsv */ SPL_METHOD(SplFileObject, setCsvControl) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = ',', enclosure = '"', escape='\\'; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 3: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 2: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 1: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 0: break; } intern->u.file.delimiter = delimiter; intern->u.file.enclosure = enclosure; intern->u.file.escape = escape; } } /* }}} */ /* {{{ proto array SplFileObject::getCsvControl() Get the delimiter and enclosure character used in fgetcsv */ SPL_METHOD(SplFileObject, getCsvControl) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter[2], enclosure[2]; array_init(return_value); delimiter[0] = intern->u.file.delimiter; delimiter[1] = '\0'; enclosure[0] = 
intern->u.file.enclosure; enclosure[1] = '\0'; add_next_index_string(return_value, delimiter, 1); add_next_index_string(return_value, enclosure, 1); } /* }}} */ /* {{{ proto bool SplFileObject::flock(int operation [, int &wouldblock]) Portable file locking */ FileFunction(flock) /* }}} */ /* {{{ proto bool SplFileObject::fflush() Flush the file */ SPL_METHOD(SplFileObject, fflush) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); RETURN_BOOL(!php_stream_flush(intern->u.file.stream)); } /* }}} */ /* {{{ proto int SplFileObject::ftell() Return current file position */ SPL_METHOD(SplFileObject, ftell) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long ret = php_stream_tell(intern->u.file.stream); if (ret == -1) { RETURN_FALSE; } else { RETURN_LONG(ret); } } /* }}} */ /* {{{ proto int SplFileObject::fseek(int pos [, int whence = SEEK_SET]) Return current file position */ SPL_METHOD(SplFileObject, fseek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long pos, whence = SEEK_SET; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &pos, &whence) == FAILURE) { return; } spl_filesystem_file_free_line(intern TSRMLS_CC); RETURN_LONG(php_stream_seek(intern->u.file.stream, pos, whence)); } /* }}} */ /* {{{ proto int SplFileObject::fgetc() Get a character form the file */ SPL_METHOD(SplFileObject, fgetc) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char buf[2]; int result; spl_filesystem_file_free_line(intern TSRMLS_CC); result = php_stream_getc(intern->u.file.stream); if (result == EOF) { RETVAL_FALSE; } else { if (result == '\n') { intern->u.file.current_line_num++; } buf[0] = result; buf[1] = '\0'; RETURN_STRINGL(buf, 1, 1); } } /* }}} */ /* {{{ proto string SplFileObject::fgetss([string allowable_tags]) Get a line from 
file pointer and strip HTML tags */ SPL_METHOD(SplFileObject, fgetss) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zval *arg2 = NULL; MAKE_STD_ZVAL(arg2); if (intern->u.file.max_line_len > 0) { ZVAL_LONG(arg2, intern->u.file.max_line_len); } else { ZVAL_LONG(arg2, 1024); } spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num++; FileFunctionCall(fgetss, ZEND_NUM_ARGS(), arg2); zval_ptr_dtor(&arg2); } /* }}} */ /* {{{ proto int SplFileObject::fpassthru() Output all remaining data from a file pointer */ SPL_METHOD(SplFileObject, fpassthru) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); RETURN_LONG(php_stream_passthru(intern->u.file.stream)); } /* }}} */ /* {{{ proto bool SplFileObject::fscanf(string format [, string ...]) Implements a mostly ANSI compatible fscanf() */ SPL_METHOD(SplFileObject, fscanf) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num++; FileFunctionCall(fscanf, ZEND_NUM_ARGS(), NULL); } /* }}} */ /* {{{ proto mixed SplFileObject::fwrite(string str [, int length]) Binary-safe file write */ SPL_METHOD(SplFileObject, fwrite) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *str; int str_len; long length = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &str, &str_len, &length) == FAILURE) { return; } if (ZEND_NUM_ARGS() > 1) { str_len = MAX(0, MIN(length, str_len)); } if (!str_len) { RETURN_LONG(0); } RETURN_LONG(php_stream_write(intern->u.file.stream, str, str_len)); } /* }}} */ SPL_METHOD(SplFileObject, fread) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long length = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() 
TSRMLS_CC, "l", &length) == FAILURE) { return; } if (length <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be greater than 0"); RETURN_FALSE; } Z_STRVAL_P(return_value) = emalloc(length + 1); Z_STRLEN_P(return_value) = php_stream_read(intern->u.file.stream, Z_STRVAL_P(return_value), length); /* needed because recv/read/gzread doesnt put a null at the end*/ Z_STRVAL_P(return_value)[Z_STRLEN_P(return_value)] = 0; Z_TYPE_P(return_value) = IS_STRING; } /* {{{ proto bool SplFileObject::fstat() Stat() on a filehandle */ FileFunction(fstat) /* }}} */ /* {{{ proto bool SplFileObject::ftruncate(int size) Truncate file to 'size' length */ SPL_METHOD(SplFileObject, ftruncate) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long size; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &size) == FAILURE) { return; } if (!php_stream_truncate_supported(intern->u.file.stream)) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't truncate file %s", intern->file_name); RETURN_FALSE; } RETURN_BOOL(0 == php_stream_truncate_set_size(intern->u.file.stream, size)); } /* }}} */ /* {{{ proto void SplFileObject::seek(int line_pos) Seek to specified line */ SPL_METHOD(SplFileObject, seek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long line_pos; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &line_pos) == FAILURE) { return; } if (line_pos < 0) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't seek file %s to negative line %ld", intern->file_name, line_pos); RETURN_FALSE; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); while(intern->u.file.current_line_num < line_pos) { if (spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC) == FAILURE) { break; } } } /* }}} */ /* {{{ Function/Class/Method definitions */ ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object___construct, 0, 0, 1) 
ZEND_ARG_INFO(0, file_name) ZEND_ARG_INFO(0, open_mode) ZEND_ARG_INFO(0, use_include_path) ZEND_ARG_INFO(0, context) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_file_object_setFlags, 0) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_file_object_setMaxLineLen, 0) ZEND_ARG_INFO(0, max_len) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetcsv, 0, 0, 0) ZEND_ARG_INFO(0, delimiter) ZEND_ARG_INFO(0, enclosure) ZEND_ARG_INFO(0, escape) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fputcsv, 0, 0, 1) ZEND_ARG_INFO(0, fields) ZEND_ARG_INFO(0, delimiter) ZEND_ARG_INFO(0, enclosure) ZEND_ARG_INFO(0, escape) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1) ZEND_ARG_INFO(0, operation) ZEND_ARG_INFO(1, wouldblock) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1) ZEND_ARG_INFO(0, pos) ZEND_ARG_INFO(0, whence) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0) ZEND_ARG_INFO(0, allowable_tags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1) ZEND_ARG_INFO(0, format) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1) ZEND_ARG_INFO(0, str) ZEND_ARG_INFO(0, length) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fread, 0, 0, 1) ZEND_ARG_INFO(0, length) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1) ZEND_ARG_INFO(0, size) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1) ZEND_ARG_INFO(0, line_pos) ZEND_END_ARG_INFO() static const zend_function_entry spl_SplFileObject_functions[] = { SPL_ME(SplFileObject, __construct, arginfo_file_object___construct, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, eof, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, valid, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, 
fgets, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fgetcsv, arginfo_file_object_fgetcsv, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fputcsv, arginfo_file_object_fputcsv, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, setCsvControl, arginfo_file_object_fgetcsv, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getCsvControl, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, flock, arginfo_file_object_flock, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fflush, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, ftell, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fseek, arginfo_file_object_fseek, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fgetc, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fpassthru, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fgetss, arginfo_file_object_fgetss, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fscanf, arginfo_file_object_fscanf, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fwrite, arginfo_file_object_fwrite, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fread, arginfo_file_object_fread, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fstat, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, ftruncate, arginfo_file_object_ftruncate, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, setFlags, arginfo_file_object_setFlags, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getFlags, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, setMaxLineLen, arginfo_file_object_setMaxLineLen, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getMaxLineLen, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, hasChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, seek, 
arginfo_file_object_seek, ZEND_ACC_PUBLIC) /* mappings */ SPL_MA(SplFileObject, getCurrentLine, SplFileObject, fgets, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_MA(SplFileObject, __toString, SplFileObject, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; ZEND_BEGIN_ARG_INFO_EX(arginfo_temp_file_object___construct, 0, 0, 0) ZEND_ARG_INFO(0, max_memory) ZEND_END_ARG_INFO() static const zend_function_entry spl_SplTempFileObject_functions[] = { SPL_ME(SplTempFileObject, __construct, arginfo_temp_file_object___construct, ZEND_ACC_PUBLIC) PHP_FE_END }; /* }}} */ /* {{{ PHP_MINIT_FUNCTION(spl_directory) */ PHP_MINIT_FUNCTION(spl_directory) { REGISTER_SPL_STD_CLASS_EX(SplFileInfo, spl_filesystem_object_new, spl_SplFileInfo_functions); memcpy(&spl_filesystem_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); spl_filesystem_object_handlers.clone_obj = spl_filesystem_object_clone; spl_filesystem_object_handlers.cast_object = spl_filesystem_object_cast; spl_filesystem_object_handlers.get_debug_info = spl_filesystem_object_get_debug_info; spl_ce_SplFileInfo->serialize = zend_class_serialize_deny; spl_ce_SplFileInfo->unserialize = zend_class_unserialize_deny; REGISTER_SPL_SUB_CLASS_EX(DirectoryIterator, SplFileInfo, spl_filesystem_object_new, spl_DirectoryIterator_functions); zend_class_implements(spl_ce_DirectoryIterator TSRMLS_CC, 1, zend_ce_iterator); REGISTER_SPL_IMPLEMENTS(DirectoryIterator, SeekableIterator); spl_ce_DirectoryIterator->get_iterator = spl_filesystem_dir_get_iterator; REGISTER_SPL_SUB_CLASS_EX(FilesystemIterator, DirectoryIterator, spl_filesystem_object_new, spl_FilesystemIterator_functions); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_MODE_MASK", SPL_FILE_DIR_CURRENT_MODE_MASK); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_PATHNAME", SPL_FILE_DIR_CURRENT_AS_PATHNAME); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_FILEINFO", SPL_FILE_DIR_CURRENT_AS_FILEINFO); 
REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_SELF", SPL_FILE_DIR_CURRENT_AS_SELF); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_MODE_MASK", SPL_FILE_DIR_KEY_MODE_MASK); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_PATHNAME", SPL_FILE_DIR_KEY_AS_PATHNAME); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "FOLLOW_SYMLINKS", SPL_FILE_DIR_FOLLOW_SYMLINKS); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_FILENAME", SPL_FILE_DIR_KEY_AS_FILENAME); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "NEW_CURRENT_AND_KEY", SPL_FILE_DIR_KEY_AS_FILENAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "OTHER_MODE_MASK", SPL_FILE_DIR_OTHERS_MASK); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "SKIP_DOTS", SPL_FILE_DIR_SKIPDOTS); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "UNIX_PATHS", SPL_FILE_DIR_UNIXPATHS); spl_ce_FilesystemIterator->get_iterator = spl_filesystem_tree_get_iterator; REGISTER_SPL_SUB_CLASS_EX(RecursiveDirectoryIterator, FilesystemIterator, spl_filesystem_object_new, spl_RecursiveDirectoryIterator_functions); REGISTER_SPL_IMPLEMENTS(RecursiveDirectoryIterator, RecursiveIterator); memcpy(&spl_filesystem_object_check_handlers, &spl_filesystem_object_handlers, sizeof(zend_object_handlers)); spl_filesystem_object_check_handlers.get_method = spl_filesystem_object_get_method_check; #ifdef HAVE_GLOB REGISTER_SPL_SUB_CLASS_EX(GlobIterator, FilesystemIterator, spl_filesystem_object_new_check, spl_GlobIterator_functions); REGISTER_SPL_IMPLEMENTS(GlobIterator, Countable); #endif REGISTER_SPL_SUB_CLASS_EX(SplFileObject, SplFileInfo, spl_filesystem_object_new_check, spl_SplFileObject_functions); REGISTER_SPL_IMPLEMENTS(SplFileObject, RecursiveIterator); REGISTER_SPL_IMPLEMENTS(SplFileObject, SeekableIterator); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "DROP_NEW_LINE", SPL_FILE_OBJECT_DROP_NEW_LINE); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_AHEAD", 
SPL_FILE_OBJECT_READ_AHEAD); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "SKIP_EMPTY", SPL_FILE_OBJECT_SKIP_EMPTY); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_CSV", SPL_FILE_OBJECT_READ_CSV); REGISTER_SPL_SUB_CLASS_EX(SplTempFileObject, SplFileObject, spl_filesystem_object_new_check, spl_SplTempFileObject_functions); return SUCCESS; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2015 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/file.h" #include "ext/standard/php_string.h" #include "zend_compile.h" #include "zend_exceptions.h" #include "zend_interfaces.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_directory.h" #include "spl_exceptions.h" #include "php.h" #include "fopen_wrappers.h" #include "ext/standard/basic_functions.h" #include "ext/standard/php_filestat.h" #define SPL_HAS_FLAG(flags, test_flag) ((flags & test_flag) ? 
1 : 0) /* declare the class handlers */ static zend_object_handlers spl_filesystem_object_handlers; /* includes handler to validate object state when retrieving methods */ static zend_object_handlers spl_filesystem_object_check_handlers; /* decalre the class entry */ PHPAPI zend_class_entry *spl_ce_SplFileInfo; PHPAPI zend_class_entry *spl_ce_DirectoryIterator; PHPAPI zend_class_entry *spl_ce_FilesystemIterator; PHPAPI zend_class_entry *spl_ce_RecursiveDirectoryIterator; PHPAPI zend_class_entry *spl_ce_GlobIterator; PHPAPI zend_class_entry *spl_ce_SplFileObject; PHPAPI zend_class_entry *spl_ce_SplTempFileObject; static void spl_filesystem_file_free_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (intern->u.file.current_line) { efree(intern->u.file.current_line); intern->u.file.current_line = NULL; } if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); intern->u.file.current_zval = NULL; } } /* }}} */ static void spl_filesystem_object_free_storage(void *object TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)object; if (intern->oth_handler && intern->oth_handler->dtor) { intern->oth_handler->dtor(intern TSRMLS_CC); } zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->_path) { efree(intern->_path); } if (intern->file_name) { efree(intern->file_name); } switch(intern->type) { case SPL_FS_INFO: break; case SPL_FS_DIR: if (intern->u.dir.dirp) { php_stream_close(intern->u.dir.dirp); intern->u.dir.dirp = NULL; } if (intern->u.dir.sub_path) { efree(intern->u.dir.sub_path); } break; case SPL_FS_FILE: if (intern->u.file.stream) { if (intern->u.file.zcontext) { /* zend_list_delref(Z_RESVAL_P(intern->zcontext));*/ } if (!intern->u.file.stream->is_persistent) { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE); } else { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE_PERSISTENT); } if (intern->u.file.open_mode) { efree(intern->u.file.open_mode); } if (intern->orig_path) { 
efree(intern->orig_path); } } spl_filesystem_file_free_line(intern TSRMLS_CC); break; } { zend_object_iterator *iterator; iterator = (zend_object_iterator*) spl_filesystem_object_to_iterator(intern); if (iterator->data != NULL) { iterator->data = NULL; iterator->funcs->dtor(iterator TSRMLS_CC); } } efree(object); } /* }}} */ /* {{{ spl_ce_dir_object_new */ /* creates the object by - allocating memory - initializing the object members - storing the object - setting it's handlers called from - clone - new */ static zend_object_value spl_filesystem_object_new_ex(zend_class_entry *class_type, spl_filesystem_object **obj TSRMLS_DC) { zend_object_value retval; spl_filesystem_object *intern; intern = emalloc(sizeof(spl_filesystem_object)); memset(intern, 0, sizeof(spl_filesystem_object)); /* intern->type = SPL_FS_INFO; done by set 0 */ intern->file_class = spl_ce_SplFileObject; intern->info_class = spl_ce_SplFileInfo; if (obj) *obj = intern; zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_filesystem_object_free_storage, NULL TSRMLS_CC); retval.handlers = &spl_filesystem_object_handlers; return retval; } /* }}} */ /* {{{ spl_filesystem_object_new */ /* See spl_filesystem_object_new_ex */ static zend_object_value spl_filesystem_object_new(zend_class_entry *class_type TSRMLS_DC) { return spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC); } /* }}} */ /* {{{ spl_filesystem_object_new_ex */ static zend_object_value spl_filesystem_object_new_check(zend_class_entry *class_type TSRMLS_DC) { zend_object_value ret = spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC); ret.handlers = &spl_filesystem_object_check_handlers; return ret; } /* }}} */ PHPAPI char* spl_filesystem_object_get_path(spl_filesystem_object *intern, int *len TSRMLS_DC) /* {{{ */ { #ifdef HAVE_GLOB if 
(intern->type == SPL_FS_DIR) { if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { return php_glob_stream_get_path(intern->u.dir.dirp, 0, len); } } #endif if (len) { *len = intern->_path_len; } return intern->_path; } /* }}} */ static inline void spl_filesystem_object_get_file_name(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? '/' : DEFAULT_SLASH; switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: if (!intern->file_name) { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Object not initialized"); } break; case SPL_FS_DIR: if (intern->file_name) { efree(intern->file_name); } intern->file_name_len = spprintf(&intern->file_name, 0, "%s%c%s", spl_filesystem_object_get_path(intern, NULL TSRMLS_CC), slash, intern->u.dir.entry.d_name); break; } } /* }}} */ static int spl_filesystem_dir_read(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (!intern->u.dir.dirp || !php_stream_readdir(intern->u.dir.dirp, &intern->u.dir.entry)) { intern->u.dir.entry.d_name[0] = '\0'; return 0; } else { return 1; } } /* }}} */ #define IS_SLASH_AT(zs, pos) (IS_SLASH(zs[pos])) static inline int spl_filesystem_is_dot(const char * d_name) /* {{{ */ { return !strcmp(d_name, ".") || !strcmp(d_name, ".."); } /* }}} */ /* {{{ spl_filesystem_dir_open */ /* open a directory resource */ static void spl_filesystem_dir_open(spl_filesystem_object* intern, char *path TSRMLS_DC) { int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS); intern->type = SPL_FS_DIR; intern->_path_len = strlen(path); intern->u.dir.dirp = php_stream_opendir(path, REPORT_ERRORS, FG(default_context)); if (intern->_path_len > 1 && IS_SLASH_AT(path, intern->_path_len-1)) { intern->_path = estrndup(path, --intern->_path_len); } else { intern->_path = estrndup(path, intern->_path_len); } intern->u.dir.index = 0; if (EG(exception) || intern->u.dir.dirp == NULL) { intern->u.dir.entry.d_name[0] = '\0'; if (!EG(exception)) { /* open 
failed w/out notice (turned to exception due to EH_THROW) */ zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Failed to open directory \"%s\"", path); } } else { do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } } /* }}} */ static int spl_filesystem_file_open(spl_filesystem_object *intern, int use_include_path, int silent TSRMLS_DC) /* {{{ */ { zval tmp; intern->type = SPL_FS_FILE; php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, &tmp TSRMLS_CC); if (Z_LVAL(tmp)) { intern->u.file.open_mode = NULL; intern->file_name = NULL; zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Cannot use SplFileObject with directories"); return FAILURE; } intern->u.file.context = php_stream_context_from_zval(intern->u.file.zcontext, 0); intern->u.file.stream = php_stream_open_wrapper_ex(intern->file_name, intern->u.file.open_mode, (use_include_path ? USE_PATH : 0) | REPORT_ERRORS, NULL, intern->u.file.context); if (!intern->file_name_len || !intern->u.file.stream) { if (!EG(exception)) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot open file '%s'", intern->file_name_len ? 
intern->file_name : ""); } intern->file_name = NULL; /* until here it is not a copy */ intern->u.file.open_mode = NULL; return FAILURE; } if (intern->u.file.zcontext) { zend_list_addref(Z_RESVAL_P(intern->u.file.zcontext)); } if (intern->file_name_len > 1 && IS_SLASH_AT(intern->file_name, intern->file_name_len-1)) { intern->file_name_len--; } intern->orig_path = estrndup(intern->u.file.stream->orig_path, strlen(intern->u.file.stream->orig_path)); intern->file_name = estrndup(intern->file_name, intern->file_name_len); intern->u.file.open_mode = estrndup(intern->u.file.open_mode, intern->u.file.open_mode_len); /* avoid reference counting in debug mode, thus do it manually */ ZVAL_RESOURCE(&intern->u.file.zresource, php_stream_get_resource_id(intern->u.file.stream)); Z_SET_REFCOUNT(intern->u.file.zresource, 1); intern->u.file.delimiter = ','; intern->u.file.enclosure = '"'; intern->u.file.escape = '\\'; zend_hash_find(&intern->std.ce->function_table, "getcurrentline", sizeof("getcurrentline"), (void **) &intern->u.file.func_getCurr); return SUCCESS; } /* }}} */ /* {{{ spl_filesystem_object_clone */ /* Local zend_object_value creation (on stack) Load the 'other' object Create a new empty object (See spl_filesystem_object_new_ex) Open the directory Clone other members (properties) */ static zend_object_value spl_filesystem_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_filesystem_object *intern; spl_filesystem_object *source; int index, skip_dots; old_object = zend_objects_get_address(zobject TSRMLS_CC); source = (spl_filesystem_object*)old_object; new_obj_val = spl_filesystem_object_new_ex(old_object->ce, &intern TSRMLS_CC); new_object = &intern->std; intern->flags = source->flags; switch (source->type) { case SPL_FS_INFO: intern->_path_len = source->_path_len; intern->_path = estrndup(source->_path, source->_path_len); 
intern->file_name_len = source->file_name_len; intern->file_name = estrndup(source->file_name, intern->file_name_len); break; case SPL_FS_DIR: spl_filesystem_dir_open(intern, source->_path TSRMLS_CC); /* read until we hit the position in which we were before */ skip_dots = SPL_HAS_FLAG(source->flags, SPL_FILE_DIR_SKIPDOTS); for(index = 0; index < source->u.dir.index; ++index) { do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } intern->u.dir.index = index; break; case SPL_FS_FILE: php_error_docref(NULL TSRMLS_CC, E_ERROR, "An object of class %s cannot be cloned", old_object->ce->name); break; } intern->file_class = source->file_class; intern->info_class = source->info_class; intern->oth = source->oth; intern->oth_handler = source->oth_handler; zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC); if (intern->oth_handler && intern->oth_handler->clone) { intern->oth_handler->clone(source, intern TSRMLS_CC); } return new_obj_val; } /* }}} */ void spl_filesystem_info_set_filename(spl_filesystem_object *intern, char *path, int len, int use_copy TSRMLS_DC) /* {{{ */ { char *p1, *p2; if (intern->file_name) { efree(intern->file_name); } intern->file_name = use_copy ? estrndup(path, len) : path; intern->file_name_len = len; while(IS_SLASH_AT(intern->file_name, intern->file_name_len-1) && intern->file_name_len > 1) { intern->file_name[intern->file_name_len-1] = 0; intern->file_name_len--; } p1 = strrchr(intern->file_name, '/'); #if defined(PHP_WIN32) || defined(NETWARE) p2 = strrchr(intern->file_name, '\\'); #else p2 = 0; #endif if (p1 || p2) { intern->_path_len = (p1 > p2 ? 
p1 : p2) - intern->file_name; } else { intern->_path_len = 0; } if (intern->_path) { efree(intern->_path); } intern->_path = estrndup(path, intern->_path_len); } /* }}} */ static spl_filesystem_object * spl_filesystem_object_create_info(spl_filesystem_object *source, char *file_path, int file_path_len, int use_copy, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern; zval *arg1; zend_error_handling error_handling; if (!file_path || !file_path_len) { #if defined(PHP_WIN32) zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot create SplFileInfo for empty path"); if (file_path && !use_copy) { efree(file_path); } #else if (file_path && !use_copy) { efree(file_path); } file_path_len = 1; file_path = "/"; #endif return NULL; } zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); ce = ce ? ce : source->info_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; if (ce->constructor->common.scope != spl_ce_SplFileInfo) { MAKE_STD_ZVAL(arg1); ZVAL_STRINGL(arg1, file_path, file_path_len, use_copy); zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1); zval_ptr_dtor(&arg1); } else { spl_filesystem_info_set_filename(intern, file_path, file_path_len, use_copy TSRMLS_CC); } zend_restore_error_handling(&error_handling TSRMLS_CC); return intern; } /* }}} */ static spl_filesystem_object * spl_filesystem_object_create_type(int ht, spl_filesystem_object *source, int type, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern; zend_bool use_include_path = 0; zval *arg1, *arg2; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); switch (source->type) { case SPL_FS_INFO: case SPL_FS_FILE: break; case SPL_FS_DIR: if 
(!source->u.dir.entry.d_name[0]) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Could not open file"); zend_restore_error_handling(&error_handling TSRMLS_CC); return NULL; } } switch (type) { case SPL_FS_INFO: ce = ce ? ce : source->info_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; spl_filesystem_object_get_file_name(source TSRMLS_CC); if (ce->constructor->common.scope != spl_ce_SplFileInfo) { MAKE_STD_ZVAL(arg1); ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1); zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1); zval_ptr_dtor(&arg1); } else { intern->file_name = estrndup(source->file_name, source->file_name_len); intern->file_name_len = source->file_name_len; intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC); intern->_path = estrndup(intern->_path, intern->_path_len); } break; case SPL_FS_FILE: ce = ce ? 
ce : source->file_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; spl_filesystem_object_get_file_name(source TSRMLS_CC); if (ce->constructor->common.scope != spl_ce_SplFileObject) { MAKE_STD_ZVAL(arg1); MAKE_STD_ZVAL(arg2); ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1); ZVAL_STRINGL(arg2, "r", 1, 1); zend_call_method_with_2_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1, arg2); zval_ptr_dtor(&arg1); zval_ptr_dtor(&arg2); } else { intern->file_name = source->file_name; intern->file_name_len = source->file_name_len; intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC); intern->_path = estrndup(intern->_path, intern->_path_len); intern->u.file.open_mode = "r"; intern->u.file.open_mode_len = 1; if (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr", &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); intern->u.file.open_mode = NULL; intern->file_name = NULL; zval_dtor(return_value); Z_TYPE_P(return_value) = IS_NULL; return NULL; } if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); zval_dtor(return_value); Z_TYPE_P(return_value) = IS_NULL; return NULL; } } break; case SPL_FS_DIR: zend_restore_error_handling(&error_handling TSRMLS_CC); zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Operation not supported"); return NULL; } zend_restore_error_handling(&error_handling TSRMLS_CC); return NULL; } /* }}} */ static int spl_filesystem_is_invalid_or_dot(const char * d_name) /* {{{ */ { return d_name[0] == '\0' || spl_filesystem_is_dot(d_name); } /* }}} */ static char *spl_filesystem_object_get_pathname(spl_filesystem_object *intern, int *len TSRMLS_DC) { /* {{{ 
*/ switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: *len = intern->file_name_len; return intern->file_name; case SPL_FS_DIR: if (intern->u.dir.entry.d_name[0]) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); *len = intern->file_name_len; return intern->file_name; } } *len = 0; return NULL; } /* }}} */ static HashTable* spl_filesystem_object_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(obj TSRMLS_CC); HashTable *rv; zval *tmp, zrv; char *pnstr, *path; int pnlen, path_len; char stmp[2]; *is_temp = 1; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } ALLOC_HASHTABLE(rv); ZEND_INIT_SYMTABLE_EX(rv, zend_hash_num_elements(intern->std.properties) + 3, 0); INIT_PZVAL(&zrv); Z_ARRVAL(zrv) = rv; zend_hash_copy(rv, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "pathName", sizeof("pathName")-1, &pnlen TSRMLS_CC); path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, path, path_len, 1); efree(pnstr); if (intern->file_name) { pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "fileName", sizeof("fileName")-1, &pnlen TSRMLS_CC); spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name + path_len + 1, intern->file_name_len - (path_len + 1), 1); } else { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name, intern->file_name_len, 1); } efree(pnstr); } if (intern->type == SPL_FS_DIR) { #ifdef HAVE_GLOB pnstr = spl_gen_private_prop_name(spl_ce_DirectoryIterator, "glob", sizeof("glob")-1, &pnlen TSRMLS_CC); if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->_path, intern->_path_len, 1); } else { 
add_assoc_bool_ex(&zrv, pnstr, pnlen+1, 0); } efree(pnstr); #endif pnstr = spl_gen_private_prop_name(spl_ce_RecursiveDirectoryIterator, "subPathName", sizeof("subPathName")-1, &pnlen TSRMLS_CC); if (intern->u.dir.sub_path) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1); } else { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, "", 0, 1); } efree(pnstr); } if (intern->type == SPL_FS_FILE) { pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "openMode", sizeof("openMode")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.file.open_mode, intern->u.file.open_mode_len, 1); efree(pnstr); stmp[1] = '\0'; stmp[0] = intern->u.file.delimiter; pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "delimiter", sizeof("delimiter")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1); efree(pnstr); stmp[0] = intern->u.file.enclosure; pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "enclosure", sizeof("enclosure")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1); efree(pnstr); } return rv; } /* }}} */ zend_function *spl_filesystem_object_get_method_check(zval **object_ptr, char *method, int method_len, const struct _zend_literal *key TSRMLS_DC) /* {{{ */ { spl_filesystem_object *fsobj = zend_object_store_get_object(*object_ptr TSRMLS_CC); if (fsobj->u.dir.entry.d_name[0] == '\0' && fsobj->orig_path == NULL) { method = "_bad_state_ex"; method_len = sizeof("_bad_state_ex") - 1; key = NULL; } return zend_get_std_object_handlers()->get_method(object_ptr, method, method_len, key TSRMLS_CC); } /* }}} */ #define DIT_CTOR_FLAGS 0x00000001 #define DIT_CTOR_GLOB 0x00000002 void spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAMETERS, long ctor_flags) /* {{{ */ { spl_filesystem_object *intern; char *path; int parsed, len; long flags; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, 
&error_handling TSRMLS_CC); if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_FLAGS)) { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &path, &len, &flags); } else { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_SELF; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len); } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_SKIPDOTS)) { flags |= SPL_FILE_DIR_SKIPDOTS; } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_UNIXPATHS)) { flags |= SPL_FILE_DIR_UNIXPATHS; } if (parsed == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (!len) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Directory name must not be empty."); zend_restore_error_handling(&error_handling TSRMLS_CC); return; } intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (intern->_path) { /* object is alreay initialized */ zend_restore_error_handling(&error_handling TSRMLS_CC); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Directory object is already initialized"); return; } intern->flags = flags; #ifdef HAVE_GLOB if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_GLOB) && strstr(path, "glob://") != path) { spprintf(&path, 0, "glob://%s", path); spl_filesystem_dir_open(intern, path TSRMLS_CC); efree(path); } else #endif { spl_filesystem_dir_open(intern, path TSRMLS_CC); } intern->u.dir.is_recursive = instanceof_function(intern->std.ce, spl_ce_RecursiveDirectoryIterator TSRMLS_CC) ? 1 : 0; zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void DirectoryIterator::__construct(string path) Cronstructs a new dir iterator from a path. 
*/ SPL_METHOD(DirectoryIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto void DirectoryIterator::rewind() Rewind dir back to the start */ SPL_METHOD(DirectoryIterator, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } intern->u.dir.index = 0; if (intern->u.dir.dirp) { php_stream_rewinddir(intern->u.dir.dirp); } spl_filesystem_dir_read(intern TSRMLS_CC); } /* }}} */ /* {{{ proto string DirectoryIterator::key() Return current dir entry */ SPL_METHOD(DirectoryIterator, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (intern->u.dir.dirp) { RETURN_LONG(intern->u.dir.index); } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto DirectoryIterator DirectoryIterator::current() Return this (needed for Iterator interface) */ SPL_METHOD(DirectoryIterator, current) { if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_ZVAL(getThis(), 1, 0); } /* }}} */ /* {{{ proto void DirectoryIterator::next() Move to next entry */ SPL_METHOD(DirectoryIterator, next) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS); if (zend_parse_parameters_none() == FAILURE) { return; } intern->u.dir.index++; do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); if (intern->file_name) { efree(intern->file_name); intern->file_name = NULL; } } /* }}} */ /* {{{ proto void DirectoryIterator::seek(int position) Seek to the given position */ SPL_METHOD(DirectoryIterator, seek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zval *retval = 
NULL; long pos; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &pos) == FAILURE) { return; } if (intern->u.dir.index > pos) { /* we first rewind */ zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_rewind, "rewind", &retval); if (retval) { zval_ptr_dtor(&retval); retval = NULL; } } while (intern->u.dir.index < pos) { int valid = 0; zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_valid, "valid", &retval); if (retval) { valid = zend_is_true(retval); zval_ptr_dtor(&retval); retval = NULL; } if (!valid) { break; } zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_next, "next", &retval); if (retval) { zval_ptr_dtor(&retval); } } } /* }}} */ /* {{{ proto string DirectoryIterator::valid() Check whether dir contains more entries */ SPL_METHOD(DirectoryIterator, valid) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(intern->u.dir.entry.d_name[0] != '\0'); } /* }}} */ /* {{{ proto string SplFileInfo::getPath() Return the path */ SPL_METHOD(SplFileInfo, getPath) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *path; int path_len; if (zend_parse_parameters_none() == FAILURE) { return; } path = spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); RETURN_STRINGL(path, path_len, 1); } /* }}} */ /* {{{ proto string SplFileInfo::getFilename() Return filename only */ SPL_METHOD(SplFileInfo, getFilename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int path_len; if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { RETURN_STRINGL(intern->file_name + path_len + 1, intern->file_name_len - 
(path_len + 1), 1); } else { RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } } /* }}} */ /* {{{ proto string DirectoryIterator::getFilename() Return filename of current dir entry */ SPL_METHOD(DirectoryIterator, getFilename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_STRING(intern->u.dir.entry.d_name, 1); } /* }}} */ /* {{{ proto string SplFileInfo::getExtension() Returns file extension component of path */ SPL_METHOD(SplFileInfo, getExtension) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *fname = NULL; const char *p; size_t flen; int path_len, idx; if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { fname = intern->file_name + path_len + 1; flen = intern->file_name_len - (path_len + 1); } else { fname = intern->file_name; flen = intern->file_name_len; } php_basename(fname, flen, NULL, 0, &fname, &flen TSRMLS_CC); p = zend_memrchr(fname, '.', flen); if (p) { idx = p - fname; RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1); efree(fname); return; } else { if (fname) { efree(fname); } RETURN_EMPTY_STRING(); } } /* }}}*/ /* {{{ proto string DirectoryIterator::getExtension() Returns the file extension component of path */ SPL_METHOD(DirectoryIterator, getExtension) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *fname = NULL; const char *p; size_t flen; int idx; if (zend_parse_parameters_none() == FAILURE) { return; } php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), NULL, 0, &fname, &flen TSRMLS_CC); p = zend_memrchr(fname, '.', flen); if (p) { idx = p - fname; RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1); efree(fname); return; } else { if 
(fname) { efree(fname); } RETURN_EMPTY_STRING(); } } /* }}} */ /* {{{ proto string SplFileInfo::getBasename([string $suffix]) U Returns filename component of path */ SPL_METHOD(SplFileInfo, getBasename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *fname, *suffix = 0; size_t flen; int slen = 0, path_len; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) { return; } spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { fname = intern->file_name + path_len + 1; flen = intern->file_name_len - (path_len + 1); } else { fname = intern->file_name; flen = intern->file_name_len; } php_basename(fname, flen, suffix, slen, &fname, &flen TSRMLS_CC); RETURN_STRINGL(fname, flen, 0); } /* }}}*/ /* {{{ proto string DirectoryIterator::getBasename([string $suffix]) U Returns filename component of current dir entry */ SPL_METHOD(DirectoryIterator, getBasename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *suffix = 0, *fname; int slen = 0; size_t flen; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) { return; } php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), suffix, slen, &fname, &flen TSRMLS_CC); RETURN_STRINGL(fname, flen, 0); } /* }}} */ /* {{{ proto string SplFileInfo::getPathname() Return path and filename */ SPL_METHOD(SplFileInfo, getPathname) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *path; int path_len; if (zend_parse_parameters_none() == FAILURE) { return; } path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC); if (path != NULL) { RETURN_STRINGL(path, path_len, 1); } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto string FilesystemIterator::key() Return getPathname() or getFilename() 
depending on flags */ SPL_METHOD(FilesystemIterator, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_FILE_DIR_KEY(intern, SPL_FILE_DIR_KEY_AS_FILENAME)) { RETURN_STRING(intern->u.dir.entry.d_name, 1); } else { spl_filesystem_object_get_file_name(intern TSRMLS_CC); RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } } /* }}} */ /* {{{ proto string FilesystemIterator::current() Return getFilename(), getFileInfo() or $this depending on flags */ SPL_METHOD(FilesystemIterator, current) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value TSRMLS_CC); } else { RETURN_ZVAL(getThis(), 1, 0); /*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/ } } /* }}} */ /* {{{ proto bool DirectoryIterator::isDot() Returns true if current entry is '.' or '..' */ SPL_METHOD(DirectoryIterator, isDot) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } /* }}} */ /* {{{ proto void SplFileInfo::__construct(string file_name) Cronstructs a new SplFileInfo from a path. */ /* zend_replace_error_handling() is used to throw exceptions in case the constructor fails. Here we use this to ensure the object has a valid directory resource. 
When the constructor gets called the object is already created by the engine, so we must only call 'additional' initializations. */ SPL_METHOD(SplFileInfo, __construct) { spl_filesystem_object *intern; char *path; int len; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_info_set_filename(intern, path, len, 1 TSRMLS_CC); zend_restore_error_handling(&error_handling TSRMLS_CC); /* intern->type = SPL_FS_INFO; already set */ } /* }}} */ /* {{{ FileInfoFunction */ #define FileInfoFunction(func_name, func_num) \ SPL_METHOD(SplFileInfo, func_name) \ { \ spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \ zend_error_handling error_handling; \ if (zend_parse_parameters_none() == FAILURE) { \ return; \ } \ \ zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);\ spl_filesystem_object_get_file_name(intern TSRMLS_CC); \ php_stat(intern->file_name, intern->file_name_len, func_num, return_value TSRMLS_CC); \ zend_restore_error_handling(&error_handling TSRMLS_CC); \ } /* }}} */ /* {{{ proto int SplFileInfo::getPerms() Get file permissions */ FileInfoFunction(getPerms, FS_PERMS) /* }}} */ /* {{{ proto int SplFileInfo::getInode() Get file inode */ FileInfoFunction(getInode, FS_INODE) /* }}} */ /* {{{ proto int SplFileInfo::getSize() Get file size */ FileInfoFunction(getSize, FS_SIZE) /* }}} */ /* {{{ proto int SplFileInfo::getOwner() Get file owner */ FileInfoFunction(getOwner, FS_OWNER) /* }}} */ /* {{{ proto int SplFileInfo::getGroup() Get file group */ FileInfoFunction(getGroup, FS_GROUP) /* }}} */ /* {{{ proto int SplFileInfo::getATime() Get last access time of 
file */ FileInfoFunction(getATime, FS_ATIME) /* }}} */ /* {{{ proto int SplFileInfo::getMTime() Get last modification time of file */ FileInfoFunction(getMTime, FS_MTIME) /* }}} */ /* {{{ proto int SplFileInfo::getCTime() Get inode modification time of file */ FileInfoFunction(getCTime, FS_CTIME) /* }}} */ /* {{{ proto string SplFileInfo::getType() Get file type */ FileInfoFunction(getType, FS_TYPE) /* }}} */ /* {{{ proto bool SplFileInfo::isWritable() Returns true if file can be written */ FileInfoFunction(isWritable, FS_IS_W) /* }}} */ /* {{{ proto bool SplFileInfo::isReadable() Returns true if file can be read */ FileInfoFunction(isReadable, FS_IS_R) /* }}} */ /* {{{ proto bool SplFileInfo::isExecutable() Returns true if file is executable */ FileInfoFunction(isExecutable, FS_IS_X) /* }}} */ /* {{{ proto bool SplFileInfo::isFile() Returns true if file is a regular file */ FileInfoFunction(isFile, FS_IS_FILE) /* }}} */ /* {{{ proto bool SplFileInfo::isDir() Returns true if file is directory */ FileInfoFunction(isDir, FS_IS_DIR) /* }}} */ /* {{{ proto bool SplFileInfo::isLink() Returns true if file is symbolic link */ FileInfoFunction(isLink, FS_IS_LINK) /* }}} */ /* {{{ proto string SplFileInfo::getLinkTarget() U Return the target of a symbolic link */ SPL_METHOD(SplFileInfo, getLinkTarget) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int ret; char buff[MAXPATHLEN]; zend_error_handling error_handling; if (zend_parse_parameters_none() == FAILURE) { return; } zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); #if defined(PHP_WIN32) || HAVE_SYMLINK if (intern->file_name == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty filename"); RETURN_FALSE; } else if (!IS_ABSOLUTE_PATH(intern->file_name, intern->file_name_len)) { char expanded_path[MAXPATHLEN]; if (!expand_filepath_with_mode(intern->file_name, expanded_path, NULL, 0, CWD_EXPAND TSRMLS_CC)) { 
php_error_docref(NULL TSRMLS_CC, E_WARNING, "No such file or directory"); RETURN_FALSE; } ret = php_sys_readlink(expanded_path, buff, MAXPATHLEN - 1); } else { ret = php_sys_readlink(intern->file_name, buff, MAXPATHLEN-1); } #else ret = -1; /* always fail if not implemented */ #endif if (ret == -1) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Unable to read link %s, error: %s", intern->file_name, strerror(errno)); RETVAL_FALSE; } else { /* Append NULL to the end of the string */ buff[ret] = '\0'; RETVAL_STRINGL(buff, ret, 1); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ #if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS) /* {{{ proto string SplFileInfo::getRealPath() Return the resolved path */ SPL_METHOD(SplFileInfo, getRealPath) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char buff[MAXPATHLEN]; char *filename; zend_error_handling error_handling; if (zend_parse_parameters_none() == FAILURE) { return; } zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (intern->type == SPL_FS_DIR && !intern->file_name && intern->u.dir.entry.d_name[0]) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); } if (intern->orig_path) { filename = intern->orig_path; } else { filename = intern->file_name; } if (filename && VCWD_REALPATH(filename, buff)) { #ifdef ZTS if (VCWD_ACCESS(buff, F_OK)) { RETVAL_FALSE; } else #endif RETVAL_STRING(buff, 1); } else { RETVAL_FALSE; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ #endif /* {{{ proto SplFileObject SplFileInfo::openFile([string mode = 'r' [, bool use_include_path [, resource context]]]) Open the current file */ SPL_METHOD(SplFileInfo, openFile) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_object_create_type(ht, intern, SPL_FS_FILE, NULL, return_value 
TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileInfo::setFileClass([string class_name]) Class to use in openFile() */ SPL_METHOD(SplFileInfo, setFileClass) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = spl_ce_SplFileObject; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { intern->file_class = ce; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileInfo::setInfoClass([string class_name]) Class to use in getFileInfo(), getPathInfo() */ SPL_METHOD(SplFileInfo, setInfoClass) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = spl_ce_SplFileInfo; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { intern->info_class = ce; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto SplFileInfo SplFileInfo::getFileInfo([string $class_name]) Get/copy file info */ SPL_METHOD(SplFileInfo, getFileInfo) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = intern->info_class; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { spl_filesystem_object_create_type(ht, intern, SPL_FS_INFO, ce, return_value TSRMLS_CC); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto SplFileInfo SplFileInfo::getPathInfo([string $class_name]) Get/copy file info */ 
SPL_METHOD(SplFileInfo, getPathInfo) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = intern->info_class; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { int path_len; char *path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC); if (path) { char *dpath = estrndup(path, path_len); path_len = php_dirname(dpath, path_len); spl_filesystem_object_create_info(intern, dpath, path_len, 1, ce, return_value TSRMLS_CC); efree(dpath); } } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ */ SPL_METHOD(SplFileInfo, _bad_state_ex) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "The parent constructor was not called: the object is in an " "invalid state "); } /* }}} */ /* {{{ proto void FilesystemIterator::__construct(string path [, int flags]) Cronstructs a new dir iterator from a path. 
*/ SPL_METHOD(FilesystemIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS | SPL_FILE_DIR_SKIPDOTS); } /* }}} */ /* {{{ proto void FilesystemIterator::rewind() Rewind dir back to the start */ SPL_METHOD(FilesystemIterator, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS); if (zend_parse_parameters_none() == FAILURE) { return; } intern->u.dir.index = 0; if (intern->u.dir.dirp) { php_stream_rewinddir(intern->u.dir.dirp); } do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } /* }}} */ /* {{{ proto int FilesystemIterator::getFlags() Get handling flags */ SPL_METHOD(FilesystemIterator, getFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->flags & (SPL_FILE_DIR_KEY_MODE_MASK | SPL_FILE_DIR_CURRENT_MODE_MASK | SPL_FILE_DIR_OTHERS_MASK)); } /* }}} */ /* {{{ proto void FilesystemIterator::setFlags(long $flags) Set handling flags */ SPL_METHOD(FilesystemIterator, setFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long flags; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &flags) == FAILURE) { return; } intern->flags &= ~(SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK); intern->flags |= ((SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK) & flags); } /* }}} */ /* {{{ proto bool RecursiveDirectoryIterator::hasChildren([bool $allow_links = false]) Returns whether current entry is a directory and not '.' or '..' 
*/ SPL_METHOD(RecursiveDirectoryIterator, hasChildren) { zend_bool allow_links = 0; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &allow_links) == FAILURE) { return; } if (spl_filesystem_is_invalid_or_dot(intern->u.dir.entry.d_name)) { RETURN_FALSE; } else { spl_filesystem_object_get_file_name(intern TSRMLS_CC); if (!allow_links && !(intern->flags & SPL_FILE_DIR_FOLLOW_SYMLINKS)) { php_stat(intern->file_name, intern->file_name_len, FS_IS_LINK, return_value TSRMLS_CC); if (zend_is_true(return_value)) { RETURN_FALSE; } } php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, return_value TSRMLS_CC); } } /* }}} */ /* {{{ proto RecursiveDirectoryIterator DirectoryIterator::getChildren() Returns an iterator for the current entry if it is a directory */ SPL_METHOD(RecursiveDirectoryIterator, getChildren) { zval *zpath, *zflags; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_object *subdir; char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? 
'/' : DEFAULT_SLASH; if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_object_get_file_name(intern TSRMLS_CC); MAKE_STD_ZVAL(zflags); MAKE_STD_ZVAL(zpath); ZVAL_LONG(zflags, intern->flags); ZVAL_STRINGL(zpath, intern->file_name, intern->file_name_len, 1); spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, zpath, zflags TSRMLS_CC); zval_ptr_dtor(&zpath); zval_ptr_dtor(&zflags); subdir = (spl_filesystem_object*)zend_object_store_get_object(return_value TSRMLS_CC); if (subdir) { if (intern->u.dir.sub_path && intern->u.dir.sub_path[0]) { subdir->u.dir.sub_path_len = spprintf(&subdir->u.dir.sub_path, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name); } else { subdir->u.dir.sub_path_len = strlen(intern->u.dir.entry.d_name); subdir->u.dir.sub_path = estrndup(intern->u.dir.entry.d_name, subdir->u.dir.sub_path_len); } subdir->info_class = intern->info_class; subdir->file_class = intern->file_class; subdir->oth = intern->oth; } } /* }}} */ /* {{{ proto void RecursiveDirectoryIterator::getSubPath() Get sub path */ SPL_METHOD(RecursiveDirectoryIterator, getSubPath) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (intern->u.dir.sub_path) { RETURN_STRINGL(intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1); } else { RETURN_STRINGL("", 0, 1); } } /* }}} */ /* {{{ proto void RecursiveDirectoryIterator::getSubPathname() Get sub path and file name */ SPL_METHOD(RecursiveDirectoryIterator, getSubPathname) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *sub_name; int len; char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? 
'/' : DEFAULT_SLASH; if (zend_parse_parameters_none() == FAILURE) { return; } if (intern->u.dir.sub_path) { len = spprintf(&sub_name, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name); RETURN_STRINGL(sub_name, len, 0); } else { RETURN_STRING(intern->u.dir.entry.d_name, 1); } } /* }}} */ /* {{{ proto int RecursiveDirectoryIterator::__construct(string path [, int flags]) Cronstructs a new dir iterator from a path. */ SPL_METHOD(RecursiveDirectoryIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS); } /* }}} */ #ifdef HAVE_GLOB /* {{{ proto int GlobIterator::__construct(string path [, int flags]) Cronstructs a new dir iterator from a glob expression (no glob:// needed). */ SPL_METHOD(GlobIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS|DIT_CTOR_GLOB); } /* }}} */ /* {{{ proto int GlobIterator::cont() Return the number of directories and files found by globbing */ SPL_METHOD(GlobIterator, count) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { RETURN_LONG(php_glob_stream_get_count(intern->u.dir.dirp, NULL)); } else { /* should not happen */ php_error_docref(NULL TSRMLS_CC, E_ERROR, "GlobIterator lost glob state"); } } /* }}} */ #endif /* HAVE_GLOB */ /* {{{ forward declarations to the iterator handlers */ static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC); static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC); static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC); static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC); static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC); static void 
spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC); /* iterator handler table */ zend_object_iterator_funcs spl_filesystem_dir_it_funcs = { spl_filesystem_dir_it_dtor, spl_filesystem_dir_it_valid, spl_filesystem_dir_it_current_data, spl_filesystem_dir_it_current_key, spl_filesystem_dir_it_move_forward, spl_filesystem_dir_it_rewind }; /* }}} */ /* {{{ spl_ce_dir_get_iterator */ zend_object_iterator *spl_filesystem_dir_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) { spl_filesystem_iterator *iterator; spl_filesystem_object *dir_object; if (by_ref) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC); iterator = spl_filesystem_object_to_iterator(dir_object); /* initialize iterator if it wasn't gotten before */ if (iterator->intern.data == NULL) { iterator->intern.data = object; iterator->intern.funcs = &spl_filesystem_dir_it_funcs; /* ->current must be initialized; rewind doesn't set it and valid * doesn't check whether it's set */ iterator->current = object; } zval_add_ref(&object); return (zend_object_iterator*)iterator; } /* }}} */ /* {{{ spl_filesystem_dir_it_dtor */ static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; if (iterator->intern.data) { zval *object = iterator->intern.data; zval_ptr_dtor(&object); } /* Otherwise we were called from the owning object free storage handler as * it sets * iterator->intern.data to NULL. * We don't even need to destroy iterator->current as we didn't add a * reference to it in move_forward or get_iterator */ } /* }}} */ /* {{{ spl_filesystem_dir_it_valid */ static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); return object->u.dir.entry.d_name[0] != '\0' ? 
SUCCESS : FAILURE; } /* }}} */ /* {{{ spl_filesystem_dir_it_current_data */ static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; *data = &iterator->current; } /* }}} */ /* {{{ spl_filesystem_dir_it_current_key */ static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); ZVAL_LONG(key, object->u.dir.index); } /* }}} */ /* {{{ spl_filesystem_dir_it_move_forward */ static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); object->u.dir.index++; spl_filesystem_dir_read(object TSRMLS_CC); if (object->file_name) { efree(object->file_name); object->file_name = NULL; } } /* }}} */ /* {{{ spl_filesystem_dir_it_rewind */ static void spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); object->u.dir.index = 0; if (object->u.dir.dirp) { php_stream_rewinddir(object->u.dir.dirp); } spl_filesystem_dir_read(object TSRMLS_CC); } /* }}} */ /* {{{ spl_filesystem_tree_it_dtor */ static void spl_filesystem_tree_it_dtor(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; if (iterator->intern.data) { zval *object = iterator->intern.data; zval_ptr_dtor(&object); } else { if (iterator->current) { zval_ptr_dtor(&iterator->current); } } } /* }}} */ /* {{{ spl_filesystem_tree_it_current_data */ static void spl_filesystem_tree_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator); if 
(SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) { if (!iterator->current) { ALLOC_INIT_ZVAL(iterator->current); spl_filesystem_object_get_file_name(object TSRMLS_CC); ZVAL_STRINGL(iterator->current, object->file_name, object->file_name_len, 1); } *data = &iterator->current; } else if (SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) { if (!iterator->current) { ALLOC_INIT_ZVAL(iterator->current); spl_filesystem_object_get_file_name(object TSRMLS_CC); spl_filesystem_object_create_type(0, object, SPL_FS_INFO, NULL, iterator->current TSRMLS_CC); } *data = &iterator->current; } else { *data = (zval**)&iterator->intern.data; } } /* }}} */ /* {{{ spl_filesystem_tree_it_current_key */ static void spl_filesystem_tree_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); if (SPL_FILE_DIR_KEY(object, SPL_FILE_DIR_KEY_AS_FILENAME)) { ZVAL_STRING(key, object->u.dir.entry.d_name, 1); } else { spl_filesystem_object_get_file_name(object TSRMLS_CC); ZVAL_STRINGL(key, object->file_name, object->file_name_len, 1); } } /* }}} */ /* {{{ spl_filesystem_tree_it_move_forward */ static void spl_filesystem_tree_it_move_forward(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator); object->u.dir.index++; do { spl_filesystem_dir_read(object TSRMLS_CC); } while (spl_filesystem_is_dot(object->u.dir.entry.d_name)); if (object->file_name) { efree(object->file_name); object->file_name = NULL; } if (iterator->current) { zval_ptr_dtor(&iterator->current); iterator->current = NULL; } } /* }}} */ /* {{{ spl_filesystem_tree_it_rewind */ static void spl_filesystem_tree_it_rewind(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; spl_filesystem_object *object = 
spl_filesystem_iterator_to_object(iterator); object->u.dir.index = 0; if (object->u.dir.dirp) { php_stream_rewinddir(object->u.dir.dirp); } do { spl_filesystem_dir_read(object TSRMLS_CC); } while (spl_filesystem_is_dot(object->u.dir.entry.d_name)); if (iterator->current) { zval_ptr_dtor(&iterator->current); iterator->current = NULL; } } /* }}} */ /* {{{ iterator handler table */ zend_object_iterator_funcs spl_filesystem_tree_it_funcs = { spl_filesystem_tree_it_dtor, spl_filesystem_dir_it_valid, spl_filesystem_tree_it_current_data, spl_filesystem_tree_it_current_key, spl_filesystem_tree_it_move_forward, spl_filesystem_tree_it_rewind }; /* }}} */ /* {{{ spl_ce_dir_get_iterator */ zend_object_iterator *spl_filesystem_tree_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) { spl_filesystem_iterator *iterator; spl_filesystem_object *dir_object; if (by_ref) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC); iterator = spl_filesystem_object_to_iterator(dir_object); /* initialize iterator if wasn't gotten before */ if (iterator->intern.data == NULL) { iterator->intern.data = object; iterator->intern.funcs = &spl_filesystem_tree_it_funcs; } zval_add_ref(&object); return (zend_object_iterator*)iterator; } /* }}} */ /* {{{ spl_filesystem_object_cast */ static int spl_filesystem_object_cast(zval *readobj, zval *writeobj, int type TSRMLS_DC) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(readobj TSRMLS_CC); if (type == IS_STRING) { if (Z_OBJCE_P(readobj)->__tostring) { return std_object_handlers.cast_object(readobj, writeobj, type TSRMLS_CC); } switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: if (readobj == writeobj) { zval retval; zval *retval_ptr = &retval; ZVAL_STRINGL(retval_ptr, intern->file_name, intern->file_name_len, 1); zval_dtor(readobj); ZVAL_ZVAL(writeobj, retval_ptr, 0, 0); } else { 
ZVAL_STRINGL(writeobj, intern->file_name, intern->file_name_len, 1); } return SUCCESS; case SPL_FS_DIR: if (readobj == writeobj) { zval retval; zval *retval_ptr = &retval; ZVAL_STRING(retval_ptr, intern->u.dir.entry.d_name, 1); zval_dtor(readobj); ZVAL_ZVAL(writeobj, retval_ptr, 0, 0); } else { ZVAL_STRING(writeobj, intern->u.dir.entry.d_name, 1); } return SUCCESS; } } else if (type == IS_BOOL) { ZVAL_BOOL(writeobj, 1); return SUCCESS; } if (readobj == writeobj) { zval_dtor(readobj); } ZVAL_NULL(writeobj); return FAILURE; } /* }}} */ /* {{{ declare method parameters */ /* supply a name and default to call by parameter */ ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0) ZEND_ARG_INFO(0, file_name) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_info_openFile, 0, 0, 0) ZEND_ARG_INFO(0, open_mode) ZEND_ARG_INFO(0, use_include_path) ZEND_ARG_INFO(0, context) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_info_optinalFileClass, 0, 0, 0) ZEND_ARG_INFO(0, class_name) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_optinalSuffix, 0, 0, 0) ZEND_ARG_INFO(0, suffix) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_splfileinfo_void, 0) ZEND_END_ARG_INFO() /* the method table */ /* each method can have its own parameters and visibility */ static const zend_function_entry spl_SplFileInfo_functions[] = { SPL_ME(SplFileInfo, __construct, arginfo_info___construct, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPerms, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getInode, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getSize, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) 
/* Tail of the SplFileInfo method table: stat-derived accessors, type
 * predicates, link handling and the file-/path-info factory helpers. */
	SPL_ME(SplFileInfo, getOwner, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getGroup, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getATime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getMTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getCTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getType, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isWritable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isReadable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isExecutable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isFile, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isDir, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, isLink, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getLinkTarget, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
/* getRealPath is only registered where a usable realpath() exists (or under ZTS) */
#if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS)
	SPL_ME(SplFileInfo, getRealPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
#endif
	SPL_ME(SplFileInfo, getFileInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, getPathInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, openFile, arginfo_info_openFile, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, setFileClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, setInfoClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileInfo, _bad_state_ex, NULL, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	SPL_MA(SplFileInfo, __toString, SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};

ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0)
	ZEND_ARG_INFO(0, path)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0)
	ZEND_ARG_INFO(0, position)
ZEND_END_ARG_INFO();

/* the method table */
/* each method can have its own parameters and visibility */
static const zend_function_entry
spl_DirectoryIterator_functions[] = {
	SPL_ME(DirectoryIterator, __construct, arginfo_dir___construct, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, isDot, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, valid, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(DirectoryIterator, seek, arginfo_dir_it_seek, ZEND_ACC_PUBLIC)
	SPL_MA(DirectoryIterator, __toString, DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};

ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1)
	ZEND_ARG_INFO(0, path)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_hasChildren, 0, 0, 0)
	ZEND_ARG_INFO(0, allow_links)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_setFlags, 0, 0, 0)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

static const zend_function_entry spl_FilesystemIterator_functions[] = {
	SPL_ME(FilesystemIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC)
	SPL_ME(FilesystemIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	/* next is deliberately taken from DirectoryIterator (not overridden here) */
	SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(FilesystemIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(FilesystemIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(FilesystemIterator, getFlags, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(FilesystemIterator, setFlags, arginfo_r_dir_setFlags, ZEND_ACC_PUBLIC)
	PHP_FE_END
};

static const
zend_function_entry spl_RecursiveDirectoryIterator_functions[] = {
	SPL_ME(RecursiveDirectoryIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC)
	SPL_ME(RecursiveDirectoryIterator, hasChildren, arginfo_r_dir_hasChildren, ZEND_ACC_PUBLIC)
	SPL_ME(RecursiveDirectoryIterator, getChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(RecursiveDirectoryIterator, getSubPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(RecursiveDirectoryIterator, getSubPathname,arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};

#ifdef HAVE_GLOB
static const zend_function_entry spl_GlobIterator_functions[] = {
	SPL_ME(GlobIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC)
	SPL_ME(GlobIterator, count, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};
#endif
/* }}} */

/* Read the next line of the stream into intern->u.file.current_line.
 * Honors setMaxLineLen() (bounded read) and the DROP_NEW_LINE flag.
 * Returns FAILURE only when the stream is already at EOF; a short/NULL
 * read is mapped to an empty current line. */
static int spl_filesystem_file_read(spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */
{
	char *buf;
	size_t line_len = 0;
	/* Only advance the line counter if a line (or CSV zval) had already been
	 * read; the very first read keeps current_line_num at 0. */
	long line_add = (intern->u.file.current_line || intern->u.file.current_zval) ? 1 : 0;

	spl_filesystem_file_free_line(intern TSRMLS_CC);

	if (php_stream_eof(intern->u.file.stream)) {
		if (!silent) {
			zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name);
		}
		return FAILURE;
	}

	if (intern->u.file.max_line_len > 0) {
		/* bounded read into a caller-sized buffer (+1 for the terminator) */
		buf = safe_emalloc((intern->u.file.max_line_len + 1), sizeof(char), 0);
		if (php_stream_get_line(intern->u.file.stream, buf, intern->u.file.max_line_len + 1, &line_len) == NULL) {
			efree(buf);
			buf = NULL;
		} else {
			buf[line_len] = '\0';
		}
	} else {
		/* unbounded: the stream layer allocates the buffer */
		buf = php_stream_get_line(intern->u.file.stream, NULL, 0, &line_len);
	}

	if (!buf) {
		intern->u.file.current_line = estrdup("");
		intern->u.file.current_line_len = 0;
	} else {
		if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_DROP_NEW_LINE)) {
			/* trim at the first CR/LF */
			line_len = strcspn(buf, "\r\n");
			buf[line_len] = '\0';
		}

		intern->u.file.current_line = buf;
		intern->u.file.current_line_len = line_len;
	}
	intern->u.file.current_line_num += line_add;

	return SUCCESS;
} /* }}} */

/* Call the PHP-level function func_ptr with the wrapped stream resource as
 * its first argument (and optional arg2 as second), forwarding the current
 * method's own pass_num_args arguments after those. */
static int spl_filesystem_file_call(spl_filesystem_object *intern, zend_function *func_ptr, int pass_num_args, zval *return_value, zval *arg2 TSRMLS_DC) /* {{{ */
{
	zend_fcall_info fci;
	zend_fcall_info_cache fcic;
	zval z_fname;
	zval * zresource_ptr = &intern->u.file.zresource, *retval;
	int result;
	int num_args = pass_num_args + (arg2 ? 2 : 1);

	zval ***params = (zval***)safe_emalloc(num_args, sizeof(zval**), 0);

	params[0] = &zresource_ptr;

	if (arg2) {
		params[1] = &arg2;
	}

	zend_get_parameters_array_ex(pass_num_args, params+(arg2 ?
 2 : 1));

	ZVAL_STRING(&z_fname, func_ptr->common.function_name, 0);

	fci.size = sizeof(fci);
	fci.function_table = EG(function_table);
	fci.object_ptr = NULL;
	fci.function_name = &z_fname;
	fci.retval_ptr_ptr = &retval;
	fci.param_count = num_args;
	fci.params = params;
	fci.no_separation = 1;
	fci.symbol_table = NULL;

	fcic.initialized = 1;
	fcic.function_handler = func_ptr;
	fcic.calling_scope = NULL;
	fcic.called_scope = NULL;
	fcic.object_ptr = NULL;

	result = zend_call_function(&fci, &fcic TSRMLS_CC);

	if (result == FAILURE) {
		RETVAL_FALSE;
	} else {
		ZVAL_ZVAL(return_value, retval, 1, 1);
	}

	efree(params);
	return result;
} /* }}} */

/* Resolve the builtin function func_name and forward this method's arguments
 * to it through spl_filesystem_file_call(); throws RuntimeException if the
 * function cannot be found. */
#define FileFunctionCall(func_name, pass_num_args, arg2) /* {{{ */ \
{ \
	zend_function *func_ptr; \
	int ret; \
	ret = zend_hash_find(EG(function_table), #func_name, sizeof(#func_name), (void **) &func_ptr); \
	if (ret != SUCCESS) { \
		zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Internal error, function '%s' not found. Please report", #func_name); \
		return; \
	} \
	spl_filesystem_file_call(intern, func_ptr, pass_num_args, return_value, arg2 TSRMLS_CC); \
} /* }}} */

/* Read the next line (skipping empty ones when SKIP_EMPTY is set) and parse
 * it as CSV into intern->u.file.current_zval; if return_value is non-NULL the
 * resulting array is copied there as well. */
static int spl_filesystem_file_read_csv(spl_filesystem_object *intern, char delimiter, char enclosure, char escape, zval *return_value TSRMLS_DC) /* {{{ */
{
	int ret = SUCCESS;

	do {
		ret = spl_filesystem_file_read(intern, 1 TSRMLS_CC);
	} while (ret == SUCCESS && !intern->u.file.current_line_len && SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY));

	if (ret == SUCCESS) {
		size_t buf_len = intern->u.file.current_line_len;
		/* NOTE(review): buf is not freed here — presumably php_fgetcsv takes
		 * ownership of it; confirm against php_fgetcsv's contract. */
		char *buf = estrndup(intern->u.file.current_line, buf_len);

		if (intern->u.file.current_zval) {
			zval_ptr_dtor(&intern->u.file.current_zval);
		}
		ALLOC_INIT_ZVAL(intern->u.file.current_zval);

		php_fgetcsv(intern->u.file.stream, delimiter, enclosure, escape, buf_len, buf, intern->u.file.current_zval TSRMLS_CC);
		if (return_value) {
			if (Z_TYPE_P(return_value) != IS_NULL) {
				zval_dtor(return_value);
				ZVAL_NULL(return_value);
			}
			ZVAL_ZVAL(return_value, intern->u.file.current_zval, 1, 0);
		}
	}
	return ret;
} /* }}} */

/* Fetch the next "line": via CSV parsing, via an overridden getCurrentLine()
 * in a subclass, or directly from the stream. */
static int spl_filesystem_file_read_line_ex(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */
{
	zval *retval = NULL;

	/* 1) use fgetcsv? 2) overloaded call the function, 3) do it directly */
	if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || intern->u.file.func_getCurr->common.scope != spl_ce_SplFileObject) {
		if (php_stream_eof(intern->u.file.stream)) {
			if (!silent) {
				zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name);
			}
			return FAILURE;
		}
		if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV)) {
			return spl_filesystem_file_read_csv(intern, intern->u.file.delimiter, intern->u.file.enclosure, intern->u.file.escape, NULL TSRMLS_CC);
		} else {
			/* a subclass overrides getCurrentLine(): dispatch to it */
			zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.file.func_getCurr, "getCurrentLine", &retval);
		}
		if (retval) {
			if (intern->u.file.current_line || intern->u.file.current_zval) {
				intern->u.file.current_line_num++;
			}
			spl_filesystem_file_free_line(intern TSRMLS_CC);
			if (Z_TYPE_P(retval) == IS_STRING) {
				intern->u.file.current_line = estrndup(Z_STRVAL_P(retval), Z_STRLEN_P(retval));
				intern->u.file.current_line_len = Z_STRLEN_P(retval);
			} else {
				/* non-string result is kept as a zval */
				MAKE_STD_ZVAL(intern->u.file.current_zval);
				ZVAL_ZVAL(intern->u.file.current_zval, retval, 1, 0);
			}
			zval_ptr_dtor(&retval);
			return SUCCESS;
		} else {
			return FAILURE;
		}
	} else {
		return spl_filesystem_file_read(intern, silent TSRMLS_CC);
	}
} /* }}} */

/* Whether the currently buffered line counts as "empty" for SKIP_EMPTY. */
static int spl_filesystem_file_is_empty_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */
{
	if (intern->u.file.current_line) {
		return intern->u.file.current_line_len == 0;
	} else if (intern->u.file.current_zval) {
		switch(Z_TYPE_P(intern->u.file.current_zval)) {
		case IS_STRING:
			return Z_STRLEN_P(intern->u.file.current_zval) == 0;
		case IS_ARRAY:
			/* a CSV row containing only one empty string is an empty line */
			if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) && zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 1) {
				zval ** first = Z_ARRVAL_P(intern->u.file.current_zval)->pListHead->pData;

				return Z_TYPE_PP(first) == IS_STRING && Z_STRLEN_PP(first) == 0;
			}
			return zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 0;
		case IS_NULL:
			return 1;
		default:
			return 0;
		}
	} else {
		return 1;
	}
} /* }}} */

/* Read the next line, skipping empty ones while SKIP_EMPTY is set. */
static int spl_filesystem_file_read_line(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */
{
	int ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC);

	while (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY) && ret == SUCCESS && spl_filesystem_file_is_empty_line(intern TSRMLS_CC)) {
		spl_filesystem_file_free_line(intern TSRMLS_CC);
		ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC);
	}

	return ret;
} /* }}} */

/* Rewind the stream, reset the buffered line state, and prefetch the first
 * line when READ_AHEAD is enabled. */
static void spl_filesystem_file_rewind(zval * this_ptr, spl_filesystem_object *intern TSRMLS_DC) /* {{{ */
{
	if (-1 == php_stream_rewind(intern->u.file.stream)) {
		zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot rewind file %s", intern->file_name);
	} else {
		spl_filesystem_file_free_line(intern TSRMLS_CC);
		intern->u.file.current_line_num = 0;
	}
	if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) {
		spl_filesystem_file_read_line(this_ptr, intern, 1 TSRMLS_CC);
	}
} /* }}} */

/* {{{ proto void SplFileObject::__construct(string filename [, string mode = 'r' [, bool use_include_path [, resource context]]]])
   Construct a new file object */
SPL_METHOD(SplFileObject, __construct)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_bool use_include_path = 0;
	char *p1, *p2;
	char *tmp_path;
	int tmp_path_len;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

	intern->u.file.open_mode = NULL;
	intern->u.file.open_mode_len = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS()
TSRMLS_CC, "p|sbr!", &intern->file_name, &intern->file_name_len, &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) { intern->u.file.open_mode = NULL; intern->file_name = NULL; zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (intern->u.file.open_mode == NULL) { intern->u.file.open_mode = "r"; intern->u.file.open_mode_len = 1; } if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == SUCCESS) { tmp_path_len = strlen(intern->u.file.stream->orig_path); if (tmp_path_len > 1 && IS_SLASH_AT(intern->u.file.stream->orig_path, tmp_path_len-1)) { tmp_path_len--; } tmp_path = estrndup(intern->u.file.stream->orig_path, tmp_path_len); p1 = strrchr(tmp_path, '/'); #if defined(PHP_WIN32) || defined(NETWARE) p2 = strrchr(tmp_path, '\\'); #else p2 = 0; #endif if (p1 || p2) { intern->_path_len = (p1 > p2 ? p1 : p2) - tmp_path; } else { intern->_path_len = 0; } efree(tmp_path); intern->_path = estrndup(intern->u.file.stream->orig_path, intern->_path_len); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplTempFileObject::__construct([int max_memory]) Construct a new temp file object */ SPL_METHOD(SplTempFileObject, __construct) { long max_memory = PHP_STREAM_MAX_MEM; char tmp_fname[48]; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &max_memory) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (max_memory < 0) { intern->file_name = "php://memory"; intern->file_name_len = 12; } else if (ZEND_NUM_ARGS()) { intern->file_name_len = slprintf(tmp_fname, sizeof(tmp_fname), "php://temp/maxmemory:%ld", max_memory); intern->file_name = tmp_fname; } else { intern->file_name = 
"php://temp"; intern->file_name_len = 10; } intern->u.file.open_mode = "wb"; intern->u.file.open_mode_len = 1; intern->u.file.zcontext = NULL; if (spl_filesystem_file_open(intern, 0, 0 TSRMLS_CC) == SUCCESS) { intern->_path_len = 0; intern->_path = estrndup("", 0); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::rewind() Rewind the file and read the first line */ SPL_METHOD(SplFileObject, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::eof() Return whether end of file is reached */ SPL_METHOD(SplFileObject, eof) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(php_stream_eof(intern->u.file.stream)); } /* }}} */ /* {{{ proto void SplFileObject::valid() Return !eof() */ SPL_METHOD(SplFileObject, valid) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { RETURN_BOOL(intern->u.file.current_line || intern->u.file.current_zval); } else { RETVAL_BOOL(!php_stream_eof(intern->u.file.stream)); } } /* }}} */ /* {{{ proto string SplFileObject::fgets() Rturn next line from file */ SPL_METHOD(SplFileObject, fgets) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_filesystem_file_read(intern, 0 TSRMLS_CC) == FAILURE) { RETURN_FALSE; } RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } /* }}} */ /* {{{ proto string SplFileObject::current() Return 
 current line from file */
SPL_METHOD(SplFileObject, current)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* lazily fetch the line if nothing is buffered yet */
	if (!intern->u.file.current_line && !intern->u.file.current_zval) {
		spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC);
	}
	if (intern->u.file.current_line && (!SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || !intern->u.file.current_zval)) {
		RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1);
	} else if (intern->u.file.current_zval) {
		/* CSV mode (or overloaded getCurrentLine) yields a zval */
		RETURN_ZVAL(intern->u.file.current_zval, 1, 0);
	}
	RETURN_FALSE;
} /* }}} */

/* {{{ proto int SplFileObject::key()
   Return line number */
SPL_METHOD(SplFileObject, key)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* Do not read the next line to support correct counting with fgetc()
	if (!intern->current_line) {
		spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC);
	} */
	RETURN_LONG(intern->u.file.current_line_num);
} /* }}} */

/* {{{ proto void SplFileObject::next()
   Read next line */
SPL_METHOD(SplFileObject, next)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_file_free_line(intern TSRMLS_CC);
	if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) {
		spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC);
	}
	intern->u.file.current_line_num++;
} /* }}} */

/* {{{ proto void SplFileObject::setFlags(int flags)
   Set file handling flags */
SPL_METHOD(SplFileObject, setFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &intern->flags) == FAILURE) {
		return;
	}
} /* }}} */

/* {{{ proto int SplFileObject::getFlags()
   Get file handling flags */
SPL_METHOD(SplFileObject, getFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_LONG(intern->flags & SPL_FILE_OBJECT_MASK);
} /* }}} */

/* {{{ proto void SplFileObject::setMaxLineLen(int max_len)
   Set maximum line length */
SPL_METHOD(SplFileObject, setMaxLineLen)
{
	long max_len;

	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &max_len) == FAILURE) {
		return;
	}

	if (max_len < 0) {
		zend_throw_exception_ex(spl_ce_DomainException, 0 TSRMLS_CC, "Maximum line length must be greater than or equal zero");
		return;
	}

	intern->u.file.max_line_len = max_len;
} /* }}} */

/* {{{ proto int SplFileObject::getMaxLineLen()
   Get maximum line length */
SPL_METHOD(SplFileObject, getMaxLineLen)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_LONG((long)intern->u.file.max_line_len);
} /* }}} */

/* {{{ proto bool SplFileObject::hasChildren()
   Return false (a file is a leaf in recursive iteration) */
SPL_METHOD(SplFileObject, hasChildren)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_FALSE;
} /* }}} */

/* {{{ proto bool SplFileObject::getChildren()
   Read NULL */
SPL_METHOD(SplFileObject, getChildren)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	/* return NULL */
} /* }}} */

/* {{{ FileFunction
   Define an SplFileObject method that simply forwards to the like-named
   builtin, passing the stream resource first. */
#define FileFunction(func_name) \
SPL_METHOD(SplFileObject, func_name) \
{ \
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \
	FileFunctionCall(func_name, ZEND_NUM_ARGS(), NULL); \
}
/* }}} */

/* {{{ proto array SplFileObject::fgetcsv([string delimiter [, string enclosure [, escape = '\\']]])
   Return current line as csv */
SPL_METHOD(SplFileObject, fgetcsv)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	/* defaults come from the per-object CSV control characters */
	char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape;
	char *delim = NULL, *enclo = NULL, *esc = NULL;
	int d_len = 0, e_len = 0, esc_len = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) {
		/* deliberate fall-through: each given argument overrides a default */
		switch(ZEND_NUM_ARGS())
		{
		case 3:
			if (esc_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character");
				RETURN_FALSE;
			}
			escape = esc[0];
			/* no break */
		case 2:
			if (e_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character");
				RETURN_FALSE;
			}
			enclosure = enclo[0];
			/* no break */
		case 1:
			if (d_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character");
				RETURN_FALSE;
			}
			delimiter = delim[0];
			/* no break */
		case 0:
			break;
		}
		spl_filesystem_file_read_csv(intern, delimiter, enclosure, escape, return_value TSRMLS_CC);
	}
}
/* }}} */

/* {{{ proto int SplFileObject::fputcsv(array fields, [string delimiter [, string enclosure [, string escape]]])
   Output a field array as a CSV line */
SPL_METHOD(SplFileObject, fputcsv)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape;
	char *delim = NULL, *enclo = NULL, *esc = NULL;
	int d_len = 0, e_len = 0, esc_len = 0, ret;
	zval *fields = NULL;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a|sss", &fields, &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) {
		/* deliberate fall-through, as in fgetcsv */
		switch(ZEND_NUM_ARGS())
		{
		case 4:
			if (esc_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character");
				RETURN_FALSE;
			}
			escape = esc[0];
			/* no break */
		case 3:
			if (e_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character");
				RETURN_FALSE;
			}
			enclosure = enclo[0];
			/* no break */
		case 2:
			if (d_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character");
				RETURN_FALSE;
			}
			delimiter = delim[0];
			/* no break */
		case 1:
		case 0:
			break;
		}
		ret = php_fputcsv(intern->u.file.stream, fields, delimiter, enclosure, escape TSRMLS_CC);
		RETURN_LONG(ret);
	}
}
/* }}} */

/* {{{ proto void SplFileObject::setCsvControl([string delimiter = ',' [, string enclosure = '"' [, string escape = '\\']]])
   Set the delimiter and enclosure character used in fgetcsv */
SPL_METHOD(SplFileObject, setCsvControl)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char delimiter = ',', enclosure = '"', escape='\\';
	char *delim = NULL, *enclo = NULL, *esc = NULL;
	int d_len = 0, e_len = 0, esc_len = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) {
		switch(ZEND_NUM_ARGS())
		{
		case 3:
			if (esc_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character");
				RETURN_FALSE;
			}
			escape = esc[0];
			/* no break */
		case 2:
			if (e_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character");
				RETURN_FALSE;
			}
			enclosure = enclo[0];
			/* no break */
		case 1:
			if (d_len != 1) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character");
				RETURN_FALSE;
			}
			delimiter = delim[0];
			/* no break */
		case 0:
			break;
		}
		intern->u.file.delimiter = delimiter;
		intern->u.file.enclosure = enclosure;
		intern->u.file.escape = escape;
	}
}
/* }}} */

/* {{{ proto array SplFileObject::getCsvControl()
   Get the delimiter and enclosure character used in fgetcsv
   (note: the escape character is not included in the result) */
SPL_METHOD(SplFileObject, getCsvControl)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char delimiter[2], enclosure[2];

	array_init(return_value);

	delimiter[0] = intern->u.file.delimiter;
	delimiter[1] = '\0';
	enclosure[0] =
 intern->u.file.enclosure;
	enclosure[1] = '\0';

	add_next_index_string(return_value, delimiter, 1);
	add_next_index_string(return_value, enclosure, 1);
}
/* }}} */

/* {{{ proto bool SplFileObject::flock(int operation [, int &wouldblock])
   Portable file locking */
FileFunction(flock)
/* }}} */

/* {{{ proto bool SplFileObject::fflush()
   Flush the file */
SPL_METHOD(SplFileObject, fflush)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	RETURN_BOOL(!php_stream_flush(intern->u.file.stream));
} /* }}} */

/* {{{ proto int SplFileObject::ftell()
   Return current file position */
SPL_METHOD(SplFileObject, ftell)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long ret = php_stream_tell(intern->u.file.stream);

	if (ret == -1) {
		RETURN_FALSE;
	} else {
		RETURN_LONG(ret);
	}
} /* }}} */

/* {{{ proto int SplFileObject::fseek(int pos [, int whence = SEEK_SET])
   Return current file position */
SPL_METHOD(SplFileObject, fseek)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long pos, whence = SEEK_SET;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &pos, &whence) == FAILURE) {
		return;
	}

	/* seeking invalidates the buffered line */
	spl_filesystem_file_free_line(intern TSRMLS_CC);
	RETURN_LONG(php_stream_seek(intern->u.file.stream, pos, whence));
} /* }}} */

/* {{{ proto int SplFileObject::fgetc()
   Get a character form the file */
SPL_METHOD(SplFileObject, fgetc)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char buf[2];
	int result;

	spl_filesystem_file_free_line(intern TSRMLS_CC);

	result = php_stream_getc(intern->u.file.stream);

	if (result == EOF) {
		RETVAL_FALSE;
	} else {
		/* keep the line counter in sync when a newline is consumed */
		if (result == '\n') {
			intern->u.file.current_line_num++;
		}
		buf[0] = result;
		buf[1] = '\0';

		RETURN_STRINGL(buf, 1, 1);
	}
} /* }}} */

/* {{{ proto string SplFileObject::fgetss([string allowable_tags])
   Get a line from file pointer and strip HTML tags */
SPL_METHOD(SplFileObject, fgetss)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zval *arg2 = NULL;

	MAKE_STD_ZVAL(arg2);

	/* second arg to fgetss() is the buffer length; default to 1024 */
	if (intern->u.file.max_line_len > 0) {
		ZVAL_LONG(arg2, intern->u.file.max_line_len);
	} else {
		ZVAL_LONG(arg2, 1024);
	}

	spl_filesystem_file_free_line(intern TSRMLS_CC);
	intern->u.file.current_line_num++;

	FileFunctionCall(fgetss, ZEND_NUM_ARGS(), arg2);

	zval_ptr_dtor(&arg2);
} /* }}} */

/* {{{ proto int SplFileObject::fpassthru()
   Output all remaining data from a file pointer */
SPL_METHOD(SplFileObject, fpassthru)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	RETURN_LONG(php_stream_passthru(intern->u.file.stream));
} /* }}} */

/* {{{ proto bool SplFileObject::fscanf(string format [, string ...])
   Implements a mostly ANSI compatible fscanf() */
SPL_METHOD(SplFileObject, fscanf)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	spl_filesystem_file_free_line(intern TSRMLS_CC);
	intern->u.file.current_line_num++;

	FileFunctionCall(fscanf, ZEND_NUM_ARGS(), NULL);
} /* }}} */

/* {{{ proto mixed SplFileObject::fwrite(string str [, int length])
   Binary-safe file write */
SPL_METHOD(SplFileObject, fwrite)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *str;
	int str_len;
	long length = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &str, &str_len, &length) == FAILURE) {
		return;
	}

	if (ZEND_NUM_ARGS() > 1) {
		/* clamp to [0, length] when an explicit length was given */
		str_len = MAX(0, MIN(length, str_len));
	}
	if (!str_len) {
		RETURN_LONG(0);
	}

	RETURN_LONG(php_stream_write(intern->u.file.stream, str, str_len));
} /* }}} */

/* Binary-safe read of up to 'length' bytes into a new string. */
SPL_METHOD(SplFileObject, fread)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long length = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &length) == FAILURE) {
		return;
	}

	if (length <= 0) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be greater than 0");
		RETURN_FALSE;
	}
	/* guard against long/int truncation before allocating */
	if (length > INT_MAX) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be no more than %d", INT_MAX);
		RETURN_FALSE;
	}

	Z_STRVAL_P(return_value) = emalloc(length + 1);
	Z_STRLEN_P(return_value) = php_stream_read(intern->u.file.stream, Z_STRVAL_P(return_value), length);

	/* needed because recv/read/gzread doesnt put a null at the end*/
	Z_STRVAL_P(return_value)[Z_STRLEN_P(return_value)] = 0;
	Z_TYPE_P(return_value) = IS_STRING;
}

/* {{{ proto bool SplFileObject::fstat()
   Stat() on a filehandle */
FileFunction(fstat)
/* }}} */

/* {{{ proto bool SplFileObject::ftruncate(int size)
   Truncate file to 'size' length */
SPL_METHOD(SplFileObject, ftruncate)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long size;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &size) == FAILURE) {
		return;
	}

	if (!php_stream_truncate_supported(intern->u.file.stream)) {
		zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't truncate file %s", intern->file_name);
		RETURN_FALSE;
	}

	RETURN_BOOL(0 == php_stream_truncate_set_size(intern->u.file.stream, size));
} /* }}} */

/* {{{ proto void SplFileObject::seek(int line_pos)
   Seek to specified line (rewinds, then reads forward line by line) */
SPL_METHOD(SplFileObject, seek)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long line_pos;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &line_pos) == FAILURE) {
		return;
	}
	if (line_pos < 0) {
		zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't seek file %s to negative line %ld", intern->file_name, line_pos);
		RETURN_FALSE;
	}

	spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC);

	while(intern->u.file.current_line_num < line_pos) {
		if (spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC) ==
 FAILURE) {
			break;
		}
	}
} /* }}} */

/* {{{ Function/Class/Method definitions */
ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object___construct, 0, 0, 1)
	ZEND_ARG_INFO(0, file_name)
	ZEND_ARG_INFO(0, open_mode)
	ZEND_ARG_INFO(0, use_include_path)
	ZEND_ARG_INFO(0, context)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_file_object_setFlags, 0)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_file_object_setMaxLineLen, 0)
	ZEND_ARG_INFO(0, max_len)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetcsv, 0, 0, 0)
	ZEND_ARG_INFO(0, delimiter)
	ZEND_ARG_INFO(0, enclosure)
	ZEND_ARG_INFO(0, escape)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fputcsv, 0, 0, 1)
	ZEND_ARG_INFO(0, fields)
	ZEND_ARG_INFO(0, delimiter)
	ZEND_ARG_INFO(0, enclosure)
	ZEND_ARG_INFO(0, escape)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1)
	ZEND_ARG_INFO(0, operation)
	ZEND_ARG_INFO(1, wouldblock)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1)
	ZEND_ARG_INFO(0, pos)
	ZEND_ARG_INFO(0, whence)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0)
	ZEND_ARG_INFO(0, allowable_tags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1)
	ZEND_ARG_INFO(0, format)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1)
	ZEND_ARG_INFO(0, str)
	ZEND_ARG_INFO(0, length)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fread, 0, 0, 1)
	ZEND_ARG_INFO(0, length)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1)
	ZEND_ARG_INFO(0, size)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1)
	ZEND_ARG_INFO(0, line_pos)
ZEND_END_ARG_INFO()

/* SplFileObject method table */
static const zend_function_entry spl_SplFileObject_functions[] = {
	SPL_ME(SplFileObject, __construct, arginfo_file_object___construct, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, eof, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, valid, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgets, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgetcsv, arginfo_file_object_fgetcsv, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fputcsv, arginfo_file_object_fputcsv, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, setCsvControl, arginfo_file_object_fgetcsv, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getCsvControl, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, flock, arginfo_file_object_flock, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fflush, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, ftell, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fseek, arginfo_file_object_fseek, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgetc, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fpassthru, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgetss, arginfo_file_object_fgetss, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fscanf, arginfo_file_object_fscanf, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fwrite, arginfo_file_object_fwrite, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fread, arginfo_file_object_fread, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fstat, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, ftruncate, arginfo_file_object_ftruncate, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, setFlags, arginfo_file_object_setFlags, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getFlags, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, setMaxLineLen, arginfo_file_object_setMaxLineLen, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getMaxLineLen, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, hasChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, seek, arginfo_file_object_seek, ZEND_ACC_PUBLIC)
	/* mappings */
	SPL_MA(SplFileObject, getCurrentLine, SplFileObject, fgets, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_MA(SplFileObject, __toString, SplFileObject, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};

ZEND_BEGIN_ARG_INFO_EX(arginfo_temp_file_object___construct, 0, 0, 0)
	ZEND_ARG_INFO(0, max_memory)
ZEND_END_ARG_INFO()

static const zend_function_entry spl_SplTempFileObject_functions[] = {
	SPL_ME(SplTempFileObject, __construct, arginfo_temp_file_object___construct, ZEND_ACC_PUBLIC)
	PHP_FE_END
};
/* }}} */

/* {{{ PHP_MINIT_FUNCTION(spl_directory)
   Register the SPL filesystem classes, their handlers and constants. */
PHP_MINIT_FUNCTION(spl_directory)
{
	REGISTER_SPL_STD_CLASS_EX(SplFileInfo, spl_filesystem_object_new, spl_SplFileInfo_functions);
	memcpy(&spl_filesystem_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
	spl_filesystem_object_handlers.clone_obj = spl_filesystem_object_clone;
	spl_filesystem_object_handlers.cast_object = spl_filesystem_object_cast;
	spl_filesystem_object_handlers.get_debug_info = spl_filesystem_object_get_debug_info;
	spl_ce_SplFileInfo->serialize = zend_class_serialize_deny;
	spl_ce_SplFileInfo->unserialize = zend_class_unserialize_deny;

	REGISTER_SPL_SUB_CLASS_EX(DirectoryIterator, SplFileInfo, spl_filesystem_object_new, spl_DirectoryIterator_functions);
	zend_class_implements(spl_ce_DirectoryIterator TSRMLS_CC, 1, zend_ce_iterator);
	REGISTER_SPL_IMPLEMENTS(DirectoryIterator, SeekableIterator);

	spl_ce_DirectoryIterator->get_iterator = spl_filesystem_dir_get_iterator;

	REGISTER_SPL_SUB_CLASS_EX(FilesystemIterator, DirectoryIterator, spl_filesystem_object_new, spl_FilesystemIterator_functions);

	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_MODE_MASK", SPL_FILE_DIR_CURRENT_MODE_MASK);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_PATHNAME", SPL_FILE_DIR_CURRENT_AS_PATHNAME);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_FILEINFO", SPL_FILE_DIR_CURRENT_AS_FILEINFO);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_SELF", SPL_FILE_DIR_CURRENT_AS_SELF);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_MODE_MASK", SPL_FILE_DIR_KEY_MODE_MASK);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_PATHNAME", SPL_FILE_DIR_KEY_AS_PATHNAME);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "FOLLOW_SYMLINKS", SPL_FILE_DIR_FOLLOW_SYMLINKS);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_FILENAME", SPL_FILE_DIR_KEY_AS_FILENAME);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "NEW_CURRENT_AND_KEY", SPL_FILE_DIR_KEY_AS_FILENAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "OTHER_MODE_MASK", SPL_FILE_DIR_OTHERS_MASK);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "SKIP_DOTS", SPL_FILE_DIR_SKIPDOTS);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "UNIX_PATHS", SPL_FILE_DIR_UNIXPATHS);

	spl_ce_FilesystemIterator->get_iterator = spl_filesystem_tree_get_iterator;

	REGISTER_SPL_SUB_CLASS_EX(RecursiveDirectoryIterator, FilesystemIterator, spl_filesystem_object_new, spl_RecursiveDirectoryIterator_functions);
	REGISTER_SPL_IMPLEMENTS(RecursiveDirectoryIterator, RecursiveIterator);

	memcpy(&spl_filesystem_object_check_handlers, &spl_filesystem_object_handlers, sizeof(zend_object_handlers));
	spl_filesystem_object_check_handlers.get_method = spl_filesystem_object_get_method_check;

#ifdef HAVE_GLOB
	REGISTER_SPL_SUB_CLASS_EX(GlobIterator, FilesystemIterator, spl_filesystem_object_new_check, spl_GlobIterator_functions);
	REGISTER_SPL_IMPLEMENTS(GlobIterator, Countable);
#endif

	REGISTER_SPL_SUB_CLASS_EX(SplFileObject, SplFileInfo, spl_filesystem_object_new_check, spl_SplFileObject_functions);
	REGISTER_SPL_IMPLEMENTS(SplFileObject, RecursiveIterator);
	REGISTER_SPL_IMPLEMENTS(SplFileObject, SeekableIterator);
REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "DROP_NEW_LINE", SPL_FILE_OBJECT_DROP_NEW_LINE); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_AHEAD", SPL_FILE_OBJECT_READ_AHEAD); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "SKIP_EMPTY", SPL_FILE_OBJECT_SKIP_EMPTY); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_CSV", SPL_FILE_OBJECT_READ_CSV); REGISTER_SPL_SUB_CLASS_EX(SplTempFileObject, SplFileObject, spl_filesystem_object_new_check, spl_SplTempFileObject_functions); return SUCCESS; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
static void spl_filesystem_object_free_storage(void *object TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)object; if (intern->oth_handler && intern->oth_handler->dtor) { intern->oth_handler->dtor(intern TSRMLS_CC); } zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->_path) { efree(intern->_path); } if (intern->file_name) { efree(intern->file_name); } switch(intern->type) { case SPL_FS_INFO: break; case SPL_FS_DIR: if (intern->u.dir.dirp) { php_stream_close(intern->u.dir.dirp); intern->u.dir.dirp = NULL; } if (intern->u.dir.sub_path) { efree(intern->u.dir.sub_path); } break; case SPL_FS_FILE: if (intern->u.file.stream) { if (intern->u.file.zcontext) { /* zend_list_delref(Z_RESVAL_P(intern->zcontext));*/ } if (!intern->u.file.stream->is_persistent) { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE); } else { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE_PERSISTENT); } if (intern->u.file.open_mode) { efree(intern->u.file.open_mode); } if (intern->orig_path) { efree(intern->orig_path); } } spl_filesystem_file_free_line(intern TSRMLS_CC); break; } { zend_object_iterator *iterator; iterator = (zend_object_iterator*) spl_filesystem_object_to_iterator(intern); if (iterator->data != NULL) { iterator->data = NULL; iterator->funcs->dtor(iterator TSRMLS_CC); } } efree(object); } /* }}} */
static void spl_filesystem_object_free_storage(void *object TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)object; if (intern->oth_handler && intern->oth_handler->dtor) { intern->oth_handler->dtor(intern TSRMLS_CC); } zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->_path) { efree(intern->_path); } if (intern->file_name) { efree(intern->file_name); } switch(intern->type) { case SPL_FS_INFO: break; case SPL_FS_DIR: if (intern->u.dir.dirp) { php_stream_close(intern->u.dir.dirp); intern->u.dir.dirp = NULL; } if (intern->u.dir.sub_path) { efree(intern->u.dir.sub_path); } break; case SPL_FS_FILE: if (intern->u.file.stream) { if (intern->u.file.zcontext) { /* zend_list_delref(Z_RESVAL_P(intern->zcontext));*/ } if (!intern->u.file.stream->is_persistent) { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE); } else { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE_PERSISTENT); } if (intern->u.file.open_mode) { efree(intern->u.file.open_mode); } if (intern->orig_path) { efree(intern->orig_path); } } spl_filesystem_file_free_line(intern TSRMLS_CC); break; } { zend_object_iterator *iterator; iterator = (zend_object_iterator*) spl_filesystem_object_to_iterator(intern); if (iterator->data != NULL) { iterator->data = NULL; iterator->funcs->dtor(iterator TSRMLS_CC); } } efree(object); } /* }}} */
{'added': [(82, ''), (84, ''), (101, '\t\t}'), (137, '/* creates the object by'), (138, ' - allocating memory'), (143, ' called from'), (316, ''), (328, " Load the 'other' object"), (373, ''), (392, ''), (416, ''), (462, ''), (517, ''), (533, ''), (536, ''), (537, '\t\t\tif (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr",'), (538, '\t\t\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (547, ''), (556, '\tcase SPL_FS_DIR:'), (620, ''), (668, ''), (674, ''), (754, ''), (772, ''), (802, ''), (862, ''), (878, ''), (894, ''), (900, ''), (914, ''), (1022, '/* }}}*/'), (1032, ''), (1068, ''), (1087, ''), (1110, ''), (1124, ''), (1125, ' When the constructor gets called the object is already created'), (1143, ''), (1147, ''), (1252, ''), (1300, ''), (1310, ''), (1313, '\t} else {'), (1351, ''), (1369, ''), (1387, ''), (1405, ''), (1466, ''), (1522, ''), (1526, ''), (1557, ''), (1578, ''), (1614, ''), (1669, ''), (1704, ''), (1722, ''), (1736, ''), (1806, ''), (1827, ''), (1871, ''), (1927, 'ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0)'), (1986, 'ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0)'), (1990, 'ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0)'), (2012, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1)'), (2061, ''), (2089, ''), (2110, ''), (2136, ''), (2162, ''), (2166, ''), (2240, ''), (2263, ''), (2297, '\tif (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbr!",'), (2299, '\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (2300, '\t\t\t&use_include_path, &intern->u.file.zcontext) == FAILURE) {'), (2306, ''), (2371, ''), (2384, ''), (2397, ''), (2410, ''), (2427, ''), (2443, ''), (2464, ''), (2481, ''), (2533, ''), (2542, ''), (2557, ''), (2588, ''), (2630, ''), (2673, ''), (2716, ''), (2745, '\tspl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);'), (2875, '\tif (length > INT_MAX) {'), (2876, '\t\tphp_error_docref(NULL TSRMLS_CC, E_WARNING, 
"Length parameter must be no more than %d", INT_MAX);'), (2877, '\t\tRETURN_FALSE;'), (2878, '\t}'), (2899, ''), (2908, ''), (2918, ''), (2924, '\t\tRETURN_FALSE;'), (2926, ''), (2928, ''), (2965, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1)'), (2970, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1)'), (2975, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0)'), (2979, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1)'), (2983, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1)'), (2992, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1)'), (2996, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1)'), (3085, ''), (3102, '')], 'deleted': [(82, ''), (84, ''), (101, '\t\t}'), (137, '/* creates the object by'), (138, ' - allocating memory'), (143, ' called from'), (316, ''), (328, " Load the 'other' object"), (373, ''), (392, ''), (416, ''), (462, ''), (517, ''), (533, ''), (536, ''), (537, '\t\t\tif (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr",'), (538, '\t\t\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (547, ''), (556, '\tcase SPL_FS_DIR:'), (620, ''), (668, ''), (674, ''), (754, ''), (772, ''), (802, ''), (862, ''), (878, ''), (894, ''), (900, ''), (914, ''), (1022, '/* }}}*/'), (1032, ''), (1068, ''), (1087, ''), (1110, ''), (1124, ''), (1125, ' When the constructor gets called the object is already created'), (1143, ''), (1147, ''), (1252, ''), (1300, ''), (1310, ''), (1313, '\t} else {'), (1351, ''), (1369, ''), (1387, ''), (1405, ''), (1466, ''), (1522, ''), (1526, ''), (1557, ''), (1578, ''), (1614, ''), (1669, ''), (1704, ''), (1722, ''), (1736, ''), (1806, ''), (1827, ''), (1871, ''), (1927, 'ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0)'), (1986, 'ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0)'), (1990, 'ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0)'), (2012, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1)'), (2061, ''), (2089, 
''), (2110, ''), (2136, ''), (2162, ''), (2166, ''), (2240, ''), (2263, ''), (2297, '\tif (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbr!",'), (2299, '\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (2300, '\t\t\t&use_include_path, &intern->u.file.zcontext) == FAILURE) {'), (2306, ''), (2371, ''), (2384, ''), (2397, ''), (2410, ''), (2427, ''), (2443, ''), (2464, ''), (2481, ''), (2533, ''), (2542, ''), (2557, ''), (2588, ''), (2630, ''), (2673, ''), (2716, ''), (2745, '\tspl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);'), (2895, ''), (2904, ''), (2914, ''), (2920, '\t\tRETURN_FALSE;'), (2922, ''), (2924, ''), (2961, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1)'), (2966, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1)'), (2971, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0)'), (2975, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1)'), (2979, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1)'), (2988, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1)'), (2992, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1)'), (3081, ''), (3098, '')]}
111
107
2,194
15,773
55
321
16
https://github.com/php/php-src
CVE-2016-5770
CWE-190
152
elementwise.cc
C++
tflite::ops::builtin::elementwise::GenericPrepare
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <stdlib.h> #include <cmath> #include <limits> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" namespace tflite { namespace ops { namespace builtin { namespace elementwise { namespace { constexpr char kAbsName[] = "Abs"; constexpr char kSinName[] = "Sin"; constexpr char kCosName[] = "Cos"; constexpr char kLogName[] = "Log"; constexpr char kSqrtName[] = "Sqrt"; constexpr char kRsqrtName[] = "Rsqrt"; constexpr char kSquareName[] = "Square"; constexpr char kNotName[] = "Not"; struct OpData { int32_t multiplier; int32_t shift; int input_offset; int output_offset; }; bool IsNumericSupportedType(const TfLiteType type) { return type == kTfLiteFloat32; } bool IsLogicalSupportedType(const TfLiteType type) { return type == kTfLiteBool; } bool IsAbsSupportedType(const TfLiteType type) { return type == kTfLiteFloat32 || type == kTfLiteInt8; } typedef bool (*IsSupportedType)(TfLiteType); template <IsSupportedType is_supported_type, const char* op_name> TfLiteStatus 
GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (!is_supported_type(input->type)) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, op_name); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus AbsPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ( context, (GenericPrepare<IsAbsSupportedType, kAbsName>(context, node)), kTfLiteOk); const TfLiteTensor* input = GetInput(context, node, 0); if (input->type == kTfLiteInt8) { TfLiteTensor* output = GetOutput(context, node, 0); auto* op_data = static_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, input->quantization.type, kTfLiteAffineQuantization); TF_LITE_ENSURE_EQ(context, output->quantization.type, kTfLiteAffineQuantization); const auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(input->quantization.params); const auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>( output->quantization.params); TF_LITE_ENSURE(context, input_params != nullptr); TF_LITE_ENSURE(context, input_params->scale != nullptr); TF_LITE_ENSURE(context, input_params->scale->size > 0); TF_LITE_ENSURE(context, input_params->zero_point->size > 0); TF_LITE_ENSURE(context, output_params != nullptr); TF_LITE_ENSURE(context, output_params->scale != nullptr); TF_LITE_ENSURE(context, output_params->scale->size > 0); TF_LITE_ENSURE(context, output_params->zero_point->size > 0); op_data->input_offset = input_params->zero_point->data[0]; op_data->output_offset = output_params->zero_point->data[0]; const float input_scale = input_params->scale->data[0]; const float output_scale = output_params->scale->data[0]; double scale = input_scale / output_scale; QuantizeMultiplier(scale, 
&op_data->multiplier, &op_data->shift); } return kTfLiteOk; } template <typename T> inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, std::function<T(T)> func, TfLiteType expected_type) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); const int64_t num_elements = NumElements(input); const T* in_data = GetTensorData<T>(input); T* out_data = GetTensorData<T>(output); for (int64_t i = 0; i < num_elements; ++i) { out_data[i] = func(in_data[i]); } return kTfLiteOk; } inline TfLiteStatus EvalNumeric(TfLiteContext* context, TfLiteNode* node, float float_func(float)) { return EvalImpl<float>(context, node, float_func, kTfLiteFloat32); } inline TfLiteStatus EvalLogical(TfLiteContext* context, TfLiteNode* node, bool bool_func(bool)) { return EvalImpl<bool>(context, node, bool_func, kTfLiteBool); } void* AbsInit(TfLiteContext* context, const char* buffer, size_t length) { return new OpData(); } void AbsFree(TfLiteContext* context, void* buffer) { delete static_cast<OpData*>(buffer); } TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteType type = GetInput(context, node, 0)->type; switch (type) { case kTfLiteFloat32: return EvalImpl<float>(context, node, std::abs<float>, type); case kTfLiteInt8: { const auto* op_data = static_cast<const OpData*>(node->user_data); const int kMinInt8 = std::numeric_limits<int8_t>::min(); const int kMaxInt8 = std::numeric_limits<int8_t>::max(); std::function<int8_t(int8_t)> func = [&](int8_t i) { const int32_t value = std::abs(i - op_data->input_offset); return std::min( std::max(op_data->output_offset + MultiplyByQuantizedMultiplier( value, op_data->multiplier, op_data->shift), kMinInt8), kMaxInt8); }; return EvalImpl<int8_t>(context, node, func, type); } default: TF_LITE_KERNEL_LOG(context, "Current data type %s is not supported.", TfLiteTypeGetName(type)); return kTfLiteError; 
} } TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::sin); } TfLiteStatus CosEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::cos); } TfLiteStatus LogEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::log); } TfLiteStatus SqrtEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::sqrt); } TfLiteStatus RsqrtEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, [](float f) { return 1.f / std::sqrt(f); }); } TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, [](float f) { return f * f; }); } TfLiteStatus LogicalNotEval(TfLiteContext* context, TfLiteNode* node) { return EvalLogical(context, node, [](bool v) { return !v; }); } } // namespace } // namespace elementwise TfLiteRegistration* Register_ABS() { static TfLiteRegistration r = {elementwise::AbsInit, elementwise::AbsFree, elementwise::AbsPrepare, elementwise::AbsEval}; return &r; } TfLiteRegistration* Register_SIN() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kSinName>, elementwise::SinEval}; return &r; } TfLiteRegistration* Register_COS() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kCosName>, elementwise::CosEval}; return &r; } TfLiteRegistration* Register_LOG() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kLogName>, elementwise::LogEval}; return &r; } TfLiteRegistration* Register_SQRT() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kSqrtName>, elementwise::SqrtEval}; 
return &r; } TfLiteRegistration* Register_RSQRT() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kRsqrtName>, elementwise::RsqrtEval}; return &r; } TfLiteRegistration* Register_SQUARE() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kSquareName>, elementwise::SquareEval}; return &r; } TfLiteRegistration* Register_LOGICAL_NOT() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsLogicalSupportedType, elementwise::kNotName>, elementwise::LogicalNotEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <stdlib.h> #include <cmath> #include <limits> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" namespace tflite { namespace ops { namespace builtin { namespace elementwise { namespace { constexpr char kAbsName[] = "Abs"; constexpr char kSinName[] = "Sin"; constexpr char kCosName[] = "Cos"; constexpr char kLogName[] = "Log"; constexpr char kSqrtName[] = "Sqrt"; constexpr char kRsqrtName[] = "Rsqrt"; constexpr char kSquareName[] = "Square"; constexpr char kNotName[] = "Not"; struct OpData { int32_t multiplier; int32_t shift; int input_offset; int output_offset; }; bool IsNumericSupportedType(const TfLiteType type) { return type == kTfLiteFloat32; } bool IsLogicalSupportedType(const TfLiteType type) { return type == kTfLiteBool; } bool IsAbsSupportedType(const TfLiteType type) { return type == kTfLiteFloat32 || type == kTfLiteInt8; } typedef bool (*IsSupportedType)(TfLiteType); template <IsSupportedType is_supported_type, const char* op_name> TfLiteStatus 
GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (!is_supported_type(input->type)) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, op_name); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus AbsPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ( context, (GenericPrepare<IsAbsSupportedType, kAbsName>(context, node)), kTfLiteOk); const TfLiteTensor* input = GetInput(context, node, 0); if (input->type == kTfLiteInt8) { TfLiteTensor* output = GetOutput(context, node, 0); auto* op_data = static_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, input->quantization.type, kTfLiteAffineQuantization); TF_LITE_ENSURE_EQ(context, output->quantization.type, kTfLiteAffineQuantization); const auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(input->quantization.params); const auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>( output->quantization.params); TF_LITE_ENSURE(context, input_params != nullptr); TF_LITE_ENSURE(context, input_params->scale != nullptr); TF_LITE_ENSURE(context, input_params->scale->size > 0); TF_LITE_ENSURE(context, input_params->zero_point->size > 0); TF_LITE_ENSURE(context, output_params != nullptr); TF_LITE_ENSURE(context, output_params->scale != nullptr); TF_LITE_ENSURE(context, output_params->scale->size > 0); TF_LITE_ENSURE(context, output_params->zero_point->size > 0); op_data->input_offset = input_params->zero_point->data[0]; op_data->output_offset = output_params->zero_point->data[0]; const float input_scale = input_params->scale->data[0]; const float output_scale = 
output_params->scale->data[0]; double scale = input_scale / output_scale; QuantizeMultiplier(scale, &op_data->multiplier, &op_data->shift); } return kTfLiteOk; } template <typename T> inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, std::function<T(T)> func, TfLiteType expected_type) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); const int64_t num_elements = NumElements(input); const T* in_data = GetTensorData<T>(input); T* out_data = GetTensorData<T>(output); for (int64_t i = 0; i < num_elements; ++i) { out_data[i] = func(in_data[i]); } return kTfLiteOk; } inline TfLiteStatus EvalNumeric(TfLiteContext* context, TfLiteNode* node, float float_func(float)) { return EvalImpl<float>(context, node, float_func, kTfLiteFloat32); } inline TfLiteStatus EvalLogical(TfLiteContext* context, TfLiteNode* node, bool bool_func(bool)) { return EvalImpl<bool>(context, node, bool_func, kTfLiteBool); } void* AbsInit(TfLiteContext* context, const char* buffer, size_t length) { return new OpData(); } void AbsFree(TfLiteContext* context, void* buffer) { delete static_cast<OpData*>(buffer); } TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteType type = GetInput(context, node, 0)->type; switch (type) { case kTfLiteFloat32: return EvalImpl<float>(context, node, std::abs<float>, type); case kTfLiteInt8: { const auto* op_data = static_cast<const OpData*>(node->user_data); const int kMinInt8 = std::numeric_limits<int8_t>::min(); const int kMaxInt8 = std::numeric_limits<int8_t>::max(); std::function<int8_t(int8_t)> func = [&](int8_t i) { const int32_t value = std::abs(i - op_data->input_offset); return std::min( std::max(op_data->output_offset + MultiplyByQuantizedMultiplier( value, op_data->multiplier, op_data->shift), kMinInt8), kMaxInt8); }; 
return EvalImpl<int8_t>(context, node, func, type); } default: TF_LITE_KERNEL_LOG(context, "Current data type %s is not supported.", TfLiteTypeGetName(type)); return kTfLiteError; } } TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::sin); } TfLiteStatus CosEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::cos); } TfLiteStatus LogEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::log); } TfLiteStatus SqrtEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, std::sqrt); } TfLiteStatus RsqrtEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, [](float f) { return 1.f / std::sqrt(f); }); } TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) { return EvalNumeric(context, node, [](float f) { return f * f; }); } TfLiteStatus LogicalNotEval(TfLiteContext* context, TfLiteNode* node) { return EvalLogical(context, node, [](bool v) { return !v; }); } } // namespace } // namespace elementwise TfLiteRegistration* Register_ABS() { static TfLiteRegistration r = {elementwise::AbsInit, elementwise::AbsFree, elementwise::AbsPrepare, elementwise::AbsEval}; return &r; } TfLiteRegistration* Register_SIN() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kSinName>, elementwise::SinEval}; return &r; } TfLiteRegistration* Register_COS() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kCosName>, elementwise::CosEval}; return &r; } TfLiteRegistration* Register_LOG() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kLogName>, elementwise::LogEval}; return &r; } TfLiteRegistration* Register_SQRT() { 
static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kSqrtName>, elementwise::SqrtEval}; return &r; } TfLiteRegistration* Register_RSQRT() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kRsqrtName>, elementwise::RsqrtEval}; return &r; } TfLiteRegistration* Register_SQUARE() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsNumericSupportedType, elementwise::kSquareName>, elementwise::SquareEval}; return &r; } TfLiteRegistration* Register_LOGICAL_NOT() { static TfLiteRegistration r = { /*init=*/nullptr, /*free=*/nullptr, elementwise::GenericPrepare<elementwise::IsLogicalSupportedType, elementwise::kNotName>, elementwise::LogicalNotEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (!is_supported_type(input->type)) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, op_name); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (!is_supported_type(input->type)) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, op_name); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
{'added': [(69, ' const TfLiteTensor* input;'), (70, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (71, ' TfLiteTensor* output;'), (72, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (119, ' const TfLiteTensor* input;'), (120, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (121, ' TfLiteTensor* output;'), (122, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));')], 'deleted': [(69, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (70, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (117, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (118, ' TfLiteTensor* output = GetOutput(context, node, 0);')]}
8
4
232
1,627
12
116
2
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
320
secure_enclave.c
C
trustedDecryptDkgSecretAES
/* Modifications Copyright (C) 2019-2020 SKALE Labs Copyright 2018 Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include "secure_enclave_t.h" #include "sgx_tcrypto.h" #include "sgx_tseal.h" #include <sgx_tgmp.h> #include <sgx_trts.h> #include <sgx_key.h> #include "Point.h" #include "DomainParameters.h" #include "Signature.h" #include "Curves.h" #include "DHDkg.h" #include "AESUtils.h" #include "EnclaveConstants.h" #include "EnclaveCommon.h" #include "SIGNED_ENCLAVE_VERSION" #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #define INIT_ERROR_STATE *errString = 0; *errStatus = UNKNOWN_ERROR; #define SET_SUCCESS *errStatus = 0; #define CHECK_STATE(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR((const char*) __FILE__); \ snprintf(errString, BUF_LEN, "State check failed. Check log."); \ *errStatus = -1; \ return;} #define CHECK_STATE_CLEAN(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR(__FILE__); LOG_ERROR(__LINE__); \ snprintf(errString, BUF_LEN, "State check failed. 
Check log."); \ *errStatus = -1; \ goto clean;} #define CHECK_STATUS(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ LOG_ERROR(__FUNCTION__); \ snprintf(errString, BUF_LEN, "failed with status %d : %s", status, __ERRMESSAGE__); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; #define CHECK_STATUS2(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ snprintf(errString, BUF_LEN, __ERRMESSAGE__, status); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; void *(*gmp_realloc_func)(void *, size_t, size_t); void *(*oc_realloc_func)(void *, size_t, size_t); void (*gmp_free_func)(void *, size_t); void (*oc_free_func)(void *, size_t); void *reallocate_function(void *, size_t, size_t); void free_function(void *, size_t); unsigned char *globalRandom = NULL; #define CALL_ONCE \ static volatile bool called = false;\ if (called) { \ LOG_ERROR(__FUNCTION__); \ LOG_ERROR("This function shouldnt be called twice. Aborting!"); \ abort(); \ } else {called = true;}; void trustedEnclaveInit(uint32_t _logLevel) { CALL_ONCE LOG_INFO(__FUNCTION__); globalLogLevel_ = _logLevel; oc_realloc_func = &reallocate_function; oc_free_func = &free_function; LOG_INFO("Setting memory functions"); mp_get_memory_functions(NULL, &gmp_realloc_func, &gmp_free_func); mp_set_memory_functions(NULL, oc_realloc_func, oc_free_func); LOG_INFO("Calling enclave init"); enclave_init(); LOG_INFO("Reading random"); globalRandom = calloc(32,1); int ret = sgx_read_rand(globalRandom, 32); if(ret != SGX_SUCCESS) { LOG_ERROR("sgx_read_rand failed. Aboring enclave."); abort(); } LOG_INFO("Successfully inited enclave. Signed enclave version:" SIGNED_ENCLAVE_VERSION ); #ifndef SGX_DEBUG LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! NEVER USE IN PRODUCTION!"); #endif #if SGX_DEBUG != 0 LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! 
NEVER USE IN PRODUCTION!"); #endif #if SGX_MODE == SIM LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE SIMULATION MODE! NEVER USE IN PRODUCTION!"); #endif } void free_function(void *ptr, size_t sz) { if (sgx_is_within_enclave(ptr, sz)) gmp_free_func(ptr, sz); else { sgx_status_t status; status = oc_free(ptr, sz); if (status != SGX_SUCCESS) abort(); } } void *reallocate_function(void *ptr, size_t osize, size_t nsize) { uint64_t nptr; sgx_status_t status; if (sgx_is_within_enclave(ptr, osize)) { return gmp_realloc_func(ptr, osize, nsize); } status = oc_realloc(&nptr, ptr, osize, nsize); if (status != SGX_SUCCESS) abort(); /* * If the entire range of allocated memory is not outside the enclave * then something truly terrible has happened. In theory, we could * free() and try again, but would you trust the OS at this point? */ if (!sgx_is_outside_enclave((void *) ptr, nsize)) abort(); return (void *) nptr; } void get_global_random(unsigned char *_randBuff, uint64_t _size) { char errString[BUF_LEN]; int status; int *errStatus = &status; INIT_ERROR_STATE CHECK_STATE(_size <= 32) CHECK_STATE(_randBuff); sgx_sha_state_handle_t shaStateHandle; CHECK_STATE(sgx_sha256_init(&shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_update(globalRandom, 32, shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_get_hash(shaStateHandle, (sgx_sha256_hash_t *)globalRandom) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_close(shaStateHandle) == SGX_SUCCESS); memcpy(_randBuff, globalRandom, _size); } void sealHexSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); CHECK_STATE(strnlen(sek_hex, 33) == 32) uint64_t plaintextLen = strlen(sek_hex) + 1; uint64_t sealedLen = sgx_calc_sealed_data_size(0, plaintextLen); sgx_attributes_t attribute_mask; attribute_mask.flags = 0xfffffffffffffff3; attribute_mask.xfrm = 0x0; sgx_misc_select_t misc 
= 0xF0000000; sgx_status_t status = sgx_seal_data_ex(SGX_KEYPOLICY_MRENCLAVE, attribute_mask, misc, 0, NULL, plaintextLen, (uint8_t *) sek_hex, sealedLen, (sgx_sealed_data_t *) encrypted_sek); CHECK_STATUS("seal SEK failed after SEK generation"); uint32_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(encrypt_text_length = plaintextLen); SAFE_CHAR_BUF(unsealedKey, BUF_LEN); uint32_t decLen = BUF_LEN; uint32_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(add_text_length == 0); CHECK_STATE(sgx_is_within_enclave(encrypted_sek,sizeof(sgx_sealed_data_t))); status = sgx_unseal_data((const sgx_sealed_data_t *)encrypted_sek, NULL, NULL, (uint8_t *) unsealedKey, &decLen ); CHECK_STATUS("seal/unseal SEK failed after SEK generation in unseal"); *enc_len = sealedLen; SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE); carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex); memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK(int *errStatus, char *errString, uint8_t *encrypted_sek) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); SAFE_CHAR_BUF(aes_key_hex, BUF_LEN); uint32_t dec_len = BUF_LEN; sgx_status_t status = sgx_unseal_data( (const sgx_sealed_data_t *) encrypted_sek, NULL, 0, (uint8_t *)aes_key_hex, &dec_len); if (status == 0x3001) { LOG_ERROR("Could not decrypt LevelDB storage! 
\n" "If you upgraded sgxwallet software or if you are restoring from backup, please run sgxwallet with -b flag and " "pass your backup key."); } CHECK_STATUS2("sgx unseal SEK failed with status %d"); uint64_t len; hex2carray(aes_key_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK_backup(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, const char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); uint64_t len; hex2carray(sek_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, (char *)sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t *enc_len, char *pub_key_x, char *pub_key_y) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); RANDOM_CHAR_BUF(rand_char, 32); mpz_t seed; mpz_init(seed); mpz_t skey; mpz_init(skey); point Pkey = point_init(); mpz_import(seed, 32, 1, sizeof(rand_char[0]), 0, 0, rand_char); mpz_mod(skey, seed, curve->p); signature_extract_public_key(Pkey, skey, curve); SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, Pkey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, Pkey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SAFE_CHAR_BUF(skey_str, ECDSA_SKEY_LEN);SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2); 
mpz_get_str(arr_skey_str, ECDSA_SKEY_BASE, skey); n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { skey_str[i] = '0'; } strncpy(skey_str + n_zeroes, arr_skey_str, 65 - n_zeroes); skey_str[ECDSA_SKEY_LEN - 1] = 0; snprintf(errString, BUF_LEN, "skey len is %d\n", (int) strlen(skey_str)); int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN); CHECK_STATUS("ecdsa private key encryption failed"); *enc_len = strlen(skey_str) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, ECDSA_SKEY_LEN); CHECK_STATUS2("ecdsa private key decr failed with status %d"); SET_SUCCESS clean: mpz_clear(seed); mpz_clear(skey); point_clear(Pkey); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + 
n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; } static uint64_t sigCounter = 0; void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, const char *hash, char *sigR, char *sigS, uint8_t *sig_v, int base) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(hash); CHECK_STATE(sigR); CHECK_STATE(sigS); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); mpz_t msgMpz; mpz_init(msgMpz); signature sign = signature_init(); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; if (mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid secret key"); LOG_ERROR(errString); goto clean; } if (mpz_set_str(msgMpz, hash, 16) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid message hash"); LOG_ERROR(errString); goto clean; } signature_sign(sign, msgMpz, privateKeyMpz, curve); sigCounter++; if (sigCounter % 1000 == 0) { point Pkey = point_init(); signature_extract_public_key(Pkey, privateKeyMpz, curve); if (!signature_verify(msgMpz, sign, Pkey, curve)) { *errStatus = -2; snprintf(errString, BUF_LEN, "signature is not verified! 
"); point_clear(Pkey); goto clean; } point_clear(Pkey); } SAFE_CHAR_BUF(arrM, BUF_LEN); mpz_get_str(arrM, 16, msgMpz); snprintf(errString, BUF_LEN, "message is %s ", arrM); SAFE_CHAR_BUF(arrR, BUF_LEN); mpz_get_str(arrR, base, sign->r); strncpy(sigR, arrR, 1024); SAFE_CHAR_BUF(arrS, BUF_LEN); mpz_get_str(arrS, base, sign->s); strncpy(sigS, arrS, 1024); *sig_v = sign->v; SET_SUCCESS clean: mpz_clear(privateKeyMpz); mpz_clear(msgMpz); signature_free(sign); LOG_DEBUG(__FUNCTION__ ); LOG_DEBUG("SGX call completed"); } void trustedDecryptKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(key); *errStatus = -9; int status = AES_decrypt_DH(encryptedPrivateKey, enc_len, key, 3072); if (status != 0) { *errStatus = status; snprintf(errString, BUF_LEN, "aes decrypt failed with status %d", status); LOG_ERROR(errString); goto clean; } *errStatus = -10; uint64_t keyLen = strnlen(key, MAX_KEY_LENGTH); if (keyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Key is not null terminated"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; } void trustedEncryptKeyAES(int *errStatus, char *errString, const char *key, uint8_t *encryptedPrivateKey, uint32_t *enc_len) { LOG_INFO(__FUNCTION__); *errString = 0; *errStatus = UNKNOWN_ERROR; CHECK_STATE(key); CHECK_STATE(encryptedPrivateKey); *errStatus = UNKNOWN_ERROR; int status = AES_encrypt_DH((char *)key, encryptedPrivateKey, BUF_LEN); CHECK_STATUS2("AES encrypt failed with status %d"); *enc_len = strlen(key) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SAFE_CHAR_BUF(decryptedKey, BUF_LEN); status = AES_decrypt_DH(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN); CHECK_STATUS2("trustedDecryptKey failed with status %d"); uint64_t decryptedKeyLen = strnlen(decryptedKey, MAX_KEY_LENGTH); if (decryptedKeyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Decrypted key is not null 
terminated"); LOG_ERROR(errString); goto clean; } *errStatus = -8; if (strncmp(key, decryptedKey, MAX_KEY_LENGTH) != 0) { snprintf(errString, BUF_LEN, "Decrypted key does not match original key"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedBlsSignMessageAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *_hashX, char *_hashY, char *signature) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(_hashX); CHECK_STATE(_hashY); CHECK_STATE(signature); SAFE_CHAR_BUF(key, BUF_LEN);SAFE_CHAR_BUF(sig, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, enc_len, key, BUF_LEN); CHECK_STATUS("AES decrypt failed") if (!enclave_sign(key, _hashX, _hashY, sig)) { strncpy(errString, "Enclave failed to create bls signature", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } strncpy(signature, sig, BUF_LEN); if (strnlen(signature, BUF_LEN) < 10) { strncpy(errString, "Signature too short", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } SET_SUCCESS LOG_DEBUG("SGX call completed"); clean: ; LOG_DEBUG("SGX call completed"); } void trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t *enc_len, size_t _t) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); SAFE_CHAR_BUF(dkg_secret, DKG_BUFER_LENGTH); int status = gen_dkg_poly(dkg_secret, _t); CHECK_STATUS("gen_dkg_poly failed") status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN); CHECK_STATUS("SGX AES encrypt DKG poly failed"); *enc_len = strlen(dkg_secret) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SAFE_CHAR_BUF(decr_dkg_secret, DKG_BUFER_LENGTH); status = AES_decrypt(encrypted_dkg_secret, *enc_len, decr_dkg_secret, DKG_BUFER_LENGTH); CHECK_STATUS("aes decrypt dkg poly failed"); if (strcmp(dkg_secret, decr_dkg_secret) != 0) { snprintf(errString, BUF_LEN, 
"encrypted poly is not equal to decrypted poly"); LOG_ERROR(errString); *errStatus = -333; goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len, uint8_t *decrypted_dkg_secret) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(decrypted_dkg_secret); int status = AES_decrypt(encrypted_dkg_secret, enc_len, (char *) decrypted_dkg_secret, 3072); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint32_t enc_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_poly); memset(getThreadLocalDecryptedDkgPoly(), 0, DKG_BUFER_LENGTH); int status = AES_decrypt(encrypted_poly, enc_len, (char *) getThreadLocalDecryptedDkgPoly(), DKG_BUFER_LENGTH); CHECK_STATUS2("sgx_unseal_data - encrypted_poly failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len, char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n, uint8_t ind) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE uint32_t enc_len; int status; CHECK_STATE(encrypted_skey); CHECK_STATE(result_str); CHECK_STATE(s_shareG2); CHECK_STATE(pub_keyB); LOG_DEBUG(__FUNCTION__); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN); trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y); CHECK_STATUS("trustedGenerateEcdsaKeyAES failed"); status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN); skey[ECDSA_SKEY_LEN - 1] = 0; CHECK_STATUS2("AES_decrypt failed (in 
trustedGetEncryptedSecretShareAES) with status %d"); *dec_len = enc_len; SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len, char *public_shares, unsigned _t, unsigned _n) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(public_shares); CHECK_STATE(_t <= _n && _n > 0) SAFE_CHAR_BUF(decrypted_dkg_secret, DKG_MAX_SEALED_LEN); int status = AES_decrypt(encrypted_dkg_secret, enc_len, decrypted_dkg_secret, DKG_MAX_SEALED_LEN); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d"); status = calc_public_shares(decrypted_dkg_secret, public_shares, _t) != 0; CHECK_STATUS("t does not match polynomial in db"); SET_SUCCESS clean: ; LOG_INFO("SGX call completed"); } void trustedDkgVerifyAES(int *errStatus, char *errString, const char *public_shares, const char *s_share, uint8_t *encryptedPrivateKey, uint64_t enc_len, unsigned _t, int _ind, int *result) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(public_shares); CHECK_STATE(s_share); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t s; mpz_init(s); int status = 
AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed (in trustedDkgVerifyAES) with status %d"); SAFE_CHAR_BUF(encr_sshare, ECDSA_SKEY_LEN); strncpy(encr_sshare, s_share, ECDSA_SKEY_LEN - 1); SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); SAFE_CHAR_BUF(decr_sshare, ECDSA_SKEY_LEN); status=xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed") status = mpz_set_str(s, decr_sshare, 16); CHECK_STATUS("invalid decr secret share"); *result = Verification(public_shares, s, _t, _ind); SET_SUCCESS clean: mpz_clear(s); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedCreateBlsKeyAES(int *errStatus, char *errString, const char *s_shares, uint8_t *encryptedPrivateKey, uint64_t key_len, uint8_t *encr_bls_key, uint32_t *enc_bls_key_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(s_shares); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(encr_bls_key); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t sum; mpz_init(sum); mpz_set_ui(sum, 0); mpz_t q; mpz_init(q); mpz_set_str(q, "21888242871839275222246405745257275088548364400416034343698204186575808495617", 10); mpz_t bls_key; mpz_init(bls_key); int status = AES_decrypt(encryptedPrivateKey, key_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[ECDSA_SKEY_LEN - 1] = 0; int num_shares = strlen(s_shares) / 192; for (int i = 0; i < num_shares; i++) { SAFE_CHAR_BUF(encr_sshare, 65); strncpy(encr_sshare, s_shares + 192 * i, 64); encr_sshare[64] = 0; SAFE_CHAR_BUF(s_share, 193); strncpy(s_share, s_shares + 192 * i, 192); s_share[192] = 0; SAFE_CHAR_BUF(common_key, 65); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); common_key[64] = 0; SAFE_CHAR_BUF(decr_sshare, 65); status = xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt 
failed"); decr_sshare[64] = 0; mpz_t decr_secret_share; mpz_init(decr_secret_share); if (mpz_set_str(decr_secret_share, decr_sshare, 16) == -1) { *errStatus = 111; snprintf(errString, BUF_LEN, "invalid decrypted secret share"); LOG_ERROR(errString); mpz_clear(decr_secret_share); goto clean; } mpz_addmul_ui(sum, decr_secret_share, 1); mpz_clear(decr_secret_share); } mpz_mod(bls_key, sum, q); SAFE_CHAR_BUF(key_share, BLS_KEY_LENGTH); SAFE_CHAR_BUF(arr_skey_str, BUF_LEN); mpz_get_str(arr_skey_str, 16, bls_key); int n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { key_share[i] = '0'; } strncpy(key_share + n_zeroes, arr_skey_str, 65 - n_zeroes); key_share[BLS_KEY_LENGTH - 1] = 0; status = AES_encrypt(key_share, encr_bls_key, BUF_LEN); CHECK_STATUS2("aes encrypt bls private key failed with status %d "); *enc_bls_key_len = strlen(key_share) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SET_SUCCESS clean: mpz_clear(bls_key); mpz_clear(sum); mpz_clear(q); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetBlsPubKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t key_len, char *bls_pub_key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(bls_pub_key); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey_hex, ECDSA_SKEY_LEN); int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, ECDSA_SKEY_LEN); CHECK_STATUS2("AES decrypt failed %d"); skey_hex[ECDSA_SKEY_LEN - 1] = 0; status = calc_bls_public_key(skey_hex, bls_pub_key); CHECK_STATUS("could not calculate bls public key"); SET_SUCCESS static uint64_t counter = 0; clean: if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
/* Modifications Copyright (C) 2019-2020 SKALE Labs Copyright 2018 Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include "secure_enclave_t.h" #include "sgx_tcrypto.h" #include "sgx_tseal.h" #include <sgx_tgmp.h> #include <sgx_trts.h> #include <sgx_key.h> #include "Point.h" #include "DomainParameters.h" #include "Signature.h" #include "Curves.h" #include "DHDkg.h" #include "AESUtils.h" #include "EnclaveConstants.h" #include "EnclaveCommon.h" #include "SIGNED_ENCLAVE_VERSION" #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #define INIT_ERROR_STATE *errString = 0; *errStatus = UNKNOWN_ERROR; #define SET_SUCCESS *errStatus = 0; #define CHECK_STATE(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR((const char*) __FILE__); \ snprintf(errString, BUF_LEN, "State check failed. Check log."); \ *errStatus = -1; \ return;} #define CHECK_STATE_CLEAN(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR(__FILE__); LOG_ERROR(__LINE__); \ snprintf(errString, BUF_LEN, "State check failed. 
Check log."); \ *errStatus = -1; \ goto clean;} #define CHECK_STATUS(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ LOG_ERROR(__FUNCTION__); \ snprintf(errString, BUF_LEN, "failed with status %d : %s", status, __ERRMESSAGE__); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; #define CHECK_STATUS2(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ snprintf(errString, BUF_LEN, __ERRMESSAGE__, status); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; void *(*gmp_realloc_func)(void *, size_t, size_t); void *(*oc_realloc_func)(void *, size_t, size_t); void (*gmp_free_func)(void *, size_t); void (*oc_free_func)(void *, size_t); void *reallocate_function(void *, size_t, size_t); void free_function(void *, size_t); unsigned char *globalRandom = NULL; #define CALL_ONCE \ static volatile bool called = false;\ if (called) { \ LOG_ERROR(__FUNCTION__); \ LOG_ERROR("This function shouldnt be called twice. Aborting!"); \ abort(); \ } else {called = true;}; void trustedEnclaveInit(uint64_t _logLevel) { CALL_ONCE LOG_INFO(__FUNCTION__); globalLogLevel_ = _logLevel; oc_realloc_func = &reallocate_function; oc_free_func = &free_function; LOG_INFO("Setting memory functions"); mp_get_memory_functions(NULL, &gmp_realloc_func, &gmp_free_func); mp_set_memory_functions(NULL, oc_realloc_func, oc_free_func); LOG_INFO("Calling enclave init"); enclave_init(); LOG_INFO("Reading random"); globalRandom = calloc(32,1); int ret = sgx_read_rand(globalRandom, 32); if(ret != SGX_SUCCESS) { LOG_ERROR("sgx_read_rand failed. Aboring enclave."); abort(); } LOG_INFO("Successfully inited enclave. Signed enclave version:" SIGNED_ENCLAVE_VERSION ); #ifndef SGX_DEBUG LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! NEVER USE IN PRODUCTION!"); #endif #if SGX_DEBUG != 0 LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! 
NEVER USE IN PRODUCTION!"); #endif #if SGX_MODE == SIM LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE SIMULATION MODE! NEVER USE IN PRODUCTION!"); #endif } void free_function(void *ptr, size_t sz) { if (sgx_is_within_enclave(ptr, sz)) gmp_free_func(ptr, sz); else { sgx_status_t status; status = oc_free(ptr, sz); if (status != SGX_SUCCESS) abort(); } } void *reallocate_function(void *ptr, size_t osize, size_t nsize) { uint64_t nptr; sgx_status_t status; if (sgx_is_within_enclave(ptr, osize)) { return gmp_realloc_func(ptr, osize, nsize); } status = oc_realloc(&nptr, ptr, osize, nsize); if (status != SGX_SUCCESS) abort(); /* * If the entire range of allocated memory is not outside the enclave * then something truly terrible has happened. In theory, we could * free() and try again, but would you trust the OS at this point? */ if (!sgx_is_outside_enclave((void *) ptr, nsize)) abort(); return (void *) nptr; } void get_global_random(unsigned char *_randBuff, uint64_t _size) { char errString[BUF_LEN]; int status; int *errStatus = &status; INIT_ERROR_STATE CHECK_STATE(_size <= 32) CHECK_STATE(_randBuff); sgx_sha_state_handle_t shaStateHandle; CHECK_STATE(sgx_sha256_init(&shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_update(globalRandom, 32, shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_get_hash(shaStateHandle, (sgx_sha256_hash_t *)globalRandom) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_close(shaStateHandle) == SGX_SUCCESS); memcpy(_randBuff, globalRandom, _size); } void sealHexSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); CHECK_STATE(strnlen(sek_hex, 33) == 32) uint64_t plaintextLen = strlen(sek_hex) + 1; uint64_t sealedLen = sgx_calc_sealed_data_size(0, plaintextLen); sgx_attributes_t attribute_mask; attribute_mask.flags = 0xfffffffffffffff3; attribute_mask.xfrm = 0x0; sgx_misc_select_t misc 
= 0xF0000000; sgx_status_t status = sgx_seal_data_ex(SGX_KEYPOLICY_MRENCLAVE, attribute_mask, misc, 0, NULL, plaintextLen, (uint8_t *) sek_hex, sealedLen, (sgx_sealed_data_t *) encrypted_sek); CHECK_STATUS("seal SEK failed after SEK generation"); uint64_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(encrypt_text_length = plaintextLen); SAFE_CHAR_BUF(unsealedKey, BUF_LEN); uint32_t decLen = BUF_LEN; uint64_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(add_text_length == 0); CHECK_STATE(sgx_is_within_enclave(encrypted_sek,sizeof(sgx_sealed_data_t))); status = sgx_unseal_data((const sgx_sealed_data_t *)encrypted_sek, NULL, NULL, (uint8_t *) unsealedKey, &decLen ); CHECK_STATUS("seal/unseal SEK failed after SEK generation in unseal"); *enc_len = sealedLen; SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE); carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex); memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK(int *errStatus, char *errString, uint8_t *encrypted_sek) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); SAFE_CHAR_BUF(aes_key_hex, BUF_LEN); uint32_t dec_len = BUF_LEN; sgx_status_t status = sgx_unseal_data( (const sgx_sealed_data_t *) encrypted_sek, NULL, 0, (uint8_t *)aes_key_hex, &dec_len); if (status == 0x3001) { LOG_ERROR("Could not decrypt LevelDB storage! 
\n" "If you upgraded sgxwallet software or if you are restoring from backup, please run sgxwallet with -b flag and " "pass your backup key."); } CHECK_STATUS2("sgx unseal SEK failed with status %d"); uint64_t len; hex2carray(aes_key_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK_backup(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, const char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); uint64_t len; hex2carray(sek_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, (char *)sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t *enc_len, char *pub_key_x, char *pub_key_y) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); RANDOM_CHAR_BUF(rand_char, 32); mpz_t seed; mpz_init(seed); mpz_t skey; mpz_init(skey); point Pkey = point_init(); mpz_import(seed, 32, 1, sizeof(rand_char[0]), 0, 0, rand_char); mpz_mod(skey, seed, curve->p); signature_extract_public_key(Pkey, skey, curve); SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, Pkey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, Pkey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SAFE_CHAR_BUF(skey_str, BUF_LEN); SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2); 
mpz_get_str(arr_skey_str, ECDSA_SKEY_BASE, skey); n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { skey_str[i] = '0'; } strncpy(skey_str + n_zeroes, arr_skey_str, 65 - n_zeroes); snprintf(errString, BUF_LEN, "skey len is %d\n", (int) strlen(skey_str)); int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN, ECDSA, NON_DECRYPTABLE, enc_len); CHECK_STATUS("ecdsa private key encryption failed"); status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, BUF_LEN); CHECK_STATUS2("ecdsa private key decr failed with status %d"); SET_SUCCESS clean: mpz_clear(seed); mpz_clear(skey); point_clear(Pkey); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, 
ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; } static uint64_t sigCounter = 0; void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, const char *hash, char *sigR, char *sigS, uint8_t *sig_v, int base) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(hash); CHECK_STATE(sigR); CHECK_STATE(sigS); SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); mpz_t msgMpz; mpz_init(msgMpz); signature sign = signature_init(); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; if (mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid secret key"); LOG_ERROR(errString); goto clean; } if (mpz_set_str(msgMpz, hash, 16) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid message hash"); LOG_ERROR(errString); goto clean; } signature_sign(sign, msgMpz, privateKeyMpz, curve); sigCounter++; if (sigCounter % 1000 == 0) { point Pkey = point_init(); signature_extract_public_key(Pkey, privateKeyMpz, curve); if (!signature_verify(msgMpz, sign, Pkey, curve)) { *errStatus = -2; snprintf(errString, BUF_LEN, "signature is not verified! 
"); point_clear(Pkey); goto clean; } point_clear(Pkey); } SAFE_CHAR_BUF(arrM, BUF_LEN); mpz_get_str(arrM, 16, msgMpz); snprintf(errString, BUF_LEN, "message is %s ", arrM); SAFE_CHAR_BUF(arrR, BUF_LEN); mpz_get_str(arrR, base, sign->r); strncpy(sigR, arrR, 1024); SAFE_CHAR_BUF(arrS, BUF_LEN); mpz_get_str(arrS, base, sign->s); strncpy(sigS, arrS, 1024); *sig_v = sign->v; SET_SUCCESS clean: mpz_clear(privateKeyMpz); mpz_clear(msgMpz); signature_free(sign); LOG_DEBUG(__FUNCTION__ ); LOG_DEBUG("SGX call completed"); } void trustedDecryptKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(key); *errStatus = -9; int status = AES_decrypt(encryptedPrivateKey, enc_len, key, 3072); if (status != 0) { *errStatus = status; snprintf(errString, BUF_LEN, "aes decrypt failed with status %d", status); LOG_ERROR(errString); goto clean; } *errStatus = -10; uint64_t keyLen = strnlen(key, MAX_KEY_LENGTH); if (keyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Key is not null terminated"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; } void trustedEncryptKeyAES(int *errStatus, char *errString, const char *key, uint8_t *encryptedPrivateKey, uint64_t *enc_len) { LOG_INFO(__FUNCTION__); *errString = 0; *errStatus = UNKNOWN_ERROR; CHECK_STATE(key); CHECK_STATE(encryptedPrivateKey); *errStatus = UNKNOWN_ERROR; int status = AES_encrypt((char *)key, encryptedPrivateKey, BUF_LEN, DKG, DECRYPTABLE, enc_len); CHECK_STATUS2("AES encrypt failed with status %d"); SAFE_CHAR_BUF(decryptedKey, BUF_LEN); status = AES_decrypt(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN); CHECK_STATUS2("trustedDecryptKey failed with status %d"); uint64_t decryptedKeyLen = strnlen(decryptedKey, MAX_KEY_LENGTH); if (decryptedKeyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Decrypted key is not null terminated"); LOG_ERROR(errString); goto clean; } 
*errStatus = -8; if (strncmp(key, decryptedKey, MAX_KEY_LENGTH) != 0) { snprintf(errString, BUF_LEN, "Decrypted key does not match original key"); LOG_ERROR(key); LOG_ERROR(decryptedKey); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedBlsSignMessageAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *_hashX, char *_hashY, char *signature) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(_hashX); CHECK_STATE(_hashY); CHECK_STATE(signature); SAFE_CHAR_BUF(key, BUF_LEN);SAFE_CHAR_BUF(sig, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, enc_len, key, BUF_LEN); CHECK_STATUS("AES decrypt failed") if (!enclave_sign(key, _hashX, _hashY, sig)) { strncpy(errString, "Enclave failed to create bls signature", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } strncpy(signature, sig, BUF_LEN); if (strnlen(signature, BUF_LEN) < 10) { strncpy(errString, "Signature too short", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } SET_SUCCESS LOG_DEBUG("SGX call completed"); clean: ; LOG_DEBUG("SGX call completed"); } void trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t *enc_len, size_t _t) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); SAFE_CHAR_BUF(dkg_secret, DKG_BUFER_LENGTH); int status = gen_dkg_poly(dkg_secret, _t); CHECK_STATUS("gen_dkg_poly failed") status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN, DKG, DECRYPTABLE, enc_len); CHECK_STATUS("SGX AES encrypt DKG poly failed"); SAFE_CHAR_BUF(decr_dkg_secret, DKG_BUFER_LENGTH); status = AES_decrypt(encrypted_dkg_secret, *enc_len, decr_dkg_secret, DKG_BUFER_LENGTH); CHECK_STATUS("aes decrypt dkg poly failed"); if (strcmp(dkg_secret, decr_dkg_secret) != 0) { snprintf(errString, BUF_LEN, "encrypted poly is not equal to decrypted poly"); 
LOG_ERROR(errString); *errStatus = -333; goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len, uint8_t *decrypted_dkg_secret) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(decrypted_dkg_secret); int status = AES_decrypt(encrypted_dkg_secret, enc_len, (char *) decrypted_dkg_secret, 3072); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint64_t enc_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_poly); memset(getThreadLocalDecryptedDkgPoly(), 0, DKG_BUFER_LENGTH); int status = AES_decrypt(encrypted_poly, enc_len, (char *) getThreadLocalDecryptedDkgPoly(), DKG_BUFER_LENGTH); CHECK_STATUS2("sgx_unseal_data - encrypted_poly failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint64_t *dec_len, char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n, uint8_t ind) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE uint64_t enc_len; int status; CHECK_STATE(encrypted_skey); CHECK_STATE(result_str); CHECK_STATE(s_shareG2); CHECK_STATE(pub_keyB); LOG_DEBUG(__FUNCTION__); SAFE_CHAR_BUF(skey, BUF_LEN); SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN); trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y); CHECK_STATUS("trustedGenerateEcdsaKeyAES failed"); status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN); skey[ECDSA_SKEY_LEN - 1] = 0; CHECK_STATUS2("AES_decrypt failed (in trustedGetEncryptedSecretShareAES) with status %d"); *dec_len = 
enc_len; SAFE_CHAR_BUF(common_key, BUF_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, BUF_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, BUF_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len, char *public_shares, unsigned _t, unsigned _n) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(public_shares); CHECK_STATE(_t <= _n && _n > 0) SAFE_CHAR_BUF(decrypted_dkg_secret, DKG_MAX_SEALED_LEN); int status = AES_decrypt(encrypted_dkg_secret, enc_len, decrypted_dkg_secret, DKG_MAX_SEALED_LEN); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d"); status = calc_public_shares(decrypted_dkg_secret, public_shares, _t) != 0; CHECK_STATUS("t does not match polynomial in db"); SET_SUCCESS clean: ; LOG_INFO("SGX call completed"); } void trustedDkgVerifyAES(int *errStatus, char *errString, const char *public_shares, const char *s_share, uint8_t *encryptedPrivateKey, uint64_t enc_len, unsigned _t, int _ind, int *result) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(public_shares); CHECK_STATE(s_share); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey,BUF_LEN); mpz_t s; mpz_init(s); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("AES_decrypt failed (in 
trustedDkgVerifyAES) with status %d"); SAFE_CHAR_BUF(encr_sshare, BUF_LEN); strncpy(encr_sshare, s_share, ECDSA_SKEY_LEN - 1); SAFE_CHAR_BUF(common_key, BUF_LEN); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); SAFE_CHAR_BUF(decr_sshare, BUF_LEN); status=xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed") status = mpz_set_str(s, decr_sshare, 16); CHECK_STATUS("invalid decr secret share"); *result = Verification(public_shares, s, _t, _ind); SET_SUCCESS clean: mpz_clear(s); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedCreateBlsKeyAES(int *errStatus, char *errString, const char *s_shares, uint8_t *encryptedPrivateKey, uint64_t key_len, uint8_t *encr_bls_key, uint64_t *enc_bls_key_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(s_shares); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(encr_bls_key); SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t sum; mpz_init(sum); mpz_set_ui(sum, 0); mpz_t q; mpz_init(q); mpz_set_str(q, "21888242871839275222246405745257275088548364400416034343698204186575808495617", 10); mpz_t bls_key; mpz_init(bls_key); int status = AES_decrypt(encryptedPrivateKey, key_len, skey, BUF_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[ECDSA_SKEY_LEN - 1] = 0; int num_shares = strlen(s_shares) / 192; for (int i = 0; i < num_shares; i++) { SAFE_CHAR_BUF(encr_sshare, 65); strncpy(encr_sshare, s_shares + 192 * i, 64); encr_sshare[64] = 0; SAFE_CHAR_BUF(s_share, 193); strncpy(s_share, s_shares + 192 * i, 192); s_share[192] = 0; SAFE_CHAR_BUF(common_key, 65); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); common_key[64] = 0; SAFE_CHAR_BUF(decr_sshare, 65); status = xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed"); decr_sshare[64] = 0; mpz_t decr_secret_share; mpz_init(decr_secret_share); if (mpz_set_str(decr_secret_share, decr_sshare, 16) == -1) 
{ *errStatus = 111; snprintf(errString, BUF_LEN, "invalid decrypted secret share"); LOG_ERROR(errString); mpz_clear(decr_secret_share); goto clean; } mpz_addmul_ui(sum, decr_secret_share, 1); mpz_clear(decr_secret_share); } mpz_mod(bls_key, sum, q); SAFE_CHAR_BUF(key_share, BLS_KEY_LENGTH); SAFE_CHAR_BUF(arr_skey_str, BUF_LEN); mpz_get_str(arr_skey_str, 16, bls_key); int n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { key_share[i] = '0'; } strncpy(key_share + n_zeroes, arr_skey_str, 65 - n_zeroes); key_share[BLS_KEY_LENGTH - 1] = 0; status = AES_encrypt(key_share, encr_bls_key, BUF_LEN, BLS, NON_DECRYPTABLE, enc_bls_key_len); CHECK_STATUS2("aes encrypt bls private key failed with status %d "); SET_SUCCESS clean: mpz_clear(bls_key); mpz_clear(sum); mpz_clear(q); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetBlsPubKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t key_len, char *bls_pub_key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(bls_pub_key); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey_hex, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, BUF_LEN); CHECK_STATUS2("AES decrypt failed %d"); skey_hex[ECDSA_SKEY_LEN - 1] = 0; status = calc_bls_public_key(skey_hex, bls_pub_key); CHECK_STATUS("could not calculate bls public key"); SET_SUCCESS static uint64_t counter = 0; clean: if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len, uint8_t *decrypted_dkg_secret) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(decrypted_dkg_secret); int status = AES_decrypt(encrypted_dkg_secret, enc_len, (char *) decrypted_dkg_secret, 3072); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); }
trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len, uint8_t *decrypted_dkg_secret) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(decrypted_dkg_secret); int status = AES_decrypt(encrypted_dkg_secret, enc_len, (char *) decrypted_dkg_secret, 3072); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); }
{'added': [(125, 'void trustedEnclaveInit(uint64_t _logLevel) {'), (235, ' uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) {'), (258, ' uint64_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (266, ' uint64_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (282, ' uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) {'), (344, ' uint8_t *encrypted_sek, uint64_t *enc_len, const char *sek_hex) {'), (373, ' uint8_t *encryptedPrivateKey, uint64_t *enc_len, char *pub_key_x, char *pub_key_y) {'), (413, ' SAFE_CHAR_BUF(skey_str, BUF_LEN);'), (414, ' SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2);'), (423, ' int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN,'), (424, ' ECDSA, NON_DECRYPTABLE, enc_len);'), (427, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, BUF_LEN);'), (441, ' uint8_t *encryptedPrivateKey, uint64_t enc_len, char *pub_key_x, char *pub_key_y) {'), (445, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (457, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (517, 'void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len,'), (528, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (536, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (602, ' uint64_t enc_len, char *key) {'), (612, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, key, 3072);'), (638, ' uint8_t *encryptedPrivateKey, uint64_t *enc_len) {'), (649, ' int status = AES_encrypt((char *)key, encryptedPrivateKey, BUF_LEN,'), (650, ' DKG, DECRYPTABLE, enc_len);'), (656, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN);'), (672, ' LOG_ERROR(key);'), (673, ' LOG_ERROR(decryptedKey);'), (687, ' uint64_t enc_len, char *_hashX,'), (729, 'trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t *enc_len, 
size_t _t) {'), (741, ' status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN,'), (742, ' DKG, DECRYPTABLE, enc_len);'), (746, ''), (772, ' uint64_t enc_len,'), (794, 'void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint64_t enc_len) {'), (814, 'void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint64_t *dec_len,'), (821, ' uint64_t enc_len;'), (831, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (839, ' status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN);'), (847, ' SAFE_CHAR_BUF(common_key, BUF_LEN);'), (853, ' SAFE_CHAR_BUF(s_share, BUF_LEN);'), (862, ' SAFE_CHAR_BUF(cypher, BUF_LEN);'), (879, 'void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len,'), (917, ' SAFE_CHAR_BUF(skey,BUF_LEN);'), (922, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (926, ' SAFE_CHAR_BUF(encr_sshare, BUF_LEN);'), (930, ' SAFE_CHAR_BUF(common_key, BUF_LEN);'), (936, ' SAFE_CHAR_BUF(decr_sshare, BUF_LEN);'), (958, ' uint64_t *enc_bls_key_len) {'), (968, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (982, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey, BUF_LEN);'), (1042, ' status = AES_encrypt(key_share, encr_bls_key, BUF_LEN, BLS, NON_DECRYPTABLE, enc_bls_key_len);'), (1066, ' SAFE_CHAR_BUF(skey_hex, BUF_LEN);'), (1068, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, BUF_LEN);')], 'deleted': [(125, 'void trustedEnclaveInit(uint32_t _logLevel) {'), (235, ' uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) {'), (258, ' uint32_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (266, ' uint32_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (282, ' uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) {'), (344, ' uint8_t *encrypted_sek, uint32_t *enc_len, const char *sek_hex) {'), (373, 
' uint8_t *encryptedPrivateKey, uint32_t *enc_len, char *pub_key_x, char *pub_key_y) {'), (413, ' SAFE_CHAR_BUF(skey_str, ECDSA_SKEY_LEN);SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2);'), (420, ' skey_str[ECDSA_SKEY_LEN - 1] = 0;'), (423, ' int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN);'), (426, ' *enc_len = strlen(skey_str) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (427, ''), (428, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, ECDSA_SKEY_LEN);'), (442, ' uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) {'), (446, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (458, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (518, 'void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len,'), (529, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (537, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (603, ' uint32_t enc_len, char *key) {'), (613, ' int status = AES_decrypt_DH(encryptedPrivateKey, enc_len, key, 3072);'), (639, ' uint8_t *encryptedPrivateKey, uint32_t *enc_len) {'), (650, ' int status = AES_encrypt_DH((char *)key, encryptedPrivateKey, BUF_LEN);'), (654, ' *enc_len = strlen(key) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (655, ''), (658, ' status = AES_decrypt_DH(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN);'), (687, ' uint32_t enc_len, char *_hashX,'), (729, 'trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t *enc_len, size_t _t) {'), (741, ' status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN);'), (745, ' *enc_len = strlen(dkg_secret) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (771, ' uint32_t enc_len,'), (793, 'void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint32_t enc_len) {'), (813, 'void trustedGetEncryptedSecretShareAES(int 
*errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len,'), (820, ' uint32_t enc_len;'), (830, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (838, ' status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN);'), (846, ' SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN);'), (852, ' SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN);'), (861, ' SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN);'), (878, 'void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len,'), (916, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (921, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (925, ' SAFE_CHAR_BUF(encr_sshare, ECDSA_SKEY_LEN);'), (929, ' SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN);'), (935, ' SAFE_CHAR_BUF(decr_sshare, ECDSA_SKEY_LEN);'), (957, ' uint32_t *enc_bls_key_len) {'), (967, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (981, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey, ECDSA_SKEY_LEN);'), (1041, ' status = AES_encrypt(key_share, encr_bls_key, BUF_LEN);'), (1045, ' *enc_bls_key_len = strlen(key_share) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (1046, ''), (1067, ' SAFE_CHAR_BUF(skey_hex, ECDSA_SKEY_LEN);'), (1069, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, ECDSA_SKEY_LEN);')]}
52
53
678
4,334
16
75
1
https://github.com/skalenetwork/sgxwallet
CVE-2021-36218
CWE-787
1,150
nego.c
C++
nego_read_request
/** * FreeRDP: A Remote Desktop Protocol Implementation * RDP Protocol Security Negotiation * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2014 Norbert Federa <norbert.federa@thincast.com> * Copyright 2015 Thincast Technologies GmbH * Copyright 2015 DI (FH) Martin Haimberger <martin.haimberger@thincast.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <freerdp/log.h> #include "tpkt.h" #include "nego.h" #include "transport.h" #define TAG FREERDP_TAG("core.nego") struct rdp_nego { UINT16 port; UINT32 flags; const char* hostname; char* cookie; BYTE* RoutingToken; DWORD RoutingTokenLength; BOOL SendPreconnectionPdu; UINT32 PreconnectionId; char* PreconnectionBlob; NEGO_STATE state; BOOL TcpConnected; BOOL SecurityConnected; UINT32 CookieMaxLength; BOOL sendNegoData; UINT32 SelectedProtocol; UINT32 RequestedProtocols; BOOL NegotiateSecurityLayer; BOOL EnabledProtocols[16]; BOOL RestrictedAdminModeRequired; BOOL GatewayEnabled; BOOL GatewayBypassLocal; rdpTransport* transport; }; static const char* nego_state_string(NEGO_STATE state) { static const char* const NEGO_STATE_STRINGS[] = { "NEGO_STATE_INITIAL", "NEGO_STATE_EXT", "NEGO_STATE_NLA", "NEGO_STATE_TLS", "NEGO_STATE_RDP", "NEGO_STATE_FAIL", "NEGO_STATE_FINAL", "NEGO_STATE_INVALID" }; if (state >= ARRAYSIZE(NEGO_STATE_STRINGS)) return NEGO_STATE_STRINGS[ARRAYSIZE(NEGO_STATE_STRINGS) - 1]; return 
NEGO_STATE_STRINGS[state]; } static const char* protocol_security_string(UINT32 security) { static const char* PROTOCOL_SECURITY_STRINGS[] = { "RDP", "TLS", "NLA", "UNK", "UNK", "UNK", "UNK", "UNK", "EXT", "UNK" }; if (security >= ARRAYSIZE(PROTOCOL_SECURITY_STRINGS)) return PROTOCOL_SECURITY_STRINGS[ARRAYSIZE(PROTOCOL_SECURITY_STRINGS) - 1]; return PROTOCOL_SECURITY_STRINGS[security]; } static BOOL nego_transport_connect(rdpNego* nego); static BOOL nego_transport_disconnect(rdpNego* nego); static BOOL nego_security_connect(rdpNego* nego); static BOOL nego_send_preconnection_pdu(rdpNego* nego); static BOOL nego_recv_response(rdpNego* nego); static void nego_send(rdpNego* nego); static void nego_process_negotiation_request(rdpNego* nego, wStream* s); static void nego_process_negotiation_response(rdpNego* nego, wStream* s); static void nego_process_negotiation_failure(rdpNego* nego, wStream* s); /** * Negotiate protocol security and connect. * @param nego * @return */ BOOL nego_connect(rdpNego* nego) { rdpSettings* settings = nego->transport->settings; if (nego->state == NEGO_STATE_INITIAL) { if (nego->EnabledProtocols[PROTOCOL_HYBRID_EX]) { nego->state = NEGO_STATE_EXT; } else if (nego->EnabledProtocols[PROTOCOL_HYBRID]) { nego->state = NEGO_STATE_NLA; } else if (nego->EnabledProtocols[PROTOCOL_SSL]) { nego->state = NEGO_STATE_TLS; } else if (nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_RDP; } else { WLog_ERR(TAG, "No security protocol is enabled"); nego->state = NEGO_STATE_FAIL; return FALSE; } if (!nego->NegotiateSecurityLayer) { WLog_DBG(TAG, "Security Layer Negotiation is disabled"); /* attempt only the highest enabled protocol (see nego_attempt_*) */ nego->EnabledProtocols[PROTOCOL_HYBRID] = FALSE; nego->EnabledProtocols[PROTOCOL_SSL] = FALSE; nego->EnabledProtocols[PROTOCOL_RDP] = FALSE; nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = FALSE; if (nego->state == NEGO_STATE_EXT) { nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = TRUE; 
nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE; nego->SelectedProtocol = PROTOCOL_HYBRID_EX; } else if (nego->state == NEGO_STATE_NLA) { nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE; nego->SelectedProtocol = PROTOCOL_HYBRID; } else if (nego->state == NEGO_STATE_TLS) { nego->EnabledProtocols[PROTOCOL_SSL] = TRUE; nego->SelectedProtocol = PROTOCOL_SSL; } else if (nego->state == NEGO_STATE_RDP) { nego->EnabledProtocols[PROTOCOL_RDP] = TRUE; nego->SelectedProtocol = PROTOCOL_RDP; } } if (nego->SendPreconnectionPdu) { if (!nego_send_preconnection_pdu(nego)) { WLog_ERR(TAG, "Failed to send preconnection pdu"); nego->state = NEGO_STATE_FINAL; return FALSE; } } } if (!nego->NegotiateSecurityLayer) { nego->state = NEGO_STATE_FINAL; } else { do { WLog_DBG(TAG, "state: %s", nego_state_string(nego->state)); nego_send(nego); if (nego->state == NEGO_STATE_FAIL) { if (freerdp_get_last_error(nego->transport->context) == FREERDP_ERROR_SUCCESS) WLog_ERR(TAG, "Protocol Security Negotiation Failure"); nego->state = NEGO_STATE_FINAL; return FALSE; } } while (nego->state != NEGO_STATE_FINAL); } WLog_DBG(TAG, "Negotiated %s security", protocol_security_string(nego->SelectedProtocol)); /* update settings with negotiated protocol security */ settings->RequestedProtocols = nego->RequestedProtocols; settings->SelectedProtocol = nego->SelectedProtocol; settings->NegotiationFlags = nego->flags; if (nego->SelectedProtocol == PROTOCOL_RDP) { settings->UseRdpSecurityLayer = TRUE; if (!settings->EncryptionMethods) { /** * Advertise all supported encryption methods if the client * implementation did not set any security methods */ settings->EncryptionMethods = ENCRYPTION_METHOD_40BIT | ENCRYPTION_METHOD_56BIT | ENCRYPTION_METHOD_128BIT | ENCRYPTION_METHOD_FIPS; } } /* finally connect security layer (if not already done) */ if (!nego_security_connect(nego)) { WLog_DBG(TAG, "Failed to connect with %s security", protocol_security_string(nego->SelectedProtocol)); return FALSE; } return TRUE; } BOOL 
nego_disconnect(rdpNego* nego) { nego->state = NEGO_STATE_INITIAL; return nego_transport_disconnect(nego); } /* connect to selected security layer */ BOOL nego_security_connect(rdpNego* nego) { if (!nego->TcpConnected) { nego->SecurityConnected = FALSE; } else if (!nego->SecurityConnected) { if (nego->SelectedProtocol == PROTOCOL_HYBRID) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_HYBRID"); nego->SecurityConnected = transport_connect_nla(nego->transport); } else if (nego->SelectedProtocol == PROTOCOL_SSL) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_SSL"); nego->SecurityConnected = transport_connect_tls(nego->transport); } else if (nego->SelectedProtocol == PROTOCOL_RDP) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_RDP"); nego->SecurityConnected = transport_connect_rdp(nego->transport); } else { WLog_ERR(TAG, "cannot connect security layer because no protocol has been selected yet."); } } return nego->SecurityConnected; } /** * Connect TCP layer. * @param nego * @return */ static BOOL nego_tcp_connect(rdpNego* nego) { if (!nego->TcpConnected) { if (nego->GatewayEnabled) { if (nego->GatewayBypassLocal) { /* Attempt a direct connection first, and then fallback to using the gateway */ WLog_INFO(TAG, "Detecting if host can be reached locally. - This might take some time."); WLog_INFO(TAG, "To disable auto detection use /gateway-usage-method:direct"); transport_set_gateway_enabled(nego->transport, FALSE); nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 1); } if (!nego->TcpConnected) { transport_set_gateway_enabled(nego->transport, TRUE); nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15); } } else { nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15); } } return nego->TcpConnected; } /** * Connect TCP layer. For direct approach, connect security layer as well. 
* @param nego * @return */ BOOL nego_transport_connect(rdpNego* nego) { if (!nego_tcp_connect(nego)) return FALSE; if (nego->TcpConnected && !nego->NegotiateSecurityLayer) return nego_security_connect(nego); return nego->TcpConnected; } /** * Disconnect TCP layer. * @param nego * @return */ BOOL nego_transport_disconnect(rdpNego* nego) { if (nego->TcpConnected) transport_disconnect(nego->transport); nego->TcpConnected = FALSE; nego->SecurityConnected = FALSE; return TRUE; } /** * Send preconnection information if enabled. * @param nego * @return */ BOOL nego_send_preconnection_pdu(rdpNego* nego) { wStream* s; UINT32 cbSize; UINT16 cchPCB = 0; WCHAR* wszPCB = NULL; WLog_DBG(TAG, "Sending preconnection PDU"); if (!nego_tcp_connect(nego)) return FALSE; /* it's easier to always send the version 2 PDU, and it's just 2 bytes overhead */ cbSize = PRECONNECTION_PDU_V2_MIN_SIZE; if (nego->PreconnectionBlob) { cchPCB = (UINT16)ConvertToUnicode(CP_UTF8, 0, nego->PreconnectionBlob, -1, &wszPCB, 0); cchPCB += 1; /* zero-termination */ cbSize += cchPCB * 2; } s = Stream_New(NULL, cbSize); if (!s) { free(wszPCB); WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } Stream_Write_UINT32(s, cbSize); /* cbSize */ Stream_Write_UINT32(s, 0); /* Flags */ Stream_Write_UINT32(s, PRECONNECTION_PDU_V2); /* Version */ Stream_Write_UINT32(s, nego->PreconnectionId); /* Id */ Stream_Write_UINT16(s, cchPCB); /* cchPCB */ if (wszPCB) { Stream_Write(s, wszPCB, cchPCB * 2); /* wszPCB */ free(wszPCB); } Stream_SealLength(s); if (transport_write(nego->transport, s) < 0) { Stream_Free(s, TRUE); return FALSE; } Stream_Free(s, TRUE); return TRUE; } /** * Attempt negotiating NLA + TLS extended security. 
 * @param nego
 */
/* Try PROTOCOL_HYBRID_EX (NLA extended) first: connect the transport, send a
 * negotiation request advertising HYBRID | SSL | HYBRID_EX, then read the
 * server response. On rejection, disconnect and fall back to the next
 * strongest protocol still enabled. */
static void nego_attempt_ext(rdpNego* nego)
{
	nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL | PROTOCOL_HYBRID_EX;
	WLog_DBG(TAG, "Attempting NLA extended security");

	if (!nego_transport_connect(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_send_negotiation_request(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_recv_response(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	WLog_DBG(TAG, "state: %s", nego_state_string(nego->state));

	if (nego->state != NEGO_STATE_FINAL)
	{
		/* server rejected this attempt: drop the connection and retry with the
		 * next strongest enabled protocol (NLA, then TLS, then plain RDP) */
		nego_transport_disconnect(nego);

		if (nego->EnabledProtocols[PROTOCOL_HYBRID])
			nego->state = NEGO_STATE_NLA;
		else if (nego->EnabledProtocols[PROTOCOL_SSL])
			nego->state = NEGO_STATE_TLS;
		else if (nego->EnabledProtocols[PROTOCOL_RDP])
			nego->state = NEGO_STATE_RDP;
		else
			nego->state = NEGO_STATE_FAIL;
	}
}

/**
 * Attempt negotiating NLA + TLS security.
 * @param nego
 */
/* Same pattern as nego_attempt_ext, one rung lower: advertise HYBRID | SSL,
 * and on rejection fall back to TLS, then plain RDP. */
static void nego_attempt_nla(rdpNego* nego)
{
	nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL;
	WLog_DBG(TAG, "Attempting NLA security");

	if (!nego_transport_connect(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_send_negotiation_request(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_recv_response(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	WLog_DBG(TAG, "state: %s", nego_state_string(nego->state));

	if (nego->state != NEGO_STATE_FINAL)
	{
		/* fall back: TLS, then plain RDP, otherwise give up */
		nego_transport_disconnect(nego);

		if (nego->EnabledProtocols[PROTOCOL_SSL])
			nego->state = NEGO_STATE_TLS;
		else if (nego->EnabledProtocols[PROTOCOL_RDP])
			nego->state = NEGO_STATE_RDP;
		else
			nego->state = NEGO_STATE_FAIL;
	}
}

/**
 * Attempt negotiating TLS security.
* @param nego */ static void nego_attempt_tls(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_SSL; WLog_DBG(TAG, "Attempting TLS security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating standard RDP security. * @param nego */ static void nego_attempt_rdp(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_RDP; WLog_DBG(TAG, "Attempting RDP security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } } /** * Wait to receive a negotiation response * @param nego */ BOOL nego_recv_response(rdpNego* nego) { int status; wStream* s; s = Stream_New(NULL, 1024); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } status = transport_read_pdu(nego->transport, s); if (status < 0) { Stream_Free(s, TRUE); return FALSE; } status = nego_recv(nego->transport, s, nego); Stream_Free(s, TRUE); if (status < 0) return FALSE; return TRUE; } /** * Receive protocol security negotiation message.\n * @msdn{cc240501} * @param transport transport * @param s stream * @param extra nego pointer */ int nego_recv(rdpTransport* transport, wStream* s, void* extra) { BYTE li; BYTE type; UINT16 length; rdpNego* nego = (rdpNego*)extra; if (!tpkt_read_header(s, &length)) return -1; if (!tpdu_read_connection_confirm(s, &li, length)) return -1; if (li > 6) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ switch (type) { case TYPE_RDP_NEG_RSP: nego_process_negotiation_response(nego, s); 
WLog_DBG(TAG, "selected_protocol: %" PRIu32 "", nego->SelectedProtocol); /* enhanced security selected ? */ if (nego->SelectedProtocol) { if ((nego->SelectedProtocol == PROTOCOL_HYBRID) && (!nego->EnabledProtocols[PROTOCOL_HYBRID])) { nego->state = NEGO_STATE_FAIL; } if ((nego->SelectedProtocol == PROTOCOL_SSL) && (!nego->EnabledProtocols[PROTOCOL_SSL])) { nego->state = NEGO_STATE_FAIL; } } else if (!nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_FAIL; } break; case TYPE_RDP_NEG_FAILURE: nego_process_negotiation_failure(nego, s); break; } } else if (li == 6) { WLog_DBG(TAG, "no rdpNegData"); if (!nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_FAIL; else nego->state = NEGO_STATE_FINAL; } else { WLog_ERR(TAG, "invalid negotiation response"); nego->state = NEGO_STATE_FAIL; } if (!tpkt_ensure_stream_consumed(s, length)) return -1; return 0; } /** * Read optional routing token or cookie of X.224 Connection Request PDU. * @msdn{cc240470} * @param nego * @param s stream */ static BOOL nego_read_request_token_or_cookie(rdpNego* nego, wStream* s) { /* routingToken and cookie are optional and mutually exclusive! * * routingToken (variable): An optional and variable-length routing * token (used for load balancing) terminated by a 0x0D0A two-byte * sequence: (check [MSFT-SDLBTS] for details!) 
* Cookie:[space]msts=[ip address].[port].[reserved][\x0D\x0A] * * cookie (variable): An optional and variable-length ANSI character * string terminated by a 0x0D0A two-byte sequence: * Cookie:[space]mstshash=[ANSISTRING][\x0D\x0A] */ BYTE* str = NULL; UINT16 crlf = 0; size_t pos, len; BOOL result = FALSE; BOOL isToken = FALSE; size_t remain = Stream_GetRemainingLength(s); str = Stream_Pointer(s); pos = Stream_GetPosition(s); /* minimum length for token is 15 */ if (remain < 15) return TRUE; if (memcmp(Stream_Pointer(s), "Cookie: mstshash=", 17) != 0) { isToken = TRUE; } else { /* not a token, minimum length for cookie is 19 */ if (remain < 19) return TRUE; Stream_Seek(s, 17); } while ((remain = Stream_GetRemainingLength(s)) >= 2) { Stream_Read_UINT16(s, crlf); if (crlf == 0x0A0D) break; Stream_Rewind(s, 1); } if (crlf == 0x0A0D) { Stream_Rewind(s, 2); len = Stream_GetPosition(s) - pos; remain = Stream_GetRemainingLength(s); Stream_Write_UINT16(s, 0); if (strnlen((char*)str, len) == len) { if (isToken) result = nego_set_routing_token(nego, str, len); else result = nego_set_cookie(nego, (char*)str); } } if (!result) { Stream_SetPosition(s, pos); WLog_ERR(TAG, "invalid %s received", isToken ? "routing token" : "cookie"); } else { WLog_DBG(TAG, "received %s [%s]", isToken ? 
"routing token" : "cookie", str); } return result; } /** * Read protocol security negotiation request message.\n * @param nego * @param s stream */ BOOL nego_read_request(rdpNego* nego, wStream* s) { BYTE li; BYTE type; UINT16 length; if (!tpkt_read_header(s, &length)) return FALSE; if (!tpdu_read_connection_request(s, &li, length)) return FALSE; if (li != Stream_GetRemainingLength(s) + 6) { WLog_ERR(TAG, "Incorrect TPDU length indicator."); return FALSE; } if (!nego_read_request_token_or_cookie(nego, s)) { WLog_ERR(TAG, "Failed to parse routing token or cookie."); return FALSE; } if (Stream_GetRemainingLength(s) >= 8) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ if (type != TYPE_RDP_NEG_REQ) { WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", type); return FALSE; } nego_process_negotiation_request(nego, s); } return tpkt_ensure_stream_consumed(s, length); } /** * Send protocol security negotiation message. * @param nego */ void nego_send(rdpNego* nego) { if (nego->state == NEGO_STATE_EXT) nego_attempt_ext(nego); else if (nego->state == NEGO_STATE_NLA) nego_attempt_nla(nego); else if (nego->state == NEGO_STATE_TLS) nego_attempt_tls(nego); else if (nego->state == NEGO_STATE_RDP) nego_attempt_rdp(nego); else WLog_ERR(TAG, "invalid negotiation state for sending"); } /** * Send RDP Negotiation Request (RDP_NEG_REQ).\n * @msdn{cc240500}\n * @msdn{cc240470} * @param nego */ BOOL nego_send_negotiation_request(rdpNego* nego) { BOOL rc = FALSE; wStream* s; size_t length; size_t bm, em; BYTE flags = 0; size_t cookie_length; s = Stream_New(NULL, 512); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } length = TPDU_CONNECTION_REQUEST_LENGTH; bm = Stream_GetPosition(s); Stream_Seek(s, length); if (nego->RoutingToken) { Stream_Write(s, nego->RoutingToken, nego->RoutingTokenLength); /* Ensure Routing Token is correctly terminated - may already be present in string */ if ((nego->RoutingTokenLength > 2) && 
(nego->RoutingToken[nego->RoutingTokenLength - 2] == 0x0D) && (nego->RoutingToken[nego->RoutingTokenLength - 1] == 0x0A)) { WLog_DBG(TAG, "Routing token looks correctly terminated - use verbatim"); length += nego->RoutingTokenLength; } else { WLog_DBG(TAG, "Adding terminating CRLF to routing token"); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += nego->RoutingTokenLength + 2; } } else if (nego->cookie) { cookie_length = strlen(nego->cookie); if (cookie_length > nego->CookieMaxLength) cookie_length = nego->CookieMaxLength; Stream_Write(s, "Cookie: mstshash=", 17); Stream_Write(s, (BYTE*)nego->cookie, cookie_length); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += cookie_length + 19; } WLog_DBG(TAG, "RequestedProtocols: %" PRIu32 "", nego->RequestedProtocols); if ((nego->RequestedProtocols > PROTOCOL_RDP) || (nego->sendNegoData)) { /* RDP_NEG_DATA must be present for TLS and NLA */ if (nego->RestrictedAdminModeRequired) flags |= RESTRICTED_ADMIN_MODE_REQUIRED; Stream_Write_UINT8(s, TYPE_RDP_NEG_REQ); Stream_Write_UINT8(s, flags); Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, nego->RequestedProtocols); /* requestedProtocols */ length += 8; } if (length > UINT16_MAX) goto fail; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); tpkt_write_header(s, (UINT16)length); tpdu_write_connection_request(s, (UINT16)length - 5); Stream_SetPosition(s, em); Stream_SealLength(s); rc = (transport_write(nego->transport, s) >= 0); fail: Stream_Free(s, TRUE); return rc; } /** * Process Negotiation Request from Connection Request message. 
* @param nego * @param s */ void nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; } /** * Process Negotiation Response from Connection Confirm message. * @param nego * @param s */ void nego_process_negotiation_response(rdpNego* nego, wStream* s) { UINT16 length; WLog_DBG(TAG, "RDP_NEG_RSP"); if (Stream_GetRemainingLength(s) < 7) { WLog_ERR(TAG, "Invalid RDP_NEG_RSP"); nego->state = NEGO_STATE_FAIL; return; } Stream_Read_UINT8(s, nego->flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->SelectedProtocol); nego->state = NEGO_STATE_FINAL; } /** * Process Negotiation Failure from Connection Confirm message. * @param nego * @param s */ void nego_process_negotiation_failure(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; UINT32 failureCode; WLog_DBG(TAG, "RDP_NEG_FAILURE"); Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, failureCode); switch (failureCode) { case SSL_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_REQUIRED_BY_SERVER"); break; case SSL_NOT_ALLOWED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_NOT_ALLOWED_BY_SERVER"); nego->sendNegoData = TRUE; break; case SSL_CERT_NOT_ON_SERVER: WLog_ERR(TAG, "Error: SSL_CERT_NOT_ON_SERVER"); nego->sendNegoData = TRUE; break; case INCONSISTENT_FLAGS: WLog_ERR(TAG, "Error: INCONSISTENT_FLAGS"); break; case HYBRID_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: HYBRID_REQUIRED_BY_SERVER"); break; default: WLog_ERR(TAG, "Error: Unknown protocol security error %" PRIu32 "", failureCode); break; } nego->state = NEGO_STATE_FAIL; } /** * Send RDP Negotiation Response (RDP_NEG_RSP).\n * @param nego */ BOOL nego_send_negotiation_response(rdpNego* nego) { UINT16 length; size_t bm, em; BOOL status; wStream* s; BYTE flags; 
rdpSettings* settings; status = TRUE; settings = nego->transport->settings; s = Stream_New(NULL, 512); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } length = TPDU_CONNECTION_CONFIRM_LENGTH; bm = Stream_GetPosition(s); Stream_Seek(s, length); if (nego->SelectedProtocol & PROTOCOL_FAILED_NEGO) { UINT32 errorCode = (nego->SelectedProtocol & ~PROTOCOL_FAILED_NEGO); flags = 0; Stream_Write_UINT8(s, TYPE_RDP_NEG_FAILURE); Stream_Write_UINT8(s, flags); /* flags */ Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, errorCode); length += 8; status = FALSE; } else { flags = EXTENDED_CLIENT_DATA_SUPPORTED; if (settings->SupportGraphicsPipeline) flags |= DYNVC_GFX_PROTOCOL_SUPPORTED; /* RDP_NEG_DATA must be present for TLS, NLA, and RDP */ Stream_Write_UINT8(s, TYPE_RDP_NEG_RSP); Stream_Write_UINT8(s, flags); /* flags */ Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, nego->SelectedProtocol); /* selectedProtocol */ length += 8; } em = Stream_GetPosition(s); Stream_SetPosition(s, bm); tpkt_write_header(s, length); tpdu_write_connection_confirm(s, length - 5); Stream_SetPosition(s, em); Stream_SealLength(s); if (transport_write(nego->transport, s) < 0) { Stream_Free(s, TRUE); return FALSE; } Stream_Free(s, TRUE); if (status) { /* update settings with negotiated protocol security */ settings->RequestedProtocols = nego->RequestedProtocols; settings->SelectedProtocol = nego->SelectedProtocol; if (settings->SelectedProtocol == PROTOCOL_RDP) { settings->TlsSecurity = FALSE; settings->NlaSecurity = FALSE; settings->RdpSecurity = TRUE; settings->UseRdpSecurityLayer = TRUE; if (settings->EncryptionLevel == ENCRYPTION_LEVEL_NONE) { /** * If the server implementation did not explicitely set a * encryption level we default to client compatible */ settings->EncryptionLevel = ENCRYPTION_LEVEL_CLIENT_COMPATIBLE; } if (settings->LocalConnection) { /** * Note: This hack was firstly introduced in commit 95f5e115 to * 
disable the unnecessary encryption with peers connecting to * 127.0.0.1 or local unix sockets. * This also affects connections via port tunnels! (e.g. ssh -L) */ WLog_INFO(TAG, "Turning off encryption for local peer with standard rdp security"); settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } if (!settings->RdpServerRsaKey && !settings->RdpKeyFile && !settings->RdpKeyContent) { WLog_ERR(TAG, "Missing server certificate"); return FALSE; } } else if (settings->SelectedProtocol == PROTOCOL_SSL) { settings->TlsSecurity = TRUE; settings->NlaSecurity = FALSE; settings->RdpSecurity = FALSE; settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } else if (settings->SelectedProtocol == PROTOCOL_HYBRID) { settings->TlsSecurity = TRUE; settings->NlaSecurity = TRUE; settings->RdpSecurity = FALSE; settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } } return status; } /** * Initialize NEGO state machine. * @param nego */ void nego_init(rdpNego* nego) { nego->state = NEGO_STATE_INITIAL; nego->RequestedProtocols = PROTOCOL_RDP; nego->CookieMaxLength = DEFAULT_COOKIE_MAX_LENGTH; nego->sendNegoData = FALSE; nego->flags = 0; } /** * Create a new NEGO state machine instance. * @param transport * @return */ rdpNego* nego_new(rdpTransport* transport) { rdpNego* nego = (rdpNego*)calloc(1, sizeof(rdpNego)); if (!nego) return NULL; nego->transport = transport; nego_init(nego); return nego; } /** * Free NEGO state machine. * @param nego */ void nego_free(rdpNego* nego) { if (nego) { free(nego->RoutingToken); free(nego->cookie); free(nego); } } /** * Set target hostname and port. * @param nego * @param hostname * @param port */ BOOL nego_set_target(rdpNego* nego, const char* hostname, UINT16 port) { if (!nego || !hostname) return FALSE; nego->hostname = hostname; nego->port = port; return TRUE; } /** * Enable security layer negotiation. 
* @param nego pointer to the negotiation structure * @param enable_rdp whether to enable security layer negotiation (TRUE for enabled, FALSE for * disabled) */ void nego_set_negotiation_enabled(rdpNego* nego, BOOL NegotiateSecurityLayer) { WLog_DBG(TAG, "Enabling security layer negotiation: %s", NegotiateSecurityLayer ? "TRUE" : "FALSE"); nego->NegotiateSecurityLayer = NegotiateSecurityLayer; } /** * Enable restricted admin mode. * @param nego pointer to the negotiation structure * @param enable_restricted whether to enable security layer negotiation (TRUE for enabled, FALSE * for disabled) */ void nego_set_restricted_admin_mode_required(rdpNego* nego, BOOL RestrictedAdminModeRequired) { WLog_DBG(TAG, "Enabling restricted admin mode: %s", RestrictedAdminModeRequired ? "TRUE" : "FALSE"); nego->RestrictedAdminModeRequired = RestrictedAdminModeRequired; } void nego_set_gateway_enabled(rdpNego* nego, BOOL GatewayEnabled) { nego->GatewayEnabled = GatewayEnabled; } void nego_set_gateway_bypass_local(rdpNego* nego, BOOL GatewayBypassLocal) { nego->GatewayBypassLocal = GatewayBypassLocal; } /** * Enable RDP security protocol. * @param nego pointer to the negotiation structure * @param enable_rdp whether to enable normal RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_rdp(rdpNego* nego, BOOL enable_rdp) { WLog_DBG(TAG, "Enabling RDP security: %s", enable_rdp ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_RDP] = enable_rdp; } /** * Enable TLS security protocol. * @param nego pointer to the negotiation structure * @param enable_tls whether to enable TLS + RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_tls(rdpNego* nego, BOOL enable_tls) { WLog_DBG(TAG, "Enabling TLS security: %s", enable_tls ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_SSL] = enable_tls; } /** * Enable NLA security protocol. 
* @param nego pointer to the negotiation structure * @param enable_nla whether to enable network level authentication protocol (TRUE for enabled, * FALSE for disabled) */ void nego_enable_nla(rdpNego* nego, BOOL enable_nla) { WLog_DBG(TAG, "Enabling NLA security: %s", enable_nla ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID] = enable_nla; } /** * Enable NLA extended security protocol. * @param nego pointer to the negotiation structure * @param enable_ext whether to enable network level authentication extended protocol (TRUE for * enabled, FALSE for disabled) */ void nego_enable_ext(rdpNego* nego, BOOL enable_ext) { WLog_DBG(TAG, "Enabling NLA extended security: %s", enable_ext ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = enable_ext; } /** * Set routing token. * @param nego * @param RoutingToken * @param RoutingTokenLength */ BOOL nego_set_routing_token(rdpNego* nego, BYTE* RoutingToken, DWORD RoutingTokenLength) { if (RoutingTokenLength == 0) return FALSE; free(nego->RoutingToken); nego->RoutingTokenLength = RoutingTokenLength; nego->RoutingToken = (BYTE*)malloc(nego->RoutingTokenLength); if (!nego->RoutingToken) return FALSE; CopyMemory(nego->RoutingToken, RoutingToken, nego->RoutingTokenLength); return TRUE; } /** * Set cookie. * @param nego * @param cookie */ BOOL nego_set_cookie(rdpNego* nego, char* cookie) { if (nego->cookie) { free(nego->cookie); nego->cookie = NULL; } if (!cookie) return TRUE; nego->cookie = _strdup(cookie); if (!nego->cookie) return FALSE; return TRUE; } /** * Set cookie maximum length * @param nego * @param CookieMaxLength */ void nego_set_cookie_max_length(rdpNego* nego, UINT32 CookieMaxLength) { nego->CookieMaxLength = CookieMaxLength; } /** * Enable / disable preconnection PDU. * @param nego * @param send_pcpdu */ void nego_set_send_preconnection_pdu(rdpNego* nego, BOOL SendPreconnectionPdu) { nego->SendPreconnectionPdu = SendPreconnectionPdu; } /** * Set preconnection id. 
 * @param nego
 * @param id
 */
void nego_set_preconnection_id(rdpNego* nego, UINT32 PreconnectionId)
{
	nego->PreconnectionId = PreconnectionId;
}

/**
 * Set preconnection blob.
 * Note: the string is stored by reference, not copied; the caller retains
 * ownership and must keep it alive for the duration of the negotiation.
 * @param nego
 * @param blob
 */
void nego_set_preconnection_blob(rdpNego* nego, char* PreconnectionBlob)
{
	nego->PreconnectionBlob = PreconnectionBlob;
}

/* Return the protocol selected during negotiation, or 0 if nego is NULL. */
UINT32 nego_get_selected_protocol(rdpNego* nego)
{
	if (!nego)
		return 0;

	return nego->SelectedProtocol;
}

/* Override the selected protocol; returns FALSE if nego is NULL. */
BOOL nego_set_selected_protocol(rdpNego* nego, UINT32 SelectedProtocol)
{
	if (!nego)
		return FALSE;

	nego->SelectedProtocol = SelectedProtocol;
	return TRUE;
}

/* Return the protocol bitmask requested from the peer, or 0 if nego is NULL. */
UINT32 nego_get_requested_protocols(rdpNego* nego)
{
	if (!nego)
		return 0;

	return nego->RequestedProtocols;
}

/* Override the requested protocol bitmask; returns FALSE if nego is NULL. */
BOOL nego_set_requested_protocols(rdpNego* nego, UINT32 RequestedProtocols)
{
	if (!nego)
		return FALSE;

	nego->RequestedProtocols = RequestedProtocols;
	return TRUE;
}

/* Return the current negotiation state machine state (NEGO_STATE_FAIL if NULL). */
NEGO_STATE nego_get_state(rdpNego* nego)
{
	if (!nego)
		return NEGO_STATE_FAIL;

	return nego->state;
}

/* Force the negotiation state machine into a given state; FALSE if nego is NULL. */
BOOL nego_set_state(rdpNego* nego, NEGO_STATE state)
{
	if (!nego)
		return FALSE;

	nego->state = state;
	return TRUE;
}

/* Return the NLA authentication identity held by the transport (NULL if nego is NULL). */
SEC_WINNT_AUTH_IDENTITY* nego_get_identity(rdpNego* nego)
{
	if (!nego)
		return NULL;

	return nla_get_identity(nego->transport->nla);
}

/* Free the transport's NLA context and clear the pointer to prevent reuse. */
void nego_free_nla(rdpNego* nego)
{
	if (!nego || !nego->transport)
		return;

	nla_free(nego->transport->nla);
	nego->transport->nla = NULL;
}

/* Return the stored routing token (and optionally its length); the buffer
 * remains owned by nego and must not be freed by the caller. */
const BYTE* nego_get_routing_token(rdpNego* nego, DWORD* RoutingTokenLength)
{
	if (!nego)
		return NULL;

	if (RoutingTokenLength)
		*RoutingTokenLength = nego->RoutingTokenLength;

	return nego->RoutingToken;
}
/** * FreeRDP: A Remote Desktop Protocol Implementation * RDP Protocol Security Negotiation * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2014 Norbert Federa <norbert.federa@thincast.com> * Copyright 2015 Thincast Technologies GmbH * Copyright 2015 DI (FH) Martin Haimberger <martin.haimberger@thincast.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <freerdp/log.h> #include "tpkt.h" #include "nego.h" #include "transport.h" #define TAG FREERDP_TAG("core.nego") struct rdp_nego { UINT16 port; UINT32 flags; const char* hostname; char* cookie; BYTE* RoutingToken; DWORD RoutingTokenLength; BOOL SendPreconnectionPdu; UINT32 PreconnectionId; char* PreconnectionBlob; NEGO_STATE state; BOOL TcpConnected; BOOL SecurityConnected; UINT32 CookieMaxLength; BOOL sendNegoData; UINT32 SelectedProtocol; UINT32 RequestedProtocols; BOOL NegotiateSecurityLayer; BOOL EnabledProtocols[16]; BOOL RestrictedAdminModeRequired; BOOL GatewayEnabled; BOOL GatewayBypassLocal; rdpTransport* transport; }; static const char* nego_state_string(NEGO_STATE state) { static const char* const NEGO_STATE_STRINGS[] = { "NEGO_STATE_INITIAL", "NEGO_STATE_EXT", "NEGO_STATE_NLA", "NEGO_STATE_TLS", "NEGO_STATE_RDP", "NEGO_STATE_FAIL", "NEGO_STATE_FINAL", "NEGO_STATE_INVALID" }; if (state >= ARRAYSIZE(NEGO_STATE_STRINGS)) return NEGO_STATE_STRINGS[ARRAYSIZE(NEGO_STATE_STRINGS) - 1]; return 
NEGO_STATE_STRINGS[state]; } static const char* protocol_security_string(UINT32 security) { static const char* PROTOCOL_SECURITY_STRINGS[] = { "RDP", "TLS", "NLA", "UNK", "UNK", "UNK", "UNK", "UNK", "EXT", "UNK" }; if (security >= ARRAYSIZE(PROTOCOL_SECURITY_STRINGS)) return PROTOCOL_SECURITY_STRINGS[ARRAYSIZE(PROTOCOL_SECURITY_STRINGS) - 1]; return PROTOCOL_SECURITY_STRINGS[security]; } static BOOL nego_transport_connect(rdpNego* nego); static BOOL nego_transport_disconnect(rdpNego* nego); static BOOL nego_security_connect(rdpNego* nego); static BOOL nego_send_preconnection_pdu(rdpNego* nego); static BOOL nego_recv_response(rdpNego* nego); static void nego_send(rdpNego* nego); static BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s); static BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s); static BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s); /** * Negotiate protocol security and connect. * @param nego * @return */ BOOL nego_connect(rdpNego* nego) { rdpSettings* settings = nego->transport->settings; if (nego->state == NEGO_STATE_INITIAL) { if (nego->EnabledProtocols[PROTOCOL_HYBRID_EX]) { nego->state = NEGO_STATE_EXT; } else if (nego->EnabledProtocols[PROTOCOL_HYBRID]) { nego->state = NEGO_STATE_NLA; } else if (nego->EnabledProtocols[PROTOCOL_SSL]) { nego->state = NEGO_STATE_TLS; } else if (nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_RDP; } else { WLog_ERR(TAG, "No security protocol is enabled"); nego->state = NEGO_STATE_FAIL; return FALSE; } if (!nego->NegotiateSecurityLayer) { WLog_DBG(TAG, "Security Layer Negotiation is disabled"); /* attempt only the highest enabled protocol (see nego_attempt_*) */ nego->EnabledProtocols[PROTOCOL_HYBRID] = FALSE; nego->EnabledProtocols[PROTOCOL_SSL] = FALSE; nego->EnabledProtocols[PROTOCOL_RDP] = FALSE; nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = FALSE; if (nego->state == NEGO_STATE_EXT) { nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = TRUE; 
nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE; nego->SelectedProtocol = PROTOCOL_HYBRID_EX; } else if (nego->state == NEGO_STATE_NLA) { nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE; nego->SelectedProtocol = PROTOCOL_HYBRID; } else if (nego->state == NEGO_STATE_TLS) { nego->EnabledProtocols[PROTOCOL_SSL] = TRUE; nego->SelectedProtocol = PROTOCOL_SSL; } else if (nego->state == NEGO_STATE_RDP) { nego->EnabledProtocols[PROTOCOL_RDP] = TRUE; nego->SelectedProtocol = PROTOCOL_RDP; } } if (nego->SendPreconnectionPdu) { if (!nego_send_preconnection_pdu(nego)) { WLog_ERR(TAG, "Failed to send preconnection pdu"); nego->state = NEGO_STATE_FINAL; return FALSE; } } } if (!nego->NegotiateSecurityLayer) { nego->state = NEGO_STATE_FINAL; } else { do { WLog_DBG(TAG, "state: %s", nego_state_string(nego->state)); nego_send(nego); if (nego->state == NEGO_STATE_FAIL) { if (freerdp_get_last_error(nego->transport->context) == FREERDP_ERROR_SUCCESS) WLog_ERR(TAG, "Protocol Security Negotiation Failure"); nego->state = NEGO_STATE_FINAL; return FALSE; } } while (nego->state != NEGO_STATE_FINAL); } WLog_DBG(TAG, "Negotiated %s security", protocol_security_string(nego->SelectedProtocol)); /* update settings with negotiated protocol security */ settings->RequestedProtocols = nego->RequestedProtocols; settings->SelectedProtocol = nego->SelectedProtocol; settings->NegotiationFlags = nego->flags; if (nego->SelectedProtocol == PROTOCOL_RDP) { settings->UseRdpSecurityLayer = TRUE; if (!settings->EncryptionMethods) { /** * Advertise all supported encryption methods if the client * implementation did not set any security methods */ settings->EncryptionMethods = ENCRYPTION_METHOD_40BIT | ENCRYPTION_METHOD_56BIT | ENCRYPTION_METHOD_128BIT | ENCRYPTION_METHOD_FIPS; } } /* finally connect security layer (if not already done) */ if (!nego_security_connect(nego)) { WLog_DBG(TAG, "Failed to connect with %s security", protocol_security_string(nego->SelectedProtocol)); return FALSE; } return TRUE; } BOOL 
nego_disconnect(rdpNego* nego) { nego->state = NEGO_STATE_INITIAL; return nego_transport_disconnect(nego); } /* connect to selected security layer */ BOOL nego_security_connect(rdpNego* nego) { if (!nego->TcpConnected) { nego->SecurityConnected = FALSE; } else if (!nego->SecurityConnected) { if (nego->SelectedProtocol == PROTOCOL_HYBRID) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_HYBRID"); nego->SecurityConnected = transport_connect_nla(nego->transport); } else if (nego->SelectedProtocol == PROTOCOL_SSL) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_SSL"); nego->SecurityConnected = transport_connect_tls(nego->transport); } else if (nego->SelectedProtocol == PROTOCOL_RDP) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_RDP"); nego->SecurityConnected = transport_connect_rdp(nego->transport); } else { WLog_ERR(TAG, "cannot connect security layer because no protocol has been selected yet."); } } return nego->SecurityConnected; } /** * Connect TCP layer. * @param nego * @return */ static BOOL nego_tcp_connect(rdpNego* nego) { if (!nego->TcpConnected) { if (nego->GatewayEnabled) { if (nego->GatewayBypassLocal) { /* Attempt a direct connection first, and then fallback to using the gateway */ WLog_INFO(TAG, "Detecting if host can be reached locally. - This might take some time."); WLog_INFO(TAG, "To disable auto detection use /gateway-usage-method:direct"); transport_set_gateway_enabled(nego->transport, FALSE); nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 1); } if (!nego->TcpConnected) { transport_set_gateway_enabled(nego->transport, TRUE); nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15); } } else { nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15); } } return nego->TcpConnected; } /** * Connect TCP layer. For direct approach, connect security layer as well. 
* @param nego * @return */ BOOL nego_transport_connect(rdpNego* nego) { if (!nego_tcp_connect(nego)) return FALSE; if (nego->TcpConnected && !nego->NegotiateSecurityLayer) return nego_security_connect(nego); return nego->TcpConnected; } /** * Disconnect TCP layer. * @param nego * @return */ BOOL nego_transport_disconnect(rdpNego* nego) { if (nego->TcpConnected) transport_disconnect(nego->transport); nego->TcpConnected = FALSE; nego->SecurityConnected = FALSE; return TRUE; } /** * Send preconnection information if enabled. * @param nego * @return */ BOOL nego_send_preconnection_pdu(rdpNego* nego) { wStream* s; UINT32 cbSize; UINT16 cchPCB = 0; WCHAR* wszPCB = NULL; WLog_DBG(TAG, "Sending preconnection PDU"); if (!nego_tcp_connect(nego)) return FALSE; /* it's easier to always send the version 2 PDU, and it's just 2 bytes overhead */ cbSize = PRECONNECTION_PDU_V2_MIN_SIZE; if (nego->PreconnectionBlob) { cchPCB = (UINT16)ConvertToUnicode(CP_UTF8, 0, nego->PreconnectionBlob, -1, &wszPCB, 0); cchPCB += 1; /* zero-termination */ cbSize += cchPCB * 2; } s = Stream_New(NULL, cbSize); if (!s) { free(wszPCB); WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } Stream_Write_UINT32(s, cbSize); /* cbSize */ Stream_Write_UINT32(s, 0); /* Flags */ Stream_Write_UINT32(s, PRECONNECTION_PDU_V2); /* Version */ Stream_Write_UINT32(s, nego->PreconnectionId); /* Id */ Stream_Write_UINT16(s, cchPCB); /* cchPCB */ if (wszPCB) { Stream_Write(s, wszPCB, cchPCB * 2); /* wszPCB */ free(wszPCB); } Stream_SealLength(s); if (transport_write(nego->transport, s) < 0) { Stream_Free(s, TRUE); return FALSE; } Stream_Free(s, TRUE); return TRUE; } /** * Attempt negotiating NLA + TLS extended security. 
* @param nego */ static void nego_attempt_ext(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL | PROTOCOL_HYBRID_EX; WLog_DBG(TAG, "Attempting NLA extended security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } WLog_DBG(TAG, "state: %s", nego_state_string(nego->state)); if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_HYBRID]) nego->state = NEGO_STATE_NLA; else if (nego->EnabledProtocols[PROTOCOL_SSL]) nego->state = NEGO_STATE_TLS; else if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating NLA + TLS security. * @param nego */ static void nego_attempt_nla(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL; WLog_DBG(TAG, "Attempting NLA security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } WLog_DBG(TAG, "state: %s", nego_state_string(nego->state)); if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_SSL]) nego->state = NEGO_STATE_TLS; else if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating TLS security. 
* @param nego */ static void nego_attempt_tls(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_SSL; WLog_DBG(TAG, "Attempting TLS security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating standard RDP security. * @param nego */ static void nego_attempt_rdp(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_RDP; WLog_DBG(TAG, "Attempting RDP security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } } /** * Wait to receive a negotiation response * @param nego */ BOOL nego_recv_response(rdpNego* nego) { int status; wStream* s; s = Stream_New(NULL, 1024); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } status = transport_read_pdu(nego->transport, s); if (status < 0) { Stream_Free(s, TRUE); return FALSE; } status = nego_recv(nego->transport, s, nego); Stream_Free(s, TRUE); if (status < 0) return FALSE; return TRUE; } /** * Receive protocol security negotiation message.\n * @msdn{cc240501} * @param transport transport * @param s stream * @param extra nego pointer */ int nego_recv(rdpTransport* transport, wStream* s, void* extra) { BYTE li; BYTE type; UINT16 length; rdpNego* nego = (rdpNego*)extra; if (!tpkt_read_header(s, &length)) return -1; if (!tpdu_read_connection_confirm(s, &li, length)) return -1; if (li > 6) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ switch (type) { case TYPE_RDP_NEG_RSP: if (!nego_process_negotiation_response(nego, s)) 
return -1; WLog_DBG(TAG, "selected_protocol: %" PRIu32 "", nego->SelectedProtocol); /* enhanced security selected ? */ if (nego->SelectedProtocol) { if ((nego->SelectedProtocol == PROTOCOL_HYBRID) && (!nego->EnabledProtocols[PROTOCOL_HYBRID])) { nego->state = NEGO_STATE_FAIL; } if ((nego->SelectedProtocol == PROTOCOL_SSL) && (!nego->EnabledProtocols[PROTOCOL_SSL])) { nego->state = NEGO_STATE_FAIL; } } else if (!nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_FAIL; } break; case TYPE_RDP_NEG_FAILURE: if (!nego_process_negotiation_failure(nego, s)) return -1; break; } } else if (li == 6) { WLog_DBG(TAG, "no rdpNegData"); if (!nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_FAIL; else nego->state = NEGO_STATE_FINAL; } else { WLog_ERR(TAG, "invalid negotiation response"); nego->state = NEGO_STATE_FAIL; } if (!tpkt_ensure_stream_consumed(s, length)) return -1; return 0; } /** * Read optional routing token or cookie of X.224 Connection Request PDU. * @msdn{cc240470} * @param nego * @param s stream */ static BOOL nego_read_request_token_or_cookie(rdpNego* nego, wStream* s) { /* routingToken and cookie are optional and mutually exclusive! * * routingToken (variable): An optional and variable-length routing * token (used for load balancing) terminated by a 0x0D0A two-byte * sequence: (check [MSFT-SDLBTS] for details!) 
* Cookie:[space]msts=[ip address].[port].[reserved][\x0D\x0A] * * cookie (variable): An optional and variable-length ANSI character * string terminated by a 0x0D0A two-byte sequence: * Cookie:[space]mstshash=[ANSISTRING][\x0D\x0A] */ BYTE* str = NULL; UINT16 crlf = 0; size_t pos, len; BOOL result = FALSE; BOOL isToken = FALSE; size_t remain = Stream_GetRemainingLength(s); str = Stream_Pointer(s); pos = Stream_GetPosition(s); /* minimum length for token is 15 */ if (remain < 15) return TRUE; if (memcmp(Stream_Pointer(s), "Cookie: mstshash=", 17) != 0) { isToken = TRUE; } else { /* not a token, minimum length for cookie is 19 */ if (remain < 19) return TRUE; Stream_Seek(s, 17); } while ((remain = Stream_GetRemainingLength(s)) >= 2) { Stream_Read_UINT16(s, crlf); if (crlf == 0x0A0D) break; Stream_Rewind(s, 1); } if (crlf == 0x0A0D) { Stream_Rewind(s, 2); len = Stream_GetPosition(s) - pos; remain = Stream_GetRemainingLength(s); Stream_Write_UINT16(s, 0); if (strnlen((char*)str, len) == len) { if (isToken) result = nego_set_routing_token(nego, str, len); else result = nego_set_cookie(nego, (char*)str); } } if (!result) { Stream_SetPosition(s, pos); WLog_ERR(TAG, "invalid %s received", isToken ? "routing token" : "cookie"); } else { WLog_DBG(TAG, "received %s [%s]", isToken ? 
"routing token" : "cookie", str); } return result; } /** * Read protocol security negotiation request message.\n * @param nego * @param s stream */ BOOL nego_read_request(rdpNego* nego, wStream* s) { BYTE li; BYTE type; UINT16 length; if (!tpkt_read_header(s, &length)) return FALSE; if (!tpdu_read_connection_request(s, &li, length)) return FALSE; if (li != Stream_GetRemainingLength(s) + 6) { WLog_ERR(TAG, "Incorrect TPDU length indicator."); return FALSE; } if (!nego_read_request_token_or_cookie(nego, s)) { WLog_ERR(TAG, "Failed to parse routing token or cookie."); return FALSE; } if (Stream_GetRemainingLength(s) >= 8) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ if (type != TYPE_RDP_NEG_REQ) { WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", type); return FALSE; } if (!nego_process_negotiation_request(nego, s)) return FALSE; } return tpkt_ensure_stream_consumed(s, length); } /** * Send protocol security negotiation message. * @param nego */ void nego_send(rdpNego* nego) { if (nego->state == NEGO_STATE_EXT) nego_attempt_ext(nego); else if (nego->state == NEGO_STATE_NLA) nego_attempt_nla(nego); else if (nego->state == NEGO_STATE_TLS) nego_attempt_tls(nego); else if (nego->state == NEGO_STATE_RDP) nego_attempt_rdp(nego); else WLog_ERR(TAG, "invalid negotiation state for sending"); } /** * Send RDP Negotiation Request (RDP_NEG_REQ).\n * @msdn{cc240500}\n * @msdn{cc240470} * @param nego */ BOOL nego_send_negotiation_request(rdpNego* nego) { BOOL rc = FALSE; wStream* s; size_t length; size_t bm, em; BYTE flags = 0; size_t cookie_length; s = Stream_New(NULL, 512); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } length = TPDU_CONNECTION_REQUEST_LENGTH; bm = Stream_GetPosition(s); Stream_Seek(s, length); if (nego->RoutingToken) { Stream_Write(s, nego->RoutingToken, nego->RoutingTokenLength); /* Ensure Routing Token is correctly terminated - may already be present in string */ if ((nego->RoutingTokenLength > 2) && 
(nego->RoutingToken[nego->RoutingTokenLength - 2] == 0x0D) && (nego->RoutingToken[nego->RoutingTokenLength - 1] == 0x0A)) { WLog_DBG(TAG, "Routing token looks correctly terminated - use verbatim"); length += nego->RoutingTokenLength; } else { WLog_DBG(TAG, "Adding terminating CRLF to routing token"); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += nego->RoutingTokenLength + 2; } } else if (nego->cookie) { cookie_length = strlen(nego->cookie); if (cookie_length > nego->CookieMaxLength) cookie_length = nego->CookieMaxLength; Stream_Write(s, "Cookie: mstshash=", 17); Stream_Write(s, (BYTE*)nego->cookie, cookie_length); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += cookie_length + 19; } WLog_DBG(TAG, "RequestedProtocols: %" PRIu32 "", nego->RequestedProtocols); if ((nego->RequestedProtocols > PROTOCOL_RDP) || (nego->sendNegoData)) { /* RDP_NEG_DATA must be present for TLS and NLA */ if (nego->RestrictedAdminModeRequired) flags |= RESTRICTED_ADMIN_MODE_REQUIRED; Stream_Write_UINT8(s, TYPE_RDP_NEG_REQ); Stream_Write_UINT8(s, flags); Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, nego->RequestedProtocols); /* requestedProtocols */ length += 8; } if (length > UINT16_MAX) goto fail; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); tpkt_write_header(s, (UINT16)length); tpdu_write_connection_request(s, (UINT16)length - 5); Stream_SetPosition(s, em); Stream_SealLength(s); rc = (transport_write(nego->transport, s) >= 0); fail: Stream_Free(s, TRUE); return rc; } /** * Process Negotiation Request from Connection Request message. 
* @param nego * @param s */ BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; return TRUE; } /** * Process Negotiation Response from Connection Confirm message. * @param nego * @param s */ BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s) { UINT16 length; WLog_DBG(TAG, "RDP_NEG_RSP"); if (Stream_GetRemainingLength(s) < 7) { WLog_ERR(TAG, "Invalid RDP_NEG_RSP"); nego->state = NEGO_STATE_FAIL; return FALSE; } Stream_Read_UINT8(s, nego->flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->SelectedProtocol); nego->state = NEGO_STATE_FINAL; return TRUE; } /** * Process Negotiation Failure from Connection Confirm message. * @param nego * @param s */ BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; UINT32 failureCode; WLog_DBG(TAG, "RDP_NEG_FAILURE"); if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, failureCode); switch (failureCode) { case SSL_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_REQUIRED_BY_SERVER"); break; case SSL_NOT_ALLOWED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_NOT_ALLOWED_BY_SERVER"); nego->sendNegoData = TRUE; break; case SSL_CERT_NOT_ON_SERVER: WLog_ERR(TAG, "Error: SSL_CERT_NOT_ON_SERVER"); nego->sendNegoData = TRUE; break; case INCONSISTENT_FLAGS: WLog_ERR(TAG, "Error: INCONSISTENT_FLAGS"); break; case HYBRID_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: HYBRID_REQUIRED_BY_SERVER"); break; default: WLog_ERR(TAG, "Error: Unknown protocol security error %" PRIu32 "", failureCode); break; } nego->state = NEGO_STATE_FAIL; return TRUE; } /** * Send RDP Negotiation Response 
(RDP_NEG_RSP).\n * @param nego */ BOOL nego_send_negotiation_response(rdpNego* nego) { UINT16 length; size_t bm, em; BOOL status; wStream* s; BYTE flags; rdpSettings* settings; status = TRUE; settings = nego->transport->settings; s = Stream_New(NULL, 512); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } length = TPDU_CONNECTION_CONFIRM_LENGTH; bm = Stream_GetPosition(s); Stream_Seek(s, length); if (nego->SelectedProtocol & PROTOCOL_FAILED_NEGO) { UINT32 errorCode = (nego->SelectedProtocol & ~PROTOCOL_FAILED_NEGO); flags = 0; Stream_Write_UINT8(s, TYPE_RDP_NEG_FAILURE); Stream_Write_UINT8(s, flags); /* flags */ Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, errorCode); length += 8; status = FALSE; } else { flags = EXTENDED_CLIENT_DATA_SUPPORTED; if (settings->SupportGraphicsPipeline) flags |= DYNVC_GFX_PROTOCOL_SUPPORTED; /* RDP_NEG_DATA must be present for TLS, NLA, and RDP */ Stream_Write_UINT8(s, TYPE_RDP_NEG_RSP); Stream_Write_UINT8(s, flags); /* flags */ Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, nego->SelectedProtocol); /* selectedProtocol */ length += 8; } em = Stream_GetPosition(s); Stream_SetPosition(s, bm); tpkt_write_header(s, length); tpdu_write_connection_confirm(s, length - 5); Stream_SetPosition(s, em); Stream_SealLength(s); if (transport_write(nego->transport, s) < 0) { Stream_Free(s, TRUE); return FALSE; } Stream_Free(s, TRUE); if (status) { /* update settings with negotiated protocol security */ settings->RequestedProtocols = nego->RequestedProtocols; settings->SelectedProtocol = nego->SelectedProtocol; if (settings->SelectedProtocol == PROTOCOL_RDP) { settings->TlsSecurity = FALSE; settings->NlaSecurity = FALSE; settings->RdpSecurity = TRUE; settings->UseRdpSecurityLayer = TRUE; if (settings->EncryptionLevel == ENCRYPTION_LEVEL_NONE) { /** * If the server implementation did not explicitely set a * encryption level we default to client compatible */ 
settings->EncryptionLevel = ENCRYPTION_LEVEL_CLIENT_COMPATIBLE; } if (settings->LocalConnection) { /** * Note: This hack was firstly introduced in commit 95f5e115 to * disable the unnecessary encryption with peers connecting to * 127.0.0.1 or local unix sockets. * This also affects connections via port tunnels! (e.g. ssh -L) */ WLog_INFO(TAG, "Turning off encryption for local peer with standard rdp security"); settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } if (!settings->RdpServerRsaKey && !settings->RdpKeyFile && !settings->RdpKeyContent) { WLog_ERR(TAG, "Missing server certificate"); return FALSE; } } else if (settings->SelectedProtocol == PROTOCOL_SSL) { settings->TlsSecurity = TRUE; settings->NlaSecurity = FALSE; settings->RdpSecurity = FALSE; settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } else if (settings->SelectedProtocol == PROTOCOL_HYBRID) { settings->TlsSecurity = TRUE; settings->NlaSecurity = TRUE; settings->RdpSecurity = FALSE; settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } } return status; } /** * Initialize NEGO state machine. * @param nego */ void nego_init(rdpNego* nego) { nego->state = NEGO_STATE_INITIAL; nego->RequestedProtocols = PROTOCOL_RDP; nego->CookieMaxLength = DEFAULT_COOKIE_MAX_LENGTH; nego->sendNegoData = FALSE; nego->flags = 0; } /** * Create a new NEGO state machine instance. * @param transport * @return */ rdpNego* nego_new(rdpTransport* transport) { rdpNego* nego = (rdpNego*)calloc(1, sizeof(rdpNego)); if (!nego) return NULL; nego->transport = transport; nego_init(nego); return nego; } /** * Free NEGO state machine. * @param nego */ void nego_free(rdpNego* nego) { if (nego) { free(nego->RoutingToken); free(nego->cookie); free(nego); } } /** * Set target hostname and port. 
* @param nego * @param hostname * @param port */ BOOL nego_set_target(rdpNego* nego, const char* hostname, UINT16 port) { if (!nego || !hostname) return FALSE; nego->hostname = hostname; nego->port = port; return TRUE; } /** * Enable security layer negotiation. * @param nego pointer to the negotiation structure * @param enable_rdp whether to enable security layer negotiation (TRUE for enabled, FALSE for * disabled) */ void nego_set_negotiation_enabled(rdpNego* nego, BOOL NegotiateSecurityLayer) { WLog_DBG(TAG, "Enabling security layer negotiation: %s", NegotiateSecurityLayer ? "TRUE" : "FALSE"); nego->NegotiateSecurityLayer = NegotiateSecurityLayer; } /** * Enable restricted admin mode. * @param nego pointer to the negotiation structure * @param enable_restricted whether to enable security layer negotiation (TRUE for enabled, FALSE * for disabled) */ void nego_set_restricted_admin_mode_required(rdpNego* nego, BOOL RestrictedAdminModeRequired) { WLog_DBG(TAG, "Enabling restricted admin mode: %s", RestrictedAdminModeRequired ? "TRUE" : "FALSE"); nego->RestrictedAdminModeRequired = RestrictedAdminModeRequired; } void nego_set_gateway_enabled(rdpNego* nego, BOOL GatewayEnabled) { nego->GatewayEnabled = GatewayEnabled; } void nego_set_gateway_bypass_local(rdpNego* nego, BOOL GatewayBypassLocal) { nego->GatewayBypassLocal = GatewayBypassLocal; } /** * Enable RDP security protocol. * @param nego pointer to the negotiation structure * @param enable_rdp whether to enable normal RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_rdp(rdpNego* nego, BOOL enable_rdp) { WLog_DBG(TAG, "Enabling RDP security: %s", enable_rdp ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_RDP] = enable_rdp; } /** * Enable TLS security protocol. 
* @param nego pointer to the negotiation structure * @param enable_tls whether to enable TLS + RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_tls(rdpNego* nego, BOOL enable_tls) { WLog_DBG(TAG, "Enabling TLS security: %s", enable_tls ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_SSL] = enable_tls; } /** * Enable NLA security protocol. * @param nego pointer to the negotiation structure * @param enable_nla whether to enable network level authentication protocol (TRUE for enabled, * FALSE for disabled) */ void nego_enable_nla(rdpNego* nego, BOOL enable_nla) { WLog_DBG(TAG, "Enabling NLA security: %s", enable_nla ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID] = enable_nla; } /** * Enable NLA extended security protocol. * @param nego pointer to the negotiation structure * @param enable_ext whether to enable network level authentication extended protocol (TRUE for * enabled, FALSE for disabled) */ void nego_enable_ext(rdpNego* nego, BOOL enable_ext) { WLog_DBG(TAG, "Enabling NLA extended security: %s", enable_ext ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = enable_ext; } /** * Set routing token. * @param nego * @param RoutingToken * @param RoutingTokenLength */ BOOL nego_set_routing_token(rdpNego* nego, BYTE* RoutingToken, DWORD RoutingTokenLength) { if (RoutingTokenLength == 0) return FALSE; free(nego->RoutingToken); nego->RoutingTokenLength = RoutingTokenLength; nego->RoutingToken = (BYTE*)malloc(nego->RoutingTokenLength); if (!nego->RoutingToken) return FALSE; CopyMemory(nego->RoutingToken, RoutingToken, nego->RoutingTokenLength); return TRUE; } /** * Set cookie. 
* @param nego * @param cookie */ BOOL nego_set_cookie(rdpNego* nego, char* cookie) { if (nego->cookie) { free(nego->cookie); nego->cookie = NULL; } if (!cookie) return TRUE; nego->cookie = _strdup(cookie); if (!nego->cookie) return FALSE; return TRUE; } /** * Set cookie maximum length * @param nego * @param CookieMaxLength */ void nego_set_cookie_max_length(rdpNego* nego, UINT32 CookieMaxLength) { nego->CookieMaxLength = CookieMaxLength; } /** * Enable / disable preconnection PDU. * @param nego * @param send_pcpdu */ void nego_set_send_preconnection_pdu(rdpNego* nego, BOOL SendPreconnectionPdu) { nego->SendPreconnectionPdu = SendPreconnectionPdu; } /** * Set preconnection id. * @param nego * @param id */ void nego_set_preconnection_id(rdpNego* nego, UINT32 PreconnectionId) { nego->PreconnectionId = PreconnectionId; } /** * Set preconnection blob. * @param nego * @param blob */ void nego_set_preconnection_blob(rdpNego* nego, char* PreconnectionBlob) { nego->PreconnectionBlob = PreconnectionBlob; } UINT32 nego_get_selected_protocol(rdpNego* nego) { if (!nego) return 0; return nego->SelectedProtocol; } BOOL nego_set_selected_protocol(rdpNego* nego, UINT32 SelectedProtocol) { if (!nego) return FALSE; nego->SelectedProtocol = SelectedProtocol; return TRUE; } UINT32 nego_get_requested_protocols(rdpNego* nego) { if (!nego) return 0; return nego->RequestedProtocols; } BOOL nego_set_requested_protocols(rdpNego* nego, UINT32 RequestedProtocols) { if (!nego) return FALSE; nego->RequestedProtocols = RequestedProtocols; return TRUE; } NEGO_STATE nego_get_state(rdpNego* nego) { if (!nego) return NEGO_STATE_FAIL; return nego->state; } BOOL nego_set_state(rdpNego* nego, NEGO_STATE state) { if (!nego) return FALSE; nego->state = state; return TRUE; } SEC_WINNT_AUTH_IDENTITY* nego_get_identity(rdpNego* nego) { if (!nego) return NULL; return nla_get_identity(nego->transport->nla); } void nego_free_nla(rdpNego* nego) { if (!nego || !nego->transport) return; 
nla_free(nego->transport->nla); nego->transport->nla = NULL; } const BYTE* nego_get_routing_token(rdpNego* nego, DWORD* RoutingTokenLength) { if (!nego) return NULL; if (RoutingTokenLength) *RoutingTokenLength = nego->RoutingTokenLength; return nego->RoutingToken; }
/**
 * Read a protocol security negotiation request message: an X.224
 * Connection Request PDU optionally carrying an RDP_NEG_REQ structure.
 *
 * @param nego negotiation state machine
 * @param s    stream positioned at the start of the TPKT header
 * @return TRUE on success, FALSE on malformed or truncated input
 */
BOOL nego_read_request(rdpNego* nego, wStream* s)
{
	BYTE li;
	BYTE type;
	UINT16 length;

	if (!tpkt_read_header(s, &length))
		return FALSE;

	if (!tpdu_read_connection_request(s, &li, length))
		return FALSE;

	/* li counts the TPDU bytes after the LI octet; 6 header bytes were
	 * already consumed, so it must match what remains in the stream. */
	if (li != Stream_GetRemainingLength(s) + 6)
	{
		WLog_ERR(TAG, "Incorrect TPDU length indicator.");
		return FALSE;
	}

	if (!nego_read_request_token_or_cookie(nego, s))
	{
		WLog_ERR(TAG, "Failed to parse routing token or cookie.");
		return FALSE;
	}

	if (Stream_GetRemainingLength(s) >= 8)
	{
		/* rdpNegData (optional) */
		Stream_Read_UINT8(s, type); /* Type */

		if (type != TYPE_RDP_NEG_REQ)
		{
			WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", type);
			return FALSE;
		}

		/* FIX: the result was previously ignored. The helper rejects
		 * payloads shorter than 7 bytes; discarding that status allowed
		 * an out-of-bounds read on a truncated RDP_NEG_REQ
		 * (CVE-2020-11089 class defect). Propagate the failure. */
		if (!nego_process_negotiation_request(nego, s))
			return FALSE;
	}

	return tpkt_ensure_stream_consumed(s, length);
}
/**
 * Parse an incoming X.224 Connection Request PDU together with its
 * optional routing token / cookie and optional RDP_NEG_REQ payload.
 *
 * @param nego negotiation state machine
 * @param s    stream positioned at the TPKT header
 * @return TRUE when the request was fully and correctly consumed
 */
BOOL nego_read_request(rdpNego* nego, wStream* s)
{
	UINT16 pduLength;
	BYTE lengthIndicator;

	if (!tpkt_read_header(s, &pduLength))
		return FALSE;

	if (!tpdu_read_connection_request(s, &lengthIndicator, pduLength))
		return FALSE;

	/* the length indicator must account for the 6 header bytes already read */
	if (lengthIndicator != Stream_GetRemainingLength(s) + 6)
	{
		WLog_ERR(TAG, "Incorrect TPDU length indicator.");
		return FALSE;
	}

	if (!nego_read_request_token_or_cookie(nego, s))
	{
		WLog_ERR(TAG, "Failed to parse routing token or cookie.");
		return FALSE;
	}

	/* no rdpNegData present: just verify the PDU was fully consumed */
	if (Stream_GetRemainingLength(s) < 8)
		return tpkt_ensure_stream_consumed(s, pduLength);

	{
		/* rdpNegData (optional) */
		BYTE negType;
		Stream_Read_UINT8(s, negType); /* Type */

		if (negType != TYPE_RDP_NEG_REQ)
		{
			WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", negType);
			return FALSE;
		}

		/* helper validates remaining length before reading its 7 bytes */
		if (!nego_process_negotiation_request(nego, s))
			return FALSE;
	}

	return tpkt_ensure_stream_consumed(s, pduLength);
}
{'added': [(94, 'static BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s);'), (95, 'static BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s);'), (96, 'static BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s);'), (621, '\t\t\t\tif (!nego_process_negotiation_response(nego, s))'), (622, '\t\t\t\t\treturn -1;'), (649, '\t\t\t\tif (!nego_process_negotiation_failure(nego, s))'), (650, '\t\t\t\t\treturn -1;'), (800, '\t\tif (!nego_process_negotiation_request(nego, s))'), (801, '\t\t\treturn FALSE;'), (924, 'BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s)'), (928, ''), (929, '\tif (Stream_GetRemainingLength(s) < 7)'), (930, '\t\treturn FALSE;'), (936, '\treturn TRUE;'), (945, 'BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s)'), (954, '\t\treturn FALSE;'), (961, '\treturn TRUE;'), (970, 'BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s)'), (976, '\tif (Stream_GetRemainingLength(s) < 7)'), (977, '\t\treturn FALSE;'), (1012, '\treturn TRUE;')], 'deleted': [(94, 'static void nego_process_negotiation_request(rdpNego* nego, wStream* s);'), (95, 'static void nego_process_negotiation_response(rdpNego* nego, wStream* s);'), (96, 'static void nego_process_negotiation_failure(rdpNego* nego, wStream* s);'), (621, '\t\t\t\tnego_process_negotiation_response(nego, s);'), (648, '\t\t\t\tnego_process_negotiation_failure(nego, s);'), (798, '\t\tnego_process_negotiation_request(nego, s);'), (921, 'void nego_process_negotiation_request(rdpNego* nego, wStream* s)'), (938, 'void nego_process_negotiation_response(rdpNego* nego, wStream* s)'), (947, '\t\treturn;'), (962, 'void nego_process_negotiation_failure(rdpNego* nego, wStream* s)')]}
21
10
990
4,944
31
151
7
https://github.com/FreeRDP/FreeRDP
CVE-2020-11089
CWE-125
3,159
ldo.c
C
luaD_shrinkstack
/* ** $Id: ldo.c $ ** Stack and Call structure of Lua ** See Copyright Notice in lua.h */ #define ldo_c #define LUA_CORE #include "lprefix.h" #include <setjmp.h> #include <stdlib.h> #include <string.h> #include "lua.h" #include "lapi.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lopcodes.h" #include "lparser.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lundump.h" #include "lvm.h" #include "lzio.h" #define errorstatus(s) ((s) > LUA_YIELD) /* ** {====================================================== ** Error-recovery functions ** ======================================================= */ /* ** LUAI_THROW/LUAI_TRY define how Lua does exception handling. By ** default, Lua handles errors with exceptions when compiling as ** C++ code, with _longjmp/_setjmp when asked to use them, and with ** longjmp/setjmp otherwise. */ #if !defined(LUAI_THROW) /* { */ #if defined(__cplusplus) && !defined(LUA_USE_LONGJMP) /* { */ /* C++ exceptions */ #define LUAI_THROW(L,c) throw(c) #define LUAI_TRY(L,c,a) \ try { a } catch(...) { if ((c)->status == 0) (c)->status = -1; } #define luai_jmpbuf int /* dummy variable */ #elif defined(LUA_USE_POSIX) /* }{ */ /* in POSIX, try _longjmp/_setjmp (more efficient) */ #define LUAI_THROW(L,c) _longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #else /* }{ */ /* ISO C handling with long jumps */ #define LUAI_THROW(L,c) longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #endif /* } */ #endif /* } */ /* chain list of long jump buffers */ struct lua_longjmp { struct lua_longjmp *previous; luai_jmpbuf b; volatile int status; /* error code */ }; void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) { switch (errcode) { case LUA_ERRMEM: { /* memory error? 
*/ setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */ break; } case LUA_ERRERR: { setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling")); break; } case CLOSEPROTECT: { setnilvalue(s2v(oldtop)); /* no error message */ break; } default: { setobjs2s(L, oldtop, L->top - 1); /* error message on current top */ break; } } L->top = oldtop + 1; } l_noret luaD_throw (lua_State *L, int errcode) { if (L->errorJmp) { /* thread has an error handler? */ L->errorJmp->status = errcode; /* set status */ LUAI_THROW(L, L->errorJmp); /* jump to it */ } else { /* thread has no error handler */ global_State *g = G(L); errcode = luaF_close(L, L->stack, errcode); /* close all upvalues */ L->status = cast_byte(errcode); /* mark it as dead */ if (g->mainthread->errorJmp) { /* main thread has a handler? */ setobjs2s(L, g->mainthread->top++, L->top - 1); /* copy error obj. */ luaD_throw(g->mainthread, errcode); /* re-throw in main thread */ } else { /* no handler at all; abort */ if (g->panic) { /* panic function? */ luaD_seterrorobj(L, errcode, L->top); /* assume EXTRA_STACK */ if (L->ci->top < L->top) L->ci->top = L->top; /* pushing msg. 
can break this invariant */ lua_unlock(L); g->panic(L); /* call panic function (last chance to jump out) */ } abort(); } } } int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) { global_State *g = G(L); l_uint32 oldnCcalls = g->Cstacklimit - (L->nCcalls + L->nci); struct lua_longjmp lj; lj.status = LUA_OK; lj.previous = L->errorJmp; /* chain new error handler */ L->errorJmp = &lj; LUAI_TRY(L, &lj, (*f)(L, ud); ); L->errorJmp = lj.previous; /* restore old error handler */ L->nCcalls = g->Cstacklimit - oldnCcalls - L->nci; return lj.status; } /* }====================================================== */ /* ** {================================================================== ** Stack reallocation ** =================================================================== */ static void correctstack (lua_State *L, StkId oldstack, StkId newstack) { CallInfo *ci; UpVal *up; if (oldstack == newstack) return; /* stack address did not change */ L->top = (L->top - oldstack) + newstack; for (up = L->openupval; up != NULL; up = up->u.open.next) up->v = s2v((uplevel(up) - oldstack) + newstack); for (ci = L->ci; ci != NULL; ci = ci->previous) { ci->top = (ci->top - oldstack) + newstack; ci->func = (ci->func - oldstack) + newstack; if (isLua(ci)) ci->u.l.trap = 1; /* signal to update 'trap' in 'luaV_execute' */ } } /* some space for error handling */ #define ERRORSTACKSIZE (LUAI_MAXSTACK + 200) int luaD_reallocstack (lua_State *L, int newsize, int raiseerror) { int lim = L->stacksize; StkId newstack = luaM_reallocvector(L, L->stack, lim, newsize, StackValue); lua_assert(newsize <= LUAI_MAXSTACK || newsize == ERRORSTACKSIZE); lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK); if (unlikely(newstack == NULL)) { /* reallocation failed? 
*/ if (raiseerror) luaM_error(L); else return 0; /* do not raise an error */ } for (; lim < newsize; lim++) setnilvalue(s2v(newstack + lim)); /* erase new segment */ correctstack(L, L->stack, newstack); L->stack = newstack; L->stacksize = newsize; L->stack_last = L->stack + newsize - EXTRA_STACK; return 1; } /* ** Try to grow the stack by at least 'n' elements. when 'raiseerror' ** is true, raises any error; otherwise, return 0 in case of errors. */ int luaD_growstack (lua_State *L, int n, int raiseerror) { int size = L->stacksize; int newsize = 2 * size; /* tentative new size */ if (unlikely(size > LUAI_MAXSTACK)) { /* need more space after extra size? */ if (raiseerror) luaD_throw(L, LUA_ERRERR); /* error inside message handler */ else return 0; } else { int needed = cast_int(L->top - L->stack) + n + EXTRA_STACK; if (newsize > LUAI_MAXSTACK) /* cannot cross the limit */ newsize = LUAI_MAXSTACK; if (newsize < needed) /* but must respect what was asked for */ newsize = needed; if (unlikely(newsize > LUAI_MAXSTACK)) { /* stack overflow? 
*/ /* add extra size to be able to handle the error message */ luaD_reallocstack(L, ERRORSTACKSIZE, raiseerror); if (raiseerror) luaG_runerror(L, "stack overflow"); else return 0; } } /* else no errors */ return luaD_reallocstack(L, newsize, raiseerror); } static int stackinuse (lua_State *L) { CallInfo *ci; StkId lim = L->top; for (ci = L->ci; ci != NULL; ci = ci->previous) { if (lim < ci->top) lim = ci->top; } lua_assert(lim <= L->stack_last); return cast_int(lim - L->stack) + 1; /* part of stack in use */ } void luaD_shrinkstack (lua_State *L) { int inuse = stackinuse(L); int goodsize = inuse + (inuse / 8) + 2*EXTRA_STACK; if (goodsize > LUAI_MAXSTACK) goodsize = LUAI_MAXSTACK; /* respect stack limit */ /* if thread is currently not handling a stack overflow and its good size is smaller than current size, shrink its stack */ if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize) luaD_reallocstack(L, goodsize, 0); /* ok if that fails */ else /* don't change stack */ condmovestack(L,{},{}); /* (change only for debugging) */ luaE_shrinkCI(L); /* shrink CI list */ } void luaD_inctop (lua_State *L) { luaD_checkstack(L, 1); L->top++; } /* }================================================================== */ /* ** Call a hook for the given event. Make sure there is a hook to be ** called. (Both 'L->hook' and 'L->hookmask', which trigger this ** function, can be changed asynchronously by signals.) 
*/ void luaD_hook (lua_State *L, int event, int line, int ftransfer, int ntransfer) { lua_Hook hook = L->hook; if (hook && L->allowhook) { /* make sure there is a hook */ int mask = CIST_HOOKED; CallInfo *ci = L->ci; ptrdiff_t top = savestack(L, L->top); ptrdiff_t ci_top = savestack(L, ci->top); lua_Debug ar; ar.event = event; ar.currentline = line; ar.i_ci = ci; if (ntransfer != 0) { mask |= CIST_TRAN; /* 'ci' has transfer information */ ci->u2.transferinfo.ftransfer = ftransfer; ci->u2.transferinfo.ntransfer = ntransfer; } luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */ if (L->top + LUA_MINSTACK > ci->top) ci->top = L->top + LUA_MINSTACK; L->allowhook = 0; /* cannot call hooks inside a hook */ ci->callstatus |= mask; lua_unlock(L); (*hook)(L, &ar); lua_lock(L); lua_assert(!L->allowhook); L->allowhook = 1; ci->top = restorestack(L, ci_top); L->top = restorestack(L, top); ci->callstatus &= ~mask; } } /* ** Executes a call hook for Lua functions. This function is called ** whenever 'hookmask' is not zero, so it checks whether call hooks are ** active. */ void luaD_hookcall (lua_State *L, CallInfo *ci) { int hook = (ci->callstatus & CIST_TAIL) ? LUA_HOOKTAILCALL : LUA_HOOKCALL; Proto *p; if (!(L->hookmask & LUA_MASKCALL)) /* some other hook? */ return; /* don't call hook */ p = clLvalue(s2v(ci->func))->p; L->top = ci->top; /* prepare top */ ci->u.l.savedpc++; /* hooks assume 'pc' is already incremented */ luaD_hook(L, hook, -1, 1, p->numparams); ci->u.l.savedpc--; /* correct 'pc' */ } static StkId rethook (lua_State *L, CallInfo *ci, StkId firstres, int nres) { ptrdiff_t oldtop = savestack(L, L->top); /* hook may change top */ int delta = 0; if (isLuacode(ci)) { Proto *p = clLvalue(s2v(ci->func))->p; if (p->is_vararg) delta = ci->u.l.nextraargs + p->numparams + 1; if (L->top < ci->top) L->top = ci->top; /* correct top to run hook */ } if (L->hookmask & LUA_MASKRET) { /* is return hook on? 
*/ int ftransfer; ci->func += delta; /* if vararg, back to virtual 'func' */ ftransfer = cast(unsigned short, firstres - ci->func); luaD_hook(L, LUA_HOOKRET, -1, ftransfer, nres); /* call it */ ci->func -= delta; } if (isLua(ci->previous)) L->oldpc = ci->previous->u.l.savedpc; /* update 'oldpc' */ return restorestack(L, oldtop); } /* ** Check whether 'func' has a '__call' metafield. If so, put it in the ** stack, below original 'func', so that 'luaD_call' can call it. Raise ** an error if there is no '__call' metafield. */ void luaD_tryfuncTM (lua_State *L, StkId func) { const TValue *tm = luaT_gettmbyobj(L, s2v(func), TM_CALL); StkId p; if (unlikely(ttisnil(tm))) luaG_typeerror(L, s2v(func), "call"); /* nothing to call */ for (p = L->top; p > func; p--) /* open space for metamethod */ setobjs2s(L, p, p-1); L->top++; /* stack space pre-allocated by the caller */ setobj2s(L, func, tm); /* metamethod is the new function to be called */ } /* ** Given 'nres' results at 'firstResult', move 'wanted' of them to 'res'. ** Handle most typical cases (zero results for commands, one result for ** expressions, multiple results for tail calls/single parameters) ** separated. */ static void moveresults (lua_State *L, StkId res, int nres, int wanted) { StkId firstresult; int i; switch (wanted) { /* handle typical cases separately */ case 0: /* no values needed */ L->top = res; return; case 1: /* one value needed */ if (nres == 0) /* no results? */ setnilvalue(s2v(res)); /* adjust with nil */ else setobjs2s(L, res, L->top - nres); /* move it to proper place */ L->top = res + 1; return; case LUA_MULTRET: wanted = nres; /* we want all results */ break; default: /* multiple results (or to-be-closed variables) */ if (hastocloseCfunc(wanted)) { /* to-be-closed variables? 
*/ ptrdiff_t savedres = savestack(L, res); luaF_close(L, res, LUA_OK); /* may change the stack */ res = restorestack(L, savedres); wanted = codeNresults(wanted); /* correct value */ if (wanted == LUA_MULTRET) wanted = nres; } break; } firstresult = L->top - nres; /* index of first result */ /* move all results to correct place */ for (i = 0; i < nres && i < wanted; i++) setobjs2s(L, res + i, firstresult + i); for (; i < wanted; i++) /* complete wanted number of results */ setnilvalue(s2v(res + i)); L->top = res + wanted; /* top points after the last result */ } /* ** Finishes a function call: calls hook if necessary, removes CallInfo, ** moves current number of results to proper place. */ void luaD_poscall (lua_State *L, CallInfo *ci, int nres) { if (L->hookmask) L->top = rethook(L, ci, L->top - nres, nres); L->ci = ci->previous; /* back to caller */ /* move results to proper place */ moveresults(L, ci->func, nres, ci->nresults); } #define next_ci(L) (L->ci->next ? L->ci->next : luaE_extendCI(L)) /* ** Prepare a function for a tail call, building its call info on top ** of the current call info. 'narg1' is the number of arguments plus 1 ** (so that it includes the function itself). */ void luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func, int narg1) { Proto *p = clLvalue(s2v(func))->p; int fsize = p->maxstacksize; /* frame size */ int nfixparams = p->numparams; int i; for (i = 0; i < narg1; i++) /* move down function and arguments */ setobjs2s(L, ci->func + i, func + i); checkstackGC(L, fsize); func = ci->func; /* moved-down function */ for (; narg1 <= nfixparams; narg1++) setnilvalue(s2v(func + narg1)); /* complete missing arguments */ ci->top = func + 1 + fsize; /* top for new function */ lua_assert(ci->top <= L->stack_last); ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus |= CIST_TAIL; L->top = func + narg1; /* set top */ } /* ** Call a function (C or Lua). The function to be called is at *func. 
** The arguments are on the stack, right after the function. ** When returns, all the results are on the stack, starting at the original ** function position. */ void luaD_call (lua_State *L, StkId func, int nresults) { lua_CFunction f; retry: switch (ttypetag(s2v(func))) { case LUA_VCCL: /* C closure */ f = clCvalue(s2v(func))->f; goto Cfunc; case LUA_VLCF: /* light C function */ f = fvalue(s2v(func)); Cfunc: { int n; /* number of returns */ CallInfo *ci = next_ci(L); checkstackp(L, LUA_MINSTACK, func); /* ensure minimum stack size */ ci->nresults = nresults; ci->callstatus = CIST_C; ci->top = L->top + LUA_MINSTACK; ci->func = func; L->ci = ci; lua_assert(ci->top <= L->stack_last); if (L->hookmask & LUA_MASKCALL) { int narg = cast_int(L->top - func) - 1; luaD_hook(L, LUA_HOOKCALL, -1, 1, narg); } lua_unlock(L); n = (*f)(L); /* do the actual call */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); break; } case LUA_VLCL: { /* Lua function */ CallInfo *ci = next_ci(L); Proto *p = clLvalue(s2v(func))->p; int narg = cast_int(L->top - func) - 1; /* number of real arguments */ int nfixparams = p->numparams; int fsize = p->maxstacksize; /* frame size */ checkstackp(L, fsize, func); ci->nresults = nresults; ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus = 0; ci->top = func + 1 + fsize; ci->func = func; L->ci = ci; for (; narg < nfixparams; narg++) setnilvalue(s2v(L->top++)); /* complete missing arguments */ lua_assert(ci->top <= L->stack_last); luaV_execute(L, ci); /* run the function */ break; } default: { /* not a function */ checkstackp(L, 1, func); /* space for metamethod */ luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */ goto retry; /* try again with metamethod */ } } } /* ** Similar to 'luaD_call', but does not allow yields during the call. ** If there is a stack overflow, freeing all CI structures will ** force the subsequent call to invoke 'luaE_extendCI', which then ** will raise any errors. 
*/ void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */ luaE_freeCI(L); luaD_call(L, func, nResults); decXCcalls(L); } /* ** Completes the execution of an interrupted C function, calling its ** continuation function. */ static void finishCcall (lua_State *L, int status) { CallInfo *ci = L->ci; int n; /* must have a continuation and must be able to call it */ lua_assert(ci->u.c.k != NULL && yieldable(L)); /* error status can only happen in a protected call */ lua_assert((ci->callstatus & CIST_YPCALL) || status == LUA_YIELD); if (ci->callstatus & CIST_YPCALL) { /* was inside a pcall? */ ci->callstatus &= ~CIST_YPCALL; /* continuation is also inside it */ L->errfunc = ci->u.c.old_errfunc; /* with the same error function */ } /* finish 'lua_callk'/'lua_pcall'; CIST_YPCALL and 'errfunc' already handled */ adjustresults(L, ci->nresults); lua_unlock(L); n = (*ci->u.c.k)(L, status, ci->u.c.ctx); /* call continuation function */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } /* ** Executes "full continuation" (everything in the stack) of a ** previously interrupted coroutine until the stack is empty (or another ** interruption long-jumps out of the loop). If the coroutine is ** recovering from an error, 'ud' points to the error status, which must ** be passed to the first continuation function (otherwise the default ** status is LUA_YIELD). */ static void unroll (lua_State *L, void *ud) { CallInfo *ci; if (ud != NULL) /* error status? */ finishCcall(L, *(int *)ud); /* finish 'lua_pcallk' callee */ while ((ci = L->ci) != &L->base_ci) { /* something in the stack */ if (!isLua(ci)) /* C function? 
*/ finishCcall(L, LUA_YIELD); /* complete its execution */ else { /* Lua function */ luaV_finishOp(L); /* finish interrupted instruction */ luaV_execute(L, ci); /* execute down to higher C 'boundary' */ } } } /* ** Try to find a suspended protected call (a "recover point") for the ** given thread. */ static CallInfo *findpcall (lua_State *L) { CallInfo *ci; for (ci = L->ci; ci != NULL; ci = ci->previous) { /* search for a pcall */ if (ci->callstatus & CIST_YPCALL) return ci; } return NULL; /* no pending pcall */ } /* ** Recovers from an error in a coroutine. Finds a recover point (if ** there is one) and completes the execution of the interrupted ** 'luaD_pcall'. If there is no recover point, returns zero. */ static int recover (lua_State *L, int status) { StkId oldtop; CallInfo *ci = findpcall(L); if (ci == NULL) return 0; /* no recovery point */ /* "finish" luaD_pcall */ oldtop = restorestack(L, ci->u2.funcidx); luaF_close(L, oldtop, status); /* may change the stack */ oldtop = restorestack(L, ci->u2.funcidx); luaD_seterrorobj(L, status, oldtop); L->ci = ci; L->allowhook = getoah(ci->callstatus); /* restore original 'allowhook' */ luaD_shrinkstack(L); L->errfunc = ci->u.c.old_errfunc; return 1; /* continue running the coroutine */ } /* ** Signal an error in the call to 'lua_resume', not in the execution ** of the coroutine itself. (Such errors should not be handled by any ** coroutine error handler and should not kill the coroutine.) */ static int resume_error (lua_State *L, const char *msg, int narg) { L->top -= narg; /* remove args from the stack */ setsvalue2s(L, L->top, luaS_new(L, msg)); /* push error message */ api_incr_top(L); lua_unlock(L); return LUA_ERRRUN; } /* ** Do the work for 'lua_resume' in protected mode. Most of the work ** depends on the status of the coroutine: initial state, suspended ** inside a hook, or regularly suspended (optionally with a continuation ** function), plus erroneous cases: non-suspended coroutine or dead ** coroutine. 
*/ static void resume (lua_State *L, void *ud) { int n = *(cast(int*, ud)); /* number of arguments */ StkId firstArg = L->top - n; /* first argument */ CallInfo *ci = L->ci; if (L->status == LUA_OK) { /* starting a coroutine? */ luaD_call(L, firstArg - 1, LUA_MULTRET); } else { /* resuming from previous yield */ lua_assert(L->status == LUA_YIELD); L->status = LUA_OK; /* mark that it is running (again) */ if (isLua(ci)) /* yielded inside a hook? */ luaV_execute(L, ci); /* just continue running Lua code */ else { /* 'common' yield */ if (ci->u.c.k != NULL) { /* does it have a continuation function? */ lua_unlock(L); n = (*ci->u.c.k)(L, LUA_YIELD, ci->u.c.ctx); /* call continuation */ lua_lock(L); api_checknelems(L, n); } luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } unroll(L, NULL); /* run continuation */ } } LUA_API int lua_resume (lua_State *L, lua_State *from, int nargs, int *nresults) { int status; lua_lock(L); if (L->status == LUA_OK) { /* may be starting a coroutine */ if (L->ci != &L->base_ci) /* not in base level? */ return resume_error(L, "cannot resume non-suspended coroutine", nargs); else if (L->top - (L->ci->func + 1) == nargs) /* no function? */ return resume_error(L, "cannot resume dead coroutine", nargs); } else if (L->status != LUA_YIELD) /* ended with errors? */ return resume_error(L, "cannot resume dead coroutine", nargs); if (from == NULL) L->nCcalls = CSTACKTHREAD; else /* correct 'nCcalls' for this thread */ L->nCcalls = getCcalls(from) + from->nci - L->nci - CSTACKCF; if (L->nCcalls <= CSTACKERR) return resume_error(L, "C stack overflow", nargs); luai_userstateresume(L, nargs); api_checknelems(L, (L->status == LUA_OK) ? 
nargs + 1 : nargs); status = luaD_rawrunprotected(L, resume, &nargs); /* continue running after recoverable errors */ while (errorstatus(status) && recover(L, status)) { /* unroll continuation */ status = luaD_rawrunprotected(L, unroll, &status); } if (likely(!errorstatus(status))) lua_assert(status == L->status); /* normal end or yield */ else { /* unrecoverable error */ L->status = cast_byte(status); /* mark thread as 'dead' */ luaD_seterrorobj(L, status, L->top); /* push error message */ L->ci->top = L->top; } *nresults = (status == LUA_YIELD) ? L->ci->u2.nyield : cast_int(L->top - (L->ci->func + 1)); lua_unlock(L); return status; } LUA_API int lua_isyieldable (lua_State *L) { return yieldable(L); } LUA_API int lua_yieldk (lua_State *L, int nresults, lua_KContext ctx, lua_KFunction k) { CallInfo *ci = L->ci; luai_userstateyield(L, nresults); lua_lock(L); api_checknelems(L, nresults); if (unlikely(!yieldable(L))) { if (L != G(L)->mainthread) luaG_runerror(L, "attempt to yield across a C-call boundary"); else luaG_runerror(L, "attempt to yield from outside a coroutine"); } L->status = LUA_YIELD; if (isLua(ci)) { /* inside a hook? */ lua_assert(!isLuacode(ci)); api_check(L, k == NULL, "hooks cannot continue after yielding"); ci->u2.nyield = 0; /* no results */ } else { if ((ci->u.c.k = k) != NULL) /* is there a continuation? */ ci->u.c.ctx = ctx; /* save context */ ci->u2.nyield = nresults; /* save number of results */ luaD_throw(L, LUA_YIELD); } lua_assert(ci->callstatus & CIST_HOOKED); /* must be inside a hook */ lua_unlock(L); return 0; /* return to 'luaD_hook' */ } /* ** Call the C function 'func' in protected mode, restoring basic ** thread information ('allowhook', etc.) and in particular ** its stack level in case of errors. 
*/ int luaD_pcall (lua_State *L, Pfunc func, void *u, ptrdiff_t old_top, ptrdiff_t ef) { int status; CallInfo *old_ci = L->ci; lu_byte old_allowhooks = L->allowhook; ptrdiff_t old_errfunc = L->errfunc; L->errfunc = ef; status = luaD_rawrunprotected(L, func, u); if (unlikely(status != LUA_OK)) { /* an error occurred? */ StkId oldtop = restorestack(L, old_top); L->ci = old_ci; L->allowhook = old_allowhooks; status = luaF_close(L, oldtop, status); oldtop = restorestack(L, old_top); /* previous call may change stack */ luaD_seterrorobj(L, status, oldtop); luaD_shrinkstack(L); } L->errfunc = old_errfunc; return status; } /* ** Execute a protected parser. */ struct SParser { /* data to 'f_parser' */ ZIO *z; Mbuffer buff; /* dynamic structure used by the scanner */ Dyndata dyd; /* dynamic structures used by the parser */ const char *mode; const char *name; }; static void checkmode (lua_State *L, const char *mode, const char *x) { if (mode && strchr(mode, x[0]) == NULL) { luaO_pushfstring(L, "attempt to load a %s chunk (mode is '%s')", x, mode); luaD_throw(L, LUA_ERRSYNTAX); } } static void f_parser (lua_State *L, void *ud) { LClosure *cl; struct SParser *p = cast(struct SParser *, ud); int c = zgetc(p->z); /* read first character */ if (c == LUA_SIGNATURE[0]) { checkmode(L, p->mode, "binary"); cl = luaU_undump(L, p->z, p->name); } else { checkmode(L, p->mode, "text"); cl = luaY_parser(L, p->z, &p->buff, &p->dyd, p->name, c); } lua_assert(cl->nupvalues == cl->p->sizeupvalues); luaF_initupvals(L, cl); } int luaD_protectedparser (lua_State *L, ZIO *z, const char *name, const char *mode) { struct SParser p; int status; incnny(L); /* cannot yield during parsing */ p.z = z; p.name = name; p.mode = mode; p.dyd.actvar.arr = NULL; p.dyd.actvar.size = 0; p.dyd.gt.arr = NULL; p.dyd.gt.size = 0; p.dyd.label.arr = NULL; p.dyd.label.size = 0; luaZ_initbuffer(L, &p.buff); status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc); luaZ_freebuffer(L, &p.buff); 
luaM_freearray(L, p.dyd.actvar.arr, p.dyd.actvar.size); luaM_freearray(L, p.dyd.gt.arr, p.dyd.gt.size); luaM_freearray(L, p.dyd.label.arr, p.dyd.label.size); decnny(L); return status; }
/* ** $Id: ldo.c $ ** Stack and Call structure of Lua ** See Copyright Notice in lua.h */ #define ldo_c #define LUA_CORE #include "lprefix.h" #include <setjmp.h> #include <stdlib.h> #include <string.h> #include "lua.h" #include "lapi.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lopcodes.h" #include "lparser.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lundump.h" #include "lvm.h" #include "lzio.h" #define errorstatus(s) ((s) > LUA_YIELD) /* ** {====================================================== ** Error-recovery functions ** ======================================================= */ /* ** LUAI_THROW/LUAI_TRY define how Lua does exception handling. By ** default, Lua handles errors with exceptions when compiling as ** C++ code, with _longjmp/_setjmp when asked to use them, and with ** longjmp/setjmp otherwise. */ #if !defined(LUAI_THROW) /* { */ #if defined(__cplusplus) && !defined(LUA_USE_LONGJMP) /* { */ /* C++ exceptions */ #define LUAI_THROW(L,c) throw(c) #define LUAI_TRY(L,c,a) \ try { a } catch(...) { if ((c)->status == 0) (c)->status = -1; } #define luai_jmpbuf int /* dummy variable */ #elif defined(LUA_USE_POSIX) /* }{ */ /* in POSIX, try _longjmp/_setjmp (more efficient) */ #define LUAI_THROW(L,c) _longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #else /* }{ */ /* ISO C handling with long jumps */ #define LUAI_THROW(L,c) longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #endif /* } */ #endif /* } */ /* chain list of long jump buffers */ struct lua_longjmp { struct lua_longjmp *previous; luai_jmpbuf b; volatile int status; /* error code */ }; void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) { switch (errcode) { case LUA_ERRMEM: { /* memory error? 
*/ setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */ break; } case LUA_ERRERR: { setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling")); break; } case CLOSEPROTECT: { setnilvalue(s2v(oldtop)); /* no error message */ break; } default: { setobjs2s(L, oldtop, L->top - 1); /* error message on current top */ break; } } L->top = oldtop + 1; } l_noret luaD_throw (lua_State *L, int errcode) { if (L->errorJmp) { /* thread has an error handler? */ L->errorJmp->status = errcode; /* set status */ LUAI_THROW(L, L->errorJmp); /* jump to it */ } else { /* thread has no error handler */ global_State *g = G(L); errcode = luaF_close(L, L->stack, errcode); /* close all upvalues */ L->status = cast_byte(errcode); /* mark it as dead */ if (g->mainthread->errorJmp) { /* main thread has a handler? */ setobjs2s(L, g->mainthread->top++, L->top - 1); /* copy error obj. */ luaD_throw(g->mainthread, errcode); /* re-throw in main thread */ } else { /* no handler at all; abort */ if (g->panic) { /* panic function? */ luaD_seterrorobj(L, errcode, L->top); /* assume EXTRA_STACK */ if (L->ci->top < L->top) L->ci->top = L->top; /* pushing msg. 
can break this invariant */ lua_unlock(L); g->panic(L); /* call panic function (last chance to jump out) */ } abort(); } } } int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) { global_State *g = G(L); l_uint32 oldnCcalls = g->Cstacklimit - (L->nCcalls + L->nci); struct lua_longjmp lj; lj.status = LUA_OK; lj.previous = L->errorJmp; /* chain new error handler */ L->errorJmp = &lj; LUAI_TRY(L, &lj, (*f)(L, ud); ); L->errorJmp = lj.previous; /* restore old error handler */ L->nCcalls = g->Cstacklimit - oldnCcalls - L->nci; return lj.status; } /* }====================================================== */ /* ** {================================================================== ** Stack reallocation ** =================================================================== */ static void correctstack (lua_State *L, StkId oldstack, StkId newstack) { CallInfo *ci; UpVal *up; if (oldstack == newstack) return; /* stack address did not change */ L->top = (L->top - oldstack) + newstack; for (up = L->openupval; up != NULL; up = up->u.open.next) up->v = s2v((uplevel(up) - oldstack) + newstack); for (ci = L->ci; ci != NULL; ci = ci->previous) { ci->top = (ci->top - oldstack) + newstack; ci->func = (ci->func - oldstack) + newstack; if (isLua(ci)) ci->u.l.trap = 1; /* signal to update 'trap' in 'luaV_execute' */ } } /* some space for error handling */ #define ERRORSTACKSIZE (LUAI_MAXSTACK + 200) int luaD_reallocstack (lua_State *L, int newsize, int raiseerror) { int lim = L->stacksize; StkId newstack = luaM_reallocvector(L, L->stack, lim, newsize, StackValue); lua_assert(newsize <= LUAI_MAXSTACK || newsize == ERRORSTACKSIZE); lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK); if (unlikely(newstack == NULL)) { /* reallocation failed? 
*/ if (raiseerror) luaM_error(L); else return 0; /* do not raise an error */ } for (; lim < newsize; lim++) setnilvalue(s2v(newstack + lim)); /* erase new segment */ correctstack(L, L->stack, newstack); L->stack = newstack; L->stacksize = newsize; L->stack_last = L->stack + newsize - EXTRA_STACK; return 1; } /* ** Try to grow the stack by at least 'n' elements. when 'raiseerror' ** is true, raises any error; otherwise, return 0 in case of errors. */ int luaD_growstack (lua_State *L, int n, int raiseerror) { int size = L->stacksize; int newsize = 2 * size; /* tentative new size */ if (unlikely(size > LUAI_MAXSTACK)) { /* need more space after extra size? */ if (raiseerror) luaD_throw(L, LUA_ERRERR); /* error inside message handler */ else return 0; } else { int needed = cast_int(L->top - L->stack) + n + EXTRA_STACK; if (newsize > LUAI_MAXSTACK) /* cannot cross the limit */ newsize = LUAI_MAXSTACK; if (newsize < needed) /* but must respect what was asked for */ newsize = needed; if (unlikely(newsize > LUAI_MAXSTACK)) { /* stack overflow? 
*/
      /* add extra size to be able to handle the error message */
      luaD_reallocstack(L, ERRORSTACKSIZE, raiseerror);
      if (raiseerror)
        luaG_runerror(L, "stack overflow");
      else return 0;
    }
  }  /* else no errors */
  return luaD_reallocstack(L, newsize, raiseerror);
}


/*
** Compute how much of the stack is actually in use: the highest of
** 'L->top' and every call frame's 'ci->top', as an element count from
** the stack base (plus one, so the result is a size, not an index).
*/
static int stackinuse (lua_State *L) {
  CallInfo *ci;
  StkId lim = L->top;
  for (ci = L->ci; ci != NULL; ci = ci->previous) {
    if (lim < ci->top) lim = ci->top;
  }
  lua_assert(lim <= L->stack_last);
  return cast_int(lim - L->stack) + 1;  /* part of stack in use */
}


/*
** If the stack is much larger than the current use, shrink it to the
** "good" size: the part in use plus one basic frame of slack.
** While the thread is handling a stack overflow (inuse above
** LUAI_MAXSTACK - EXTRA_STACK) the stack is left alone, so the
** oversized error stack survives until the error is fully handled.
*/
void luaD_shrinkstack (lua_State *L) {
  int inuse = stackinuse(L);
  int goodsize = inuse + BASIC_STACK_SIZE;  /* in-use part plus slack */
  if (goodsize > LUAI_MAXSTACK)
    goodsize = LUAI_MAXSTACK;  /* respect stack limit */
  /* if thread is currently not handling a stack overflow and its
     good size is smaller than current size, shrink its stack */
  if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) &&
      goodsize < L->stacksize)
    luaD_reallocstack(L, goodsize, 0);  /* ok if that fails */
  else  /* don't change stack */
    condmovestack(L,{},{});  /* (change only for debugging) */
  luaE_shrinkCI(L);  /* shrink CI list */
}


/* Grow the stack by one slot (growing the backing array if needed). */
void luaD_inctop (lua_State *L) {
  luaD_checkstack(L, 1);
  L->top++;
}

/* }================================================================== */


/*
** Call a hook for the given event. Make sure there is a hook to be
** called. (Both 'L->hook' and 'L->hookmask', which trigger this
** function, can be changed asynchronously by signals.)
*/ void luaD_hook (lua_State *L, int event, int line, int ftransfer, int ntransfer) { lua_Hook hook = L->hook; if (hook && L->allowhook) { /* make sure there is a hook */ int mask = CIST_HOOKED; CallInfo *ci = L->ci; ptrdiff_t top = savestack(L, L->top); ptrdiff_t ci_top = savestack(L, ci->top); lua_Debug ar; ar.event = event; ar.currentline = line; ar.i_ci = ci; if (ntransfer != 0) { mask |= CIST_TRAN; /* 'ci' has transfer information */ ci->u2.transferinfo.ftransfer = ftransfer; ci->u2.transferinfo.ntransfer = ntransfer; } luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */ if (L->top + LUA_MINSTACK > ci->top) ci->top = L->top + LUA_MINSTACK; L->allowhook = 0; /* cannot call hooks inside a hook */ ci->callstatus |= mask; lua_unlock(L); (*hook)(L, &ar); lua_lock(L); lua_assert(!L->allowhook); L->allowhook = 1; ci->top = restorestack(L, ci_top); L->top = restorestack(L, top); ci->callstatus &= ~mask; } } /* ** Executes a call hook for Lua functions. This function is called ** whenever 'hookmask' is not zero, so it checks whether call hooks are ** active. */ void luaD_hookcall (lua_State *L, CallInfo *ci) { int hook = (ci->callstatus & CIST_TAIL) ? LUA_HOOKTAILCALL : LUA_HOOKCALL; Proto *p; if (!(L->hookmask & LUA_MASKCALL)) /* some other hook? */ return; /* don't call hook */ p = clLvalue(s2v(ci->func))->p; L->top = ci->top; /* prepare top */ ci->u.l.savedpc++; /* hooks assume 'pc' is already incremented */ luaD_hook(L, hook, -1, 1, p->numparams); ci->u.l.savedpc--; /* correct 'pc' */ } static StkId rethook (lua_State *L, CallInfo *ci, StkId firstres, int nres) { ptrdiff_t oldtop = savestack(L, L->top); /* hook may change top */ int delta = 0; if (isLuacode(ci)) { Proto *p = clLvalue(s2v(ci->func))->p; if (p->is_vararg) delta = ci->u.l.nextraargs + p->numparams + 1; if (L->top < ci->top) L->top = ci->top; /* correct top to run hook */ } if (L->hookmask & LUA_MASKRET) { /* is return hook on? 
*/ int ftransfer; ci->func += delta; /* if vararg, back to virtual 'func' */ ftransfer = cast(unsigned short, firstres - ci->func); luaD_hook(L, LUA_HOOKRET, -1, ftransfer, nres); /* call it */ ci->func -= delta; } if (isLua(ci->previous)) L->oldpc = ci->previous->u.l.savedpc; /* update 'oldpc' */ return restorestack(L, oldtop); } /* ** Check whether 'func' has a '__call' metafield. If so, put it in the ** stack, below original 'func', so that 'luaD_call' can call it. Raise ** an error if there is no '__call' metafield. */ void luaD_tryfuncTM (lua_State *L, StkId func) { const TValue *tm = luaT_gettmbyobj(L, s2v(func), TM_CALL); StkId p; if (unlikely(ttisnil(tm))) luaG_typeerror(L, s2v(func), "call"); /* nothing to call */ for (p = L->top; p > func; p--) /* open space for metamethod */ setobjs2s(L, p, p-1); L->top++; /* stack space pre-allocated by the caller */ setobj2s(L, func, tm); /* metamethod is the new function to be called */ } /* ** Given 'nres' results at 'firstResult', move 'wanted' of them to 'res'. ** Handle most typical cases (zero results for commands, one result for ** expressions, multiple results for tail calls/single parameters) ** separated. */ static void moveresults (lua_State *L, StkId res, int nres, int wanted) { StkId firstresult; int i; switch (wanted) { /* handle typical cases separately */ case 0: /* no values needed */ L->top = res; return; case 1: /* one value needed */ if (nres == 0) /* no results? */ setnilvalue(s2v(res)); /* adjust with nil */ else setobjs2s(L, res, L->top - nres); /* move it to proper place */ L->top = res + 1; return; case LUA_MULTRET: wanted = nres; /* we want all results */ break; default: /* multiple results (or to-be-closed variables) */ if (hastocloseCfunc(wanted)) { /* to-be-closed variables? 
*/ ptrdiff_t savedres = savestack(L, res); luaF_close(L, res, LUA_OK); /* may change the stack */ res = restorestack(L, savedres); wanted = codeNresults(wanted); /* correct value */ if (wanted == LUA_MULTRET) wanted = nres; } break; } firstresult = L->top - nres; /* index of first result */ /* move all results to correct place */ for (i = 0; i < nres && i < wanted; i++) setobjs2s(L, res + i, firstresult + i); for (; i < wanted; i++) /* complete wanted number of results */ setnilvalue(s2v(res + i)); L->top = res + wanted; /* top points after the last result */ } /* ** Finishes a function call: calls hook if necessary, removes CallInfo, ** moves current number of results to proper place. */ void luaD_poscall (lua_State *L, CallInfo *ci, int nres) { if (L->hookmask) L->top = rethook(L, ci, L->top - nres, nres); L->ci = ci->previous; /* back to caller */ /* move results to proper place */ moveresults(L, ci->func, nres, ci->nresults); } #define next_ci(L) (L->ci->next ? L->ci->next : luaE_extendCI(L)) /* ** Prepare a function for a tail call, building its call info on top ** of the current call info. 'narg1' is the number of arguments plus 1 ** (so that it includes the function itself). */ void luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func, int narg1) { Proto *p = clLvalue(s2v(func))->p; int fsize = p->maxstacksize; /* frame size */ int nfixparams = p->numparams; int i; for (i = 0; i < narg1; i++) /* move down function and arguments */ setobjs2s(L, ci->func + i, func + i); checkstackGC(L, fsize); func = ci->func; /* moved-down function */ for (; narg1 <= nfixparams; narg1++) setnilvalue(s2v(func + narg1)); /* complete missing arguments */ ci->top = func + 1 + fsize; /* top for new function */ lua_assert(ci->top <= L->stack_last); ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus |= CIST_TAIL; L->top = func + narg1; /* set top */ } /* ** Call a function (C or Lua). The function to be called is at *func. 
** The arguments are on the stack, right after the function. ** When returns, all the results are on the stack, starting at the original ** function position. */ void luaD_call (lua_State *L, StkId func, int nresults) { lua_CFunction f; retry: switch (ttypetag(s2v(func))) { case LUA_VCCL: /* C closure */ f = clCvalue(s2v(func))->f; goto Cfunc; case LUA_VLCF: /* light C function */ f = fvalue(s2v(func)); Cfunc: { int n; /* number of returns */ CallInfo *ci = next_ci(L); checkstackp(L, LUA_MINSTACK, func); /* ensure minimum stack size */ ci->nresults = nresults; ci->callstatus = CIST_C; ci->top = L->top + LUA_MINSTACK; ci->func = func; L->ci = ci; lua_assert(ci->top <= L->stack_last); if (L->hookmask & LUA_MASKCALL) { int narg = cast_int(L->top - func) - 1; luaD_hook(L, LUA_HOOKCALL, -1, 1, narg); } lua_unlock(L); n = (*f)(L); /* do the actual call */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); break; } case LUA_VLCL: { /* Lua function */ CallInfo *ci = next_ci(L); Proto *p = clLvalue(s2v(func))->p; int narg = cast_int(L->top - func) - 1; /* number of real arguments */ int nfixparams = p->numparams; int fsize = p->maxstacksize; /* frame size */ checkstackp(L, fsize, func); ci->nresults = nresults; ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus = 0; ci->top = func + 1 + fsize; ci->func = func; L->ci = ci; for (; narg < nfixparams; narg++) setnilvalue(s2v(L->top++)); /* complete missing arguments */ lua_assert(ci->top <= L->stack_last); luaV_execute(L, ci); /* run the function */ break; } default: { /* not a function */ checkstackp(L, 1, func); /* space for metamethod */ luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */ goto retry; /* try again with metamethod */ } } } /* ** Similar to 'luaD_call', but does not allow yields during the call. ** If there is a stack overflow, freeing all CI structures will ** force the subsequent call to invoke 'luaE_extendCI', which then ** will raise any errors. 
*/ void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */ luaE_freeCI(L); luaD_call(L, func, nResults); decXCcalls(L); } /* ** Completes the execution of an interrupted C function, calling its ** continuation function. */ static void finishCcall (lua_State *L, int status) { CallInfo *ci = L->ci; int n; /* must have a continuation and must be able to call it */ lua_assert(ci->u.c.k != NULL && yieldable(L)); /* error status can only happen in a protected call */ lua_assert((ci->callstatus & CIST_YPCALL) || status == LUA_YIELD); if (ci->callstatus & CIST_YPCALL) { /* was inside a pcall? */ ci->callstatus &= ~CIST_YPCALL; /* continuation is also inside it */ L->errfunc = ci->u.c.old_errfunc; /* with the same error function */ } /* finish 'lua_callk'/'lua_pcall'; CIST_YPCALL and 'errfunc' already handled */ adjustresults(L, ci->nresults); lua_unlock(L); n = (*ci->u.c.k)(L, status, ci->u.c.ctx); /* call continuation function */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } /* ** Executes "full continuation" (everything in the stack) of a ** previously interrupted coroutine until the stack is empty (or another ** interruption long-jumps out of the loop). If the coroutine is ** recovering from an error, 'ud' points to the error status, which must ** be passed to the first continuation function (otherwise the default ** status is LUA_YIELD). */ static void unroll (lua_State *L, void *ud) { CallInfo *ci; if (ud != NULL) /* error status? */ finishCcall(L, *(int *)ud); /* finish 'lua_pcallk' callee */ while ((ci = L->ci) != &L->base_ci) { /* something in the stack */ if (!isLua(ci)) /* C function? 
*/ finishCcall(L, LUA_YIELD); /* complete its execution */ else { /* Lua function */ luaV_finishOp(L); /* finish interrupted instruction */ luaV_execute(L, ci); /* execute down to higher C 'boundary' */ } } } /* ** Try to find a suspended protected call (a "recover point") for the ** given thread. */ static CallInfo *findpcall (lua_State *L) { CallInfo *ci; for (ci = L->ci; ci != NULL; ci = ci->previous) { /* search for a pcall */ if (ci->callstatus & CIST_YPCALL) return ci; } return NULL; /* no pending pcall */ } /* ** Recovers from an error in a coroutine. Finds a recover point (if ** there is one) and completes the execution of the interrupted ** 'luaD_pcall'. If there is no recover point, returns zero. */ static int recover (lua_State *L, int status) { StkId oldtop; CallInfo *ci = findpcall(L); if (ci == NULL) return 0; /* no recovery point */ /* "finish" luaD_pcall */ oldtop = restorestack(L, ci->u2.funcidx); luaF_close(L, oldtop, status); /* may change the stack */ oldtop = restorestack(L, ci->u2.funcidx); luaD_seterrorobj(L, status, oldtop); L->ci = ci; L->allowhook = getoah(ci->callstatus); /* restore original 'allowhook' */ luaD_shrinkstack(L); L->errfunc = ci->u.c.old_errfunc; return 1; /* continue running the coroutine */ } /* ** Signal an error in the call to 'lua_resume', not in the execution ** of the coroutine itself. (Such errors should not be handled by any ** coroutine error handler and should not kill the coroutine.) */ static int resume_error (lua_State *L, const char *msg, int narg) { L->top -= narg; /* remove args from the stack */ setsvalue2s(L, L->top, luaS_new(L, msg)); /* push error message */ api_incr_top(L); lua_unlock(L); return LUA_ERRRUN; } /* ** Do the work for 'lua_resume' in protected mode. Most of the work ** depends on the status of the coroutine: initial state, suspended ** inside a hook, or regularly suspended (optionally with a continuation ** function), plus erroneous cases: non-suspended coroutine or dead ** coroutine. 
*/ static void resume (lua_State *L, void *ud) { int n = *(cast(int*, ud)); /* number of arguments */ StkId firstArg = L->top - n; /* first argument */ CallInfo *ci = L->ci; if (L->status == LUA_OK) { /* starting a coroutine? */ luaD_call(L, firstArg - 1, LUA_MULTRET); } else { /* resuming from previous yield */ lua_assert(L->status == LUA_YIELD); L->status = LUA_OK; /* mark that it is running (again) */ if (isLua(ci)) /* yielded inside a hook? */ luaV_execute(L, ci); /* just continue running Lua code */ else { /* 'common' yield */ if (ci->u.c.k != NULL) { /* does it have a continuation function? */ lua_unlock(L); n = (*ci->u.c.k)(L, LUA_YIELD, ci->u.c.ctx); /* call continuation */ lua_lock(L); api_checknelems(L, n); } luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } unroll(L, NULL); /* run continuation */ } } LUA_API int lua_resume (lua_State *L, lua_State *from, int nargs, int *nresults) { int status; lua_lock(L); if (L->status == LUA_OK) { /* may be starting a coroutine */ if (L->ci != &L->base_ci) /* not in base level? */ return resume_error(L, "cannot resume non-suspended coroutine", nargs); else if (L->top - (L->ci->func + 1) == nargs) /* no function? */ return resume_error(L, "cannot resume dead coroutine", nargs); } else if (L->status != LUA_YIELD) /* ended with errors? */ return resume_error(L, "cannot resume dead coroutine", nargs); if (from == NULL) L->nCcalls = CSTACKTHREAD; else /* correct 'nCcalls' for this thread */ L->nCcalls = getCcalls(from) + from->nci - L->nci - CSTACKCF; if (L->nCcalls <= CSTACKERR) return resume_error(L, "C stack overflow", nargs); luai_userstateresume(L, nargs); api_checknelems(L, (L->status == LUA_OK) ? 
nargs + 1 : nargs); status = luaD_rawrunprotected(L, resume, &nargs); /* continue running after recoverable errors */ while (errorstatus(status) && recover(L, status)) { /* unroll continuation */ status = luaD_rawrunprotected(L, unroll, &status); } if (likely(!errorstatus(status))) lua_assert(status == L->status); /* normal end or yield */ else { /* unrecoverable error */ L->status = cast_byte(status); /* mark thread as 'dead' */ luaD_seterrorobj(L, status, L->top); /* push error message */ L->ci->top = L->top; } *nresults = (status == LUA_YIELD) ? L->ci->u2.nyield : cast_int(L->top - (L->ci->func + 1)); lua_unlock(L); return status; } LUA_API int lua_isyieldable (lua_State *L) { return yieldable(L); } LUA_API int lua_yieldk (lua_State *L, int nresults, lua_KContext ctx, lua_KFunction k) { CallInfo *ci = L->ci; luai_userstateyield(L, nresults); lua_lock(L); api_checknelems(L, nresults); if (unlikely(!yieldable(L))) { if (L != G(L)->mainthread) luaG_runerror(L, "attempt to yield across a C-call boundary"); else luaG_runerror(L, "attempt to yield from outside a coroutine"); } L->status = LUA_YIELD; if (isLua(ci)) { /* inside a hook? */ lua_assert(!isLuacode(ci)); api_check(L, k == NULL, "hooks cannot continue after yielding"); ci->u2.nyield = 0; /* no results */ } else { if ((ci->u.c.k = k) != NULL) /* is there a continuation? */ ci->u.c.ctx = ctx; /* save context */ ci->u2.nyield = nresults; /* save number of results */ luaD_throw(L, LUA_YIELD); } lua_assert(ci->callstatus & CIST_HOOKED); /* must be inside a hook */ lua_unlock(L); return 0; /* return to 'luaD_hook' */ } /* ** Call the C function 'func' in protected mode, restoring basic ** thread information ('allowhook', etc.) and in particular ** its stack level in case of errors. 
*/ int luaD_pcall (lua_State *L, Pfunc func, void *u, ptrdiff_t old_top, ptrdiff_t ef) { int status; CallInfo *old_ci = L->ci; lu_byte old_allowhooks = L->allowhook; ptrdiff_t old_errfunc = L->errfunc; L->errfunc = ef; status = luaD_rawrunprotected(L, func, u); if (unlikely(status != LUA_OK)) { /* an error occurred? */ StkId oldtop = restorestack(L, old_top); L->ci = old_ci; L->allowhook = old_allowhooks; status = luaF_close(L, oldtop, status); oldtop = restorestack(L, old_top); /* previous call may change stack */ luaD_seterrorobj(L, status, oldtop); luaD_shrinkstack(L); } L->errfunc = old_errfunc; return status; } /* ** Execute a protected parser. */ struct SParser { /* data to 'f_parser' */ ZIO *z; Mbuffer buff; /* dynamic structure used by the scanner */ Dyndata dyd; /* dynamic structures used by the parser */ const char *mode; const char *name; }; static void checkmode (lua_State *L, const char *mode, const char *x) { if (mode && strchr(mode, x[0]) == NULL) { luaO_pushfstring(L, "attempt to load a %s chunk (mode is '%s')", x, mode); luaD_throw(L, LUA_ERRSYNTAX); } } static void f_parser (lua_State *L, void *ud) { LClosure *cl; struct SParser *p = cast(struct SParser *, ud); int c = zgetc(p->z); /* read first character */ if (c == LUA_SIGNATURE[0]) { checkmode(L, p->mode, "binary"); cl = luaU_undump(L, p->z, p->name); } else { checkmode(L, p->mode, "text"); cl = luaY_parser(L, p->z, &p->buff, &p->dyd, p->name, c); } lua_assert(cl->nupvalues == cl->p->sizeupvalues); luaF_initupvals(L, cl); } int luaD_protectedparser (lua_State *L, ZIO *z, const char *name, const char *mode) { struct SParser p; int status; incnny(L); /* cannot yield during parsing */ p.z = z; p.name = name; p.mode = mode; p.dyd.actvar.arr = NULL; p.dyd.actvar.size = 0; p.dyd.gt.arr = NULL; p.dyd.gt.size = 0; p.dyd.label.arr = NULL; p.dyd.label.size = 0; luaZ_initbuffer(L, &p.buff); status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc); luaZ_freebuffer(L, &p.buff); 
luaM_freearray(L, p.dyd.actvar.arr, p.dyd.actvar.size); luaM_freearray(L, p.dyd.gt.arr, p.dyd.gt.size); luaM_freearray(L, p.dyd.label.arr, p.dyd.label.size); decnny(L); return status; }
void luaD_shrinkstack (lua_State *L) { int inuse = stackinuse(L); int goodsize = inuse + (inuse / 8) + 2*EXTRA_STACK; if (goodsize > LUAI_MAXSTACK) goodsize = LUAI_MAXSTACK; /* respect stack limit */ /* if thread is currently not handling a stack overflow and its good size is smaller than current size, shrink its stack */ if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize) luaD_reallocstack(L, goodsize, 0); /* ok if that fails */ else /* don't change stack */ condmovestack(L,{},{}); /* (change only for debugging) */ luaE_shrinkCI(L); /* shrink CI list */ }
void luaD_shrinkstack (lua_State *L) { int inuse = stackinuse(L); int goodsize = inuse + BASIC_STACK_SIZE; if (goodsize > LUAI_MAXSTACK) goodsize = LUAI_MAXSTACK; /* respect stack limit */ /* if thread is currently not handling a stack overflow and its good size is smaller than current size, shrink its stack */ if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize) luaD_reallocstack(L, goodsize, 0); /* ok if that fails */ else /* don't change stack */ condmovestack(L,{},{}); /* (change only for debugging) */ luaE_shrinkCI(L); /* shrink CI list */ }
{'added': [(248, ' int goodsize = inuse + BASIC_STACK_SIZE;'), (253, ' if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize)')], 'deleted': [(248, ' int goodsize = inuse + (inuse / 8) + 2*EXTRA_STACK;'), (253, ' if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) &&'), (254, ' goodsize < L->stacksize)')]}
2
3
577
4,277
12
83
4
https://github.com/lua/lua
CVE-2020-15888
CWE-125
2,235
zgfx.c
C++
zgfx_decompress_segment
/** * FreeRDP: A Remote Desktop Protocol Implementation * ZGFX (RDP8) Bulk Data Compression * * Copyright 2014 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2017 Armin Novak <armin.novak@thincast.com> * Copyright 2017 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/bitstream.h> #include <freerdp/log.h> #include <freerdp/codec/zgfx.h> #define TAG FREERDP_TAG("codec") /** * RDP8 Compressor Limits: * * Maximum number of uncompressed bytes in a single segment: 65535 * Maximum match distance / minimum history size: 2500000 bytes. 
* Maximum number of segments: 65535 * Maximum expansion of a segment (when compressed size exceeds uncompressed): 1000 bytes * Minimum match length: 3 bytes */ struct _ZGFX_TOKEN { UINT32 prefixLength; UINT32 prefixCode; UINT32 valueBits; UINT32 tokenType; UINT32 valueBase; }; typedef struct _ZGFX_TOKEN ZGFX_TOKEN; struct _ZGFX_CONTEXT { BOOL Compressor; const BYTE* pbInputCurrent; const BYTE* pbInputEnd; UINT32 bits; UINT32 cBitsRemaining; UINT32 BitsCurrent; UINT32 cBitsCurrent; BYTE OutputBuffer[65536]; UINT32 OutputCount; BYTE HistoryBuffer[2500000]; UINT32 HistoryIndex; UINT32 HistoryBufferSize; }; static const ZGFX_TOKEN ZGFX_TOKEN_TABLE[] = { // len code vbits type vbase { 1, 0, 8, 0, 0 }, // 0 { 5, 17, 5, 1, 0 }, // 10001 { 5, 18, 7, 1, 32 }, // 10010 { 5, 19, 9, 1, 160 }, // 10011 { 5, 20, 10, 1, 672 }, // 10100 { 5, 21, 12, 1, 1696 }, // 10101 { 5, 24, 0, 0, 0x00 }, // 11000 { 5, 25, 0, 0, 0x01 }, // 11001 { 6, 44, 14, 1, 5792 }, // 101100 { 6, 45, 15, 1, 22176 }, // 101101 { 6, 52, 0, 0, 0x02 }, // 110100 { 6, 53, 0, 0, 0x03 }, // 110101 { 6, 54, 0, 0, 0xFF }, // 110110 { 7, 92, 18, 1, 54944 }, // 1011100 { 7, 93, 20, 1, 317088 }, // 1011101 { 7, 110, 0, 0, 0x04 }, // 1101110 { 7, 111, 0, 0, 0x05 }, // 1101111 { 7, 112, 0, 0, 0x06 }, // 1110000 { 7, 113, 0, 0, 0x07 }, // 1110001 { 7, 114, 0, 0, 0x08 }, // 1110010 { 7, 115, 0, 0, 0x09 }, // 1110011 { 7, 116, 0, 0, 0x0A }, // 1110100 { 7, 117, 0, 0, 0x0B }, // 1110101 { 7, 118, 0, 0, 0x3A }, // 1110110 { 7, 119, 0, 0, 0x3B }, // 1110111 { 7, 120, 0, 0, 0x3C }, // 1111000 { 7, 121, 0, 0, 0x3D }, // 1111001 { 7, 122, 0, 0, 0x3E }, // 1111010 { 7, 123, 0, 0, 0x3F }, // 1111011 { 7, 124, 0, 0, 0x40 }, // 1111100 { 7, 125, 0, 0, 0x80 }, // 1111101 { 8, 188, 20, 1, 1365664 }, // 10111100 { 8, 189, 21, 1, 2414240 }, // 10111101 { 8, 252, 0, 0, 0x0C }, // 11111100 { 8, 253, 0, 0, 0x38 }, // 11111101 { 8, 254, 0, 0, 0x39 }, // 11111110 { 8, 255, 0, 0, 0x66 }, // 11111111 { 9, 380, 22, 1, 4511392 }, // 101111100 { 
9, 381, 23, 1, 8705696 }, // 101111101 { 9, 382, 24, 1, 17094304 }, // 101111110 { 0 } }; static INLINE BOOL zgfx_GetBits(ZGFX_CONTEXT* _zgfx, UINT32 _nbits) { if (!_zgfx) return FALSE; while (_zgfx->cBitsCurrent < _nbits) { _zgfx->BitsCurrent <<= 8; if (_zgfx->pbInputCurrent < _zgfx->pbInputEnd) _zgfx->BitsCurrent += *(_zgfx->pbInputCurrent)++; _zgfx->cBitsCurrent += 8; } _zgfx->cBitsRemaining -= _nbits; _zgfx->cBitsCurrent -= _nbits; _zgfx->bits = _zgfx->BitsCurrent >> _zgfx->cBitsCurrent; _zgfx->BitsCurrent &= ((1 << _zgfx->cBitsCurrent) - 1); } static void zgfx_history_buffer_ring_write(ZGFX_CONTEXT* zgfx, const BYTE* src, size_t count) { UINT32 front; if (count <= 0) return; if (count > zgfx->HistoryBufferSize) { const size_t residue = count - zgfx->HistoryBufferSize; count = zgfx->HistoryBufferSize; src += residue; zgfx->HistoryIndex = (zgfx->HistoryIndex + residue) % zgfx->HistoryBufferSize; } if (zgfx->HistoryIndex + count <= zgfx->HistoryBufferSize) { CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, count); if ((zgfx->HistoryIndex += count) == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; } else { front = zgfx->HistoryBufferSize - zgfx->HistoryIndex; CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, front); CopyMemory(zgfx->HistoryBuffer, &src[front], count - front); zgfx->HistoryIndex = count - front; } } static void zgfx_history_buffer_ring_read(ZGFX_CONTEXT* zgfx, int offset, BYTE* dst, UINT32 count) { UINT32 front; UINT32 index; UINT32 bytes; UINT32 valid; UINT32 bytesLeft; BYTE* dptr = dst; BYTE* origDst = dst; if (count <= 0) return; bytesLeft = count; index = (zgfx->HistoryIndex + zgfx->HistoryBufferSize - offset) % zgfx->HistoryBufferSize; bytes = MIN(bytesLeft, offset); if ((index + bytes) <= zgfx->HistoryBufferSize) { CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), bytes); } else { front = zgfx->HistoryBufferSize - index; CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), front); CopyMemory(&dptr[front], zgfx->HistoryBuffer, 
bytes - front); } if ((bytesLeft -= bytes) == 0) return; dptr += bytes; valid = bytes; do { bytes = valid; if (bytes > bytesLeft) bytes = bytesLeft; CopyMemory(dptr, origDst, bytes); dptr += bytes; valid <<= 1; } while ((bytesLeft -= bytes) > 0); } static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize) { BYTE c; BYTE flags; UINT32 extra = 0; int opIndex; int haveBits; int inPrefix; UINT32 count; UINT32 distance; BYTE* pbSegment; size_t cbSegment = segmentSize - 1; if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1)) return FALSE; Stream_Read_UINT8(stream, flags); /* header (1 byte) */ zgfx->OutputCount = 0; pbSegment = Stream_Pointer(stream); Stream_Seek(stream, cbSegment); if (!(flags & PACKET_COMPRESSED)) { zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment); CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment); zgfx->OutputCount = cbSegment; return TRUE; } zgfx->pbInputCurrent = pbSegment; zgfx->pbInputEnd = &pbSegment[cbSegment - 1]; /* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */ zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; while (zgfx->cBitsRemaining) { haveBits = 0; inPrefix = 0; for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++) { while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength) { zgfx_GetBits(zgfx, 1); inPrefix = (inPrefix << 1) + zgfx->bits; haveBits++; } if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode) { if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0) { /* Literal */ zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits); zgfx->HistoryBuffer[zgfx->HistoryIndex] = c; if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; zgfx->OutputBuffer[zgfx->OutputCount++] = c; } else { zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); distance = 
ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits; if (distance != 0) { /* Match */ zgfx_GetBits(zgfx, 1); if (zgfx->bits == 0) { count = 3; } else { count = 4; extra = 2; zgfx_GetBits(zgfx, 1); while (zgfx->bits == 1) { count *= 2; extra++; zgfx_GetBits(zgfx, 1); } zgfx_GetBits(zgfx, extra); count += zgfx->bits; } zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx->OutputCount += count; } else { /* Unencoded */ zgfx_GetBits(zgfx, 15); count = zgfx->bits; zgfx->cBitsRemaining -= zgfx->cBitsCurrent; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count); zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count); zgfx->pbInputCurrent += count; zgfx->cBitsRemaining -= (8 * count); zgfx->OutputCount += count; } } break; } } } return TRUE; } int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32 flags) { int status = -1; BYTE descriptor; wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize); if (!stream) return -1; if (Stream_GetRemainingLength(stream) < 1) goto fail; Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */ if (descriptor == ZGFX_SEGMENTED_SINGLE) { if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream))) goto fail; *ppDstData = NULL; if (zgfx->OutputCount > 0) *ppDstData = (BYTE*) malloc(zgfx->OutputCount); if (!*ppDstData) goto fail; *pDstSize = zgfx->OutputCount; CopyMemory(*ppDstData, zgfx->OutputBuffer, zgfx->OutputCount); } else if (descriptor == ZGFX_SEGMENTED_MULTIPART) { UINT32 segmentSize; UINT16 segmentNumber; UINT16 segmentCount; UINT32 uncompressedSize; BYTE* pConcatenated; size_t used = 0; if (Stream_GetRemainingLength(stream) < 6) goto fail; Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */ 
Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */ if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32)) goto fail; pConcatenated = (BYTE*) malloc(uncompressedSize); if (!pConcatenated) goto fail; *ppDstData = pConcatenated; *pDstSize = uncompressedSize; for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++) { if (Stream_GetRemainingLength(stream) < sizeof(UINT32)) goto fail; Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */ if (!zgfx_decompress_segment(zgfx, stream, segmentSize)) goto fail; if (zgfx->OutputCount > UINT32_MAX - used) goto fail; if (used + zgfx->OutputCount > uncompressedSize) goto fail; CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount); pConcatenated += zgfx->OutputCount; used += zgfx->OutputCount; } } else { goto fail; } status = 1; fail: Stream_Free(stream, FALSE); return status; } static BOOL zgfx_compress_segment(ZGFX_CONTEXT* zgfx, wStream* s, const BYTE* pSrcData, UINT32 SrcSize, UINT32* pFlags) { /* FIXME: Currently compression not implemented. Just copy the raw source */ if (!Stream_EnsureRemainingCapacity(s, SrcSize + 1)) { WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!"); return FALSE; } (*pFlags) |= ZGFX_PACKET_COMPR_TYPE_RDP8; /* RDP 8.0 compression format */ Stream_Write_UINT8(s, (*pFlags)); /* header (1 byte) */ Stream_Write(s, pSrcData, SrcSize); return TRUE; } int zgfx_compress_to_stream(ZGFX_CONTEXT* zgfx, wStream* sDst, const BYTE* pUncompressed, UINT32 uncompressedSize, UINT32* pFlags) { int fragment; UINT16 maxLength; UINT32 totalLength; size_t posSegmentCount = 0; const BYTE* pSrcData; int status = 0; maxLength = ZGFX_SEGMENTED_MAXSIZE; totalLength = uncompressedSize; pSrcData = pUncompressed; for (fragment = 0; (totalLength > 0) || (fragment == 0); fragment++) { UINT32 SrcSize; size_t posDstSize; size_t posDataStart; UINT32 DstSize; SrcSize = (totalLength > maxLength) ? 
maxLength : totalLength; posDstSize = 0; totalLength -= SrcSize; /* Ensure we have enough space for headers */ if (!Stream_EnsureRemainingCapacity(sDst, 12)) { WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!"); return -1; } if (fragment == 0) { /* First fragment */ /* descriptor (1 byte) */ Stream_Write_UINT8(sDst, (totalLength == 0) ? ZGFX_SEGMENTED_SINGLE : ZGFX_SEGMENTED_MULTIPART); if (totalLength > 0) { posSegmentCount = Stream_GetPosition(sDst); /* segmentCount (2 bytes) */ Stream_Seek(sDst, 2); Stream_Write_UINT32(sDst, uncompressedSize); /* uncompressedSize (4 bytes) */ } } if (fragment > 0 || totalLength > 0) { /* Multipart */ posDstSize = Stream_GetPosition(sDst); /* size (4 bytes) */ Stream_Seek(sDst, 4); } posDataStart = Stream_GetPosition(sDst); if (!zgfx_compress_segment(zgfx, sDst, pSrcData, SrcSize, pFlags)) return -1; if (posDstSize) { /* Fill segment data size */ DstSize = Stream_GetPosition(sDst) - posDataStart; Stream_SetPosition(sDst, posDstSize); Stream_Write_UINT32(sDst, DstSize); Stream_SetPosition(sDst, posDataStart + DstSize); } pSrcData += SrcSize; } Stream_SealLength(sDst); /* fill back segmentCount */ if (posSegmentCount) { Stream_SetPosition(sDst, posSegmentCount); Stream_Write_UINT16(sDst, fragment); Stream_SetPosition(sDst, Stream_Length(sDst)); } return status; } int zgfx_compress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32* pFlags) { int status; wStream* s = Stream_New(NULL, SrcSize); status = zgfx_compress_to_stream(zgfx, s, pSrcData, SrcSize, pFlags); (*ppDstData) = Stream_Buffer(s); (*pDstSize) = Stream_GetPosition(s); Stream_Free(s, FALSE); return status; } void zgfx_context_reset(ZGFX_CONTEXT* zgfx, BOOL flush) { zgfx->HistoryIndex = 0; } ZGFX_CONTEXT* zgfx_context_new(BOOL Compressor) { ZGFX_CONTEXT* zgfx; zgfx = (ZGFX_CONTEXT*) calloc(1, sizeof(ZGFX_CONTEXT)); if (zgfx) { zgfx->Compressor = Compressor; zgfx->HistoryBufferSize = sizeof(zgfx->HistoryBuffer); 
zgfx_context_reset(zgfx, FALSE); } return zgfx; } void zgfx_context_free(ZGFX_CONTEXT* zgfx) { free(zgfx); }
/** * FreeRDP: A Remote Desktop Protocol Implementation * ZGFX (RDP8) Bulk Data Compression * * Copyright 2014 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2017 Armin Novak <armin.novak@thincast.com> * Copyright 2017 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/bitstream.h> #include <freerdp/log.h> #include <freerdp/codec/zgfx.h> #define TAG FREERDP_TAG("codec") /** * RDP8 Compressor Limits: * * Maximum number of uncompressed bytes in a single segment: 65535 * Maximum match distance / minimum history size: 2500000 bytes. 
* Maximum number of segments: 65535 * Maximum expansion of a segment (when compressed size exceeds uncompressed): 1000 bytes * Minimum match length: 3 bytes */ struct _ZGFX_TOKEN { UINT32 prefixLength; UINT32 prefixCode; UINT32 valueBits; UINT32 tokenType; UINT32 valueBase; }; typedef struct _ZGFX_TOKEN ZGFX_TOKEN; struct _ZGFX_CONTEXT { BOOL Compressor; const BYTE* pbInputCurrent; const BYTE* pbInputEnd; UINT32 bits; UINT32 cBitsRemaining; UINT32 BitsCurrent; UINT32 cBitsCurrent; BYTE OutputBuffer[65536]; UINT32 OutputCount; BYTE HistoryBuffer[2500000]; UINT32 HistoryIndex; UINT32 HistoryBufferSize; }; static const ZGFX_TOKEN ZGFX_TOKEN_TABLE[] = { // len code vbits type vbase { 1, 0, 8, 0, 0 }, // 0 { 5, 17, 5, 1, 0 }, // 10001 { 5, 18, 7, 1, 32 }, // 10010 { 5, 19, 9, 1, 160 }, // 10011 { 5, 20, 10, 1, 672 }, // 10100 { 5, 21, 12, 1, 1696 }, // 10101 { 5, 24, 0, 0, 0x00 }, // 11000 { 5, 25, 0, 0, 0x01 }, // 11001 { 6, 44, 14, 1, 5792 }, // 101100 { 6, 45, 15, 1, 22176 }, // 101101 { 6, 52, 0, 0, 0x02 }, // 110100 { 6, 53, 0, 0, 0x03 }, // 110101 { 6, 54, 0, 0, 0xFF }, // 110110 { 7, 92, 18, 1, 54944 }, // 1011100 { 7, 93, 20, 1, 317088 }, // 1011101 { 7, 110, 0, 0, 0x04 }, // 1101110 { 7, 111, 0, 0, 0x05 }, // 1101111 { 7, 112, 0, 0, 0x06 }, // 1110000 { 7, 113, 0, 0, 0x07 }, // 1110001 { 7, 114, 0, 0, 0x08 }, // 1110010 { 7, 115, 0, 0, 0x09 }, // 1110011 { 7, 116, 0, 0, 0x0A }, // 1110100 { 7, 117, 0, 0, 0x0B }, // 1110101 { 7, 118, 0, 0, 0x3A }, // 1110110 { 7, 119, 0, 0, 0x3B }, // 1110111 { 7, 120, 0, 0, 0x3C }, // 1111000 { 7, 121, 0, 0, 0x3D }, // 1111001 { 7, 122, 0, 0, 0x3E }, // 1111010 { 7, 123, 0, 0, 0x3F }, // 1111011 { 7, 124, 0, 0, 0x40 }, // 1111100 { 7, 125, 0, 0, 0x80 }, // 1111101 { 8, 188, 20, 1, 1365664 }, // 10111100 { 8, 189, 21, 1, 2414240 }, // 10111101 { 8, 252, 0, 0, 0x0C }, // 11111100 { 8, 253, 0, 0, 0x38 }, // 11111101 { 8, 254, 0, 0, 0x39 }, // 11111110 { 8, 255, 0, 0, 0x66 }, // 11111111 { 9, 380, 22, 1, 4511392 }, // 101111100 { 
9, 381, 23, 1, 8705696 }, // 101111101 { 9, 382, 24, 1, 17094304 }, // 101111110 { 0 } }; static INLINE BOOL zgfx_GetBits(ZGFX_CONTEXT* _zgfx, UINT32 _nbits) { if (!_zgfx) return FALSE; while (_zgfx->cBitsCurrent < _nbits) { _zgfx->BitsCurrent <<= 8; if (_zgfx->pbInputCurrent < _zgfx->pbInputEnd) _zgfx->BitsCurrent += *(_zgfx->pbInputCurrent)++; _zgfx->cBitsCurrent += 8; } _zgfx->cBitsRemaining -= _nbits; _zgfx->cBitsCurrent -= _nbits; _zgfx->bits = _zgfx->BitsCurrent >> _zgfx->cBitsCurrent; _zgfx->BitsCurrent &= ((1 << _zgfx->cBitsCurrent) - 1); return TRUE; } static void zgfx_history_buffer_ring_write(ZGFX_CONTEXT* zgfx, const BYTE* src, size_t count) { UINT32 front; if (count <= 0) return; if (count > zgfx->HistoryBufferSize) { const size_t residue = count - zgfx->HistoryBufferSize; count = zgfx->HistoryBufferSize; src += residue; zgfx->HistoryIndex = (zgfx->HistoryIndex + residue) % zgfx->HistoryBufferSize; } if (zgfx->HistoryIndex + count <= zgfx->HistoryBufferSize) { CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, count); if ((zgfx->HistoryIndex += count) == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; } else { front = zgfx->HistoryBufferSize - zgfx->HistoryIndex; CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, front); CopyMemory(zgfx->HistoryBuffer, &src[front], count - front); zgfx->HistoryIndex = count - front; } } static void zgfx_history_buffer_ring_read(ZGFX_CONTEXT* zgfx, int offset, BYTE* dst, UINT32 count) { UINT32 front; UINT32 index; UINT32 bytes; UINT32 valid; UINT32 bytesLeft; BYTE* dptr = dst; BYTE* origDst = dst; if (count <= 0) return; bytesLeft = count; index = (zgfx->HistoryIndex + zgfx->HistoryBufferSize - offset) % zgfx->HistoryBufferSize; bytes = MIN(bytesLeft, offset); if ((index + bytes) <= zgfx->HistoryBufferSize) { CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), bytes); } else { front = zgfx->HistoryBufferSize - index; CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), front); CopyMemory(&dptr[front], 
zgfx->HistoryBuffer, bytes - front); } if ((bytesLeft -= bytes) == 0) return; dptr += bytes; valid = bytes; do { bytes = valid; if (bytes > bytesLeft) bytes = bytesLeft; CopyMemory(dptr, origDst, bytes); dptr += bytes; valid <<= 1; } while ((bytesLeft -= bytes) > 0); } static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize) { BYTE c; BYTE flags; UINT32 extra = 0; int opIndex; int haveBits; int inPrefix; UINT32 count; UINT32 distance; BYTE* pbSegment; size_t cbSegment; if (!zgfx || !stream) return FALSE; cbSegment = segmentSize - 1; if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1) || (segmentSize > UINT32_MAX)) return FALSE; Stream_Read_UINT8(stream, flags); /* header (1 byte) */ zgfx->OutputCount = 0; pbSegment = Stream_Pointer(stream); Stream_Seek(stream, cbSegment); if (!(flags & PACKET_COMPRESSED)) { zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment); if (cbSegment > sizeof(zgfx->OutputBuffer)) return FALSE; CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment); zgfx->OutputCount = cbSegment; return TRUE; } zgfx->pbInputCurrent = pbSegment; zgfx->pbInputEnd = &pbSegment[cbSegment - 1]; /* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */ zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; while (zgfx->cBitsRemaining) { haveBits = 0; inPrefix = 0; for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++) { while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength) { zgfx_GetBits(zgfx, 1); inPrefix = (inPrefix << 1) + zgfx->bits; haveBits++; } if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode) { if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0) { /* Literal */ zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits); zgfx->HistoryBuffer[zgfx->HistoryIndex] = c; if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; 
if (zgfx->OutputCount >= sizeof(zgfx->OutputBuffer)) return FALSE; zgfx->OutputBuffer[zgfx->OutputCount++] = c; } else { zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); distance = ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits; if (distance != 0) { /* Match */ zgfx_GetBits(zgfx, 1); if (zgfx->bits == 0) { count = 3; } else { count = 4; extra = 2; zgfx_GetBits(zgfx, 1); while (zgfx->bits == 1) { count *= 2; extra++; zgfx_GetBits(zgfx, 1); } zgfx_GetBits(zgfx, extra); count += zgfx->bits; } if (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount) return FALSE; zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx->OutputCount += count; } else { /* Unencoded */ zgfx_GetBits(zgfx, 15); count = zgfx->bits; zgfx->cBitsRemaining -= zgfx->cBitsCurrent; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; if (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount) return FALSE; CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count); zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count); zgfx->pbInputCurrent += count; zgfx->cBitsRemaining -= (8 * count); zgfx->OutputCount += count; } } break; } } } return TRUE; } int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32 flags) { int status = -1; BYTE descriptor; wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize); if (!stream) return -1; if (Stream_GetRemainingLength(stream) < 1) goto fail; Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */ if (descriptor == ZGFX_SEGMENTED_SINGLE) { if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream))) goto fail; *ppDstData = NULL; if (zgfx->OutputCount > 0) *ppDstData = (BYTE*) malloc(zgfx->OutputCount); if (!*ppDstData) goto fail; *pDstSize = zgfx->OutputCount; CopyMemory(*ppDstData, zgfx->OutputBuffer, 
zgfx->OutputCount); } else if (descriptor == ZGFX_SEGMENTED_MULTIPART) { UINT32 segmentSize; UINT16 segmentNumber; UINT16 segmentCount; UINT32 uncompressedSize; BYTE* pConcatenated; size_t used = 0; if (Stream_GetRemainingLength(stream) < 6) goto fail; Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */ Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */ if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32)) goto fail; pConcatenated = (BYTE*) malloc(uncompressedSize); if (!pConcatenated) goto fail; *ppDstData = pConcatenated; *pDstSize = uncompressedSize; for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++) { if (Stream_GetRemainingLength(stream) < sizeof(UINT32)) goto fail; Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */ if (!zgfx_decompress_segment(zgfx, stream, segmentSize)) goto fail; if (zgfx->OutputCount > UINT32_MAX - used) goto fail; if (used + zgfx->OutputCount > uncompressedSize) goto fail; CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount); pConcatenated += zgfx->OutputCount; used += zgfx->OutputCount; } } else { goto fail; } status = 1; fail: Stream_Free(stream, FALSE); return status; } static BOOL zgfx_compress_segment(ZGFX_CONTEXT* zgfx, wStream* s, const BYTE* pSrcData, UINT32 SrcSize, UINT32* pFlags) { /* FIXME: Currently compression not implemented. 
Just copy the raw source */ if (!Stream_EnsureRemainingCapacity(s, SrcSize + 1)) { WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!"); return FALSE; } (*pFlags) |= ZGFX_PACKET_COMPR_TYPE_RDP8; /* RDP 8.0 compression format */ Stream_Write_UINT8(s, (*pFlags)); /* header (1 byte) */ Stream_Write(s, pSrcData, SrcSize); return TRUE; } int zgfx_compress_to_stream(ZGFX_CONTEXT* zgfx, wStream* sDst, const BYTE* pUncompressed, UINT32 uncompressedSize, UINT32* pFlags) { int fragment; UINT16 maxLength; UINT32 totalLength; size_t posSegmentCount = 0; const BYTE* pSrcData; int status = 0; maxLength = ZGFX_SEGMENTED_MAXSIZE; totalLength = uncompressedSize; pSrcData = pUncompressed; for (fragment = 0; (totalLength > 0) || (fragment == 0); fragment++) { UINT32 SrcSize; size_t posDstSize; size_t posDataStart; UINT32 DstSize; SrcSize = (totalLength > maxLength) ? maxLength : totalLength; posDstSize = 0; totalLength -= SrcSize; /* Ensure we have enough space for headers */ if (!Stream_EnsureRemainingCapacity(sDst, 12)) { WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!"); return -1; } if (fragment == 0) { /* First fragment */ /* descriptor (1 byte) */ Stream_Write_UINT8(sDst, (totalLength == 0) ? 
ZGFX_SEGMENTED_SINGLE : ZGFX_SEGMENTED_MULTIPART); if (totalLength > 0) { posSegmentCount = Stream_GetPosition(sDst); /* segmentCount (2 bytes) */ Stream_Seek(sDst, 2); Stream_Write_UINT32(sDst, uncompressedSize); /* uncompressedSize (4 bytes) */ } } if (fragment > 0 || totalLength > 0) { /* Multipart */ posDstSize = Stream_GetPosition(sDst); /* size (4 bytes) */ Stream_Seek(sDst, 4); } posDataStart = Stream_GetPosition(sDst); if (!zgfx_compress_segment(zgfx, sDst, pSrcData, SrcSize, pFlags)) return -1; if (posDstSize) { /* Fill segment data size */ DstSize = Stream_GetPosition(sDst) - posDataStart; Stream_SetPosition(sDst, posDstSize); Stream_Write_UINT32(sDst, DstSize); Stream_SetPosition(sDst, posDataStart + DstSize); } pSrcData += SrcSize; } Stream_SealLength(sDst); /* fill back segmentCount */ if (posSegmentCount) { Stream_SetPosition(sDst, posSegmentCount); Stream_Write_UINT16(sDst, fragment); Stream_SetPosition(sDst, Stream_Length(sDst)); } return status; } int zgfx_compress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32* pFlags) { int status; wStream* s = Stream_New(NULL, SrcSize); status = zgfx_compress_to_stream(zgfx, s, pSrcData, SrcSize, pFlags); (*ppDstData) = Stream_Buffer(s); (*pDstSize) = Stream_GetPosition(s); Stream_Free(s, FALSE); return status; } void zgfx_context_reset(ZGFX_CONTEXT* zgfx, BOOL flush) { zgfx->HistoryIndex = 0; } ZGFX_CONTEXT* zgfx_context_new(BOOL Compressor) { ZGFX_CONTEXT* zgfx; zgfx = (ZGFX_CONTEXT*) calloc(1, sizeof(ZGFX_CONTEXT)); if (zgfx) { zgfx->Compressor = Compressor; zgfx->HistoryBufferSize = sizeof(zgfx->HistoryBuffer); zgfx_context_reset(zgfx, FALSE); } return zgfx; } void zgfx_context_free(ZGFX_CONTEXT* zgfx) { free(zgfx); }
static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize) { BYTE c; BYTE flags; UINT32 extra = 0; int opIndex; int haveBits; int inPrefix; UINT32 count; UINT32 distance; BYTE* pbSegment; size_t cbSegment = segmentSize - 1; if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1)) return FALSE; Stream_Read_UINT8(stream, flags); /* header (1 byte) */ zgfx->OutputCount = 0; pbSegment = Stream_Pointer(stream); Stream_Seek(stream, cbSegment); if (!(flags & PACKET_COMPRESSED)) { zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment); CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment); zgfx->OutputCount = cbSegment; return TRUE; } zgfx->pbInputCurrent = pbSegment; zgfx->pbInputEnd = &pbSegment[cbSegment - 1]; /* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */ zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; while (zgfx->cBitsRemaining) { haveBits = 0; inPrefix = 0; for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++) { while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength) { zgfx_GetBits(zgfx, 1); inPrefix = (inPrefix << 1) + zgfx->bits; haveBits++; } if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode) { if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0) { /* Literal */ zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits); zgfx->HistoryBuffer[zgfx->HistoryIndex] = c; if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; zgfx->OutputBuffer[zgfx->OutputCount++] = c; } else { zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); distance = ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits; if (distance != 0) { /* Match */ zgfx_GetBits(zgfx, 1); if (zgfx->bits == 0) { count = 3; } else { count = 4; extra = 2; zgfx_GetBits(zgfx, 1); while (zgfx->bits == 1) { count *= 2; extra++; zgfx_GetBits(zgfx, 1); } 
zgfx_GetBits(zgfx, extra); count += zgfx->bits; } zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx->OutputCount += count; } else { /* Unencoded */ zgfx_GetBits(zgfx, 15); count = zgfx->bits; zgfx->cBitsRemaining -= zgfx->cBitsCurrent; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count); zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count); zgfx->pbInputCurrent += count; zgfx->cBitsRemaining -= (8 * count); zgfx->OutputCount += count; } } break; } } } return TRUE; }
static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize) { BYTE c; BYTE flags; UINT32 extra = 0; int opIndex; int haveBits; int inPrefix; UINT32 count; UINT32 distance; BYTE* pbSegment; size_t cbSegment; if (!zgfx || !stream) return FALSE; cbSegment = segmentSize - 1; if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1) || (segmentSize > UINT32_MAX)) return FALSE; Stream_Read_UINT8(stream, flags); /* header (1 byte) */ zgfx->OutputCount = 0; pbSegment = Stream_Pointer(stream); Stream_Seek(stream, cbSegment); if (!(flags & PACKET_COMPRESSED)) { zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment); if (cbSegment > sizeof(zgfx->OutputBuffer)) return FALSE; CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment); zgfx->OutputCount = cbSegment; return TRUE; } zgfx->pbInputCurrent = pbSegment; zgfx->pbInputEnd = &pbSegment[cbSegment - 1]; /* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */ zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; while (zgfx->cBitsRemaining) { haveBits = 0; inPrefix = 0; for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++) { while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength) { zgfx_GetBits(zgfx, 1); inPrefix = (inPrefix << 1) + zgfx->bits; haveBits++; } if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode) { if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0) { /* Literal */ zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits); zgfx->HistoryBuffer[zgfx->HistoryIndex] = c; if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; if (zgfx->OutputCount >= sizeof(zgfx->OutputBuffer)) return FALSE; zgfx->OutputBuffer[zgfx->OutputCount++] = c; } else { zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); distance = ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits; if (distance != 0) { /* Match 
*/ zgfx_GetBits(zgfx, 1); if (zgfx->bits == 0) { count = 3; } else { count = 4; extra = 2; zgfx_GetBits(zgfx, 1); while (zgfx->bits == 1) { count *= 2; extra++; zgfx_GetBits(zgfx, 1); } zgfx_GetBits(zgfx, extra); count += zgfx->bits; } if (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount) return FALSE; zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx->OutputCount += count; } else { /* Unencoded */ zgfx_GetBits(zgfx, 15); count = zgfx->bits; zgfx->cBitsRemaining -= zgfx->cBitsCurrent; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; if (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount) return FALSE; CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count); zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count); zgfx->pbInputCurrent += count; zgfx->cBitsRemaining -= (8 * count); zgfx->OutputCount += count; } } break; } } } return TRUE; }
{'added': [(140, '\treturn TRUE;'), (233, '\tsize_t cbSegment;'), (235, '\tif (!zgfx || !stream)'), (236, '\t\treturn FALSE;'), (237, ''), (238, '\tcbSegment = segmentSize - 1;'), (239, ''), (240, '\tif ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1) ||'), (241, '\t (segmentSize > UINT32_MAX))'), (252, ''), (253, '\t\tif (cbSegment > sizeof(zgfx->OutputBuffer))'), (254, '\t\t\treturn FALSE;'), (255, ''), (294, '\t\t\t\t\tif (zgfx->OutputCount >= sizeof(zgfx->OutputBuffer))'), (295, '\t\t\t\t\t\treturn FALSE;'), (296, ''), (330, '\t\t\t\t\t\tif (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount)'), (331, '\t\t\t\t\t\t\treturn FALSE;'), (332, ''), (345, ''), (346, '\t\t\t\t\t\tif (count > sizeof(zgfx->OutputBuffer) - zgfx->OutputCount)'), (347, '\t\t\t\t\t\t\treturn FALSE;'), (348, '')], 'deleted': [(232, '\tsize_t cbSegment = segmentSize - 1;'), (234, '\tif ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1))')]}
23
2
444
2,774
102
608
13
https://github.com/FreeRDP/FreeRDP
CVE-2018-8784
CWE-787
3,245
color.c
C
sycc420_to_rgb
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "opj_apps_config.h" #include "openjpeg.h" #include "color.h" #ifdef OPJ_HAVE_LIBLCMS2 #include <lcms2.h> #endif #ifdef OPJ_HAVE_LIBLCMS1 #include <lcms.h> #endif #ifdef OPJ_USE_LEGACY #define OPJ_CLRSPC_GRAY CLRSPC_GRAY #define OPJ_CLRSPC_SRGB CLRSPC_SRGB #endif /*-------------------------------------------------------- Matrix for sYCC, Amendment 1 to IEC 61966-2-1 Y : 0.299 0.587 0.114 :R Cb: -0.1687 -0.3312 0.5 :G Cr: 0.5 -0.4187 -0.0812 :B Inverse: R: 1 -3.68213e-05 1.40199 :Y G: 1.00003 -0.344125 -0.714128 :Cb - 2^(prec - 1) B: 0.999823 1.77204 -8.04142e-06 :Cr - 2^(prec - 1) -----------------------------------------------------------*/ static void sycc_to_rgb(int offset, int upb, int y, int cb, int cr, int *out_r, int *out_g, int *out_b) { int r, g, b; cb -= offset; cr -= offset; r = y + (int)(1.402 * (float)cr); if(r < 0) r = 0; else if(r > upb) r = upb; *out_r = r; g = y - (int)(0.344 * (float)cb + 0.714 * (float)cr); if(g < 0) g = 0; else if(g > upb) g = upb; *out_g = g; b = y + (int)(1.772 * (float)cb); if(b < 0) b = 0; else if(b > upb) b = upb; *out_b = b; } static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; unsigned int maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = 
d1; free(img->comps[2].data); img->comps[2].data = d2; return; fails: if(r) free(r); if(g) free(g); if(b) free(b); }/* sycc444_to_rgb() */ static void sycc422_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; unsigned int maxw, maxh, max; int offset, upb; unsigned int i, j; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i=0U; i < maxh; ++i) { for(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if (j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; #if defined(USE_JPWL) || defined(USE_MJ2) img->comps[1].w = maxw; img->comps[1].h = maxh; img->comps[2].w = maxw; img->comps[2].h = maxh; #else img->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh; img->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = (OPJ_UINT32)maxh; #endif img->comps[1].dx = img->comps[0].dx; img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[0].dy; img->comps[2].dy = img->comps[0].dy; return; fails: if(r) free(r); if(g) free(g); if(b) free(b); }/* sycc422_to_rgb() */ static void sycc420_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b, *nr, *ng, *nb; const int *y, *cb, *cr, *ny; unsigned int maxw, maxh, max; int offset, upb; unsigned int i, j; upb = (int)img->comps[0].prec; offset = 1<<(upb 
- 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i=0U; i < (maxh & ~(unsigned int)1U); i += 2U) { ny = y + maxw; nr = r + maxw; ng = g + maxw; nb = b + maxw; for(j=0; j < (maxw & ~(unsigned int)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } if(j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } y += maxw; r += maxw; g += maxw; b += maxw; } if(i < maxh) { for(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if(j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); } } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; #if defined(USE_JPWL) || defined(USE_MJ2) img->comps[1].w = maxw; img->comps[1].h = maxh; img->comps[2].w = maxw; img->comps[2].h = maxh; #else img->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh; img->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = (OPJ_UINT32)maxh; #endif img->comps[1].dx = img->comps[0].dx; img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[0].dy; img->comps[2].dy = img->comps[0].dy; return; fails: if(r) 
free(r); if(g) free(g); if(b) free(b); }/* sycc420_to_rgb() */ void color_sycc_to_rgb(opj_image_t *img) { if(img->numcomps < 3) { img->color_space = OPJ_CLRSPC_GRAY; return; } if((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 2) && (img->comps[2].dy == 2))/* horizontal and vertical sub-sample */ { sycc420_to_rgb(img); } else if((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1))/* horizontal sub-sample only */ { sycc422_to_rgb(img); } else if((img->comps[0].dx == 1) && (img->comps[1].dx == 1) && (img->comps[2].dx == 1) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1))/* no sub-sample */ { sycc444_to_rgb(img); } else { fprintf(stderr,"%s:%d:color_sycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__,__LINE__); return; } img->color_space = OPJ_CLRSPC_SRGB; }/* color_sycc_to_rgb() */ #if defined(OPJ_HAVE_LIBLCMS2) || defined(OPJ_HAVE_LIBLCMS1) #ifdef OPJ_HAVE_LIBLCMS1 /* Bob Friesenhahn proposed:*/ #define cmsSigXYZData icSigXYZData #define cmsSigLabData icSigLabData #define cmsSigCmykData icSigCmykData #define cmsSigYCbCrData icSigYCbCrData #define cmsSigLuvData icSigLuvData #define cmsSigGrayData icSigGrayData #define cmsSigRgbData icSigRgbData #define cmsUInt32Number DWORD #define cmsColorSpaceSignature icColorSpaceSignature #define cmsGetHeaderRenderingIntent cmsTakeRenderingIntent #endif /* OPJ_HAVE_LIBLCMS1 */ /*#define DEBUG_PROFILE*/ void color_apply_icc_profile(opj_image_t *image) { cmsHPROFILE in_prof, out_prof; cmsHTRANSFORM transform; cmsColorSpaceSignature in_space, out_space; cmsUInt32Number intent, in_type, out_type; int *r, *g, *b; size_t nr_samples; int prec, i, max, max_w, max_h, ok = 0; OPJ_COLOR_SPACE new_space; in_prof = cmsOpenProfileFromMem(image->icc_profile_buf, image->icc_profile_len); #ifdef DEBUG_PROFILE FILE *icm = 
fopen("debug.icm","wb"); fwrite( image->icc_profile_buf,1, image->icc_profile_len,icm); fclose(icm); #endif if(in_prof == NULL) return; in_space = cmsGetPCS(in_prof); out_space = cmsGetColorSpace(in_prof); intent = cmsGetHeaderRenderingIntent(in_prof); max_w = (int)image->comps[0].w; max_h = (int)image->comps[0].h; prec = (int)image->comps[0].prec; if(out_space == cmsSigRgbData) /* enumCS 16 */ { if( prec <= 8 ) { in_type = TYPE_RGB_8; out_type = TYPE_RGB_8; } else { in_type = TYPE_RGB_16; out_type = TYPE_RGB_16; } out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if(out_space == cmsSigGrayData) /* enumCS 17 */ { in_type = TYPE_GRAY_8; out_type = TYPE_RGB_8; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if(out_space == cmsSigYCbCrData) /* enumCS 18 */ { in_type = TYPE_YCbCr_16; out_type = TYPE_RGB_16; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else { #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d: color_apply_icc_profile\n\tICC Profile has unknown " "output colorspace(%#x)(%c%c%c%c)\n\tICC Profile ignored.\n", __FILE__,__LINE__,out_space, (out_space>>24) & 0xff,(out_space>>16) & 0xff, (out_space>>8) & 0xff, out_space & 0xff); #endif cmsCloseProfile(in_prof); return; } if(out_prof == NULL) { cmsCloseProfile(in_prof); return; } #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tchannels(%d) prec(%d) w(%d) h(%d)" "\n\tprofile: in(%p) out(%p)\n",__FILE__,__LINE__,image->numcomps,prec, max_w,max_h, (void*)in_prof,(void*)out_prof); fprintf(stderr,"\trender_intent (%u)\n\t" "color_space: in(%#x)(%c%c%c%c) out:(%#x)(%c%c%c%c)\n\t" " type: in(%u) out:(%u)\n", intent, in_space, (in_space>>24) & 0xff,(in_space>>16) & 0xff, (in_space>>8) & 0xff, in_space & 0xff, out_space, (out_space>>24) & 0xff,(out_space>>16) & 0xff, (out_space>>8) & 0xff, out_space & 0xff, in_type,out_type ); #else (void)prec; (void)in_space; #endif /* DEBUG_PROFILE */ transform = cmsCreateTransform(in_prof, in_type, 
out_prof, out_type, intent, 0); #ifdef OPJ_HAVE_LIBLCMS2 /* Possible for: LCMS_VERSION >= 2000 :*/ cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if(transform == NULL) { #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tcmsCreateTransform failed. " "ICC Profile ignored.\n",__FILE__,__LINE__); #endif #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif return; } if(image->numcomps > 2)/* RGB, RGBA */ { if( prec <= 8 ) { unsigned char *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned char)); in = inbuf = (unsigned char*)malloc(nr_samples); out = outbuf = (unsigned char*)malloc(nr_samples); if(inbuf == NULL || outbuf == NULL) goto fails0; r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *in++ = (unsigned char)*r++; *in++ = (unsigned char)*g++; *in++ = (unsigned char)*b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } ok = 1; fails0: if(inbuf) free(inbuf); if(outbuf) free(outbuf); } else /* prec > 8 */ { unsigned short *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned short)); in = inbuf = (unsigned short*)malloc(nr_samples); out = outbuf = (unsigned short*)malloc(nr_samples); if(inbuf == NULL || outbuf == NULL) goto fails1; r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *in++ = (unsigned short)*r++; *in++ = (unsigned short)*g++; *in++ = (unsigned short)*b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } ok = 1; fails1: if(inbuf) 
free(inbuf); if(outbuf) free(outbuf); } } else /* image->numcomps <= 2 : GRAY, GRAYA */ { if(prec <= 8) { unsigned char *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned char)); in = inbuf = (unsigned char*)malloc(nr_samples); out = outbuf = (unsigned char*)malloc(nr_samples); g = (int*)calloc((size_t)max, sizeof(int)); b = (int*)calloc((size_t)max, sizeof(int)); if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails2; new_comps = (opj_image_comp_t*) realloc(image->comps, (image->numcomps+2)*sizeof(opj_image_comp_t)); if(new_comps == NULL) goto fails2; image->comps = new_comps; if(image->numcomps == 2) image->comps[3] = image->comps[1]; image->comps[1] = image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for(i = 0; i < max; ++i) { *in++ = (unsigned char)*r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } r = g = b = NULL; ok = 1; fails2: if(inbuf) free(inbuf); if(outbuf) free(outbuf); if(g) free(g); if(b) free(b); } else /* prec > 8 */ { unsigned short *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned short)); in = inbuf = (unsigned short*)malloc(nr_samples); out = outbuf = (unsigned short*)malloc(nr_samples); g = (int*)calloc((size_t)max, sizeof(int)); b = (int*)calloc((size_t)max, sizeof(int)); if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails3; new_comps = (opj_image_comp_t*) realloc(image->comps, (image->numcomps+2)*sizeof(opj_image_comp_t)); if(new_comps == NULL) goto fails3; image->comps = new_comps; if(image->numcomps == 2) image->comps[3] = image->comps[1]; image->comps[1] = 
image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for(i = 0; i < max; ++i) { *in++ = (unsigned short)*r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } r = g = b = NULL; ok = 1; fails3: if(inbuf) free(inbuf); if(outbuf) free(outbuf); if(g) free(g); if(b) free(b); } }/* if(image->numcomps > 2) */ cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if(ok) { image->color_space = new_space; } }/* color_apply_icc_profile() */ void color_cielab_to_rgb(opj_image_t *image) { int *row; int enumcs, numcomps; OPJ_COLOR_SPACE new_space; numcomps = (int)image->numcomps; if(numcomps != 3) { fprintf(stderr,"%s:%d:\n\tnumcomps %d not handled. Quitting.\n", __FILE__,__LINE__,numcomps); return; } row = (int*)image->icc_profile_buf; enumcs = row[0]; if(enumcs == 14) /* CIELab */ { int *L, *a, *b, *red, *green, *blue; int *src0, *src1, *src2, *dst0, *dst1, *dst2; double rl, ol, ra, oa, rb, ob, prec0, prec1, prec2; double minL, maxL, mina, maxa, minb, maxb; unsigned int default_type; unsigned int i, max; cmsHPROFILE in, out; cmsHTRANSFORM transform; cmsUInt16Number RGB[3]; cmsCIELab Lab; in = cmsCreateLab4Profile(NULL); if(in == NULL){ return; } out = cmsCreate_sRGBProfile(); if(out == NULL){ cmsCloseProfile(in); return; } transform = cmsCreateTransform(in, TYPE_Lab_DBL, out, TYPE_RGB_16, INTENT_PERCEPTUAL, 0); #ifdef OPJ_HAVE_LIBLCMS2 cmsCloseProfile(in); cmsCloseProfile(out); #endif if(transform == NULL) { #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif return; } new_space = OPJ_CLRSPC_SRGB; prec0 = (double)image->comps[0].prec; prec1 = (double)image->comps[1].prec; prec2 = (double)image->comps[2].prec; 
default_type = (unsigned int)row[1]; if(default_type == 0x44454600)/* DEF : default */ { rl = 100; ra = 170; rb = 200; ol = 0; oa = pow(2, prec1 - 1); ob = pow(2, prec2 - 2) + pow(2, prec2 - 3); } else { rl = row[2]; ra = row[4]; rb = row[6]; ol = row[3]; oa = row[5]; ob = row[7]; } L = src0 = image->comps[0].data; a = src1 = image->comps[1].data; b = src2 = image->comps[2].data; max = image->comps[0].w * image->comps[0].h; red = dst0 = (int*)malloc(max * sizeof(int)); green = dst1 = (int*)malloc(max * sizeof(int)); blue = dst2 = (int*)malloc(max * sizeof(int)); if(red == NULL || green == NULL || blue == NULL) goto fails; minL = -(rl * ol)/(pow(2, prec0)-1); maxL = minL + rl; mina = -(ra * oa)/(pow(2, prec1)-1); maxa = mina + ra; minb = -(rb * ob)/(pow(2, prec2)-1); maxb = minb + rb; for(i = 0; i < max; ++i) { Lab.L = minL + (double)(*L) * (maxL - minL)/(pow(2, prec0)-1); ++L; Lab.a = mina + (double)(*a) * (maxa - mina)/(pow(2, prec1)-1); ++a; Lab.b = minb + (double)(*b) * (maxb - minb)/(pow(2, prec2)-1); ++b; cmsDoTransform(transform, &Lab, RGB, 1); *red++ = RGB[0]; *green++ = RGB[1]; *blue++ = RGB[2]; } cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif free(src0); image->comps[0].data = dst0; free(src1); image->comps[1].data = dst1; free(src2); image->comps[2].data = dst2; image->color_space = new_space; image->comps[0].prec = 16; image->comps[1].prec = 16; image->comps[2].prec = 16; return; fails: cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif if(red) free(red); if(green) free(green); if(blue) free(blue); return; } fprintf(stderr,"%s:%d:\n\tenumCS %d not handled. 
Ignoring.\n", __FILE__,__LINE__, enumcs); }/* color_cielab_to_rgb() */ #endif /* OPJ_HAVE_LIBLCMS2 || OPJ_HAVE_LIBLCMS1 */ void color_cmyk_to_rgb(opj_image_t *image) { float C, M, Y, K; float sC, sM, sY, sK; unsigned int w, h, max, i; w = image->comps[0].w; h = image->comps[0].h; if(image->numcomps < 4) return; max = w * h; sC = 1.0F / (float)((1 << image->comps[0].prec) - 1); sM = 1.0F / (float)((1 << image->comps[1].prec) - 1); sY = 1.0F / (float)((1 << image->comps[2].prec) - 1); sK = 1.0F / (float)((1 << image->comps[3].prec) - 1); for(i = 0; i < max; ++i) { /* CMYK values from 0 to 1 */ C = (float)(image->comps[0].data[i]) * sC; M = (float)(image->comps[1].data[i]) * sM; Y = (float)(image->comps[2].data[i]) * sY; K = (float)(image->comps[3].data[i]) * sK; /* Invert all CMYK values */ C = 1.0F - C; M = 1.0F - M; Y = 1.0F - Y; K = 1.0F - K; /* CMYK -> RGB : RGB results from 0 to 255 */ image->comps[0].data[i] = (int)(255.0F * C * K); /* R */ image->comps[1].data[i] = (int)(255.0F * M * K); /* G */ image->comps[2].data[i] = (int)(255.0F * Y * K); /* B */ } free(image->comps[3].data); image->comps[3].data = NULL; image->comps[0].prec = 8; image->comps[1].prec = 8; image->comps[2].prec = 8; image->numcomps -= 1; image->color_space = OPJ_CLRSPC_SRGB; for (i = 3; i < image->numcomps; ++i) { memcpy(&(image->comps[i]), &(image->comps[i+1]), sizeof(image->comps[i])); } }/* color_cmyk_to_rgb() */ /* * This code has been adopted from sjpx_openjpeg.c of ghostscript */ void color_esycc_to_rgb(opj_image_t *image) { int y, cb, cr, sign1, sign2, val; unsigned int w, h, max, i; int flip_value = (1 << (image->comps[0].prec-1)); int max_value = (1 << image->comps[0].prec) - 1; if ( (image->numcomps < 3) || (image->comps[0].dx != image->comps[1].dx) || (image->comps[0].dx != image->comps[2].dx) || (image->comps[0].dy != image->comps[1].dy) || (image->comps[0].dy != image->comps[2].dy) ) { fprintf(stderr,"%s:%d:color_esycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__,__LINE__); return; } 
w = image->comps[0].w; h = image->comps[0].h; sign1 = (int)image->comps[1].sgnd; sign2 = (int)image->comps[2].sgnd; max = w * h; for(i = 0; i < max; ++i) { y = image->comps[0].data[i]; cb = image->comps[1].data[i]; cr = image->comps[2].data[i]; if( !sign1) cb -= flip_value; if( !sign2) cr -= flip_value; val = (int) ((float)y - (float)0.0000368 * (float)cb + (float)1.40199 * (float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[0].data[i] = val; val = (int) ((float)1.0003 * (float)y - (float)0.344125 * (float)cb - (float)0.7141128 * (float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[1].data[i] = val; val = (int) ((float)0.999823 * (float)y + (float)1.77204 * (float)cb - (float)0.000008 *(float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[2].data[i] = val; } image->color_space = OPJ_CLRSPC_SRGB; }/* color_esycc_to_rgb() */
/*
 * The copyright in this software is being made available under the 2-clauses
 * BSD License, included below. This software may be subject to other third
 * party and contributor rights, including patent rights, and no such rights
 * are granted under this license.
 *
 * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
 * Copyright (c) 2002-2014, Professor Benoit Macq
 * Copyright (c) 2001-2003, David Janssens
 * Copyright (c) 2002-2003, Yannick Verschueren
 * Copyright (c) 2003-2007, Francois-Olivier Devaux
 * Copyright (c) 2003-2014, Antonin Descampe
 * Copyright (c) 2005, Herve Drolon, FreeImage Team
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>

#include "opj_apps_config.h"
#include "openjpeg.h"
#include "color.h"

#ifdef OPJ_HAVE_LIBLCMS2
#include <lcms2.h>
#endif
#ifdef OPJ_HAVE_LIBLCMS1
#include <lcms.h>
#endif

#ifdef OPJ_USE_LEGACY
#define OPJ_CLRSPC_GRAY CLRSPC_GRAY
#define OPJ_CLRSPC_SRGB CLRSPC_SRGB
#endif

/*--------------------------------------------------------
Matrix for sYCC, Amendment 1 to IEC 61966-2-1

Y :   0.299    0.587    0.114   :R
Cb:  -0.1687  -0.3312   0.5     :G
Cr:   0.5     -0.4187  -0.0812  :B

Inverse:

R: 1        -3.68213e-05    1.40199      :Y
G: 1.00003  -0.344125      -0.714128     :Cb - 2^(prec - 1)
B: 0.999823  1.77204       -8.04142e-06  :Cr - 2^(prec - 1)

-----------------------------------------------------------*/
/* Convert one sYCC sample to RGB, clamping each channel to [0, upb].
 * offset is 2^(prec-1) (the chroma bias), upb is 2^prec - 1. */
static void sycc_to_rgb(int offset, int upb, int y, int cb, int cr,
	int *out_r, int *out_g, int *out_b)
{
	int r, g, b;

	cb -= offset; cr -= offset;
	r = y + (int)(1.402 * (float)cr);
	if(r < 0) r = 0; else if(r > upb) r = upb; *out_r = r;

	g = y - (int)(0.344 * (float)cb + 0.714 * (float)cr);
	if(g < 0) g = 0; else if(g > upb) g = upb; *out_g = g;

	b = y + (int)(1.772 * (float)cb);
	if(b < 0) b = 0; else if(b > upb) b = upb; *out_b = b;
}

/* 4:4:4 sYCC -> RGB: one chroma pair per luma sample; converts in place,
 * replacing each component's data buffer. On allocation failure the image
 * is left untouched. */
static void sycc444_to_rgb(opj_image_t *img)
{
	int *d0, *d1, *d2, *r, *g, *b;
	const int *y, *cb, *cr;
	size_t maxw, maxh, max, i;
	int offset, upb;

	upb = (int)img->comps[0].prec;
	offset = 1<<(upb - 1); upb = (1<<upb)-1;

	maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;
	max = maxw * maxh;

	y = img->comps[0].data;
	cb = img->comps[1].data;
	cr = img->comps[2].data;

	d0 = r = (int*)malloc(sizeof(int) * max);
	d1 = g = (int*)malloc(sizeof(int) * max);
	d2 = b = (int*)malloc(sizeof(int) * max);

	if(r == NULL || g == NULL || b == NULL) goto fails;

	for(i = 0U; i < max; ++i)
	{
		sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
		++y; ++cb; ++cr; ++r; ++g; ++b;
	}
	free(img->comps[0].data); img->comps[0].data = d0;
	free(img->comps[1].data); img->comps[1].data = d1;
	free(img->comps[2].data); img->comps[2].data = d2;
	img->color_space = OPJ_CLRSPC_SRGB;
	return;

fails:
	free(r);
	free(g);
	free(b);
}/* sycc444_to_rgb() */

/* 4:2:2 sYCC -> RGB: chroma is horizontally subsampled by 2. If img->x0 is
 * odd the first column of each row uses Cb/Cr = 0 so that luma and chroma
 * stay aligned (offx handling). Upsamples chroma and rewrites all three
 * component buffers at full resolution. */
static void sycc422_to_rgb(opj_image_t *img)
{
	int *d0, *d1, *d2, *r, *g, *b;
	const int *y, *cb, *cr;
	size_t maxw, maxh, max, offx, loopmaxw;
	int offset, upb;
	size_t i;

	upb = (int)img->comps[0].prec;
	offset = 1<<(upb - 1); upb = (1<<upb)-1;

	maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;
	max = maxw * maxh;

	y = img->comps[0].data;
	cb = img->comps[1].data;
	cr = img->comps[2].data;

	d0 = r = (int*)malloc(sizeof(int) * max);
	d1 = g = (int*)malloc(sizeof(int) * max);
	d2 = b = (int*)malloc(sizeof(int) * max);

	if(r == NULL || g == NULL || b == NULL) goto fails;

	/* if img->x0 is odd, then first column shall use Cb/Cr = 0 */
	offx = img->x0 & 1U;
	loopmaxw = maxw - offx;

	for(i=0U; i < maxh; ++i)
	{
		size_t j;

		if (offx > 0U) {
			sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);
			++y; ++r; ++g; ++b;
		}

		/* two luma samples share one chroma pair */
		for(j=0U; j < (loopmaxw & ~(size_t)1U); j += 2U)
		{
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b; ++cb; ++cr;
		}
		if (j < loopmaxw) {
			/* odd trailing column */
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b; ++cb; ++cr;
		}
	}

	free(img->comps[0].data); img->comps[0].data = d0;
	free(img->comps[1].data); img->comps[1].data = d1;
	free(img->comps[2].data); img->comps[2].data = d2;

	/* chroma components are now full resolution */
	img->comps[1].w = img->comps[2].w = img->comps[0].w;
	img->comps[1].h = img->comps[2].h = img->comps[0].h;
	img->comps[1].dx = img->comps[2].dx = img->comps[0].dx;
	img->comps[1].dy = img->comps[2].dy = img->comps[0].dy;
	img->color_space = OPJ_CLRSPC_SRGB;
	return;

fails:
	free(r);
	free(g);
	free(b);
}/* sycc422_to_rgb() */

/* 4:2:0 sYCC -> RGB: chroma subsampled by 2 in both directions. A 2x2 luma
 * block shares one chroma pair. Odd image origin (img->x0/img->y0) is
 * compensated by emitting the first column/row with Cb/Cr = 0 (offx/offy),
 * which keeps the chroma pointers inside their half-resolution buffers. */
static void sycc420_to_rgb(opj_image_t *img)
{
	int *d0, *d1, *d2, *r, *g, *b, *nr, *ng, *nb;
	const int *y, *cb, *cr, *ny;
	size_t maxw, maxh, max, offx, loopmaxw, offy, loopmaxh;
	int offset, upb;
	size_t i;

	upb = (int)img->comps[0].prec;
	offset = 1<<(upb - 1); upb = (1<<upb)-1;

	maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;
	max = maxw * maxh;

	y = img->comps[0].data;
	cb = img->comps[1].data;
	cr = img->comps[2].data;

	d0 = r = (int*)malloc(sizeof(int) * max);
	d1 = g = (int*)malloc(sizeof(int) * max);
	d2 = b = (int*)malloc(sizeof(int) * max);

	if (r == NULL || g == NULL || b == NULL) goto fails;

	/* if img->x0 is odd, then first column shall use Cb/Cr = 0 */
	offx = img->x0 & 1U;
	loopmaxw = maxw - offx;
	/* if img->y0 is odd, then first line shall use Cb/Cr = 0 */
	offy = img->y0 & 1U;
	loopmaxh = maxh - offy;

	if (offy > 0U) {
		size_t j;

		for(j=0; j < maxw; ++j)
		{
			sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);
			++y; ++r; ++g; ++b;
		}
	}

	/* process rows in pairs; ny/nr/ng/nb track the second row of the pair */
	for(i=0U; i < (loopmaxh & ~(size_t)1U); i += 2U)
	{
		size_t j;

		ny = y + maxw;
		nr = r + maxw; ng = g + maxw; nb = b + maxw;

		if (offx > 0U) {
			sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb;
		}

		for(j=0; j < (loopmaxw & ~(size_t)1U); j += 2U)
		{
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb; ++cb; ++cr;
		}
		if(j < loopmaxw)
		{
			/* odd trailing column */
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb; ++cb; ++cr;
		}
		/* skip the second row of the pair, already written via ny/nr/ng/nb */
		y += maxw; r += maxw; g += maxw; b += maxw;
	}
	if(i < loopmaxh)
	{
		/* odd trailing row */
		size_t j;

		for(j=0U; j < (maxw & ~(size_t)1U); j += 2U)
		{
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b; ++cb; ++cr;
		}
		if(j < maxw)
		{
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
		}
	}

	free(img->comps[0].data); img->comps[0].data = d0;
	free(img->comps[1].data); img->comps[1].data = d1;
	free(img->comps[2].data); img->comps[2].data = d2;

	/* chroma components are now full resolution */
	img->comps[1].w = img->comps[2].w = img->comps[0].w;
	img->comps[1].h = img->comps[2].h = img->comps[0].h;
	img->comps[1].dx = img->comps[2].dx = img->comps[0].dx;
	img->comps[1].dy = img->comps[2].dy = img->comps[0].dy;
	img->color_space = OPJ_CLRSPC_SRGB;
	return;

fails:
	free(r);
	free(g);
	free(b);
}/* sycc420_to_rgb() */

/* Dispatch sYCC -> RGB conversion based on the chroma subsampling factors
 * (dx/dy) of the three components. Fewer than 3 components is treated as
 * grayscale; unrecognized subsampling patterns are reported and left as-is. */
void color_sycc_to_rgb(opj_image_t *img)
{
	if(img->numcomps < 3)
	{
		img->color_space = OPJ_CLRSPC_GRAY;
		return;
	}

	if((img->comps[0].dx == 1)
	&& (img->comps[1].dx == 2)
	&& (img->comps[2].dx == 2)
	&& (img->comps[0].dy == 1)
	&& (img->comps[1].dy == 2)
	&& (img->comps[2].dy == 2))/* horizontal and vertical sub-sample */
	{
		sycc420_to_rgb(img);
	}
	else
	if((img->comps[0].dx == 1)
	&& (img->comps[1].dx == 2)
	&& (img->comps[2].dx == 2)
	&& (img->comps[0].dy == 1)
	&& (img->comps[1].dy == 1)
	&& (img->comps[2].dy == 1))/* horizontal sub-sample only */
	{
		sycc422_to_rgb(img);
	}
	else
	if((img->comps[0].dx == 1)
	&& (img->comps[1].dx == 1)
	&& (img->comps[2].dx == 1)
	&& (img->comps[0].dy == 1)
	&& (img->comps[1].dy == 1)
	&& (img->comps[2].dy == 1))/* no sub-sample */
	{
		sycc444_to_rgb(img);
	}
	else
	{
		fprintf(stderr,"%s:%d:color_sycc_to_rgb\n\tCAN NOT CONVERT\n",
		 __FILE__,__LINE__);
		return;
	}
}/* color_sycc_to_rgb() */

#if defined(OPJ_HAVE_LIBLCMS2) || defined(OPJ_HAVE_LIBLCMS1)

#ifdef OPJ_HAVE_LIBLCMS1
/* Bob Friesenhahn proposed:*/
/* Map lcms2 names onto the lcms1 API so the code below compiles with both. */
#define cmsSigXYZData   icSigXYZData
#define cmsSigLabData   icSigLabData
#define cmsSigCmykData  icSigCmykData
#define cmsSigYCbCrData icSigYCbCrData
#define cmsSigLuvData   icSigLuvData
#define cmsSigGrayData  icSigGrayData
#define cmsSigRgbData   icSigRgbData
#define cmsUInt32Number DWORD

#define cmsColorSpaceSignature icColorSpaceSignature
#define cmsGetHeaderRenderingIntent cmsTakeRenderingIntent

#endif /* OPJ_HAVE_LIBLCMS1 */

/*#define DEBUG_PROFILE*/
/* Apply the image's embedded ICC profile via LittleCMS, converting pixel
 * data to sRGB in place. Handles RGB(A) and GRAY(A) inputs at <=8 or >8 bit
 * precision; grayscale inputs are expanded to 3 components (alpha, if any,
 * is moved to comps[3]). On any failure the image is left unchanged.
 * NOTE(review): profile handle close ordering differs between lcms1 and
 * lcms2 — lcms2 allows closing profiles right after cmsCreateTransform. */
void color_apply_icc_profile(opj_image_t *image)
{
	cmsHPROFILE in_prof, out_prof;
	cmsHTRANSFORM transform;
	cmsColorSpaceSignature in_space, out_space;
	cmsUInt32Number intent, in_type, out_type;
	int *r, *g, *b;
	size_t nr_samples;
	int prec, i, max, max_w, max_h, ok = 0;
	OPJ_COLOR_SPACE new_space;

	in_prof = cmsOpenProfileFromMem(image->icc_profile_buf, image->icc_profile_len);
#ifdef DEBUG_PROFILE
	FILE *icm = fopen("debug.icm","wb");
	fwrite( image->icc_profile_buf,1, image->icc_profile_len,icm);
	fclose(icm);
#endif

	if(in_prof == NULL) return;

	in_space = cmsGetPCS(in_prof);
	out_space = cmsGetColorSpace(in_prof);
	intent = cmsGetHeaderRenderingIntent(in_prof);

	max_w = (int)image->comps[0].w;
	max_h = (int)image->comps[0].h;
	prec = (int)image->comps[0].prec;

	if(out_space == cmsSigRgbData) /* enumCS 16 */
	{
		if( prec <= 8 )
		{
			in_type = TYPE_RGB_8;
			out_type = TYPE_RGB_8;
		}
		else
		{
			in_type = TYPE_RGB_16;
			out_type = TYPE_RGB_16;
		}
		out_prof = cmsCreate_sRGBProfile();
		new_space = OPJ_CLRSPC_SRGB;
	}
	else
	if(out_space == cmsSigGrayData) /* enumCS 17 */
	{
		in_type = TYPE_GRAY_8;
		out_type = TYPE_RGB_8;
		out_prof = cmsCreate_sRGBProfile();
		new_space = OPJ_CLRSPC_SRGB;
	}
	else
	if(out_space == cmsSigYCbCrData) /* enumCS 18 */
	{
		in_type = TYPE_YCbCr_16;
		out_type = TYPE_RGB_16;
		out_prof = cmsCreate_sRGBProfile();
		new_space = OPJ_CLRSPC_SRGB;
	}
	else
	{
#ifdef DEBUG_PROFILE
		fprintf(stderr,"%s:%d: color_apply_icc_profile\n\tICC Profile has unknown "
		 "output colorspace(%#x)(%c%c%c%c)\n\tICC Profile ignored.\n",
		 __FILE__,__LINE__,out_space,
		 (out_space>>24) & 0xff,(out_space>>16) & 0xff,
		 (out_space>>8) & 0xff, out_space & 0xff);
#endif
		cmsCloseProfile(in_prof);
		return;
	}
	if(out_prof == NULL)
	{
		cmsCloseProfile(in_prof);
		return;
	}

#ifdef DEBUG_PROFILE
	fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tchannels(%d) prec(%d) w(%d) h(%d)"
	 "\n\tprofile: in(%p) out(%p)\n",__FILE__,__LINE__,image->numcomps,prec,
	 max_w,max_h, (void*)in_prof,(void*)out_prof);

	fprintf(stderr,"\trender_intent (%u)\n\t"
	 "color_space: in(%#x)(%c%c%c%c)   out:(%#x)(%c%c%c%c)\n\t"
	 "       type: in(%u)              out:(%u)\n",
	 intent,
	 in_space,
	 (in_space>>24) & 0xff,(in_space>>16) & 0xff,
	 (in_space>>8) & 0xff, in_space & 0xff,

	 out_space,
	 (out_space>>24) & 0xff,(out_space>>16) & 0xff,
	 (out_space>>8) & 0xff, out_space & 0xff,

	 in_type,out_type
	 );
#else
	(void)prec;
	(void)in_space;
#endif /* DEBUG_PROFILE */

	transform = cmsCreateTransform(in_prof, in_type, out_prof, out_type, intent, 0);

#ifdef OPJ_HAVE_LIBLCMS2
/* Possible for: LCMS_VERSION >= 2000 :*/
	cmsCloseProfile(in_prof);
	cmsCloseProfile(out_prof);
#endif

	if(transform == NULL)
	{
#ifdef DEBUG_PROFILE
		fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tcmsCreateTransform failed. "
		 "ICC Profile ignored.\n",__FILE__,__LINE__);
#endif

#ifdef OPJ_HAVE_LIBLCMS1
		cmsCloseProfile(in_prof);
		cmsCloseProfile(out_prof);
#endif
		return;
	}

	if(image->numcomps > 2)/* RGB, RGBA */
	{
		if( prec <= 8 )
		{
			unsigned char *inbuf, *outbuf, *in, *out;

			max = max_w * max_h;
			nr_samples = (size_t)(max * 3 * sizeof(unsigned char));
			in = inbuf = (unsigned char*)malloc(nr_samples);
			out = outbuf = (unsigned char*)malloc(nr_samples);

			if(inbuf == NULL || outbuf == NULL) goto fails0;

			r = image->comps[0].data;
			g = image->comps[1].data;
			b = image->comps[2].data;

			/* interleave planar components into the lcms pixel buffer */
			for(i = 0; i < max; ++i)
			{
				*in++ = (unsigned char)*r++;
				*in++ = (unsigned char)*g++;
				*in++ = (unsigned char)*b++;
			}

			cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max);

			r = image->comps[0].data;
			g = image->comps[1].data;
			b = image->comps[2].data;

			/* de-interleave the transformed pixels back into the planes */
			for(i = 0; i < max; ++i)
			{
				*r++ = (int)*out++;
				*g++ = (int)*out++;
				*b++ = (int)*out++;
			}
			ok = 1;

fails0:
			if(inbuf) free(inbuf);
			if(outbuf) free(outbuf);
		}
		else /* prec > 8 */
		{
			unsigned short *inbuf, *outbuf, *in, *out;

			max = max_w * max_h;
			nr_samples = (size_t)(max * 3 * sizeof(unsigned short));
			in = inbuf = (unsigned short*)malloc(nr_samples);
			out = outbuf = (unsigned short*)malloc(nr_samples);

			if(inbuf == NULL || outbuf == NULL) goto fails1;

			r = image->comps[0].data;
			g = image->comps[1].data;
			b = image->comps[2].data;

			for(i = 0; i < max; ++i)
			{
				*in++ = (unsigned short)*r++;
				*in++ = (unsigned short)*g++;
				*in++ = (unsigned short)*b++;
			}

			cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max);

			r = image->comps[0].data;
			g = image->comps[1].data;
			b = image->comps[2].data;

			for(i = 0; i < max; ++i)
			{
				*r++ = (int)*out++;
				*g++ = (int)*out++;
				*b++ = (int)*out++;
			}
			ok = 1;

fails1:
			if(inbuf) free(inbuf);
			if(outbuf) free(outbuf);
		}
	}
	else /* image->numcomps <= 2 : GRAY, GRAYA */
	{
		if(prec <= 8)
		{
			unsigned char *in, *inbuf, *out, *outbuf;
			opj_image_comp_t *new_comps;

			max = max_w * max_h;
			nr_samples = (size_t)(max * 3 * sizeof(unsigned char));
			in = inbuf = (unsigned char*)malloc(nr_samples);
			out = outbuf = (unsigned char*)malloc(nr_samples);
			g = (int*)calloc((size_t)max, sizeof(int));
			b = (int*)calloc((size_t)max, sizeof(int));

			if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails2;

			/* grow the component array to hold the expanded G and B planes
			 * (and the alpha plane shifted to index 3 when present) */
			new_comps = (opj_image_comp_t*)realloc(image->comps,
			 (image->numcomps+2)*sizeof(opj_image_comp_t));

			if(new_comps == NULL) goto fails2;

			image->comps = new_comps;

			if(image->numcomps == 2)
				image->comps[3] = image->comps[1];

			image->comps[1] = image->comps[0];
			image->comps[2] = image->comps[0];

			image->comps[1].data = g;
			image->comps[2].data = b;

			image->numcomps += 2;

			r = image->comps[0].data;

			for(i = 0; i < max; ++i)
			{
				*in++ = (unsigned char)*r++;
			}
			cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max);

			r = image->comps[0].data;
			g = image->comps[1].data;
			b = image->comps[2].data;

			for(i = 0; i < max; ++i)
			{
				*r++ = (int)*out++;
				*g++ = (int)*out++;
				*b++ = (int)*out++;
			}
			/* g/b are now owned by image->comps; prevent the cleanup frees */
			r = g = b = NULL; ok = 1;

fails2:
			if(inbuf) free(inbuf);
			if(outbuf) free(outbuf);
			if(g) free(g);
			if(b) free(b);
		}
		else /* prec > 8 */
		{
			unsigned short *in, *inbuf, *out, *outbuf;
			opj_image_comp_t *new_comps;

			max = max_w * max_h;
			nr_samples = (size_t)(max * 3 * sizeof(unsigned short));
			in = inbuf = (unsigned short*)malloc(nr_samples);
			out = outbuf = (unsigned short*)malloc(nr_samples);
			g = (int*)calloc((size_t)max, sizeof(int));
			b = (int*)calloc((size_t)max, sizeof(int));

			if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails3;

			new_comps = (opj_image_comp_t*)realloc(image->comps,
			 (image->numcomps+2)*sizeof(opj_image_comp_t));

			if(new_comps == NULL) goto fails3;

			image->comps = new_comps;

			if(image->numcomps == 2)
				image->comps[3] = image->comps[1];

			image->comps[1] = image->comps[0];
			image->comps[2] = image->comps[0];

			image->comps[1].data = g;
			image->comps[2].data = b;

			image->numcomps += 2;

			r = image->comps[0].data;

			for(i = 0; i < max; ++i)
			{
				*in++ = (unsigned short)*r++;
			}
			cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max);

			r = image->comps[0].data;
			g = image->comps[1].data;
			b = image->comps[2].data;

			for(i = 0; i < max; ++i)
			{
				*r++ = (int)*out++;
				*g++ = (int)*out++;
				*b++ = (int)*out++;
			}
			/* g/b are now owned by image->comps; prevent the cleanup frees */
			r = g = b = NULL; ok = 1;

fails3:
			if(inbuf) free(inbuf);
			if(outbuf) free(outbuf);
			if(g) free(g);
			if(b) free(b);
		}
	}/* if(image->numcomps > 2) */

	cmsDeleteTransform(transform);

#ifdef OPJ_HAVE_LIBLCMS1
	cmsCloseProfile(in_prof);
	cmsCloseProfile(out_prof);
#endif

	if(ok)
	{
		image->color_space = new_space;
	}
}/* color_apply_icc_profile() */

/* Convert a 3-component CIELab image (enumCS 14, parameters taken from the
 * icc_profile_buf header) to 16-bit sRGB using LittleCMS. Other enumCS
 * values are reported and ignored. */
void color_cielab_to_rgb(opj_image_t *image)
{
	int *row;
	int enumcs, numcomps;
	OPJ_COLOR_SPACE new_space;

	numcomps = (int)image->numcomps;

	if(numcomps != 3)
	{
		fprintf(stderr,"%s:%d:\n\tnumcomps %d not handled. Quitting.\n",
		 __FILE__,__LINE__,numcomps);
		return;
	}
	row = (int*)image->icc_profile_buf;
	enumcs = row[0];

	if(enumcs == 14) /* CIELab */
	{
		int *L, *a, *b, *red, *green, *blue;
		int *src0, *src1, *src2, *dst0, *dst1, *dst2;
		double rl, ol, ra, oa, rb, ob, prec0, prec1, prec2;
		double minL, maxL, mina, maxa, minb, maxb;
		unsigned int default_type;
		unsigned int i, max;
		cmsHPROFILE in, out;
		cmsHTRANSFORM transform;
		cmsUInt16Number RGB[3];
		cmsCIELab Lab;

		in = cmsCreateLab4Profile(NULL);
		if(in == NULL){
			return;
		}
		out = cmsCreate_sRGBProfile();
		if(out == NULL){
			cmsCloseProfile(in);
			return;
		}
		transform = cmsCreateTransform(in, TYPE_Lab_DBL, out, TYPE_RGB_16,
		 INTENT_PERCEPTUAL, 0);

#ifdef OPJ_HAVE_LIBLCMS2
		cmsCloseProfile(in);
		cmsCloseProfile(out);
#endif
		if(transform == NULL)
		{
#ifdef OPJ_HAVE_LIBLCMS1
			cmsCloseProfile(in);
			cmsCloseProfile(out);
#endif
			return;
		}
		new_space = OPJ_CLRSPC_SRGB;

		prec0 = (double)image->comps[0].prec;
		prec1 = (double)image->comps[1].prec;
		prec2 = (double)image->comps[2].prec;

		default_type = (unsigned int)row[1];

		if(default_type == 0x44454600)/* DEF : default */
		{
			/* default Lab range/offset per channel */
			rl = 100; ra = 170; rb = 200;
			ol = 0;
			oa = pow(2, prec1 - 1);
			ob = pow(2, prec2 - 2) + pow(2, prec2 - 3);
		}
		else
		{
			/* explicit range/offset values from the profile header */
			rl = row[2]; ra = row[4]; rb = row[6];
			ol = row[3]; oa = row[5]; ob = row[7];
		}

		L = src0 = image->comps[0].data;
		a = src1 = image->comps[1].data;
		b = src2 = image->comps[2].data;

		max = image->comps[0].w * image->comps[0].h;

		red = dst0 = (int*)malloc(max * sizeof(int));
		green = dst1 = (int*)malloc(max * sizeof(int));
		blue = dst2 = (int*)malloc(max * sizeof(int));

		if(red == NULL || green == NULL || blue == NULL) goto fails;

		minL = -(rl * ol)/(pow(2, prec0)-1);
		maxL = minL + rl;

		mina = -(ra * oa)/(pow(2, prec1)-1);
		maxa = mina + ra;

		minb = -(rb * ob)/(pow(2, prec2)-1);
		maxb = minb + rb;

		for(i = 0; i < max; ++i)
		{
			/* map the integer codes onto the Lab ranges, one pixel at a time */
			Lab.L = minL + (double)(*L) * (maxL - minL)/(pow(2, prec0)-1); ++L;
			Lab.a = mina + (double)(*a) * (maxa - mina)/(pow(2, prec1)-1); ++a;
			Lab.b = minb + (double)(*b) * (maxb - minb)/(pow(2, prec2)-1); ++b;

			cmsDoTransform(transform, &Lab, RGB, 1);

			*red++ = RGB[0];
			*green++ = RGB[1];
			*blue++ = RGB[2];
		}
		cmsDeleteTransform(transform);
#ifdef OPJ_HAVE_LIBLCMS1
		cmsCloseProfile(in);
		cmsCloseProfile(out);
#endif
		free(src0); image->comps[0].data = dst0;
		free(src1); image->comps[1].data = dst1;
		free(src2); image->comps[2].data = dst2;

		image->color_space = new_space;
		image->comps[0].prec = 16;
		image->comps[1].prec = 16;
		image->comps[2].prec = 16;

		return;

fails:
		cmsDeleteTransform(transform);
#ifdef OPJ_HAVE_LIBLCMS1
		cmsCloseProfile(in);
		cmsCloseProfile(out);
#endif
		if(red) free(red);
		if(green) free(green);
		if(blue) free(blue);
		return;
	}

	fprintf(stderr,"%s:%d:\n\tenumCS %d not handled. Ignoring.\n",
	 __FILE__,__LINE__, enumcs);
}/* color_cielab_to_rgb() */

#endif /* OPJ_HAVE_LIBLCMS2 || OPJ_HAVE_LIBLCMS1 */

/* Convert a CMYK image to 8-bit RGB in place, dropping the K component and
 * shifting any trailing components down by one slot. */
void color_cmyk_to_rgb(opj_image_t *image)
{
	float C, M, Y, K;
	float sC, sM, sY, sK;
	unsigned int w, h, max, i;

	w = image->comps[0].w;
	h = image->comps[0].h;

	if(image->numcomps < 4) return;

	max = w * h;

	/* per-component scale factors to normalize samples into [0,1] */
	sC = 1.0F / (float)((1 << image->comps[0].prec) - 1);
	sM = 1.0F / (float)((1 << image->comps[1].prec) - 1);
	sY = 1.0F / (float)((1 << image->comps[2].prec) - 1);
	sK = 1.0F / (float)((1 << image->comps[3].prec) - 1);

	for(i = 0; i < max; ++i)
	{
		/* CMYK values from 0 to 1 */
		C = (float)(image->comps[0].data[i]) * sC;
		M = (float)(image->comps[1].data[i]) * sM;
		Y = (float)(image->comps[2].data[i]) * sY;
		K = (float)(image->comps[3].data[i]) * sK;

		/* Invert all CMYK values */
		C = 1.0F - C;
		M = 1.0F - M;
		Y = 1.0F - Y;
		K = 1.0F - K;

		/* CMYK -> RGB : RGB results from 0 to 255 */
		image->comps[0].data[i] = (int)(255.0F * C * K); /* R */
		image->comps[1].data[i] = (int)(255.0F * M * K); /* G */
		image->comps[2].data[i] = (int)(255.0F * Y * K); /* B */
	}

	free(image->comps[3].data); image->comps[3].data = NULL;
	image->comps[0].prec = 8;
	image->comps[1].prec = 8;
	image->comps[2].prec = 8;
	image->numcomps -= 1;
	image->color_space = OPJ_CLRSPC_SRGB;

	/* close the gap left by the removed K component */
	for (i = 3; i < image->numcomps; ++i) {
		memcpy(&(image->comps[i]), &(image->comps[i+1]), sizeof(image->comps[i]));
	}

}/* color_cmyk_to_rgb() */

/*
 * This code has been adopted from sjpx_openjpeg.c of ghostscript
 */
/* Convert an e-sYCC image (extended-gamut YCC) to RGB in place; all three
 * components must share the same subsampling factors. Results are clamped
 * to the component precision. */
void color_esycc_to_rgb(opj_image_t *image)
{
	int y, cb, cr, sign1, sign2, val;
	unsigned int w, h, max, i;
	int flip_value = (1 << (image->comps[0].prec-1));
	int max_value = (1 << image->comps[0].prec) - 1;

	if ( (image->numcomps < 3)
	|| (image->comps[0].dx != image->comps[1].dx)
	|| (image->comps[0].dx != image->comps[2].dx)
	|| (image->comps[0].dy != image->comps[1].dy)
	|| (image->comps[0].dy != image->comps[2].dy)
	)
	{
		fprintf(stderr,"%s:%d:color_esycc_to_rgb\n\tCAN NOT CONVERT\n",
		 __FILE__,__LINE__);
		return;
	}

	w = image->comps[0].w;
	h = image->comps[0].h;

	sign1 = (int)image->comps[1].sgnd;
	sign2 = (int)image->comps[2].sgnd;

	max = w * h;

	for(i = 0; i < max; ++i)
	{

		y = image->comps[0].data[i];
		cb = image->comps[1].data[i];
		cr = image->comps[2].data[i];

		/* unsigned chroma carries a 2^(prec-1) bias; remove it */
		if( !sign1) cb -= flip_value;
		if( !sign2) cr -= flip_value;

		val = (int)
		((float)y - (float)0.0000368 * (float)cb
		+ (float)1.40199 * (float)cr + (float)0.5);

		if(val > max_value) val = max_value;
		else if(val < 0) val = 0;
		image->comps[0].data[i] = val;

		val = (int)
		((float)1.0003 * (float)y - (float)0.344125 * (float)cb
		- (float)0.7141128 * (float)cr + (float)0.5);

		if(val > max_value) val = max_value;
		else if(val < 0) val = 0;
		image->comps[1].data[i] = val;

		val = (int)
		((float)0.999823 * (float)y + (float)1.77204 * (float)cb
		- (float)0.000008 *(float)cr + (float)0.5);

		if(val > max_value) val = max_value;
		else if(val < 0) val = 0;
		image->comps[2].data[i] = val;
	}
	image->color_space = OPJ_CLRSPC_SRGB;

}/* color_esycc_to_rgb() */
/* 4:2:0 sYCC -> RGB: chroma is subsampled by 2 in both directions, so a
 * 2x2 luma block shares one Cb/Cr pair. Converts in place, replacing all
 * three component data buffers with full-resolution planes.
 *
 * Fix: the previous version ignored an odd image origin (img->x0/img->y0).
 * When the origin is odd, the first column/row has no stored chroma pair;
 * walking the half-resolution cb/cr buffers as if it did misaligns luma
 * and chroma and advances the chroma pointers past the end of their
 * buffers (heap out-of-bounds read). The first column/row now uses
 * Cb/Cr = 0 (offx/offy), and all size arithmetic uses size_t instead of
 * unsigned int to avoid overflow on very large images. */
static void sycc420_to_rgb(opj_image_t *img)
{
	int *d0, *d1, *d2, *r, *g, *b, *nr, *ng, *nb;
	const int *y, *cb, *cr, *ny;
	size_t maxw, maxh, max, offx, loopmaxw, offy, loopmaxh;
	int offset, upb;
	size_t i;

	upb = (int)img->comps[0].prec;
	offset = 1<<(upb - 1); upb = (1<<upb)-1;  /* chroma bias, channel max */

	maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;
	max = maxw * maxh;

	y = img->comps[0].data;
	cb = img->comps[1].data;
	cr = img->comps[2].data;

	d0 = r = (int*)malloc(sizeof(int) * max);
	d1 = g = (int*)malloc(sizeof(int) * max);
	d2 = b = (int*)malloc(sizeof(int) * max);

	if (r == NULL || g == NULL || b == NULL) goto fails;

	/* if img->x0 is odd, then first column shall use Cb/Cr = 0 */
	offx = img->x0 & 1U;
	loopmaxw = maxw - offx;
	/* if img->y0 is odd, then first line shall use Cb/Cr = 0 */
	offy = img->y0 & 1U;
	loopmaxh = maxh - offy;

	if (offy > 0U) {
		size_t j;

		for(j=0; j < maxw; ++j)
		{
			sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);
			++y; ++r; ++g; ++b;
		}
	}

	/* process rows in pairs; ny/nr/ng/nb track the second row of the pair */
	for(i=0U; i < (loopmaxh & ~(size_t)1U); i += 2U)
	{
		size_t j;

		ny = y + maxw;
		nr = r + maxw; ng = g + maxw; nb = b + maxw;

		if (offx > 0U) {
			sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb;
		}

		for(j=0; j < (loopmaxw & ~(size_t)1U); j += 2U)
		{
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb; ++cb; ++cr;
		}
		if(j < loopmaxw)
		{
			/* odd trailing column */
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);
			++ny; ++nr; ++ng; ++nb; ++cb; ++cr;
		}
		/* skip the second row of the pair, already written via ny/nr/ng/nb */
		y += maxw; r += maxw; g += maxw; b += maxw;
	}
	if(i < loopmaxh)
	{
		/* odd trailing row: no second row to pair with */
		size_t j;

		for(j=0U; j < (maxw & ~(size_t)1U); j += 2U)
		{
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b;
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
			++y; ++r; ++g; ++b; ++cb; ++cr;
		}
		if(j < maxw)
		{
			sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b);
		}
	}

	free(img->comps[0].data); img->comps[0].data = d0;
	free(img->comps[1].data); img->comps[1].data = d1;
	free(img->comps[2].data); img->comps[2].data = d2;

	/* chroma components are now full resolution */
	img->comps[1].w = img->comps[2].w = img->comps[0].w;
	img->comps[1].h = img->comps[2].h = img->comps[0].h;
	img->comps[1].dx = img->comps[2].dx = img->comps[0].dx;
	img->comps[1].dy = img->comps[2].dy = img->comps[0].dy;
	img->color_space = OPJ_CLRSPC_SRGB;
	return;

fails:
	free(r);  /* free(NULL) is a no-op, so no guards needed */
	free(g);
	free(b);
}/* sycc420_to_rgb() */
static void sycc420_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b, *nr, *ng, *nb; const int *y, *cb, *cr, *ny; size_t maxw, maxh, max, offx, loopmaxw, offy, loopmaxh; int offset, upb; size_t i; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * max); d1 = g = (int*)malloc(sizeof(int) * max); d2 = b = (int*)malloc(sizeof(int) * max); if (r == NULL || g == NULL || b == NULL) goto fails; /* if img->x0 is odd, then first column shall use Cb/Cr = 0 */ offx = img->x0 & 1U; loopmaxw = maxw - offx; /* if img->y0 is odd, then first line shall use Cb/Cr = 0 */ offy = img->y0 & 1U; loopmaxh = maxh - offy; if (offy > 0U) { size_t j; for(j=0; j < maxw; ++j) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; } } for(i=0U; i < (loopmaxh & ~(size_t)1U); i += 2U) { size_t j; ny = y + maxw; nr = r + maxw; ng = g + maxw; nb = b + maxw; if (offx > 0U) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; } for(j=0; j < (loopmaxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } if(j < loopmaxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } y += maxw; r += maxw; g += maxw; b += maxw; } if(i < loopmaxh) { size_t j; for(j=0U; j < (maxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, 
g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if(j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); } } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; img->comps[1].w = img->comps[2].w = img->comps[0].w; img->comps[1].h = img->comps[2].h = img->comps[0].h; img->comps[1].dx = img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[2].dy = img->comps[0].dy; img->color_space = OPJ_CLRSPC_SRGB; return; fails: free(r); free(g); free(b); }/* sycc420_to_rgb() */
{'added': [(94, '\tsize_t maxw, maxh, max, i;'), (100, '\tmaxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;'), (107, '\td0 = r = (int*)malloc(sizeof(int) * max);'), (108, '\td1 = g = (int*)malloc(sizeof(int) * max);'), (109, '\td2 = b = (int*)malloc(sizeof(int) * max);'), (121, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (125, '\tfree(r);'), (126, '\tfree(g);'), (127, '\tfree(b);'), (134, '\tsize_t maxw, maxh, max, offx, loopmaxw;'), (136, '\tsize_t i;'), (141, '\tmaxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;'), (148, '\td0 = r = (int*)malloc(sizeof(int) * max);'), (149, '\td1 = g = (int*)malloc(sizeof(int) * max);'), (150, '\td2 = b = (int*)malloc(sizeof(int) * max);'), (154, '\t/* if img->x0 is odd, then first column shall use Cb/Cr = 0 */'), (155, '\toffx = img->x0 & 1U;'), (156, '\tloopmaxw = maxw - offx;'), (157, ''), (160, '\t\tsize_t j;'), (161, ''), (162, '\t\tif (offx > 0U) {'), (163, '\t\t\tsycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);'), (164, '\t\t\t++y; ++r; ++g; ++b;'), (165, '\t\t}'), (166, ''), (167, '\t\tfor(j=0U; j < (loopmaxw & ~(size_t)1U); j += 2U)'), (174, '\t\tif (j < loopmaxw) {'), (179, ''), (184, '\timg->comps[1].w = img->comps[2].w = img->comps[0].w;'), (185, '\timg->comps[1].h = img->comps[2].h = img->comps[0].h;'), (186, '\timg->comps[1].dx = img->comps[2].dx = img->comps[0].dx;'), (187, '\timg->comps[1].dy = img->comps[2].dy = img->comps[0].dy;'), (188, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (192, '\tfree(r);'), (193, '\tfree(g);'), (194, '\tfree(b);'), (201, '\tsize_t maxw, maxh, max, offx, loopmaxw, offy, loopmaxh;'), (203, '\tsize_t i;'), (208, '\tmaxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;'), (215, '\td0 = r = (int*)malloc(sizeof(int) * max);'), (216, '\td1 = g = (int*)malloc(sizeof(int) * max);'), (217, '\td2 = b = (int*)malloc(sizeof(int) * max);'), (218, ''), (219, '\tif (r == NULL || g == NULL || b == NULL) goto fails;'), (220, ''), (221, '\t/* if img->x0 is odd, then first 
column shall use Cb/Cr = 0 */'), (222, '\toffx = img->x0 & 1U;'), (223, '\tloopmaxw = maxw - offx;'), (224, '\t/* if img->y0 is odd, then first line shall use Cb/Cr = 0 */'), (225, '\toffy = img->y0 & 1U;'), (226, '\tloopmaxh = maxh - offy;'), (227, ''), (228, '\tif (offy > 0U) {'), (229, '\t\tsize_t j;'), (230, ''), (231, '\t\tfor(j=0; j < maxw; ++j)'), (232, '\t\t{'), (233, '\t\t\tsycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);'), (234, '\t\t\t++y; ++r; ++g; ++b;'), (235, '\t\t}'), (236, '\t}'), (238, '\tfor(i=0U; i < (loopmaxh & ~(size_t)1U); i += 2U)'), (240, '\t\tsize_t j;'), (241, ''), (244, ''), (245, '\t\tif (offx > 0U) {'), (246, '\t\t\tsycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);'), (247, '\t\t\t++y; ++r; ++g; ++b;'), (248, '\t\t\tsycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);'), (249, '\t\t\t++ny; ++nr; ++ng; ++nb;'), (250, '\t\t}'), (252, '\t\tfor(j=0; j < (loopmaxw & ~(size_t)1U); j += 2U)'), (264, '\t\tif(j < loopmaxw)'), (274, '\tif(i < loopmaxh)'), (276, '\t\tsize_t j;'), (277, ''), (278, '\t\tfor(j=0U; j < (maxw & ~(size_t)1U); j += 2U)'), (298, '\timg->comps[1].w = img->comps[2].w = img->comps[0].w;'), (299, '\timg->comps[1].h = img->comps[2].h = img->comps[0].h;'), (300, '\timg->comps[1].dx = img->comps[2].dx = img->comps[0].dx;'), (301, '\timg->comps[1].dy = img->comps[2].dy = img->comps[0].dy;'), (302, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (306, '\tfree(r);'), (307, '\tfree(g);'), (308, '\tfree(b);')], 'deleted': [(94, '\tunsigned int maxw, maxh, max, i;'), (100, '\tmaxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h;'), (107, '\td0 = r = (int*)malloc(sizeof(int) * (size_t)max);'), (108, '\td1 = g = (int*)malloc(sizeof(int) * (size_t)max);'), (109, '\td2 = b = (int*)malloc(sizeof(int) * (size_t)max);'), (124, '\tif(r) free(r);'), (125, '\tif(g) free(g);'), (126, '\tif(b) free(b);'), (127, ''), (134, '\tunsigned int maxw, maxh, max;'), (136, '\tunsigned int i, j;'), (141, '\tmaxw = (unsigned int)img->comps[0].w; 
maxh = (unsigned int)img->comps[0].h;'), (148, '\td0 = r = (int*)malloc(sizeof(int) * (size_t)max);'), (149, '\td1 = g = (int*)malloc(sizeof(int) * (size_t)max);'), (150, '\td2 = b = (int*)malloc(sizeof(int) * (size_t)max);'), (156, '\t\tfor(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U)'), (163, '\t\tif (j < maxw) {'), (172, '#if defined(USE_JPWL) || defined(USE_MJ2)'), (173, '\timg->comps[1].w = maxw; img->comps[1].h = maxh;'), (174, '\timg->comps[2].w = maxw; img->comps[2].h = maxh;'), (175, '#else'), (176, '\timg->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh;'), (177, '\timg->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = (OPJ_UINT32)maxh;'), (178, '#endif'), (179, '\timg->comps[1].dx = img->comps[0].dx;'), (180, '\timg->comps[2].dx = img->comps[0].dx;'), (181, '\timg->comps[1].dy = img->comps[0].dy;'), (182, '\timg->comps[2].dy = img->comps[0].dy;'), (186, '\tif(r) free(r);'), (187, '\tif(g) free(g);'), (188, '\tif(b) free(b);'), (189, ''), (196, '\tunsigned int maxw, maxh, max;'), (198, '\tunsigned int i, j;'), (203, '\tmaxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h;'), (210, '\td0 = r = (int*)malloc(sizeof(int) * (size_t)max);'), (211, '\td1 = g = (int*)malloc(sizeof(int) * (size_t)max);'), (212, '\td2 = b = (int*)malloc(sizeof(int) * (size_t)max);'), (213, ''), (214, '\tif(r == NULL || g == NULL || b == NULL) goto fails;'), (216, '\tfor(i=0U; i < (maxh & ~(unsigned int)1U); i += 2U)'), (221, '\t\tfor(j=0; j < (maxw & ~(unsigned int)1U); j += 2U)'), (233, '\t\tif(j < maxw)'), (243, '\tif(i < maxh)'), (245, '\t\tfor(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U)'), (265, '#if defined(USE_JPWL) || defined(USE_MJ2)'), (266, '\timg->comps[1].w = maxw; img->comps[1].h = maxh;'), (267, '\timg->comps[2].w = maxw; img->comps[2].h = maxh;'), (268, '#else'), (269, '\timg->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh;'), (270, '\timg->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = 
(OPJ_UINT32)maxh;'), (271, '#endif'), (272, '\timg->comps[1].dx = img->comps[0].dx;'), (273, '\timg->comps[2].dx = img->comps[0].dx;'), (274, '\timg->comps[1].dy = img->comps[0].dy;'), (275, '\timg->comps[2].dy = img->comps[0].dy;'), (279, '\tif(r) free(r);'), (280, '\tif(g) free(g);'), (281, '\tif(b) free(b);'), (282, ''), (327, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (328, '')]}
86
62
686
6,633
73
974
14
https://github.com/uclouvain/openjpeg
CVE-2016-3183
CWE-125
2,718
IRCNetwork.cpp
C++
CIRCNetwork::SetEncoding
/* * Copyright (C) 2004-2018 ZNC, see the NOTICE file for details. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <znc/IRCNetwork.h> #include <znc/User.h> #include <znc/FileUtils.h> #include <znc/Config.h> #include <znc/IRCSock.h> #include <znc/Server.h> #include <znc/Chan.h> #include <znc/Query.h> #include <znc/Message.h> #include <algorithm> #include <memory> using std::vector; using std::set; class CIRCNetworkPingTimer : public CCron { public: CIRCNetworkPingTimer(CIRCNetwork* pNetwork) : CCron(), m_pNetwork(pNetwork) { SetName("CIRCNetworkPingTimer::" + m_pNetwork->GetUser()->GetUserName() + "::" + m_pNetwork->GetName()); Start(m_pNetwork->GetUser()->GetPingSlack()); } ~CIRCNetworkPingTimer() override {} CIRCNetworkPingTimer(const CIRCNetworkPingTimer&) = delete; CIRCNetworkPingTimer& operator=(const CIRCNetworkPingTimer&) = delete; protected: void RunJob() override { CIRCSock* pIRCSock = m_pNetwork->GetIRCSock(); auto uFrequency = m_pNetwork->GetUser()->GetPingFrequency(); if (pIRCSock && pIRCSock->GetTimeSinceLastDataTransaction() >= uFrequency) { pIRCSock->PutIRC("PING :ZNC"); } const vector<CClient*>& vClients = m_pNetwork->GetClients(); for (CClient* pClient : vClients) { if (pClient->GetTimeSinceLastDataTransaction() >= uFrequency) { pClient->PutClient("PING :ZNC"); } } // Restart timer for the case if the period had changed. 
Usually this is // noop Start(m_pNetwork->GetUser()->GetPingSlack()); } private: CIRCNetwork* m_pNetwork; }; class CIRCNetworkJoinTimer : public CCron { constexpr static int JOIN_FREQUENCY = 30 /* seconds */; public: CIRCNetworkJoinTimer(CIRCNetwork* pNetwork) : CCron(), m_bDelayed(false), m_pNetwork(pNetwork) { SetName("CIRCNetworkJoinTimer::" + m_pNetwork->GetUser()->GetUserName() + "::" + m_pNetwork->GetName()); Start(JOIN_FREQUENCY); } ~CIRCNetworkJoinTimer() override {} CIRCNetworkJoinTimer(const CIRCNetworkJoinTimer&) = delete; CIRCNetworkJoinTimer& operator=(const CIRCNetworkJoinTimer&) = delete; void Delay(unsigned short int uDelay) { m_bDelayed = true; Start(uDelay); } protected: void RunJob() override { if (m_bDelayed) { m_bDelayed = false; Start(JOIN_FREQUENCY); } if (m_pNetwork->IsIRCConnected()) { m_pNetwork->JoinChans(); } } private: bool m_bDelayed; CIRCNetwork* m_pNetwork; }; bool CIRCNetwork::IsValidNetwork(const CString& sNetwork) { // ^[-\w]+$ if (sNetwork.empty()) { return false; } const char* p = sNetwork.c_str(); while (*p) { if (*p != '_' && *p != '-' && !isalnum(*p)) { return false; } p++; } return true; } CIRCNetwork::CIRCNetwork(CUser* pUser, const CString& sName) : m_sName(sName), m_pUser(nullptr), m_sNick(""), m_sAltNick(""), m_sIdent(""), m_sRealName(""), m_sBindHost(""), m_sEncoding(""), m_sQuitMsg(""), m_ssTrustedFingerprints(), m_pModules(new CModules), m_vClients(), m_pIRCSock(nullptr), m_vChans(), m_vQueries(), m_sChanPrefixes(""), m_bIRCConnectEnabled(true), m_bTrustAllCerts(false), m_bTrustPKI(true), m_sIRCServer(""), m_vServers(), m_uServerIdx(0), m_IRCNick(), m_bIRCAway(false), m_fFloodRate(2), m_uFloodBurst(9), m_RawBuffer(), m_MotdBuffer(), m_NoticeBuffer(), m_pPingTimer(nullptr), m_pJoinTimer(nullptr), m_uJoinDelay(0), m_uBytesRead(0), m_uBytesWritten(0) { SetUser(pUser); // This should be more than enough raws, especially since we are buffering // the MOTD separately m_RawBuffer.SetLineCount(100, true); // This should be 
more than enough motd lines m_MotdBuffer.SetLineCount(200, true); m_NoticeBuffer.SetLineCount(250, true); m_pPingTimer = new CIRCNetworkPingTimer(this); CZNC::Get().GetManager().AddCron(m_pPingTimer); m_pJoinTimer = new CIRCNetworkJoinTimer(this); CZNC::Get().GetManager().AddCron(m_pJoinTimer); SetIRCConnectEnabled(true); } CIRCNetwork::CIRCNetwork(CUser* pUser, const CIRCNetwork& Network) : CIRCNetwork(pUser, "") { Clone(Network); } void CIRCNetwork::Clone(const CIRCNetwork& Network, bool bCloneName) { if (bCloneName) { m_sName = Network.GetName(); } m_fFloodRate = Network.GetFloodRate(); m_uFloodBurst = Network.GetFloodBurst(); m_uJoinDelay = Network.GetJoinDelay(); SetNick(Network.GetNick()); SetAltNick(Network.GetAltNick()); SetIdent(Network.GetIdent()); SetRealName(Network.GetRealName()); SetBindHost(Network.GetBindHost()); SetEncoding(Network.GetEncoding()); SetQuitMsg(Network.GetQuitMsg()); m_ssTrustedFingerprints = Network.m_ssTrustedFingerprints; // Servers const vector<CServer*>& vServers = Network.GetServers(); CString sServer; CServer* pCurServ = GetCurrentServer(); if (pCurServ) { sServer = pCurServ->GetName(); } DelServers(); for (CServer* pServer : vServers) { AddServer(pServer->GetName(), pServer->GetPort(), pServer->GetPass(), pServer->IsSSL()); } m_uServerIdx = 0; for (size_t a = 0; a < m_vServers.size(); a++) { if (sServer.Equals(m_vServers[a]->GetName())) { m_uServerIdx = a + 1; break; } } if (m_uServerIdx == 0) { m_uServerIdx = m_vServers.size(); CIRCSock* pSock = GetIRCSock(); if (pSock) { PutStatus( t_s("Jumping servers because this server is no longer in the " "list")); pSock->Quit(); } } // !Servers // Chans const vector<CChan*>& vChans = Network.GetChans(); for (CChan* pNewChan : vChans) { CChan* pChan = FindChan(pNewChan->GetName()); if (pChan) { pChan->SetInConfig(pNewChan->InConfig()); } else { AddChan(pNewChan->GetName(), pNewChan->InConfig()); } } for (CChan* pChan : m_vChans) { CChan* pNewChan = Network.FindChan(pChan->GetName()); if 
(!pNewChan) { pChan->SetInConfig(false); } else { pChan->Clone(*pNewChan); } } // !Chans // Modules set<CString> ssUnloadMods; CModules& vCurMods = GetModules(); const CModules& vNewMods = Network.GetModules(); for (CModule* pNewMod : vNewMods) { CString sModRet; CModule* pCurMod = vCurMods.FindModule(pNewMod->GetModName()); if (!pCurMod) { vCurMods.LoadModule(pNewMod->GetModName(), pNewMod->GetArgs(), CModInfo::NetworkModule, m_pUser, this, sModRet); } else if (pNewMod->GetArgs() != pCurMod->GetArgs()) { vCurMods.ReloadModule(pNewMod->GetModName(), pNewMod->GetArgs(), m_pUser, this, sModRet); } } for (CModule* pCurMod : vCurMods) { CModule* pNewMod = vNewMods.FindModule(pCurMod->GetModName()); if (!pNewMod) { ssUnloadMods.insert(pCurMod->GetModName()); } } for (const CString& sMod : ssUnloadMods) { vCurMods.UnloadModule(sMod); } // !Modules SetIRCConnectEnabled(Network.GetIRCConnectEnabled()); } CIRCNetwork::~CIRCNetwork() { if (m_pIRCSock) { CZNC::Get().GetManager().DelSockByAddr(m_pIRCSock); m_pIRCSock = nullptr; } // Delete clients while (!m_vClients.empty()) { CZNC::Get().GetManager().DelSockByAddr(m_vClients[0]); } m_vClients.clear(); // Delete servers DelServers(); // Delete modules (this unloads all modules) delete m_pModules; m_pModules = nullptr; // Delete Channels for (CChan* pChan : m_vChans) { delete pChan; } m_vChans.clear(); // Delete Queries for (CQuery* pQuery : m_vQueries) { delete pQuery; } m_vQueries.clear(); CUser* pUser = GetUser(); SetUser(nullptr); // Make sure we are not in the connection queue CZNC::Get().GetConnectionQueue().remove(this); CZNC::Get().GetManager().DelCronByAddr(m_pPingTimer); CZNC::Get().GetManager().DelCronByAddr(m_pJoinTimer); if (pUser) { pUser->AddBytesRead(m_uBytesRead); pUser->AddBytesWritten(m_uBytesWritten); } else { CZNC::Get().AddBytesRead(m_uBytesRead); CZNC::Get().AddBytesWritten(m_uBytesWritten); } } void CIRCNetwork::DelServers() { for (CServer* pServer : m_vServers) { delete pServer; } m_vServers.clear(); } 
CString CIRCNetwork::GetNetworkPath() const { CString sNetworkPath = m_pUser->GetUserPath() + "/networks/" + m_sName; if (!CFile::Exists(sNetworkPath)) { CDir::MakeDir(sNetworkPath); } return sNetworkPath; } template <class T> struct TOption { const char* name; void (CIRCNetwork::*pSetter)(T); }; bool CIRCNetwork::ParseConfig(CConfig* pConfig, CString& sError, bool bUpgrade) { VCString vsList; if (!bUpgrade) { TOption<const CString&> StringOptions[] = { {"nick", &CIRCNetwork::SetNick}, {"altnick", &CIRCNetwork::SetAltNick}, {"ident", &CIRCNetwork::SetIdent}, {"realname", &CIRCNetwork::SetRealName}, {"bindhost", &CIRCNetwork::SetBindHost}, {"encoding", &CIRCNetwork::SetEncoding}, {"quitmsg", &CIRCNetwork::SetQuitMsg}, }; TOption<bool> BoolOptions[] = { {"ircconnectenabled", &CIRCNetwork::SetIRCConnectEnabled}, {"trustallcerts", &CIRCNetwork::SetTrustAllCerts}, {"trustpki", &CIRCNetwork::SetTrustPKI}, }; TOption<double> DoubleOptions[] = { {"floodrate", &CIRCNetwork::SetFloodRate}, }; TOption<short unsigned int> SUIntOptions[] = { {"floodburst", &CIRCNetwork::SetFloodBurst}, {"joindelay", &CIRCNetwork::SetJoinDelay}, }; for (const auto& Option : StringOptions) { CString sValue; if (pConfig->FindStringEntry(Option.name, sValue)) (this->*Option.pSetter)(sValue); } for (const auto& Option : BoolOptions) { CString sValue; if (pConfig->FindStringEntry(Option.name, sValue)) (this->*Option.pSetter)(sValue.ToBool()); } for (const auto& Option : DoubleOptions) { double fValue; if (pConfig->FindDoubleEntry(Option.name, fValue)) (this->*Option.pSetter)(fValue); } for (const auto& Option : SUIntOptions) { unsigned short value; if (pConfig->FindUShortEntry(Option.name, value)) (this->*Option.pSetter)(value); } pConfig->FindStringVector("loadmodule", vsList); for (const CString& sValue : vsList) { CString sModName = sValue.Token(0); CString sNotice = "Loading network module [" + sModName + "]"; // XXX Legacy crap, added in ZNC 0.203, modified in 0.207 // Note that 0.203 == 0.207 
if (sModName == "away") { sNotice = "NOTICE: [away] was renamed, loading [awaystore] instead"; sModName = "awaystore"; } // XXX Legacy crap, added in ZNC 0.207 if (sModName == "autoaway") { sNotice = "NOTICE: [autoaway] was renamed, loading [awaystore] " "instead"; sModName = "awaystore"; } // XXX Legacy crap, added in 1.1; fakeonline module was dropped in // 1.0 and returned in 1.1 if (sModName == "fakeonline") { sNotice = "NOTICE: [fakeonline] was renamed, loading " "[modules_online] instead"; sModName = "modules_online"; } CString sModRet; CString sArgs = sValue.Token(1, true); bool bModRet = LoadModule(sModName, sArgs, sNotice, sModRet); if (!bModRet) { // XXX The awaynick module was retired in 1.6 (still available // as external module) if (sModName == "awaynick") { // load simple_away instead, unless it's already on the list bool bFound = false; for (const CString& sLoadMod : vsList) { if (sLoadMod.Token(0).Equals("simple_away")) { bFound = true; } } if (!bFound) { sNotice = "NOTICE: awaynick was retired, loading network " "module [simple_away] instead; if you still need " "awaynick, install it as an external module"; sModName = "simple_away"; // not a fatal error if simple_away is not available LoadModule(sModName, sArgs, sNotice, sModRet); } } else { sError = sModRet; return false; } } } } pConfig->FindStringVector("server", vsList); for (const CString& sServer : vsList) { CUtils::PrintAction("Adding server [" + sServer + "]"); CUtils::PrintStatus(AddServer(sServer)); } pConfig->FindStringVector("trustedserverfingerprint", vsList); for (const CString& sFP : vsList) { AddTrustedFingerprint(sFP); } pConfig->FindStringVector("chan", vsList); for (const CString& sChan : vsList) { AddChan(sChan, true); } CConfig::SubConfig subConf; CConfig::SubConfig::const_iterator subIt; pConfig->FindSubConfig("chan", subConf); for (subIt = subConf.begin(); subIt != subConf.end(); ++subIt) { const CString& sChanName = subIt->first; CConfig* pSubConf = 
subIt->second.m_pSubConfig; CChan* pChan = new CChan(sChanName, this, true, pSubConf); if (!pSubConf->empty()) { sError = "Unhandled lines in config for User [" + m_pUser->GetUserName() + "], Network [" + GetName() + "], Channel [" + sChanName + "]!"; CUtils::PrintError(sError); CZNC::DumpConfig(pSubConf); delete pChan; return false; } // Save the channel name, because AddChan // deletes the CChannel*, if adding fails sError = pChan->GetName(); if (!AddChan(pChan)) { sError = "Channel [" + sError + "] defined more than once"; CUtils::PrintError(sError); return false; } sError.clear(); } return true; } CConfig CIRCNetwork::ToConfig() const { CConfig config; if (!m_sNick.empty()) { config.AddKeyValuePair("Nick", m_sNick); } if (!m_sAltNick.empty()) { config.AddKeyValuePair("AltNick", m_sAltNick); } if (!m_sIdent.empty()) { config.AddKeyValuePair("Ident", m_sIdent); } if (!m_sRealName.empty()) { config.AddKeyValuePair("RealName", m_sRealName); } if (!m_sBindHost.empty()) { config.AddKeyValuePair("BindHost", m_sBindHost); } config.AddKeyValuePair("IRCConnectEnabled", CString(GetIRCConnectEnabled())); config.AddKeyValuePair("TrustAllCerts", CString(GetTrustAllCerts())); config.AddKeyValuePair("TrustPKI", CString(GetTrustPKI())); config.AddKeyValuePair("FloodRate", CString(GetFloodRate())); config.AddKeyValuePair("FloodBurst", CString(GetFloodBurst())); config.AddKeyValuePair("JoinDelay", CString(GetJoinDelay())); config.AddKeyValuePair("Encoding", m_sEncoding); if (!m_sQuitMsg.empty()) { config.AddKeyValuePair("QuitMsg", m_sQuitMsg); } // Modules const CModules& Mods = GetModules(); if (!Mods.empty()) { for (CModule* pMod : Mods) { CString sArgs = pMod->GetArgs(); if (!sArgs.empty()) { sArgs = " " + sArgs; } config.AddKeyValuePair("LoadModule", pMod->GetModName() + sArgs); } } // Servers for (CServer* pServer : m_vServers) { config.AddKeyValuePair("Server", pServer->GetString()); } for (const CString& sFP : m_ssTrustedFingerprints) { 
config.AddKeyValuePair("TrustedServerFingerprint", sFP); } // Chans for (CChan* pChan : m_vChans) { if (pChan->InConfig()) { config.AddSubConfig("Chan", pChan->GetName(), pChan->ToConfig()); } } return config; } void CIRCNetwork::BounceAllClients() { for (CClient* pClient : m_vClients) { pClient->BouncedOff(); } m_vClients.clear(); } bool CIRCNetwork::IsUserOnline() const { for (CClient* pClient : m_vClients) { if (!pClient->IsAway()) { return true; } } return false; } void CIRCNetwork::ClientConnected(CClient* pClient) { if (!m_pUser->MultiClients()) { BounceAllClients(); } m_vClients.push_back(pClient); size_t uIdx, uSize; if (m_pIRCSock) { pClient->NotifyServerDependentCaps(m_pIRCSock->GetAcceptedCaps()); } pClient->SetPlaybackActive(true); if (m_RawBuffer.IsEmpty()) { pClient->PutClient(":irc.znc.in 001 " + pClient->GetNick() + " :" + t_s("Welcome to ZNC")); } else { const CString& sClientNick = pClient->GetNick(false); MCString msParams; msParams["target"] = sClientNick; uSize = m_RawBuffer.Size(); for (uIdx = 0; uIdx < uSize; uIdx++) { pClient->PutClient(m_RawBuffer.GetLine(uIdx, *pClient, msParams)); } const CNick& Nick = GetIRCNick(); if (sClientNick != Nick.GetNick()) { // case-sensitive match pClient->PutClient(":" + sClientNick + "!" 
+ Nick.GetIdent() + "@" + Nick.GetHost() + " NICK :" + Nick.GetNick()); pClient->SetNick(Nick.GetNick()); } } MCString msParams; msParams["target"] = GetIRCNick().GetNick(); // Send the cached MOTD uSize = m_MotdBuffer.Size(); if (uSize > 0) { for (uIdx = 0; uIdx < uSize; uIdx++) { pClient->PutClient(m_MotdBuffer.GetLine(uIdx, *pClient, msParams)); } } if (GetIRCSock() != nullptr) { CString sUserMode(""); const set<char>& scUserModes = GetIRCSock()->GetUserModes(); for (char cMode : scUserModes) { sUserMode += cMode; } if (!sUserMode.empty()) { pClient->PutClient(":" + GetIRCNick().GetNickMask() + " MODE " + GetIRCNick().GetNick() + " :+" + sUserMode); } } if (m_bIRCAway) { // If they want to know their away reason they'll have to whois // themselves. At least we can tell them their away status... pClient->PutClient(":irc.znc.in 306 " + GetIRCNick().GetNick() + " :You have been marked as being away"); } const vector<CChan*>& vChans = GetChans(); for (CChan* pChan : vChans) { if ((pChan->IsOn()) && (!pChan->IsDetached())) { pChan->AttachUser(pClient); } } bool bClearQuery = m_pUser->AutoClearQueryBuffer(); for (CQuery* pQuery : m_vQueries) { pQuery->SendBuffer(pClient); if (bClearQuery) { delete pQuery; } } if (bClearQuery) { m_vQueries.clear(); } uSize = m_NoticeBuffer.Size(); for (uIdx = 0; uIdx < uSize; uIdx++) { const CBufLine& BufLine = m_NoticeBuffer.GetBufLine(uIdx); CMessage Message(BufLine.GetLine(*pClient, msParams)); Message.SetNetwork(this); Message.SetClient(pClient); Message.SetTime(BufLine.GetTime()); Message.SetTags(BufLine.GetTags()); bool bContinue = false; NETWORKMODULECALL(OnPrivBufferPlayMessage(Message), m_pUser, this, nullptr, &bContinue); if (bContinue) continue; pClient->PutClient(Message); } m_NoticeBuffer.Clear(); pClient->SetPlaybackActive(false); // Tell them why they won't connect if (!GetIRCConnectEnabled()) pClient->PutStatus( t_s("You are currently disconnected from IRC. 
Use 'connect' to " "reconnect.")); } void CIRCNetwork::ClientDisconnected(CClient* pClient) { auto it = std::find(m_vClients.begin(), m_vClients.end(), pClient); if (it != m_vClients.end()) { m_vClients.erase(it); } } CUser* CIRCNetwork::GetUser() const { return m_pUser; } const CString& CIRCNetwork::GetName() const { return m_sName; } std::vector<CClient*> CIRCNetwork::FindClients( const CString& sIdentifier) const { std::vector<CClient*> vClients; for (CClient* pClient : m_vClients) { if (pClient->GetIdentifier().Equals(sIdentifier)) { vClients.push_back(pClient); } } return vClients; } void CIRCNetwork::SetUser(CUser* pUser) { for (CClient* pClient : m_vClients) { pClient->PutStatus( t_s("This network is being deleted or moved to another user.")); pClient->SetNetwork(nullptr); } m_vClients.clear(); if (m_pUser) { m_pUser->RemoveNetwork(this); } m_pUser = pUser; if (m_pUser) { m_pUser->AddNetwork(this); } } bool CIRCNetwork::SetName(const CString& sName) { if (IsValidNetwork(sName)) { m_sName = sName; return true; } return false; } bool CIRCNetwork::PutUser(const CString& sLine, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutClient(sLine); if (pClient) { return true; } } } return (pClient == nullptr); } bool CIRCNetwork::PutUser(const CMessage& Message, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutClient(Message); if (pClient) { return true; } } } return (pClient == nullptr); } bool CIRCNetwork::PutStatus(const CString& sLine, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutStatus(sLine); if (pClient) { return true; } } } return (pClient == nullptr); } bool CIRCNetwork::PutModule(const 
CString& sModule, const CString& sLine, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutModule(sModule, sLine); if (pClient) { return true; } } } return (pClient == nullptr); } // Channels const vector<CChan*>& CIRCNetwork::GetChans() const { return m_vChans; } CChan* CIRCNetwork::FindChan(CString sName) const { if (GetIRCSock()) { // See // https://tools.ietf.org/html/draft-brocklesby-irc-isupport-03#section-3.16 sName.TrimLeft(GetIRCSock()->GetISupport("STATUSMSG", "")); } for (CChan* pChan : m_vChans) { if (sName.Equals(pChan->GetName())) { return pChan; } } return nullptr; } std::vector<CChan*> CIRCNetwork::FindChans(const CString& sWild) const { std::vector<CChan*> vChans; vChans.reserve(m_vChans.size()); const CString sLower = sWild.AsLower(); for (CChan* pChan : m_vChans) { if (pChan->GetName().AsLower().WildCmp(sLower)) vChans.push_back(pChan); } return vChans; } bool CIRCNetwork::AddChan(CChan* pChan) { if (!pChan) { return false; } for (CChan* pEachChan : m_vChans) { if (pEachChan->GetName().Equals(pChan->GetName())) { delete pChan; return false; } } m_vChans.push_back(pChan); return true; } bool CIRCNetwork::AddChan(const CString& sName, bool bInConfig) { if (sName.empty() || FindChan(sName)) { return false; } CChan* pChan = new CChan(sName, this, bInConfig); m_vChans.push_back(pChan); return true; } bool CIRCNetwork::DelChan(const CString& sName) { for (vector<CChan*>::iterator a = m_vChans.begin(); a != m_vChans.end(); ++a) { if (sName.Equals((*a)->GetName())) { delete *a; m_vChans.erase(a); return true; } } return false; } void CIRCNetwork::JoinChans() { // Avoid divsion by zero, it's bad! if (m_vChans.empty()) return; // We start at a random offset into the channel list so that if your // first 3 channels are invite-only and you got MaxJoins == 3, ZNC will // still be able to join the rest of your channels. 
unsigned int start = rand() % m_vChans.size(); unsigned int uJoins = m_pUser->MaxJoins(); set<CChan*> sChans; for (unsigned int a = 0; a < m_vChans.size(); a++) { unsigned int idx = (start + a) % m_vChans.size(); CChan* pChan = m_vChans[idx]; if (!pChan->IsOn() && !pChan->IsDisabled()) { if (!JoinChan(pChan)) continue; sChans.insert(pChan); // Limit the number of joins if (uJoins != 0 && --uJoins == 0) { // Reset the timer. m_pJoinTimer->Reset(); break; } } } while (!sChans.empty()) JoinChans(sChans); } void CIRCNetwork::JoinChans(set<CChan*>& sChans) { CString sKeys, sJoin; bool bHaveKey = false; size_t uiJoinLength = strlen("JOIN "); while (!sChans.empty()) { set<CChan*>::iterator it = sChans.begin(); const CString& sName = (*it)->GetName(); const CString& sKey = (*it)->GetKey(); size_t len = sName.length() + sKey.length(); len += 2; // two comma if (!sKeys.empty() && uiJoinLength + len >= 512) break; if (!sJoin.empty()) { sJoin += ","; sKeys += ","; } uiJoinLength += len; sJoin += sName; if (!sKey.empty()) { sKeys += sKey; bHaveKey = true; } sChans.erase(it); } if (bHaveKey) PutIRC("JOIN " + sJoin + " " + sKeys); else PutIRC("JOIN " + sJoin); } bool CIRCNetwork::JoinChan(CChan* pChan) { bool bReturn = false; NETWORKMODULECALL(OnJoining(*pChan), m_pUser, this, nullptr, &bReturn); if (bReturn) return false; if (m_pUser->JoinTries() != 0 && pChan->GetJoinTries() >= m_pUser->JoinTries()) { PutStatus(t_f("The channel {1} could not be joined, disabling it.")( pChan->GetName())); pChan->Disable(); } else { pChan->IncJoinTries(); bool bFailed = false; NETWORKMODULECALL(OnTimerAutoJoin(*pChan), m_pUser, this, nullptr, &bFailed); if (bFailed) return false; return true; } return false; } bool CIRCNetwork::IsChan(const CString& sChan) const { if (sChan.empty()) return false; // There is no way this is a chan if (GetChanPrefixes().empty()) return true; // We can't know, so we allow everything // Thanks to the above if (empty), we can do sChan[0] return 
GetChanPrefixes().find(sChan[0]) != CString::npos; }

// Queries

/* Read-only access to the network's open query (private-message) buffers. */
const vector<CQuery*>& CIRCNetwork::GetQueries() const { return m_vQueries; }

/* Case-insensitive lookup of a query buffer by nick.
 * Returns nullptr when no query with that name exists. */
CQuery* CIRCNetwork::FindQuery(const CString& sName) const {
    for (CQuery* pQuery : m_vQueries) {
        if (sName.Equals(pQuery->GetName())) {
            return pQuery;
        }
    }
    return nullptr;
}

/* Collect every query whose lowercased name matches the lowercased
 * wildcard pattern sWild (CString::WildCmp semantics). */
std::vector<CQuery*> CIRCNetwork::FindQueries(const CString& sWild) const {
    std::vector<CQuery*> vQueries;
    vQueries.reserve(m_vQueries.size());
    const CString sLower = sWild.AsLower();
    for (CQuery* pQuery : m_vQueries) {
        if (pQuery->GetName().AsLower().WildCmp(sLower)) vQueries.push_back(pQuery);
    }
    return vQueries;
}

/* Return the existing query buffer for sName, creating one if absent.
 * When the per-user query-buffer cap is enabled (MaxQueryBuffers() > 0),
 * the oldest buffers (front of the vector) are evicted to stay under it.
 * Returns nullptr only for an empty name. */
CQuery* CIRCNetwork::AddQuery(const CString& sName) {
    if (sName.empty()) {
        return nullptr;
    }
    CQuery* pQuery = FindQuery(sName);
    if (!pQuery) {
        pQuery = new CQuery(sName, this);
        m_vQueries.push_back(pQuery);
        if (m_pUser->MaxQueryBuffers() > 0) {
            // Evict from the front: oldest query buffers go first.
            while (m_vQueries.size() > m_pUser->MaxQueryBuffers()) {
                delete *m_vQueries.begin();
                m_vQueries.erase(m_vQueries.begin());
            }
        }
    }
    return pQuery;
}

/* Delete the query buffer named sName (case-insensitive).
 * Returns true if one was found and removed, false otherwise. */
bool CIRCNetwork::DelQuery(const CString& sName) {
    for (vector<CQuery*>::iterator a = m_vQueries.begin(); a != m_vQueries.end(); ++a) {
        if (sName.Equals((*a)->GetName())) {
            delete *a;
            m_vQueries.erase(a);
            return true;
        }
    }
    return false;
}

// Server list

/* Read-only access to the network's configured IRC servers. */
const vector<CServer*>& CIRCNetwork::GetServers() const { return m_vServers; }

/* Case-insensitive lookup of a configured server by hostname.
 * Returns nullptr when no such server exists. */
CServer* CIRCNetwork::FindServer(const CString& sName) const {
    for (CServer* pServer : m_vServers) {
        if (sName.Equals(pServer->GetName())) {
            return pServer;
        }
    }
    return nullptr;
}

/* Remove a server from the list, matching on name and — when non-zero /
 * non-empty — on port and password as well.  Tracks whether the currently
 * connected server precedes the removed one so that m_uServerIdx can be
 * kept consistent (continued past this chunk boundary). */
bool CIRCNetwork::DelServer(const CString& sName, unsigned short uPort, const CString& sPass) {
    if (sName.empty()) {
        return false;
    }
    unsigned int a = 0;
    bool bSawCurrentServer = false;
    CServer* pCurServer = GetCurrentServer();
    for (vector<CServer*>::iterator it = m_vServers.begin(); it != m_vServers.end(); ++it, a++) {
        CServer* pServer = *it;
        if (pServer == pCurServer) bSawCurrentServer = true;
        if (!pServer->GetName().Equals(sName)) continue;
        if (uPort != 0 &&
pServer->GetPort() != uPort) continue; if (!sPass.empty() && pServer->GetPass() != sPass) continue; m_vServers.erase(it); if (pServer == pCurServer) { CIRCSock* pIRCSock = GetIRCSock(); // Make sure we don't skip the next server in the list! if (m_uServerIdx) { m_uServerIdx--; } if (pIRCSock) { pIRCSock->Quit(); PutStatus(t_s("Your current server was removed, jumping...")); } } else if (!bSawCurrentServer) { // Our current server comes after the server which we // are removing. This means that it now got a different // index in m_vServers! m_uServerIdx--; } delete pServer; return true; } return false; } bool CIRCNetwork::AddServer(const CString& sName) { if (sName.empty()) { return false; } bool bSSL = false; CString sLine = sName; sLine.Trim(); CString sHost = sLine.Token(0); CString sPort = sLine.Token(1); if (sPort.TrimPrefix("+")) { bSSL = true; } unsigned short uPort = sPort.ToUShort(); CString sPass = sLine.Token(2, true); return AddServer(sHost, uPort, sPass, bSSL); } bool CIRCNetwork::AddServer(const CString& sName, unsigned short uPort, const CString& sPass, bool bSSL) { #ifndef HAVE_LIBSSL if (bSSL) { return false; } #endif if (sName.empty()) { return false; } if (!uPort) { uPort = 6667; } // Check if server is already added for (CServer* pServer : m_vServers) { if (!sName.Equals(pServer->GetName())) continue; if (uPort != pServer->GetPort()) continue; if (sPass != pServer->GetPass()) continue; if (bSSL != pServer->IsSSL()) continue; // Server is already added return false; } CServer* pServer = new CServer(sName, uPort, sPass, bSSL); m_vServers.push_back(pServer); CheckIRCConnect(); return true; } CServer* CIRCNetwork::GetNextServer(bool bAdvance) { if (m_vServers.empty()) { return nullptr; } if (m_uServerIdx >= m_vServers.size()) { m_uServerIdx = 0; } if (bAdvance) { return m_vServers[m_uServerIdx++]; } else { return m_vServers[m_uServerIdx]; } } CServer* CIRCNetwork::GetCurrentServer() const { size_t uIdx = (m_uServerIdx) ? 
m_uServerIdx - 1 : 0; if (uIdx >= m_vServers.size()) { return nullptr; } return m_vServers[uIdx]; } void CIRCNetwork::SetIRCServer(const CString& s) { m_sIRCServer = s; } bool CIRCNetwork::SetNextServer(const CServer* pServer) { for (unsigned int a = 0; a < m_vServers.size(); a++) { if (m_vServers[a] == pServer) { m_uServerIdx = a; return true; } } return false; } bool CIRCNetwork::IsLastServer() const { return (m_uServerIdx >= m_vServers.size()); } const CString& CIRCNetwork::GetIRCServer() const { return m_sIRCServer; } const CNick& CIRCNetwork::GetIRCNick() const { return m_IRCNick; } void CIRCNetwork::SetIRCNick(const CNick& n) { m_IRCNick = n; for (CClient* pClient : m_vClients) { pClient->SetNick(n.GetNick()); } } CString CIRCNetwork::GetCurNick() const { const CIRCSock* pIRCSock = GetIRCSock(); if (pIRCSock) { return pIRCSock->GetNick(); } if (!m_vClients.empty()) { return m_vClients[0]->GetNick(); } return ""; } bool CIRCNetwork::Connect() { if (!GetIRCConnectEnabled() || m_pIRCSock || !HasServers()) return false; CServer* pServer = GetNextServer(); if (!pServer) return false; if (CZNC::Get().GetServerThrottle(pServer->GetName())) { // Can't connect right now, schedule retry later CZNC::Get().AddNetworkToQueue(this); return false; } CZNC::Get().AddServerThrottle(pServer->GetName()); bool bSSL = pServer->IsSSL(); #ifndef HAVE_LIBSSL if (bSSL) { PutStatus( t_f("Cannot connect to {1}, because ZNC is not compiled with SSL " "support.")(pServer->GetString(false))); CZNC::Get().AddNetworkToQueue(this); return false; } #endif CIRCSock* pIRCSock = new CIRCSock(this); pIRCSock->SetPass(pServer->GetPass()); pIRCSock->SetSSLTrustedPeerFingerprints(m_ssTrustedFingerprints); pIRCSock->SetTrustAllCerts(GetTrustAllCerts()); pIRCSock->SetTrustPKI(GetTrustPKI()); DEBUG("Connecting user/network [" << m_pUser->GetUserName() << "/" << m_sName << "]"); bool bAbort = false; NETWORKMODULECALL(OnIRCConnecting(pIRCSock), m_pUser, this, nullptr, &bAbort); if (bAbort) { DEBUG("Some 
module aborted the connection attempt"); PutStatus(t_s("Some module aborted the connection attempt")); delete pIRCSock; CZNC::Get().AddNetworkToQueue(this); return false; } CString sSockName = "IRC::" + m_pUser->GetUserName() + "::" + m_sName; CZNC::Get().GetManager().Connect(pServer->GetName(), pServer->GetPort(), sSockName, 120, bSSL, GetBindHost(), pIRCSock); return true; } bool CIRCNetwork::IsIRCConnected() const { const CIRCSock* pSock = GetIRCSock(); return (pSock && pSock->IsAuthed()); } void CIRCNetwork::SetIRCSocket(CIRCSock* pIRCSock) { m_pIRCSock = pIRCSock; } void CIRCNetwork::IRCConnected() { const SCString& ssCaps = m_pIRCSock->GetAcceptedCaps(); for (CClient* pClient : m_vClients) { pClient->NotifyServerDependentCaps(ssCaps); } if (m_uJoinDelay > 0) { m_pJoinTimer->Delay(m_uJoinDelay); } else { JoinChans(); } } void CIRCNetwork::IRCDisconnected() { for (CClient* pClient : m_vClients) { pClient->ClearServerDependentCaps(); } m_pIRCSock = nullptr; SetIRCServer(""); m_bIRCAway = false; // Get the reconnect going CheckIRCConnect(); } void CIRCNetwork::SetIRCConnectEnabled(bool b) { m_bIRCConnectEnabled = b; if (m_bIRCConnectEnabled) { CheckIRCConnect(); } else if (GetIRCSock()) { if (GetIRCSock()->IsConnected()) { GetIRCSock()->Quit(); } else { GetIRCSock()->Close(); } } } void CIRCNetwork::CheckIRCConnect() { // Do we want to connect? 
    // Queue a (re)connect only when connecting is enabled and no IRC
    // socket currently exists (tail of CheckIRCConnect()).
    if (GetIRCConnectEnabled() && GetIRCSock() == nullptr) CZNC::Get().AddNetworkToQueue(this); }

/* Send a raw line to the IRC server.
 * Returns false when there is no active IRC socket. */
bool CIRCNetwork::PutIRC(const CString& sLine) {
    CIRCSock* pIRCSock = GetIRCSock();
    if (!pIRCSock) {
        return false;
    }
    pIRCSock->PutIRC(sLine);
    return true;
}

/* Send a structured CMessage to the IRC server.
 * Returns false when there is no active IRC socket. */
bool CIRCNetwork::PutIRC(const CMessage& Message) {
    CIRCSock* pIRCSock = GetIRCSock();
    if (!pIRCSock) {
        return false;
    }
    pIRCSock->PutIRC(Message);
    return true;
}

/* Destroy all query buffers and empty the query list. */
void CIRCNetwork::ClearQueryBuffer() {
    std::for_each(m_vQueries.begin(), m_vQueries.end(), std::default_delete<CQuery>());
    m_vQueries.clear();
}

// Identity accessors: each network-level value falls back to the owning
// user's setting when the network-local override is empty.

const CString& CIRCNetwork::GetNick(const bool bAllowDefault) const {
    if (m_sNick.empty()) {
        return m_pUser->GetNick(bAllowDefault);
    }
    return m_sNick;
}

const CString& CIRCNetwork::GetAltNick(const bool bAllowDefault) const {
    if (m_sAltNick.empty()) {
        return m_pUser->GetAltNick(bAllowDefault);
    }
    return m_sAltNick;
}

const CString& CIRCNetwork::GetIdent(const bool bAllowDefault) const {
    if (m_sIdent.empty()) {
        return m_pUser->GetIdent(bAllowDefault);
    }
    return m_sIdent;
}

CString CIRCNetwork::GetRealName() const {
    if (m_sRealName.empty()) {
        return m_pUser->GetRealName();
    }
    return m_sRealName;
}

const CString& CIRCNetwork::GetBindHost() const {
    if (m_sBindHost.empty()) {
        return m_pUser->GetBindHost();
    }
    return m_sBindHost;
}

/* Encoding has no user-level fallback; empty means "use default". */
const CString& CIRCNetwork::GetEncoding() const { return m_sEncoding; }

CString CIRCNetwork::GetQuitMsg() const {
    if (m_sQuitMsg.empty()) {
        return m_pUser->GetQuitMsg();
    }
    return m_sQuitMsg;
}

// Identity mutators: storing a value equal to the user-level default
// clears the network-local override (so future user-level changes apply).

void CIRCNetwork::SetNick(const CString& s) {
    if (m_pUser->GetNick().Equals(s)) {
        m_sNick = "";
    } else {
        m_sNick = s;
    }
}

void CIRCNetwork::SetAltNick(const CString& s) {
    if (m_pUser->GetAltNick().Equals(s)) {
        m_sAltNick = "";
    } else {
        m_sAltNick = s;
    }
}

void CIRCNetwork::SetIdent(const CString& s) {
    if (m_pUser->GetIdent().Equals(s)) {
        m_sIdent = "";
    } else {
        m_sIdent = s;
    }
}

/* SetRealName continues on the next chunk line. */
void CIRCNetwork::SetRealName(const CString& s) {
    if (m_pUser->GetRealName().Equals(s)) {
        m_sRealName = "";
    } else {
        m_sRealName = s;
} } void CIRCNetwork::SetBindHost(const CString& s) { if (m_pUser->GetBindHost().Equals(s)) { m_sBindHost = ""; } else { m_sBindHost = s; } } void CIRCNetwork::SetEncoding(const CString& s) { m_sEncoding = s; if (GetIRCSock()) { GetIRCSock()->SetEncoding(s); } } void CIRCNetwork::SetQuitMsg(const CString& s) { if (m_pUser->GetQuitMsg().Equals(s)) { m_sQuitMsg = ""; } else { m_sQuitMsg = s; } } CString CIRCNetwork::ExpandString(const CString& sStr) const { CString sRet; return ExpandString(sStr, sRet); } CString& CIRCNetwork::ExpandString(const CString& sStr, CString& sRet) const { sRet = sStr; sRet.Replace("%altnick%", GetAltNick()); sRet.Replace("%bindhost%", GetBindHost()); sRet.Replace("%defnick%", GetNick()); sRet.Replace("%ident%", GetIdent()); sRet.Replace("%network%", GetName()); sRet.Replace("%nick%", GetCurNick()); sRet.Replace("%realname%", GetRealName()); return m_pUser->ExpandString(sRet, sRet); } bool CIRCNetwork::LoadModule(const CString& sModName, const CString& sArgs, const CString& sNotice, CString& sError) { CUtils::PrintAction(sNotice); CString sModRet; bool bModRet = GetModules().LoadModule( sModName, sArgs, CModInfo::NetworkModule, GetUser(), this, sModRet); CUtils::PrintStatus(bModRet, sModRet); if (!bModRet) { sError = sModRet; } return bModRet; }
/* * Copyright (C) 2004-2018 ZNC, see the NOTICE file for details. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <znc/IRCNetwork.h> #include <znc/User.h> #include <znc/FileUtils.h> #include <znc/Config.h> #include <znc/IRCSock.h> #include <znc/Server.h> #include <znc/Chan.h> #include <znc/Query.h> #include <znc/Message.h> #include <algorithm> #include <memory> using std::vector; using std::set; class CIRCNetworkPingTimer : public CCron { public: CIRCNetworkPingTimer(CIRCNetwork* pNetwork) : CCron(), m_pNetwork(pNetwork) { SetName("CIRCNetworkPingTimer::" + m_pNetwork->GetUser()->GetUserName() + "::" + m_pNetwork->GetName()); Start(m_pNetwork->GetUser()->GetPingSlack()); } ~CIRCNetworkPingTimer() override {} CIRCNetworkPingTimer(const CIRCNetworkPingTimer&) = delete; CIRCNetworkPingTimer& operator=(const CIRCNetworkPingTimer&) = delete; protected: void RunJob() override { CIRCSock* pIRCSock = m_pNetwork->GetIRCSock(); auto uFrequency = m_pNetwork->GetUser()->GetPingFrequency(); if (pIRCSock && pIRCSock->GetTimeSinceLastDataTransaction() >= uFrequency) { pIRCSock->PutIRC("PING :ZNC"); } const vector<CClient*>& vClients = m_pNetwork->GetClients(); for (CClient* pClient : vClients) { if (pClient->GetTimeSinceLastDataTransaction() >= uFrequency) { pClient->PutClient("PING :ZNC"); } } // Restart timer for the case if the period had changed. 
Usually this is // noop Start(m_pNetwork->GetUser()->GetPingSlack()); } private: CIRCNetwork* m_pNetwork; }; class CIRCNetworkJoinTimer : public CCron { constexpr static int JOIN_FREQUENCY = 30 /* seconds */; public: CIRCNetworkJoinTimer(CIRCNetwork* pNetwork) : CCron(), m_bDelayed(false), m_pNetwork(pNetwork) { SetName("CIRCNetworkJoinTimer::" + m_pNetwork->GetUser()->GetUserName() + "::" + m_pNetwork->GetName()); Start(JOIN_FREQUENCY); } ~CIRCNetworkJoinTimer() override {} CIRCNetworkJoinTimer(const CIRCNetworkJoinTimer&) = delete; CIRCNetworkJoinTimer& operator=(const CIRCNetworkJoinTimer&) = delete; void Delay(unsigned short int uDelay) { m_bDelayed = true; Start(uDelay); } protected: void RunJob() override { if (m_bDelayed) { m_bDelayed = false; Start(JOIN_FREQUENCY); } if (m_pNetwork->IsIRCConnected()) { m_pNetwork->JoinChans(); } } private: bool m_bDelayed; CIRCNetwork* m_pNetwork; }; bool CIRCNetwork::IsValidNetwork(const CString& sNetwork) { // ^[-\w]+$ if (sNetwork.empty()) { return false; } const char* p = sNetwork.c_str(); while (*p) { if (*p != '_' && *p != '-' && !isalnum(*p)) { return false; } p++; } return true; } CIRCNetwork::CIRCNetwork(CUser* pUser, const CString& sName) : m_sName(sName), m_pUser(nullptr), m_sNick(""), m_sAltNick(""), m_sIdent(""), m_sRealName(""), m_sBindHost(""), m_sEncoding(""), m_sQuitMsg(""), m_ssTrustedFingerprints(), m_pModules(new CModules), m_vClients(), m_pIRCSock(nullptr), m_vChans(), m_vQueries(), m_sChanPrefixes(""), m_bIRCConnectEnabled(true), m_bTrustAllCerts(false), m_bTrustPKI(true), m_sIRCServer(""), m_vServers(), m_uServerIdx(0), m_IRCNick(), m_bIRCAway(false), m_fFloodRate(2), m_uFloodBurst(9), m_RawBuffer(), m_MotdBuffer(), m_NoticeBuffer(), m_pPingTimer(nullptr), m_pJoinTimer(nullptr), m_uJoinDelay(0), m_uBytesRead(0), m_uBytesWritten(0) { SetUser(pUser); // This should be more than enough raws, especially since we are buffering // the MOTD separately m_RawBuffer.SetLineCount(100, true); // This should be 
more than enough motd lines m_MotdBuffer.SetLineCount(200, true); m_NoticeBuffer.SetLineCount(250, true); m_pPingTimer = new CIRCNetworkPingTimer(this); CZNC::Get().GetManager().AddCron(m_pPingTimer); m_pJoinTimer = new CIRCNetworkJoinTimer(this); CZNC::Get().GetManager().AddCron(m_pJoinTimer); SetIRCConnectEnabled(true); } CIRCNetwork::CIRCNetwork(CUser* pUser, const CIRCNetwork& Network) : CIRCNetwork(pUser, "") { Clone(Network); } void CIRCNetwork::Clone(const CIRCNetwork& Network, bool bCloneName) { if (bCloneName) { m_sName = Network.GetName(); } m_fFloodRate = Network.GetFloodRate(); m_uFloodBurst = Network.GetFloodBurst(); m_uJoinDelay = Network.GetJoinDelay(); SetNick(Network.GetNick()); SetAltNick(Network.GetAltNick()); SetIdent(Network.GetIdent()); SetRealName(Network.GetRealName()); SetBindHost(Network.GetBindHost()); SetEncoding(Network.GetEncoding()); SetQuitMsg(Network.GetQuitMsg()); m_ssTrustedFingerprints = Network.m_ssTrustedFingerprints; // Servers const vector<CServer*>& vServers = Network.GetServers(); CString sServer; CServer* pCurServ = GetCurrentServer(); if (pCurServ) { sServer = pCurServ->GetName(); } DelServers(); for (CServer* pServer : vServers) { AddServer(pServer->GetName(), pServer->GetPort(), pServer->GetPass(), pServer->IsSSL()); } m_uServerIdx = 0; for (size_t a = 0; a < m_vServers.size(); a++) { if (sServer.Equals(m_vServers[a]->GetName())) { m_uServerIdx = a + 1; break; } } if (m_uServerIdx == 0) { m_uServerIdx = m_vServers.size(); CIRCSock* pSock = GetIRCSock(); if (pSock) { PutStatus( t_s("Jumping servers because this server is no longer in the " "list")); pSock->Quit(); } } // !Servers // Chans const vector<CChan*>& vChans = Network.GetChans(); for (CChan* pNewChan : vChans) { CChan* pChan = FindChan(pNewChan->GetName()); if (pChan) { pChan->SetInConfig(pNewChan->InConfig()); } else { AddChan(pNewChan->GetName(), pNewChan->InConfig()); } } for (CChan* pChan : m_vChans) { CChan* pNewChan = Network.FindChan(pChan->GetName()); if 
(!pNewChan) { pChan->SetInConfig(false); } else { pChan->Clone(*pNewChan); } } // !Chans // Modules set<CString> ssUnloadMods; CModules& vCurMods = GetModules(); const CModules& vNewMods = Network.GetModules(); for (CModule* pNewMod : vNewMods) { CString sModRet; CModule* pCurMod = vCurMods.FindModule(pNewMod->GetModName()); if (!pCurMod) { vCurMods.LoadModule(pNewMod->GetModName(), pNewMod->GetArgs(), CModInfo::NetworkModule, m_pUser, this, sModRet); } else if (pNewMod->GetArgs() != pCurMod->GetArgs()) { vCurMods.ReloadModule(pNewMod->GetModName(), pNewMod->GetArgs(), m_pUser, this, sModRet); } } for (CModule* pCurMod : vCurMods) { CModule* pNewMod = vNewMods.FindModule(pCurMod->GetModName()); if (!pNewMod) { ssUnloadMods.insert(pCurMod->GetModName()); } } for (const CString& sMod : ssUnloadMods) { vCurMods.UnloadModule(sMod); } // !Modules SetIRCConnectEnabled(Network.GetIRCConnectEnabled()); } CIRCNetwork::~CIRCNetwork() { if (m_pIRCSock) { CZNC::Get().GetManager().DelSockByAddr(m_pIRCSock); m_pIRCSock = nullptr; } // Delete clients while (!m_vClients.empty()) { CZNC::Get().GetManager().DelSockByAddr(m_vClients[0]); } m_vClients.clear(); // Delete servers DelServers(); // Delete modules (this unloads all modules) delete m_pModules; m_pModules = nullptr; // Delete Channels for (CChan* pChan : m_vChans) { delete pChan; } m_vChans.clear(); // Delete Queries for (CQuery* pQuery : m_vQueries) { delete pQuery; } m_vQueries.clear(); CUser* pUser = GetUser(); SetUser(nullptr); // Make sure we are not in the connection queue CZNC::Get().GetConnectionQueue().remove(this); CZNC::Get().GetManager().DelCronByAddr(m_pPingTimer); CZNC::Get().GetManager().DelCronByAddr(m_pJoinTimer); if (pUser) { pUser->AddBytesRead(m_uBytesRead); pUser->AddBytesWritten(m_uBytesWritten); } else { CZNC::Get().AddBytesRead(m_uBytesRead); CZNC::Get().AddBytesWritten(m_uBytesWritten); } } void CIRCNetwork::DelServers() { for (CServer* pServer : m_vServers) { delete pServer; } m_vServers.clear(); } 
CString CIRCNetwork::GetNetworkPath() const { CString sNetworkPath = m_pUser->GetUserPath() + "/networks/" + m_sName; if (!CFile::Exists(sNetworkPath)) { CDir::MakeDir(sNetworkPath); } return sNetworkPath; } template <class T> struct TOption { const char* name; void (CIRCNetwork::*pSetter)(T); }; bool CIRCNetwork::ParseConfig(CConfig* pConfig, CString& sError, bool bUpgrade) { VCString vsList; if (!bUpgrade) { TOption<const CString&> StringOptions[] = { {"nick", &CIRCNetwork::SetNick}, {"altnick", &CIRCNetwork::SetAltNick}, {"ident", &CIRCNetwork::SetIdent}, {"realname", &CIRCNetwork::SetRealName}, {"bindhost", &CIRCNetwork::SetBindHost}, {"encoding", &CIRCNetwork::SetEncoding}, {"quitmsg", &CIRCNetwork::SetQuitMsg}, }; TOption<bool> BoolOptions[] = { {"ircconnectenabled", &CIRCNetwork::SetIRCConnectEnabled}, {"trustallcerts", &CIRCNetwork::SetTrustAllCerts}, {"trustpki", &CIRCNetwork::SetTrustPKI}, }; TOption<double> DoubleOptions[] = { {"floodrate", &CIRCNetwork::SetFloodRate}, }; TOption<short unsigned int> SUIntOptions[] = { {"floodburst", &CIRCNetwork::SetFloodBurst}, {"joindelay", &CIRCNetwork::SetJoinDelay}, }; for (const auto& Option : StringOptions) { CString sValue; if (pConfig->FindStringEntry(Option.name, sValue)) (this->*Option.pSetter)(sValue); } for (const auto& Option : BoolOptions) { CString sValue; if (pConfig->FindStringEntry(Option.name, sValue)) (this->*Option.pSetter)(sValue.ToBool()); } for (const auto& Option : DoubleOptions) { double fValue; if (pConfig->FindDoubleEntry(Option.name, fValue)) (this->*Option.pSetter)(fValue); } for (const auto& Option : SUIntOptions) { unsigned short value; if (pConfig->FindUShortEntry(Option.name, value)) (this->*Option.pSetter)(value); } pConfig->FindStringVector("loadmodule", vsList); for (const CString& sValue : vsList) { CString sModName = sValue.Token(0); CString sNotice = "Loading network module [" + sModName + "]"; // XXX Legacy crap, added in ZNC 0.203, modified in 0.207 // Note that 0.203 == 0.207 
if (sModName == "away") { sNotice = "NOTICE: [away] was renamed, loading [awaystore] instead"; sModName = "awaystore"; } // XXX Legacy crap, added in ZNC 0.207 if (sModName == "autoaway") { sNotice = "NOTICE: [autoaway] was renamed, loading [awaystore] " "instead"; sModName = "awaystore"; } // XXX Legacy crap, added in 1.1; fakeonline module was dropped in // 1.0 and returned in 1.1 if (sModName == "fakeonline") { sNotice = "NOTICE: [fakeonline] was renamed, loading " "[modules_online] instead"; sModName = "modules_online"; } CString sModRet; CString sArgs = sValue.Token(1, true); bool bModRet = LoadModule(sModName, sArgs, sNotice, sModRet); if (!bModRet) { // XXX The awaynick module was retired in 1.6 (still available // as external module) if (sModName == "awaynick") { // load simple_away instead, unless it's already on the list bool bFound = false; for (const CString& sLoadMod : vsList) { if (sLoadMod.Token(0).Equals("simple_away")) { bFound = true; } } if (!bFound) { sNotice = "NOTICE: awaynick was retired, loading network " "module [simple_away] instead; if you still need " "awaynick, install it as an external module"; sModName = "simple_away"; // not a fatal error if simple_away is not available LoadModule(sModName, sArgs, sNotice, sModRet); } } else { sError = sModRet; return false; } } } } pConfig->FindStringVector("server", vsList); for (const CString& sServer : vsList) { CUtils::PrintAction("Adding server [" + sServer + "]"); CUtils::PrintStatus(AddServer(sServer)); } pConfig->FindStringVector("trustedserverfingerprint", vsList); for (const CString& sFP : vsList) { AddTrustedFingerprint(sFP); } pConfig->FindStringVector("chan", vsList); for (const CString& sChan : vsList) { AddChan(sChan, true); } CConfig::SubConfig subConf; CConfig::SubConfig::const_iterator subIt; pConfig->FindSubConfig("chan", subConf); for (subIt = subConf.begin(); subIt != subConf.end(); ++subIt) { const CString& sChanName = subIt->first; CConfig* pSubConf = 
subIt->second.m_pSubConfig; CChan* pChan = new CChan(sChanName, this, true, pSubConf); if (!pSubConf->empty()) { sError = "Unhandled lines in config for User [" + m_pUser->GetUserName() + "], Network [" + GetName() + "], Channel [" + sChanName + "]!"; CUtils::PrintError(sError); CZNC::DumpConfig(pSubConf); delete pChan; return false; } // Save the channel name, because AddChan // deletes the CChannel*, if adding fails sError = pChan->GetName(); if (!AddChan(pChan)) { sError = "Channel [" + sError + "] defined more than once"; CUtils::PrintError(sError); return false; } sError.clear(); } return true; } CConfig CIRCNetwork::ToConfig() const { CConfig config; if (!m_sNick.empty()) { config.AddKeyValuePair("Nick", m_sNick); } if (!m_sAltNick.empty()) { config.AddKeyValuePair("AltNick", m_sAltNick); } if (!m_sIdent.empty()) { config.AddKeyValuePair("Ident", m_sIdent); } if (!m_sRealName.empty()) { config.AddKeyValuePair("RealName", m_sRealName); } if (!m_sBindHost.empty()) { config.AddKeyValuePair("BindHost", m_sBindHost); } config.AddKeyValuePair("IRCConnectEnabled", CString(GetIRCConnectEnabled())); config.AddKeyValuePair("TrustAllCerts", CString(GetTrustAllCerts())); config.AddKeyValuePair("TrustPKI", CString(GetTrustPKI())); config.AddKeyValuePair("FloodRate", CString(GetFloodRate())); config.AddKeyValuePair("FloodBurst", CString(GetFloodBurst())); config.AddKeyValuePair("JoinDelay", CString(GetJoinDelay())); config.AddKeyValuePair("Encoding", m_sEncoding); if (!m_sQuitMsg.empty()) { config.AddKeyValuePair("QuitMsg", m_sQuitMsg); } // Modules const CModules& Mods = GetModules(); if (!Mods.empty()) { for (CModule* pMod : Mods) { CString sArgs = pMod->GetArgs(); if (!sArgs.empty()) { sArgs = " " + sArgs; } config.AddKeyValuePair("LoadModule", pMod->GetModName() + sArgs); } } // Servers for (CServer* pServer : m_vServers) { config.AddKeyValuePair("Server", pServer->GetString()); } for (const CString& sFP : m_ssTrustedFingerprints) { 
config.AddKeyValuePair("TrustedServerFingerprint", sFP); } // Chans for (CChan* pChan : m_vChans) { if (pChan->InConfig()) { config.AddSubConfig("Chan", pChan->GetName(), pChan->ToConfig()); } } return config; } void CIRCNetwork::BounceAllClients() { for (CClient* pClient : m_vClients) { pClient->BouncedOff(); } m_vClients.clear(); } bool CIRCNetwork::IsUserOnline() const { for (CClient* pClient : m_vClients) { if (!pClient->IsAway()) { return true; } } return false; } void CIRCNetwork::ClientConnected(CClient* pClient) { if (!m_pUser->MultiClients()) { BounceAllClients(); } m_vClients.push_back(pClient); size_t uIdx, uSize; if (m_pIRCSock) { pClient->NotifyServerDependentCaps(m_pIRCSock->GetAcceptedCaps()); } pClient->SetPlaybackActive(true); if (m_RawBuffer.IsEmpty()) { pClient->PutClient(":irc.znc.in 001 " + pClient->GetNick() + " :" + t_s("Welcome to ZNC")); } else { const CString& sClientNick = pClient->GetNick(false); MCString msParams; msParams["target"] = sClientNick; uSize = m_RawBuffer.Size(); for (uIdx = 0; uIdx < uSize; uIdx++) { pClient->PutClient(m_RawBuffer.GetLine(uIdx, *pClient, msParams)); } const CNick& Nick = GetIRCNick(); if (sClientNick != Nick.GetNick()) { // case-sensitive match pClient->PutClient(":" + sClientNick + "!" 
+ Nick.GetIdent() + "@" + Nick.GetHost() + " NICK :" + Nick.GetNick()); pClient->SetNick(Nick.GetNick()); } } MCString msParams; msParams["target"] = GetIRCNick().GetNick(); // Send the cached MOTD uSize = m_MotdBuffer.Size(); if (uSize > 0) { for (uIdx = 0; uIdx < uSize; uIdx++) { pClient->PutClient(m_MotdBuffer.GetLine(uIdx, *pClient, msParams)); } } if (GetIRCSock() != nullptr) { CString sUserMode(""); const set<char>& scUserModes = GetIRCSock()->GetUserModes(); for (char cMode : scUserModes) { sUserMode += cMode; } if (!sUserMode.empty()) { pClient->PutClient(":" + GetIRCNick().GetNickMask() + " MODE " + GetIRCNick().GetNick() + " :+" + sUserMode); } } if (m_bIRCAway) { // If they want to know their away reason they'll have to whois // themselves. At least we can tell them their away status... pClient->PutClient(":irc.znc.in 306 " + GetIRCNick().GetNick() + " :You have been marked as being away"); } const vector<CChan*>& vChans = GetChans(); for (CChan* pChan : vChans) { if ((pChan->IsOn()) && (!pChan->IsDetached())) { pChan->AttachUser(pClient); } } bool bClearQuery = m_pUser->AutoClearQueryBuffer(); for (CQuery* pQuery : m_vQueries) { pQuery->SendBuffer(pClient); if (bClearQuery) { delete pQuery; } } if (bClearQuery) { m_vQueries.clear(); } uSize = m_NoticeBuffer.Size(); for (uIdx = 0; uIdx < uSize; uIdx++) { const CBufLine& BufLine = m_NoticeBuffer.GetBufLine(uIdx); CMessage Message(BufLine.GetLine(*pClient, msParams)); Message.SetNetwork(this); Message.SetClient(pClient); Message.SetTime(BufLine.GetTime()); Message.SetTags(BufLine.GetTags()); bool bContinue = false; NETWORKMODULECALL(OnPrivBufferPlayMessage(Message), m_pUser, this, nullptr, &bContinue); if (bContinue) continue; pClient->PutClient(Message); } m_NoticeBuffer.Clear(); pClient->SetPlaybackActive(false); // Tell them why they won't connect if (!GetIRCConnectEnabled()) pClient->PutStatus( t_s("You are currently disconnected from IRC. 
Use 'connect' to " "reconnect.")); } void CIRCNetwork::ClientDisconnected(CClient* pClient) { auto it = std::find(m_vClients.begin(), m_vClients.end(), pClient); if (it != m_vClients.end()) { m_vClients.erase(it); } } CUser* CIRCNetwork::GetUser() const { return m_pUser; } const CString& CIRCNetwork::GetName() const { return m_sName; } std::vector<CClient*> CIRCNetwork::FindClients( const CString& sIdentifier) const { std::vector<CClient*> vClients; for (CClient* pClient : m_vClients) { if (pClient->GetIdentifier().Equals(sIdentifier)) { vClients.push_back(pClient); } } return vClients; } void CIRCNetwork::SetUser(CUser* pUser) { for (CClient* pClient : m_vClients) { pClient->PutStatus( t_s("This network is being deleted or moved to another user.")); pClient->SetNetwork(nullptr); } m_vClients.clear(); if (m_pUser) { m_pUser->RemoveNetwork(this); } m_pUser = pUser; if (m_pUser) { m_pUser->AddNetwork(this); } } bool CIRCNetwork::SetName(const CString& sName) { if (IsValidNetwork(sName)) { m_sName = sName; return true; } return false; } bool CIRCNetwork::PutUser(const CString& sLine, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutClient(sLine); if (pClient) { return true; } } } return (pClient == nullptr); } bool CIRCNetwork::PutUser(const CMessage& Message, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutClient(Message); if (pClient) { return true; } } } return (pClient == nullptr); } bool CIRCNetwork::PutStatus(const CString& sLine, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutStatus(sLine); if (pClient) { return true; } } } return (pClient == nullptr); } bool CIRCNetwork::PutModule(const 
CString& sModule, const CString& sLine, CClient* pClient, CClient* pSkipClient) { for (CClient* pEachClient : m_vClients) { if ((!pClient || pClient == pEachClient) && pSkipClient != pEachClient) { pEachClient->PutModule(sModule, sLine); if (pClient) { return true; } } } return (pClient == nullptr); } // Channels const vector<CChan*>& CIRCNetwork::GetChans() const { return m_vChans; } CChan* CIRCNetwork::FindChan(CString sName) const { if (GetIRCSock()) { // See // https://tools.ietf.org/html/draft-brocklesby-irc-isupport-03#section-3.16 sName.TrimLeft(GetIRCSock()->GetISupport("STATUSMSG", "")); } for (CChan* pChan : m_vChans) { if (sName.Equals(pChan->GetName())) { return pChan; } } return nullptr; } std::vector<CChan*> CIRCNetwork::FindChans(const CString& sWild) const { std::vector<CChan*> vChans; vChans.reserve(m_vChans.size()); const CString sLower = sWild.AsLower(); for (CChan* pChan : m_vChans) { if (pChan->GetName().AsLower().WildCmp(sLower)) vChans.push_back(pChan); } return vChans; } bool CIRCNetwork::AddChan(CChan* pChan) { if (!pChan) { return false; } for (CChan* pEachChan : m_vChans) { if (pEachChan->GetName().Equals(pChan->GetName())) { delete pChan; return false; } } m_vChans.push_back(pChan); return true; } bool CIRCNetwork::AddChan(const CString& sName, bool bInConfig) { if (sName.empty() || FindChan(sName)) { return false; } CChan* pChan = new CChan(sName, this, bInConfig); m_vChans.push_back(pChan); return true; } bool CIRCNetwork::DelChan(const CString& sName) { for (vector<CChan*>::iterator a = m_vChans.begin(); a != m_vChans.end(); ++a) { if (sName.Equals((*a)->GetName())) { delete *a; m_vChans.erase(a); return true; } } return false; } void CIRCNetwork::JoinChans() { // Avoid divsion by zero, it's bad! if (m_vChans.empty()) return; // We start at a random offset into the channel list so that if your // first 3 channels are invite-only and you got MaxJoins == 3, ZNC will // still be able to join the rest of your channels. 
unsigned int start = rand() % m_vChans.size(); unsigned int uJoins = m_pUser->MaxJoins(); set<CChan*> sChans; for (unsigned int a = 0; a < m_vChans.size(); a++) { unsigned int idx = (start + a) % m_vChans.size(); CChan* pChan = m_vChans[idx]; if (!pChan->IsOn() && !pChan->IsDisabled()) { if (!JoinChan(pChan)) continue; sChans.insert(pChan); // Limit the number of joins if (uJoins != 0 && --uJoins == 0) { // Reset the timer. m_pJoinTimer->Reset(); break; } } } while (!sChans.empty()) JoinChans(sChans); } void CIRCNetwork::JoinChans(set<CChan*>& sChans) { CString sKeys, sJoin; bool bHaveKey = false; size_t uiJoinLength = strlen("JOIN "); while (!sChans.empty()) { set<CChan*>::iterator it = sChans.begin(); const CString& sName = (*it)->GetName(); const CString& sKey = (*it)->GetKey(); size_t len = sName.length() + sKey.length(); len += 2; // two comma if (!sKeys.empty() && uiJoinLength + len >= 512) break; if (!sJoin.empty()) { sJoin += ","; sKeys += ","; } uiJoinLength += len; sJoin += sName; if (!sKey.empty()) { sKeys += sKey; bHaveKey = true; } sChans.erase(it); } if (bHaveKey) PutIRC("JOIN " + sJoin + " " + sKeys); else PutIRC("JOIN " + sJoin); } bool CIRCNetwork::JoinChan(CChan* pChan) { bool bReturn = false; NETWORKMODULECALL(OnJoining(*pChan), m_pUser, this, nullptr, &bReturn); if (bReturn) return false; if (m_pUser->JoinTries() != 0 && pChan->GetJoinTries() >= m_pUser->JoinTries()) { PutStatus(t_f("The channel {1} could not be joined, disabling it.")( pChan->GetName())); pChan->Disable(); } else { pChan->IncJoinTries(); bool bFailed = false; NETWORKMODULECALL(OnTimerAutoJoin(*pChan), m_pUser, this, nullptr, &bFailed); if (bFailed) return false; return true; } return false; } bool CIRCNetwork::IsChan(const CString& sChan) const { if (sChan.empty()) return false; // There is no way this is a chan if (GetChanPrefixes().empty()) return true; // We can't know, so we allow everything // Thanks to the above if (empty), we can do sChan[0] return 
GetChanPrefixes().find(sChan[0]) != CString::npos; } // Queries const vector<CQuery*>& CIRCNetwork::GetQueries() const { return m_vQueries; } CQuery* CIRCNetwork::FindQuery(const CString& sName) const { for (CQuery* pQuery : m_vQueries) { if (sName.Equals(pQuery->GetName())) { return pQuery; } } return nullptr; } std::vector<CQuery*> CIRCNetwork::FindQueries(const CString& sWild) const { std::vector<CQuery*> vQueries; vQueries.reserve(m_vQueries.size()); const CString sLower = sWild.AsLower(); for (CQuery* pQuery : m_vQueries) { if (pQuery->GetName().AsLower().WildCmp(sLower)) vQueries.push_back(pQuery); } return vQueries; } CQuery* CIRCNetwork::AddQuery(const CString& sName) { if (sName.empty()) { return nullptr; } CQuery* pQuery = FindQuery(sName); if (!pQuery) { pQuery = new CQuery(sName, this); m_vQueries.push_back(pQuery); if (m_pUser->MaxQueryBuffers() > 0) { while (m_vQueries.size() > m_pUser->MaxQueryBuffers()) { delete *m_vQueries.begin(); m_vQueries.erase(m_vQueries.begin()); } } } return pQuery; } bool CIRCNetwork::DelQuery(const CString& sName) { for (vector<CQuery*>::iterator a = m_vQueries.begin(); a != m_vQueries.end(); ++a) { if (sName.Equals((*a)->GetName())) { delete *a; m_vQueries.erase(a); return true; } } return false; } // Server list const vector<CServer*>& CIRCNetwork::GetServers() const { return m_vServers; } CServer* CIRCNetwork::FindServer(const CString& sName) const { for (CServer* pServer : m_vServers) { if (sName.Equals(pServer->GetName())) { return pServer; } } return nullptr; } bool CIRCNetwork::DelServer(const CString& sName, unsigned short uPort, const CString& sPass) { if (sName.empty()) { return false; } unsigned int a = 0; bool bSawCurrentServer = false; CServer* pCurServer = GetCurrentServer(); for (vector<CServer*>::iterator it = m_vServers.begin(); it != m_vServers.end(); ++it, a++) { CServer* pServer = *it; if (pServer == pCurServer) bSawCurrentServer = true; if (!pServer->GetName().Equals(sName)) continue; if (uPort != 0 && 
pServer->GetPort() != uPort) continue; if (!sPass.empty() && pServer->GetPass() != sPass) continue; m_vServers.erase(it); if (pServer == pCurServer) { CIRCSock* pIRCSock = GetIRCSock(); // Make sure we don't skip the next server in the list! if (m_uServerIdx) { m_uServerIdx--; } if (pIRCSock) { pIRCSock->Quit(); PutStatus(t_s("Your current server was removed, jumping...")); } } else if (!bSawCurrentServer) { // Our current server comes after the server which we // are removing. This means that it now got a different // index in m_vServers! m_uServerIdx--; } delete pServer; return true; } return false; } bool CIRCNetwork::AddServer(const CString& sName) { if (sName.empty()) { return false; } bool bSSL = false; CString sLine = sName; sLine.Trim(); CString sHost = sLine.Token(0); CString sPort = sLine.Token(1); if (sPort.TrimPrefix("+")) { bSSL = true; } unsigned short uPort = sPort.ToUShort(); CString sPass = sLine.Token(2, true); return AddServer(sHost, uPort, sPass, bSSL); } bool CIRCNetwork::AddServer(const CString& sName, unsigned short uPort, const CString& sPass, bool bSSL) { #ifndef HAVE_LIBSSL if (bSSL) { return false; } #endif if (sName.empty()) { return false; } if (!uPort) { uPort = 6667; } // Check if server is already added for (CServer* pServer : m_vServers) { if (!sName.Equals(pServer->GetName())) continue; if (uPort != pServer->GetPort()) continue; if (sPass != pServer->GetPass()) continue; if (bSSL != pServer->IsSSL()) continue; // Server is already added return false; } CServer* pServer = new CServer(sName, uPort, sPass, bSSL); m_vServers.push_back(pServer); CheckIRCConnect(); return true; } CServer* CIRCNetwork::GetNextServer(bool bAdvance) { if (m_vServers.empty()) { return nullptr; } if (m_uServerIdx >= m_vServers.size()) { m_uServerIdx = 0; } if (bAdvance) { return m_vServers[m_uServerIdx++]; } else { return m_vServers[m_uServerIdx]; } } CServer* CIRCNetwork::GetCurrentServer() const { size_t uIdx = (m_uServerIdx) ? 
m_uServerIdx - 1 : 0; if (uIdx >= m_vServers.size()) { return nullptr; } return m_vServers[uIdx]; } void CIRCNetwork::SetIRCServer(const CString& s) { m_sIRCServer = s; } bool CIRCNetwork::SetNextServer(const CServer* pServer) { for (unsigned int a = 0; a < m_vServers.size(); a++) { if (m_vServers[a] == pServer) { m_uServerIdx = a; return true; } } return false; } bool CIRCNetwork::IsLastServer() const { return (m_uServerIdx >= m_vServers.size()); } const CString& CIRCNetwork::GetIRCServer() const { return m_sIRCServer; } const CNick& CIRCNetwork::GetIRCNick() const { return m_IRCNick; } void CIRCNetwork::SetIRCNick(const CNick& n) { m_IRCNick = n; for (CClient* pClient : m_vClients) { pClient->SetNick(n.GetNick()); } } CString CIRCNetwork::GetCurNick() const { const CIRCSock* pIRCSock = GetIRCSock(); if (pIRCSock) { return pIRCSock->GetNick(); } if (!m_vClients.empty()) { return m_vClients[0]->GetNick(); } return ""; } bool CIRCNetwork::Connect() { if (!GetIRCConnectEnabled() || m_pIRCSock || !HasServers()) return false; CServer* pServer = GetNextServer(); if (!pServer) return false; if (CZNC::Get().GetServerThrottle(pServer->GetName())) { // Can't connect right now, schedule retry later CZNC::Get().AddNetworkToQueue(this); return false; } CZNC::Get().AddServerThrottle(pServer->GetName()); bool bSSL = pServer->IsSSL(); #ifndef HAVE_LIBSSL if (bSSL) { PutStatus( t_f("Cannot connect to {1}, because ZNC is not compiled with SSL " "support.")(pServer->GetString(false))); CZNC::Get().AddNetworkToQueue(this); return false; } #endif CIRCSock* pIRCSock = new CIRCSock(this); pIRCSock->SetPass(pServer->GetPass()); pIRCSock->SetSSLTrustedPeerFingerprints(m_ssTrustedFingerprints); pIRCSock->SetTrustAllCerts(GetTrustAllCerts()); pIRCSock->SetTrustPKI(GetTrustPKI()); DEBUG("Connecting user/network [" << m_pUser->GetUserName() << "/" << m_sName << "]"); bool bAbort = false; NETWORKMODULECALL(OnIRCConnecting(pIRCSock), m_pUser, this, nullptr, &bAbort); if (bAbort) { DEBUG("Some 
module aborted the connection attempt"); PutStatus(t_s("Some module aborted the connection attempt")); delete pIRCSock; CZNC::Get().AddNetworkToQueue(this); return false; } CString sSockName = "IRC::" + m_pUser->GetUserName() + "::" + m_sName; CZNC::Get().GetManager().Connect(pServer->GetName(), pServer->GetPort(), sSockName, 120, bSSL, GetBindHost(), pIRCSock); return true; } bool CIRCNetwork::IsIRCConnected() const { const CIRCSock* pSock = GetIRCSock(); return (pSock && pSock->IsAuthed()); } void CIRCNetwork::SetIRCSocket(CIRCSock* pIRCSock) { m_pIRCSock = pIRCSock; } void CIRCNetwork::IRCConnected() { const SCString& ssCaps = m_pIRCSock->GetAcceptedCaps(); for (CClient* pClient : m_vClients) { pClient->NotifyServerDependentCaps(ssCaps); } if (m_uJoinDelay > 0) { m_pJoinTimer->Delay(m_uJoinDelay); } else { JoinChans(); } } void CIRCNetwork::IRCDisconnected() { for (CClient* pClient : m_vClients) { pClient->ClearServerDependentCaps(); } m_pIRCSock = nullptr; SetIRCServer(""); m_bIRCAway = false; // Get the reconnect going CheckIRCConnect(); } void CIRCNetwork::SetIRCConnectEnabled(bool b) { m_bIRCConnectEnabled = b; if (m_bIRCConnectEnabled) { CheckIRCConnect(); } else if (GetIRCSock()) { if (GetIRCSock()->IsConnected()) { GetIRCSock()->Quit(); } else { GetIRCSock()->Close(); } } } void CIRCNetwork::CheckIRCConnect() { // Do we want to connect? 
if (GetIRCConnectEnabled() && GetIRCSock() == nullptr) CZNC::Get().AddNetworkToQueue(this); } bool CIRCNetwork::PutIRC(const CString& sLine) { CIRCSock* pIRCSock = GetIRCSock(); if (!pIRCSock) { return false; } pIRCSock->PutIRC(sLine); return true; } bool CIRCNetwork::PutIRC(const CMessage& Message) { CIRCSock* pIRCSock = GetIRCSock(); if (!pIRCSock) { return false; } pIRCSock->PutIRC(Message); return true; } void CIRCNetwork::ClearQueryBuffer() { std::for_each(m_vQueries.begin(), m_vQueries.end(), std::default_delete<CQuery>()); m_vQueries.clear(); } const CString& CIRCNetwork::GetNick(const bool bAllowDefault) const { if (m_sNick.empty()) { return m_pUser->GetNick(bAllowDefault); } return m_sNick; } const CString& CIRCNetwork::GetAltNick(const bool bAllowDefault) const { if (m_sAltNick.empty()) { return m_pUser->GetAltNick(bAllowDefault); } return m_sAltNick; } const CString& CIRCNetwork::GetIdent(const bool bAllowDefault) const { if (m_sIdent.empty()) { return m_pUser->GetIdent(bAllowDefault); } return m_sIdent; } CString CIRCNetwork::GetRealName() const { if (m_sRealName.empty()) { return m_pUser->GetRealName(); } return m_sRealName; } const CString& CIRCNetwork::GetBindHost() const { if (m_sBindHost.empty()) { return m_pUser->GetBindHost(); } return m_sBindHost; } const CString& CIRCNetwork::GetEncoding() const { return m_sEncoding; } CString CIRCNetwork::GetQuitMsg() const { if (m_sQuitMsg.empty()) { return m_pUser->GetQuitMsg(); } return m_sQuitMsg; } void CIRCNetwork::SetNick(const CString& s) { if (m_pUser->GetNick().Equals(s)) { m_sNick = ""; } else { m_sNick = s; } } void CIRCNetwork::SetAltNick(const CString& s) { if (m_pUser->GetAltNick().Equals(s)) { m_sAltNick = ""; } else { m_sAltNick = s; } } void CIRCNetwork::SetIdent(const CString& s) { if (m_pUser->GetIdent().Equals(s)) { m_sIdent = ""; } else { m_sIdent = s; } } void CIRCNetwork::SetRealName(const CString& s) { if (m_pUser->GetRealName().Equals(s)) { m_sRealName = ""; } else { m_sRealName = s; 
} } void CIRCNetwork::SetBindHost(const CString& s) { if (m_pUser->GetBindHost().Equals(s)) { m_sBindHost = ""; } else { m_sBindHost = s; } } void CIRCNetwork::SetEncoding(const CString& s) { m_sEncoding = CZNC::Get().FixupEncoding(s); if (GetIRCSock()) { GetIRCSock()->SetEncoding(m_sEncoding); } } void CIRCNetwork::SetQuitMsg(const CString& s) { if (m_pUser->GetQuitMsg().Equals(s)) { m_sQuitMsg = ""; } else { m_sQuitMsg = s; } } CString CIRCNetwork::ExpandString(const CString& sStr) const { CString sRet; return ExpandString(sStr, sRet); } CString& CIRCNetwork::ExpandString(const CString& sStr, CString& sRet) const { sRet = sStr; sRet.Replace("%altnick%", GetAltNick()); sRet.Replace("%bindhost%", GetBindHost()); sRet.Replace("%defnick%", GetNick()); sRet.Replace("%ident%", GetIdent()); sRet.Replace("%network%", GetName()); sRet.Replace("%nick%", GetCurNick()); sRet.Replace("%realname%", GetRealName()); return m_pUser->ExpandString(sRet, sRet); } bool CIRCNetwork::LoadModule(const CString& sModName, const CString& sArgs, const CString& sNotice, CString& sError) { CUtils::PrintAction(sNotice); CString sModRet; bool bModRet = GetModules().LoadModule( sModName, sArgs, CModInfo::NetworkModule, GetUser(), this, sModRet); CUtils::PrintStatus(bModRet, sModRet); if (!bModRet) { sError = sModRet; } return bModRet; }
void CIRCNetwork::SetEncoding(const CString& s) { m_sEncoding = s; if (GetIRCSock()) { GetIRCSock()->SetEncoding(s); } }
void CIRCNetwork::SetEncoding(const CString& s) { m_sEncoding = CZNC::Get().FixupEncoding(s); if (GetIRCSock()) { GetIRCSock()->SetEncoding(m_sEncoding); } }
{'added': [(1485, ' m_sEncoding = CZNC::Get().FixupEncoding(s);'), (1487, ' GetIRCSock()->SetEncoding(m_sEncoding);')], 'deleted': [(1485, ' m_sEncoding = s;'), (1487, ' GetIRCSock()->SetEncoding(s);')]}
2
2
1,168
7,640
6
32
2
https://github.com/znc/znc
CVE-2019-9917
CWE-20
1,105
algif_skcipher.c
C
skcipher_release
/* * algif_skcipher: User-space interface for skcipher algorithms * * This file provides the user-space API for symmetric key ciphers. * * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/scatterwalk.h> #include <crypto/skcipher.h> #include <crypto/if_alg.h> #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> #include <net/sock.h> struct skcipher_sg_list { struct list_head list; int cur; struct scatterlist sg[0]; }; struct skcipher_ctx { struct list_head tsgl; struct af_alg_sgl rsgl; void *iv; struct af_alg_completion completion; atomic_t inflight; size_t used; unsigned int len; bool more; bool merge; bool enc; struct skcipher_request req; }; struct skcipher_async_rsgl { struct af_alg_sgl sgl; struct list_head list; }; struct skcipher_async_req { struct kiocb *iocb; struct skcipher_async_rsgl first_sgl; struct list_head list; struct scatterlist *tsg; char iv[]; }; #define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))) #define GET_REQ_SIZE(ctx) \ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)) #define GET_IV_SIZE(ctx) \ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req)) #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ sizeof(struct scatterlist) - 1) static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) { struct skcipher_async_rsgl *rsgl, *tmp; struct scatterlist *sgl; struct scatterlist *sg; int i, n; list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) { af_alg_free_sg(&rsgl->sgl); if (rsgl != &sreq->first_sgl) kfree(rsgl); } sgl = sreq->tsg; n = sg_nents(sgl); 
for_each_sg(sgl, sg, n, i) put_page(sg_page(sg)); kfree(sreq->tsg); } static void skcipher_async_cb(struct crypto_async_request *req, int err) { struct sock *sk = req->data; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_async_req *sreq = GET_SREQ(req, ctx); struct kiocb *iocb = sreq->iocb; atomic_dec(&ctx->inflight); skcipher_free_async_sgls(sreq); kfree(req); iocb->ki_complete(iocb, err, err); } static inline int skcipher_sndbuf(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - ctx->used, 0); } static inline bool skcipher_writable(struct sock *sk) { return PAGE_SIZE <= skcipher_sndbuf(sk); } static int skcipher_alloc_sgl(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; struct scatterlist *sg = NULL; sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); if (!list_empty(&ctx->tsgl)) sg = sgl->sg; if (!sg || sgl->cur >= MAX_SGL_ENTS) { sgl = sock_kmalloc(sk, sizeof(*sgl) + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), GFP_KERNEL); if (!sgl) return -ENOMEM; sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); sgl->cur = 0; if (sg) sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); list_add_tail(&sgl->list, &ctx->tsgl); } return 0; } static void skcipher_pull_sgl(struct sock *sk, size_t used, int put) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; struct scatterlist *sg; int i; while (!list_empty(&ctx->tsgl)) { sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { size_t plen = min_t(size_t, used, sg[i].length); if (!sg_page(sg + i)) continue; sg[i].length -= plen; sg[i].offset += plen; used -= plen; ctx->used -= plen; if (sg[i].length) return; if (put) put_page(sg_page(sg + i)); sg_assign_page(sg + i, NULL); } list_del(&sgl->list); 
sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1)); } if (!ctx->used) ctx->merge = 0; } static void skcipher_free_sgl(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; skcipher_pull_sgl(sk, ctx->used, 1); } static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) { long timeout; DEFINE_WAIT(wait); int err = -ERESTARTSYS; if (flags & MSG_DONTWAIT) return -EAGAIN; sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); for (;;) { if (signal_pending(current)) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); timeout = MAX_SCHEDULE_TIMEOUT; if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) { err = 0; break; } } finish_wait(sk_sleep(sk), &wait); return err; } static void skcipher_wmem_wakeup(struct sock *sk) { struct socket_wq *wq; if (!skcipher_writable(sk)) return; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } static int skcipher_wait_for_data(struct sock *sk, unsigned flags) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; long timeout; DEFINE_WAIT(wait); int err = -ERESTARTSYS; if (flags & MSG_DONTWAIT) { return -EAGAIN; } sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); for (;;) { if (signal_pending(current)) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); timeout = MAX_SCHEDULE_TIMEOUT; if (sk_wait_event(sk, &timeout, ctx->used)) { err = 0; break; } } finish_wait(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return err; } static void skcipher_data_wakeup(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct socket_wq *wq; if (!ctx->used) return; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, 
SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); unsigned ivsize = crypto_skcipher_ivsize(tfm); struct skcipher_sg_list *sgl; struct af_alg_control con = {}; long copied = 0; bool enc = 0; bool init = 0; int err; int i; if (msg->msg_controllen) { err = af_alg_cmsg_send(msg, &con); if (err) return err; init = 1; switch (con.op) { case ALG_OP_ENCRYPT: enc = 1; break; case ALG_OP_DECRYPT: enc = 0; break; default: return -EINVAL; } if (con.iv && con.iv->ivlen != ivsize) return -EINVAL; } err = -EINVAL; lock_sock(sk); if (!ctx->more && ctx->used) goto unlock; if (init) { ctx->enc = enc; if (con.iv) memcpy(ctx->iv, con.iv->iv, ivsize); } while (size) { struct scatterlist *sg; unsigned long len = size; size_t plen; if (ctx->merge) { sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); sg = sgl->sg + sgl->cur - 1; len = min_t(unsigned long, len, PAGE_SIZE - sg->offset - sg->length); err = memcpy_from_msg(page_address(sg_page(sg)) + sg->offset + sg->length, msg, len); if (err) goto unlock; sg->length += len; ctx->merge = (sg->offset + sg->length) & (PAGE_SIZE - 1); ctx->used += len; copied += len; size -= len; continue; } if (!skcipher_writable(sk)) { err = skcipher_wait_for_wmem(sk, msg->msg_flags); if (err) goto unlock; } len = min_t(unsigned long, len, skcipher_sndbuf(sk)); err = skcipher_alloc_sgl(sk); if (err) goto unlock; sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); sg = sgl->sg; sg_unmark_end(sg + sgl->cur); do { i = sgl->cur; plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); err = -ENOMEM; if (!sg_page(sg + i)) goto unlock; err = memcpy_from_msg(page_address(sg_page(sg + i)), msg, plen); if (err) { __free_page(sg_page(sg + i)); sg_assign_page(sg + i, 
NULL); goto unlock; } sg[i].length = plen; len -= plen; ctx->used += plen; copied += plen; size -= plen; sgl->cur++; } while (len && sgl->cur < MAX_SGL_ENTS); if (!size) sg_mark_end(sg + sgl->cur - 1); ctx->merge = plen & (PAGE_SIZE - 1); } err = 0; ctx->more = msg->msg_flags & MSG_MORE; unlock: skcipher_data_wakeup(sk); release_sock(sk); return copied ?: err; } static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; int err = -EINVAL; if (flags & MSG_SENDPAGE_NOTLAST) flags |= MSG_MORE; lock_sock(sk); if (!ctx->more && ctx->used) goto unlock; if (!size) goto done; if (!skcipher_writable(sk)) { err = skcipher_wait_for_wmem(sk, flags); if (err) goto unlock; } err = skcipher_alloc_sgl(sk); if (err) goto unlock; ctx->merge = 0; sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); if (sgl->cur) sg_unmark_end(sgl->sg + sgl->cur - 1); sg_mark_end(sgl->sg + sgl->cur); get_page(page); sg_set_page(sgl->sg + sgl->cur, page, size, offset); sgl->cur++; ctx->used += size; done: ctx->more = flags & MSG_MORE; unlock: skcipher_data_wakeup(sk); release_sock(sk); return err ?: size; } static int skcipher_all_sg_nents(struct skcipher_ctx *ctx) { struct skcipher_sg_list *sgl; struct scatterlist *sg; int nents = 0; list_for_each_entry(sgl, &ctx->tsgl, list) { sg = sgl->sg; while (!sg->length) sg++; nents += sg_nents(sg); } return nents; } static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; struct scatterlist *sg; struct skcipher_async_req *sreq; struct skcipher_request *req; struct skcipher_async_rsgl *last_rsgl = NULL; unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); unsigned int reqlen = 
sizeof(struct skcipher_async_req) + GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); int err = -ENOMEM; bool mark = false; lock_sock(sk); req = kmalloc(reqlen, GFP_KERNEL); if (unlikely(!req)) goto unlock; sreq = GET_SREQ(req, ctx); sreq->iocb = msg->msg_iocb; memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl)); INIT_LIST_HEAD(&sreq->list); sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); if (unlikely(!sreq->tsg)) { kfree(req); goto unlock; } sg_init_table(sreq->tsg, tx_nents); memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req)); skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, skcipher_async_cb, sk); while (iov_iter_count(&msg->msg_iter)) { struct skcipher_async_rsgl *rsgl; int used; if (!ctx->used) { err = skcipher_wait_for_data(sk, flags); if (err) goto free; } sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; while (!sg->length) sg++; used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter)); used = min_t(unsigned long, used, sg->length); if (txbufs == tx_nents) { struct scatterlist *tmp; int x; /* Ran out of tx slots in async request * need to expand */ tmp = kcalloc(tx_nents * 2, sizeof(*tmp), GFP_KERNEL); if (!tmp) goto free; sg_init_table(tmp, tx_nents * 2); for (x = 0; x < tx_nents; x++) sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]), sreq->tsg[x].length, sreq->tsg[x].offset); kfree(sreq->tsg); sreq->tsg = tmp; tx_nents *= 2; mark = true; } /* Need to take over the tx sgl from ctx * to the asynch req - these sgls will be freed later */ sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length, sg->offset); if (list_empty(&sreq->list)) { rsgl = &sreq->first_sgl; list_add_tail(&rsgl->list, &sreq->list); } else { rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL); if (!rsgl) { err = -ENOMEM; goto free; } list_add_tail(&rsgl->list, &sreq->list); } used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used); err = used; if (used < 0) goto free; if 
(last_rsgl) af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); last_rsgl = rsgl; len += used; skcipher_pull_sgl(sk, used, 0); iov_iter_advance(&msg->msg_iter, used); } if (mark) sg_mark_end(sreq->tsg + txbufs - 1); skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, len, sreq->iv); err = ctx->enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); if (err == -EINPROGRESS) { atomic_inc(&ctx->inflight); err = -EIOCBQUEUED; goto unlock; } free: skcipher_free_async_sgls(sreq); kfree(req); unlock: skcipher_wmem_wakeup(sk); release_sock(sk); return err; } static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm( &ctx->req)); struct skcipher_sg_list *sgl; struct scatterlist *sg; int err = -EAGAIN; int used; long copied = 0; lock_sock(sk); while (msg_data_left(msg)) { sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; while (!sg->length) sg++; if (!ctx->used) { err = skcipher_wait_for_data(sk, flags); if (err) goto unlock; } used = min_t(unsigned long, ctx->used, msg_data_left(msg)); used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used); err = used; if (err < 0) goto unlock; if (ctx->more || used < ctx->used) used -= used % bs; err = -EINVAL; if (!used) goto free; skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, ctx->iv); err = af_alg_wait_for_completion( ctx->enc ? 
crypto_skcipher_encrypt(&ctx->req) : crypto_skcipher_decrypt(&ctx->req), &ctx->completion); free: af_alg_free_sg(&ctx->rsgl); if (err) goto unlock; copied += used; skcipher_pull_sgl(sk, used, 1); iov_iter_advance(&msg->msg_iter, used); } err = 0; unlock: skcipher_wmem_wakeup(sk); release_sock(sk); return copied ?: err; } static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? skcipher_recvmsg_async(sock, msg, flags) : skcipher_recvmsg_sync(sock, msg, flags); } static unsigned int skcipher_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; unsigned int mask; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; if (ctx->used) mask |= POLLIN | POLLRDNORM; if (skcipher_writable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; return mask; } static struct proto_ops algif_skcipher_ops = { .family = PF_ALG, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg, .sendpage = skcipher_sendpage, .recvmsg = skcipher_recvmsg, .poll = skcipher_poll, }; static void *skcipher_bind(const char *name, u32 type, u32 mask) { return crypto_alloc_skcipher(name, type, mask); } static void skcipher_release(void *private) { crypto_free_skcipher(private); } static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) { return crypto_skcipher_setkey(private, key, keylen); } static void skcipher_wait(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; int ctr = 0; while (atomic_read(&ctx->inflight) && ctr++ < 100) 
msleep(100); } static void skcipher_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); if (atomic_read(&ctx->inflight)) skcipher_wait(sk); skcipher_free_sgl(sk); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); } static int skcipher_accept_parent(void *private, struct sock *sk) { struct skcipher_ctx *ctx; struct alg_sock *ask = alg_sk(sk); unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private), GFP_KERNEL); if (!ctx->iv) { sock_kfree_s(sk, ctx, len); return -ENOMEM; } memset(ctx->iv, 0, crypto_skcipher_ivsize(private)); INIT_LIST_HEAD(&ctx->tsgl); ctx->len = len; ctx->used = 0; ctx->more = 0; ctx->merge = 0; ctx->enc = 0; atomic_set(&ctx->inflight, 0); af_alg_init_completion(&ctx->completion); ask->private = ctx; skcipher_request_set_tfm(&ctx->req, private); skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); sk->sk_destruct = skcipher_sock_destruct; return 0; } static const struct af_alg_type algif_type_skcipher = { .bind = skcipher_bind, .release = skcipher_release, .setkey = skcipher_setkey, .accept = skcipher_accept_parent, .ops = &algif_skcipher_ops, .name = "skcipher", .owner = THIS_MODULE }; static int __init algif_skcipher_init(void) { return af_alg_register_type(&algif_type_skcipher); } static void __exit algif_skcipher_exit(void) { int err = af_alg_unregister_type(&algif_type_skcipher); BUG_ON(err); } module_init(algif_skcipher_init); module_exit(algif_skcipher_exit); MODULE_LICENSE("GPL");
/* * algif_skcipher: User-space interface for skcipher algorithms * * This file provides the user-space API for symmetric key ciphers. * * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/scatterwalk.h> #include <crypto/skcipher.h> #include <crypto/if_alg.h> #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> #include <net/sock.h> struct skcipher_sg_list { struct list_head list; int cur; struct scatterlist sg[0]; }; struct skcipher_tfm { struct crypto_skcipher *skcipher; bool has_key; }; struct skcipher_ctx { struct list_head tsgl; struct af_alg_sgl rsgl; void *iv; struct af_alg_completion completion; atomic_t inflight; size_t used; unsigned int len; bool more; bool merge; bool enc; struct skcipher_request req; }; struct skcipher_async_rsgl { struct af_alg_sgl sgl; struct list_head list; }; struct skcipher_async_req { struct kiocb *iocb; struct skcipher_async_rsgl first_sgl; struct list_head list; struct scatterlist *tsg; char iv[]; }; #define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))) #define GET_REQ_SIZE(ctx) \ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)) #define GET_IV_SIZE(ctx) \ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req)) #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ sizeof(struct scatterlist) - 1) static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) { struct skcipher_async_rsgl *rsgl, *tmp; struct scatterlist *sgl; struct scatterlist *sg; int i, n; list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) { af_alg_free_sg(&rsgl->sgl); if (rsgl != 
&sreq->first_sgl) kfree(rsgl); } sgl = sreq->tsg; n = sg_nents(sgl); for_each_sg(sgl, sg, n, i) put_page(sg_page(sg)); kfree(sreq->tsg); } static void skcipher_async_cb(struct crypto_async_request *req, int err) { struct sock *sk = req->data; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_async_req *sreq = GET_SREQ(req, ctx); struct kiocb *iocb = sreq->iocb; atomic_dec(&ctx->inflight); skcipher_free_async_sgls(sreq); kfree(req); iocb->ki_complete(iocb, err, err); } static inline int skcipher_sndbuf(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - ctx->used, 0); } static inline bool skcipher_writable(struct sock *sk) { return PAGE_SIZE <= skcipher_sndbuf(sk); } static int skcipher_alloc_sgl(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; struct scatterlist *sg = NULL; sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); if (!list_empty(&ctx->tsgl)) sg = sgl->sg; if (!sg || sgl->cur >= MAX_SGL_ENTS) { sgl = sock_kmalloc(sk, sizeof(*sgl) + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), GFP_KERNEL); if (!sgl) return -ENOMEM; sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); sgl->cur = 0; if (sg) sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); list_add_tail(&sgl->list, &ctx->tsgl); } return 0; } static void skcipher_pull_sgl(struct sock *sk, size_t used, int put) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; struct scatterlist *sg; int i; while (!list_empty(&ctx->tsgl)) { sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { size_t plen = min_t(size_t, used, sg[i].length); if (!sg_page(sg + i)) continue; sg[i].length -= plen; sg[i].offset += plen; used -= plen; ctx->used -= plen; if (sg[i].length) return; if (put) 
put_page(sg_page(sg + i)); sg_assign_page(sg + i, NULL); } list_del(&sgl->list); sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1)); } if (!ctx->used) ctx->merge = 0; } static void skcipher_free_sgl(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; skcipher_pull_sgl(sk, ctx->used, 1); } static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) { long timeout; DEFINE_WAIT(wait); int err = -ERESTARTSYS; if (flags & MSG_DONTWAIT) return -EAGAIN; sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); for (;;) { if (signal_pending(current)) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); timeout = MAX_SCHEDULE_TIMEOUT; if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) { err = 0; break; } } finish_wait(sk_sleep(sk), &wait); return err; } static void skcipher_wmem_wakeup(struct sock *sk) { struct socket_wq *wq; if (!skcipher_writable(sk)) return; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } static int skcipher_wait_for_data(struct sock *sk, unsigned flags) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; long timeout; DEFINE_WAIT(wait); int err = -ERESTARTSYS; if (flags & MSG_DONTWAIT) { return -EAGAIN; } sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); for (;;) { if (signal_pending(current)) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); timeout = MAX_SCHEDULE_TIMEOUT; if (sk_wait_event(sk, &timeout, ctx->used)) { err = 0; break; } } finish_wait(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return err; } static void skcipher_data_wakeup(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct socket_wq *wq; if (!ctx->used) return; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) 
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); unsigned ivsize = crypto_skcipher_ivsize(tfm); struct skcipher_sg_list *sgl; struct af_alg_control con = {}; long copied = 0; bool enc = 0; bool init = 0; int err; int i; if (msg->msg_controllen) { err = af_alg_cmsg_send(msg, &con); if (err) return err; init = 1; switch (con.op) { case ALG_OP_ENCRYPT: enc = 1; break; case ALG_OP_DECRYPT: enc = 0; break; default: return -EINVAL; } if (con.iv && con.iv->ivlen != ivsize) return -EINVAL; } err = -EINVAL; lock_sock(sk); if (!ctx->more && ctx->used) goto unlock; if (init) { ctx->enc = enc; if (con.iv) memcpy(ctx->iv, con.iv->iv, ivsize); } while (size) { struct scatterlist *sg; unsigned long len = size; size_t plen; if (ctx->merge) { sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); sg = sgl->sg + sgl->cur - 1; len = min_t(unsigned long, len, PAGE_SIZE - sg->offset - sg->length); err = memcpy_from_msg(page_address(sg_page(sg)) + sg->offset + sg->length, msg, len); if (err) goto unlock; sg->length += len; ctx->merge = (sg->offset + sg->length) & (PAGE_SIZE - 1); ctx->used += len; copied += len; size -= len; continue; } if (!skcipher_writable(sk)) { err = skcipher_wait_for_wmem(sk, msg->msg_flags); if (err) goto unlock; } len = min_t(unsigned long, len, skcipher_sndbuf(sk)); err = skcipher_alloc_sgl(sk); if (err) goto unlock; sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); sg = sgl->sg; sg_unmark_end(sg + sgl->cur); do { i = sgl->cur; plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); err = -ENOMEM; if (!sg_page(sg + i)) goto unlock; err = 
memcpy_from_msg(page_address(sg_page(sg + i)), msg, plen); if (err) { __free_page(sg_page(sg + i)); sg_assign_page(sg + i, NULL); goto unlock; } sg[i].length = plen; len -= plen; ctx->used += plen; copied += plen; size -= plen; sgl->cur++; } while (len && sgl->cur < MAX_SGL_ENTS); if (!size) sg_mark_end(sg + sgl->cur - 1); ctx->merge = plen & (PAGE_SIZE - 1); } err = 0; ctx->more = msg->msg_flags & MSG_MORE; unlock: skcipher_data_wakeup(sk); release_sock(sk); return copied ?: err; } static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; int err = -EINVAL; if (flags & MSG_SENDPAGE_NOTLAST) flags |= MSG_MORE; lock_sock(sk); if (!ctx->more && ctx->used) goto unlock; if (!size) goto done; if (!skcipher_writable(sk)) { err = skcipher_wait_for_wmem(sk, flags); if (err) goto unlock; } err = skcipher_alloc_sgl(sk); if (err) goto unlock; ctx->merge = 0; sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); if (sgl->cur) sg_unmark_end(sgl->sg + sgl->cur - 1); sg_mark_end(sgl->sg + sgl->cur); get_page(page); sg_set_page(sgl->sg + sgl->cur, page, size, offset); sgl->cur++; ctx->used += size; done: ctx->more = flags & MSG_MORE; unlock: skcipher_data_wakeup(sk); release_sock(sk); return err ?: size; } static int skcipher_all_sg_nents(struct skcipher_ctx *ctx) { struct skcipher_sg_list *sgl; struct scatterlist *sg; int nents = 0; list_for_each_entry(sgl, &ctx->tsgl, list) { sg = sgl->sg; while (!sg->length) sg++; nents += sg_nents(sg); } return nents; } static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct skcipher_sg_list *sgl; struct scatterlist *sg; struct skcipher_async_req *sreq; struct skcipher_request *req; struct skcipher_async_rsgl 
*last_rsgl = NULL; unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); unsigned int reqlen = sizeof(struct skcipher_async_req) + GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); int err = -ENOMEM; bool mark = false; lock_sock(sk); req = kmalloc(reqlen, GFP_KERNEL); if (unlikely(!req)) goto unlock; sreq = GET_SREQ(req, ctx); sreq->iocb = msg->msg_iocb; memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl)); INIT_LIST_HEAD(&sreq->list); sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); if (unlikely(!sreq->tsg)) { kfree(req); goto unlock; } sg_init_table(sreq->tsg, tx_nents); memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req)); skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, skcipher_async_cb, sk); while (iov_iter_count(&msg->msg_iter)) { struct skcipher_async_rsgl *rsgl; int used; if (!ctx->used) { err = skcipher_wait_for_data(sk, flags); if (err) goto free; } sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; while (!sg->length) sg++; used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter)); used = min_t(unsigned long, used, sg->length); if (txbufs == tx_nents) { struct scatterlist *tmp; int x; /* Ran out of tx slots in async request * need to expand */ tmp = kcalloc(tx_nents * 2, sizeof(*tmp), GFP_KERNEL); if (!tmp) goto free; sg_init_table(tmp, tx_nents * 2); for (x = 0; x < tx_nents; x++) sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]), sreq->tsg[x].length, sreq->tsg[x].offset); kfree(sreq->tsg); sreq->tsg = tmp; tx_nents *= 2; mark = true; } /* Need to take over the tx sgl from ctx * to the asynch req - these sgls will be freed later */ sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length, sg->offset); if (list_empty(&sreq->list)) { rsgl = &sreq->first_sgl; list_add_tail(&rsgl->list, &sreq->list); } else { rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL); if (!rsgl) { err = -ENOMEM; goto free; } list_add_tail(&rsgl->list, 
&sreq->list); } used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used); err = used; if (used < 0) goto free; if (last_rsgl) af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); last_rsgl = rsgl; len += used; skcipher_pull_sgl(sk, used, 0); iov_iter_advance(&msg->msg_iter, used); } if (mark) sg_mark_end(sreq->tsg + txbufs - 1); skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, len, sreq->iv); err = ctx->enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); if (err == -EINPROGRESS) { atomic_inc(&ctx->inflight); err = -EIOCBQUEUED; goto unlock; } free: skcipher_free_async_sgls(sreq); kfree(req); unlock: skcipher_wmem_wakeup(sk); release_sock(sk); return err; } static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm( &ctx->req)); struct skcipher_sg_list *sgl; struct scatterlist *sg; int err = -EAGAIN; int used; long copied = 0; lock_sock(sk); while (msg_data_left(msg)) { sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, list); sg = sgl->sg; while (!sg->length) sg++; if (!ctx->used) { err = skcipher_wait_for_data(sk, flags); if (err) goto unlock; } used = min_t(unsigned long, ctx->used, msg_data_left(msg)); used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used); err = used; if (err < 0) goto unlock; if (ctx->more || used < ctx->used) used -= used % bs; err = -EINVAL; if (!used) goto free; skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, ctx->iv); err = af_alg_wait_for_completion( ctx->enc ? 
crypto_skcipher_encrypt(&ctx->req) : crypto_skcipher_decrypt(&ctx->req), &ctx->completion); free: af_alg_free_sg(&ctx->rsgl); if (err) goto unlock; copied += used; skcipher_pull_sgl(sk, used, 1); iov_iter_advance(&msg->msg_iter, used); } err = 0; unlock: skcipher_wmem_wakeup(sk); release_sock(sk); return copied ?: err; } static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? skcipher_recvmsg_async(sock, msg, flags) : skcipher_recvmsg_sync(sock, msg, flags); } static unsigned int skcipher_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; unsigned int mask; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; if (ctx->used) mask |= POLLIN | POLLRDNORM; if (skcipher_writable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; return mask; } static struct proto_ops algif_skcipher_ops = { .family = PF_ALG, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg, .sendpage = skcipher_sendpage, .recvmsg = skcipher_recvmsg, .poll = skcipher_poll, }; static void *skcipher_bind(const char *name, u32 type, u32 mask) { struct skcipher_tfm *tfm; struct crypto_skcipher *skcipher; tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); if (!tfm) return ERR_PTR(-ENOMEM); skcipher = crypto_alloc_skcipher(name, type, mask); if (IS_ERR(skcipher)) { kfree(tfm); return ERR_CAST(skcipher); } tfm->skcipher = skcipher; return tfm; } static void skcipher_release(void *private) { struct skcipher_tfm *tfm = private; crypto_free_skcipher(tfm->skcipher); kfree(tfm); } static int 
skcipher_setkey(void *private, const u8 *key, unsigned int keylen) { struct skcipher_tfm *tfm = private; int err; err = crypto_skcipher_setkey(tfm->skcipher, key, keylen); tfm->has_key = !err; return err; } static void skcipher_wait(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; int ctr = 0; while (atomic_read(&ctx->inflight) && ctr++ < 100) msleep(100); } static void skcipher_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); if (atomic_read(&ctx->inflight)) skcipher_wait(sk); skcipher_free_sgl(sk); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); } static int skcipher_accept_parent(void *private, struct sock *sk) { struct skcipher_ctx *ctx; struct alg_sock *ask = alg_sk(sk); struct skcipher_tfm *tfm = private; struct crypto_skcipher *skcipher = tfm->skcipher; unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher); if (!tfm->has_key) return -ENOKEY; ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher), GFP_KERNEL); if (!ctx->iv) { sock_kfree_s(sk, ctx, len); return -ENOMEM; } memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher)); INIT_LIST_HEAD(&ctx->tsgl); ctx->len = len; ctx->used = 0; ctx->more = 0; ctx->merge = 0; ctx->enc = 0; atomic_set(&ctx->inflight, 0); af_alg_init_completion(&ctx->completion); ask->private = ctx; skcipher_request_set_tfm(&ctx->req, skcipher); skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); sk->sk_destruct = skcipher_sock_destruct; return 0; } static const struct af_alg_type algif_type_skcipher = { .bind = skcipher_bind, .release = skcipher_release, .setkey = skcipher_setkey, .accept = skcipher_accept_parent, .ops = &algif_skcipher_ops, .name = "skcipher", .owner 
= THIS_MODULE }; static int __init algif_skcipher_init(void) { return af_alg_register_type(&algif_type_skcipher); } static void __exit algif_skcipher_exit(void) { int err = af_alg_unregister_type(&algif_type_skcipher); BUG_ON(err); } module_init(algif_skcipher_init); module_exit(algif_skcipher_exit); MODULE_LICENSE("GPL");
static void skcipher_release(void *private) { crypto_free_skcipher(private); }
static void skcipher_release(void *private) { struct skcipher_tfm *tfm = private; crypto_free_skcipher(tfm->skcipher); kfree(tfm); }
{'added': [(34, 'struct skcipher_tfm {'), (35, '\tstruct crypto_skcipher *skcipher;'), (36, '\tbool has_key;'), (37, '};'), (38, ''), (758, '\tstruct skcipher_tfm *tfm;'), (759, '\tstruct crypto_skcipher *skcipher;'), (760, ''), (761, '\ttfm = kzalloc(sizeof(*tfm), GFP_KERNEL);'), (762, '\tif (!tfm)'), (763, '\t\treturn ERR_PTR(-ENOMEM);'), (764, ''), (765, '\tskcipher = crypto_alloc_skcipher(name, type, mask);'), (766, '\tif (IS_ERR(skcipher)) {'), (767, '\t\tkfree(tfm);'), (768, '\t\treturn ERR_CAST(skcipher);'), (769, '\t}'), (770, ''), (771, '\ttfm->skcipher = skcipher;'), (772, ''), (773, '\treturn tfm;'), (778, '\tstruct skcipher_tfm *tfm = private;'), (779, ''), (780, '\tcrypto_free_skcipher(tfm->skcipher);'), (781, '\tkfree(tfm);'), (786, '\tstruct skcipher_tfm *tfm = private;'), (787, '\tint err;'), (788, ''), (789, '\terr = crypto_skcipher_setkey(tfm->skcipher, key, keylen);'), (790, '\ttfm->has_key = !err;'), (791, ''), (792, '\treturn err;'), (824, '\tstruct skcipher_tfm *tfm = private;'), (825, '\tstruct crypto_skcipher *skcipher = tfm->skcipher;'), (826, '\tunsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);'), (827, ''), (828, '\tif (!tfm->has_key)'), (829, '\t\treturn -ENOKEY;'), (835, '\tctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),'), (842, '\tmemset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));'), (855, '\tskcipher_request_set_tfm(&ctx->req, skcipher);')], 'deleted': [(753, '\treturn crypto_alloc_skcipher(name, type, mask);'), (758, '\tcrypto_free_skcipher(private);'), (763, '\treturn crypto_skcipher_setkey(private, key, keylen);'), (795, '\tunsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);'), (801, '\tctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),'), (808, '\tmemset(ctx->iv, 0, crypto_skcipher_ivsize(private));'), (821, '\tskcipher_request_set_tfm(&ctx->req, private);')]}
41
7
693
4,331
4
13
1
https://github.com/torvalds/linux
CVE-2015-8970
CWE-476
1,473
Python-ast.c
C
make_type
/* File automatically generated by Parser/asdl_c.py. */ #include <stddef.h> #include "Python.h" #include "Python-ast.h" static PyTypeObject AST_type; static PyTypeObject *mod_type; static PyObject* ast2obj_mod(void*); static PyTypeObject *Module_type; _Py_IDENTIFIER(body); _Py_IDENTIFIER(type_ignores); static char *Module_fields[]={ "body", "type_ignores", }; static PyTypeObject *Interactive_type; static char *Interactive_fields[]={ "body", }; static PyTypeObject *Expression_type; static char *Expression_fields[]={ "body", }; static PyTypeObject *FunctionType_type; _Py_IDENTIFIER(argtypes); _Py_IDENTIFIER(returns); static char *FunctionType_fields[]={ "argtypes", "returns", }; static PyTypeObject *Suite_type; static char *Suite_fields[]={ "body", }; static PyTypeObject *stmt_type; _Py_IDENTIFIER(lineno); _Py_IDENTIFIER(col_offset); static char *stmt_attributes[] = { "lineno", "col_offset", }; static PyObject* ast2obj_stmt(void*); static PyTypeObject *FunctionDef_type; _Py_IDENTIFIER(name); _Py_IDENTIFIER(args); _Py_IDENTIFIER(decorator_list); _Py_IDENTIFIER(type_comment); static char *FunctionDef_fields[]={ "name", "args", "body", "decorator_list", "returns", "type_comment", }; static PyTypeObject *AsyncFunctionDef_type; static char *AsyncFunctionDef_fields[]={ "name", "args", "body", "decorator_list", "returns", "type_comment", }; static PyTypeObject *ClassDef_type; _Py_IDENTIFIER(bases); _Py_IDENTIFIER(keywords); static char *ClassDef_fields[]={ "name", "bases", "keywords", "body", "decorator_list", }; static PyTypeObject *Return_type; _Py_IDENTIFIER(value); static char *Return_fields[]={ "value", }; static PyTypeObject *Delete_type; _Py_IDENTIFIER(targets); static char *Delete_fields[]={ "targets", }; static PyTypeObject *Assign_type; static char *Assign_fields[]={ "targets", "value", "type_comment", }; static PyTypeObject *AugAssign_type; _Py_IDENTIFIER(target); _Py_IDENTIFIER(op); static char *AugAssign_fields[]={ "target", "op", "value", }; static 
PyTypeObject *AnnAssign_type; _Py_IDENTIFIER(annotation); _Py_IDENTIFIER(simple); static char *AnnAssign_fields[]={ "target", "annotation", "value", "simple", }; static PyTypeObject *For_type; _Py_IDENTIFIER(iter); _Py_IDENTIFIER(orelse); static char *For_fields[]={ "target", "iter", "body", "orelse", "type_comment", }; static PyTypeObject *AsyncFor_type; static char *AsyncFor_fields[]={ "target", "iter", "body", "orelse", "type_comment", }; static PyTypeObject *While_type; _Py_IDENTIFIER(test); static char *While_fields[]={ "test", "body", "orelse", }; static PyTypeObject *If_type; static char *If_fields[]={ "test", "body", "orelse", }; static PyTypeObject *With_type; _Py_IDENTIFIER(items); static char *With_fields[]={ "items", "body", "type_comment", }; static PyTypeObject *AsyncWith_type; static char *AsyncWith_fields[]={ "items", "body", "type_comment", }; static PyTypeObject *Raise_type; _Py_IDENTIFIER(exc); _Py_IDENTIFIER(cause); static char *Raise_fields[]={ "exc", "cause", }; static PyTypeObject *Try_type; _Py_IDENTIFIER(handlers); _Py_IDENTIFIER(finalbody); static char *Try_fields[]={ "body", "handlers", "orelse", "finalbody", }; static PyTypeObject *Assert_type; _Py_IDENTIFIER(msg); static char *Assert_fields[]={ "test", "msg", }; static PyTypeObject *Import_type; _Py_IDENTIFIER(names); static char *Import_fields[]={ "names", }; static PyTypeObject *ImportFrom_type; _Py_IDENTIFIER(module); _Py_IDENTIFIER(level); static char *ImportFrom_fields[]={ "module", "names", "level", }; static PyTypeObject *Global_type; static char *Global_fields[]={ "names", }; static PyTypeObject *Nonlocal_type; static char *Nonlocal_fields[]={ "names", }; static PyTypeObject *Expr_type; static char *Expr_fields[]={ "value", }; static PyTypeObject *Pass_type; static PyTypeObject *Break_type; static PyTypeObject *Continue_type; static PyTypeObject *expr_type; static char *expr_attributes[] = { "lineno", "col_offset", }; static PyObject* ast2obj_expr(void*); static PyTypeObject 
/* Generated static declarations for the _ast3 extension module: one PyTypeObject*
 * per AST node kind, a `<Node>_fields[]` name table consumed by make_type(), and
 * interned-attribute handles via _Py_IDENTIFIER. NOTE(review): this file appears to
 * be auto-generated (asdl_c.py style) from an ASDL grammar — edit the generator,
 * not this output. The first declaration below continues a `static PyTypeObject`
 * that starts above this view. */
*BoolOp_type; _Py_IDENTIFIER(values); static char *BoolOp_fields[]={ "op", "values", }; static PyTypeObject *BinOp_type; _Py_IDENTIFIER(left); _Py_IDENTIFIER(right); static char *BinOp_fields[]={ "left", "op", "right", }; static PyTypeObject *UnaryOp_type; _Py_IDENTIFIER(operand); static char *UnaryOp_fields[]={ "op", "operand", }; static PyTypeObject *Lambda_type; static char *Lambda_fields[]={ "args", "body", }; static PyTypeObject *IfExp_type; static char *IfExp_fields[]={ "test", "body", "orelse", }; static PyTypeObject *Dict_type; _Py_IDENTIFIER(keys); static char *Dict_fields[]={ "keys", "values", }; static PyTypeObject *Set_type; _Py_IDENTIFIER(elts); static char *Set_fields[]={ "elts", }; static PyTypeObject *ListComp_type; _Py_IDENTIFIER(elt); _Py_IDENTIFIER(generators); static char *ListComp_fields[]={ "elt", "generators", }; static PyTypeObject *SetComp_type; static char *SetComp_fields[]={ "elt", "generators", }; static PyTypeObject *DictComp_type; _Py_IDENTIFIER(key); static char *DictComp_fields[]={ "key", "value", "generators", }; static PyTypeObject *GeneratorExp_type; static char *GeneratorExp_fields[]={ "elt", "generators", }; static PyTypeObject *Await_type; static char *Await_fields[]={ "value", }; static PyTypeObject *Yield_type; static char *Yield_fields[]={ "value", }; static PyTypeObject *YieldFrom_type; static char *YieldFrom_fields[]={ "value", }; static PyTypeObject *Compare_type; _Py_IDENTIFIER(ops); _Py_IDENTIFIER(comparators); static char *Compare_fields[]={ "left", "ops", "comparators", }; static PyTypeObject *Call_type; _Py_IDENTIFIER(func); static char *Call_fields[]={ "func", "args", "keywords", }; static PyTypeObject *Num_type; _Py_IDENTIFIER(n); static char *Num_fields[]={ "n", }; static PyTypeObject *Str_type; _Py_IDENTIFIER(s); _Py_IDENTIFIER(kind); static char *Str_fields[]={ "s", "kind", }; static PyTypeObject *FormattedValue_type; _Py_IDENTIFIER(conversion); _Py_IDENTIFIER(format_spec); static char *FormattedValue_fields[]={
"value", "conversion", "format_spec", }; static PyTypeObject *JoinedStr_type; static char *JoinedStr_fields[]={ "values", }; static PyTypeObject *Bytes_type; static char *Bytes_fields[]={ "s", }; static PyTypeObject *NameConstant_type; static char *NameConstant_fields[]={ "value", }; static PyTypeObject *Ellipsis_type; static PyTypeObject *Constant_type; static char *Constant_fields[]={ "value", }; static PyTypeObject *Attribute_type; _Py_IDENTIFIER(attr); _Py_IDENTIFIER(ctx); static char *Attribute_fields[]={ "value", "attr", "ctx", }; static PyTypeObject *Subscript_type; _Py_IDENTIFIER(slice); static char *Subscript_fields[]={ "value", "slice", "ctx", }; static PyTypeObject *Starred_type; static char *Starred_fields[]={ "value", "ctx", }; static PyTypeObject *Name_type; _Py_IDENTIFIER(id); static char *Name_fields[]={ "id", "ctx", }; static PyTypeObject *List_type; static char *List_fields[]={ "elts", "ctx", }; static PyTypeObject *Tuple_type; static char *Tuple_fields[]={ "elts", "ctx", }; static PyTypeObject *expr_context_type; static PyObject *Load_singleton, *Store_singleton, *Del_singleton, *AugLoad_singleton, *AugStore_singleton, *Param_singleton; static PyObject* ast2obj_expr_context(expr_context_ty); static PyTypeObject *Load_type; static PyTypeObject *Store_type; static PyTypeObject *Del_type; static PyTypeObject *AugLoad_type; static PyTypeObject *AugStore_type; static PyTypeObject *Param_type; static PyTypeObject *slice_type; static PyObject* ast2obj_slice(void*); static PyTypeObject *Slice_type; _Py_IDENTIFIER(lower); _Py_IDENTIFIER(upper); _Py_IDENTIFIER(step); static char *Slice_fields[]={ "lower", "upper", "step", }; static PyTypeObject *ExtSlice_type; _Py_IDENTIFIER(dims); static char *ExtSlice_fields[]={ "dims", }; static PyTypeObject *Index_type; static char *Index_fields[]={ "value", }; static PyTypeObject *boolop_type; static PyObject *And_singleton, *Or_singleton; static PyObject* ast2obj_boolop(boolop_ty); static PyTypeObject *And_type;
/* Enum-like ASDL sums (expr_context, boolop, operator, unaryop, cmpop) use one
 * shared module-lifetime singleton instance per constructor instead of per-node
 * allocations. */
static PyTypeObject *Or_type; static PyTypeObject *operator_type; static PyObject *Add_singleton, *Sub_singleton, *Mult_singleton, *MatMult_singleton, *Div_singleton, *Mod_singleton, *Pow_singleton, *LShift_singleton, *RShift_singleton, *BitOr_singleton, *BitXor_singleton, *BitAnd_singleton, *FloorDiv_singleton; static PyObject* ast2obj_operator(operator_ty); static PyTypeObject *Add_type; static PyTypeObject *Sub_type; static PyTypeObject *Mult_type; static PyTypeObject *MatMult_type; static PyTypeObject *Div_type; static PyTypeObject *Mod_type; static PyTypeObject *Pow_type; static PyTypeObject *LShift_type; static PyTypeObject *RShift_type; static PyTypeObject *BitOr_type; static PyTypeObject *BitXor_type; static PyTypeObject *BitAnd_type; static PyTypeObject *FloorDiv_type; static PyTypeObject *unaryop_type; static PyObject *Invert_singleton, *Not_singleton, *UAdd_singleton, *USub_singleton; static PyObject* ast2obj_unaryop(unaryop_ty); static PyTypeObject *Invert_type; static PyTypeObject *Not_type; static PyTypeObject *UAdd_type; static PyTypeObject *USub_type; static PyTypeObject *cmpop_type; static PyObject *Eq_singleton, *NotEq_singleton, *Lt_singleton, *LtE_singleton, *Gt_singleton, *GtE_singleton, *Is_singleton, *IsNot_singleton, *In_singleton, *NotIn_singleton; static PyObject* ast2obj_cmpop(cmpop_ty); static PyTypeObject *Eq_type; static PyTypeObject *NotEq_type; static PyTypeObject *Lt_type; static PyTypeObject *LtE_type; static PyTypeObject *Gt_type; static PyTypeObject *GtE_type; static PyTypeObject *Is_type; static PyTypeObject *IsNot_type; static PyTypeObject *In_type; static PyTypeObject *NotIn_type; static PyTypeObject *comprehension_type; static PyObject* ast2obj_comprehension(void*); _Py_IDENTIFIER(ifs); _Py_IDENTIFIER(is_async); static char *comprehension_fields[]={ "target", "iter", "ifs", "is_async", }; static PyTypeObject *excepthandler_type; static char *excepthandler_attributes[] = { "lineno", "col_offset", }; static PyObject*
/* Core runtime machinery for the _ast3 base class:
 *  - AST_object: instance layout (a plain dict-carrying GC object).
 *  - ast_dealloc/ast_traverse/ast_clear: standard GC slot functions for the dict.
 *  - ast_type_init: generic __init__ — maps positional args onto the class's
 *    `_fields` tuple and applies keyword args via setattr; rejects a positional
 *    count that is neither 0 nor len(_fields).
 *  - ast_type_reduce: pickling support (class, empty args, optional __dict__).
 *  - AST_type: the "_ast3.AST" PyTypeObject all generated node types inherit from.
 *  - make_type: builds a subclass via PyType_Type with `_fields` and
 *    __module__ = "_ast3".
 *  - add_attributes: attaches an `_attributes` tuple (e.g. lineno/col_offset).
 *  - ast2obj_list: maps an asdl_seq to a Python list using a per-element
 *    converter callback.
 * The first line below completes `static PyObject*` prototypes begun above. */
ast2obj_excepthandler(void*); static PyTypeObject *ExceptHandler_type; _Py_IDENTIFIER(type); static char *ExceptHandler_fields[]={ "type", "name", "body", }; static PyTypeObject *arguments_type; static PyObject* ast2obj_arguments(void*); _Py_IDENTIFIER(vararg); _Py_IDENTIFIER(kwonlyargs); _Py_IDENTIFIER(kw_defaults); _Py_IDENTIFIER(kwarg); _Py_IDENTIFIER(defaults); static char *arguments_fields[]={ "args", "vararg", "kwonlyargs", "kw_defaults", "kwarg", "defaults", }; static PyTypeObject *arg_type; static PyObject* ast2obj_arg(void*); static char *arg_attributes[] = { "lineno", "col_offset", }; _Py_IDENTIFIER(arg); static char *arg_fields[]={ "arg", "annotation", "type_comment", }; static PyTypeObject *keyword_type; static PyObject* ast2obj_keyword(void*); static char *keyword_fields[]={ "arg", "value", }; static PyTypeObject *alias_type; static PyObject* ast2obj_alias(void*); _Py_IDENTIFIER(asname); static char *alias_fields[]={ "name", "asname", }; static PyTypeObject *withitem_type; static PyObject* ast2obj_withitem(void*); _Py_IDENTIFIER(context_expr); _Py_IDENTIFIER(optional_vars); static char *withitem_fields[]={ "context_expr", "optional_vars", }; static PyTypeObject *type_ignore_type; static PyObject* ast2obj_type_ignore(void*); static PyTypeObject *TypeIgnore_type; static char *TypeIgnore_fields[]={ "lineno", }; typedef struct { PyObject_HEAD PyObject *dict; } AST_object; static void ast_dealloc(AST_object *self) { Py_CLEAR(self->dict); Py_TYPE(self)->tp_free(self); } static int ast_traverse(AST_object *self, visitproc visit, void *arg) { Py_VISIT(self->dict); return 0; } static void ast_clear(AST_object *self) { Py_CLEAR(self->dict); } static int ast_type_init(PyObject *self, PyObject *args, PyObject *kw) { _Py_IDENTIFIER(_fields); Py_ssize_t i, numfields = 0; int res = -1; PyObject *key, *value, *fields; fields = _PyObject_GetAttrId((PyObject*)Py_TYPE(self), &PyId__fields); if (!fields) PyErr_Clear(); if (fields) { numfields = PySequence_Size(fields); if
(numfields == -1) goto cleanup; } res = 0; /* if no error occurs, this stays 0 to the end */ if (PyTuple_GET_SIZE(args) > 0) { if (numfields != PyTuple_GET_SIZE(args)) { PyErr_Format(PyExc_TypeError, "%.400s constructor takes %s" "%zd positional argument%s", Py_TYPE(self)->tp_name, numfields == 0 ? "" : "either 0 or ", numfields, numfields == 1 ? "" : "s"); res = -1; goto cleanup; } for (i = 0; i < PyTuple_GET_SIZE(args); i++) { /* cannot be reached when fields is NULL */ PyObject *name = PySequence_GetItem(fields, i); if (!name) { res = -1; goto cleanup; } res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i)); Py_DECREF(name); if (res < 0) goto cleanup; } } if (kw) { i = 0; /* needed by PyDict_Next */ while (PyDict_Next(kw, &i, &key, &value)) { res = PyObject_SetAttr(self, key, value); if (res < 0) goto cleanup; } } cleanup: Py_XDECREF(fields); return res; } /* Pickling support */ static PyObject * ast_type_reduce(PyObject *self, PyObject *unused) { PyObject *res; _Py_IDENTIFIER(__dict__); PyObject *dict = _PyObject_GetAttrId(self, &PyId___dict__); if (dict == NULL) { if (PyErr_ExceptionMatches(PyExc_AttributeError)) PyErr_Clear(); else return NULL; } if (dict) { res = Py_BuildValue("O()O", Py_TYPE(self), dict); Py_DECREF(dict); return res; } return Py_BuildValue("O()", Py_TYPE(self)); } static PyMethodDef ast_type_methods[] = { {"__reduce__", ast_type_reduce, METH_NOARGS, NULL}, {NULL} }; static PyGetSetDef ast_type_getsets[] = { {"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict}, {NULL} }; static PyTypeObject AST_type = { PyVarObject_HEAD_INIT(NULL, 0) "_ast3.AST", sizeof(AST_object), 0, (destructor)ast_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer
*/ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */ 0, /* tp_doc */ (traverseproc)ast_traverse, /* tp_traverse */ (inquiry)ast_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ ast_type_methods, /* tp_methods */ 0, /* tp_members */ ast_type_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ offsetof(AST_object, dict),/* tp_dictoffset */ (initproc)ast_type_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ PyType_GenericNew, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields) { PyObject *fnames, *result; int i; fnames = PyTuple_New(num_fields); if (!fnames) return NULL; for (i = 0; i < num_fields; i++) { PyObject *field = PyUnicode_FromString(fields[i]); if (!field) { Py_DECREF(fnames); return NULL; } PyTuple_SET_ITEM(fnames, i, field); } result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){sOss}", type, base, "_fields", fnames, "__module__", "_ast3"); Py_DECREF(fnames); return (PyTypeObject*)result; } static int add_attributes(PyTypeObject* type, char**attrs, int num_fields) { int i, result; _Py_IDENTIFIER(_attributes); PyObject *s, *l = PyTuple_New(num_fields); if (!l) return 0; for (i = 0; i < num_fields; i++) { s = PyUnicode_FromString(attrs[i]); if (!s) { Py_DECREF(l); return 0; } PyTuple_SET_ITEM(l, i, s); } result = _PyObject_SetAttrId((PyObject*)type, &PyId__attributes, l) >= 0; Py_DECREF(l); return result; } /* Conversion AST -> Python */ static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*)) { Py_ssize_t i, n = asdl_seq_LEN(seq); PyObject *result = PyList_New(n); PyObject *value; if (!result) return NULL; for (i = 0; i < n; i++) { value = func(asdl_seq_GET(seq, i)); if (!value) { Py_DECREF(result); return NULL; } PyList_SET_ITEM(result, i, value); } return result; } static PyObject*
ast2obj_object(void *o) { if (!o) o = Py_None; Py_INCREF((PyObject*)o); return (PyObject*)o; } #define ast2obj_singleton ast2obj_object #define ast2obj_constant ast2obj_object #define ast2obj_identifier ast2obj_object #define ast2obj_string ast2obj_object #define ast2obj_bytes ast2obj_object static PyObject* ast2obj_int(long b) { return PyLong_FromLong(b); } /* Conversion Python -> AST */ static int obj2ast_singleton(PyObject *obj, PyObject** out, PyArena* arena) { if (obj != Py_None && obj != Py_True && obj != Py_False) { PyErr_SetString(PyExc_ValueError, "AST singleton must be True, False, or None"); return 1; } *out = obj; return 0; } static int obj2ast_object(PyObject* obj, PyObject** out, PyArena* arena) { if (obj == Py_None) obj = NULL; if (obj) { if (PyArena_AddPyObject(arena, obj) < 0) { *out = NULL; return -1; } Py_INCREF(obj); } *out = obj; return 0; } static int obj2ast_constant(PyObject* obj, PyObject** out, PyArena* arena) { if (obj) { if (PyArena_AddPyObject(arena, obj) < 0) { *out = NULL; return -1; } Py_INCREF(obj); } *out = obj; return 0; } static int obj2ast_identifier(PyObject* obj, PyObject** out, PyArena* arena) { if (!PyUnicode_CheckExact(obj) && obj != Py_None) { PyErr_SetString(PyExc_TypeError, "AST identifier must be of type str"); return 1; } return obj2ast_object(obj, out, arena); } static int obj2ast_string(PyObject* obj, PyObject** out, PyArena* arena) { if (!PyUnicode_CheckExact(obj) && !PyBytes_CheckExact(obj)) { PyErr_SetString(PyExc_TypeError, "AST string must be of type str"); return 1; } return obj2ast_object(obj, out, arena); } static int obj2ast_bytes(PyObject* obj, PyObject** out, PyArena* arena) { if (!PyBytes_CheckExact(obj)) { PyErr_SetString(PyExc_TypeError, "AST bytes must be of type bytes"); return 1; } return obj2ast_object(obj, out, arena); } static int obj2ast_int(PyObject* obj, int* out, PyArena* arena) { int i; if (!PyLong_Check(obj)) { PyErr_Format(PyExc_ValueError, "invalid integer value: %R", obj); return 1; } i = 
/* Module bootstrap:
 *  - add_ast_fields: gives the AST base class empty `_fields`/`_attributes`.
 *  - exists_not_none: attribute probe used during obj2ast conversion (true iff
 *    the attribute exists and is not None; swallows the AttributeError).
 *  - init_types: one-shot (guarded by a static flag) creation of every
 *    generated node class via make_type()/add_attributes(), plus the shared
 *    singleton instances for the enum-like sums; returns 1 on success, 0 on
 *    failure with a Python exception set. Field counts passed to make_type()
 *    must match the corresponding *_fields arrays declared above.
 * The first line below completes `i =` from obj2ast_int on the previous line. */
_PyLong_AsInt(obj); if (i == -1 && PyErr_Occurred()) return 1; *out = i; return 0; } static int add_ast_fields(void) { PyObject *empty_tuple, *d; if (PyType_Ready(&AST_type) < 0) return -1; d = AST_type.tp_dict; empty_tuple = PyTuple_New(0); if (!empty_tuple || PyDict_SetItemString(d, "_fields", empty_tuple) < 0 || PyDict_SetItemString(d, "_attributes", empty_tuple) < 0) { Py_XDECREF(empty_tuple); return -1; } Py_DECREF(empty_tuple); return 0; } static int exists_not_none(PyObject *obj, _Py_Identifier *id) { int isnone; PyObject *attr = _PyObject_GetAttrId(obj, id); if (!attr) { PyErr_Clear(); return 0; } isnone = attr == Py_None; Py_DECREF(attr); return !isnone; } static int init_types(void) { static int initialized; if (initialized) return 1; if (add_ast_fields() < 0) return 0; mod_type = make_type("mod", &AST_type, NULL, 0); if (!mod_type) return 0; if (!add_attributes(mod_type, NULL, 0)) return 0; Module_type = make_type("Module", mod_type, Module_fields, 2); if (!Module_type) return 0; Interactive_type = make_type("Interactive", mod_type, Interactive_fields, 1); if (!Interactive_type) return 0; Expression_type = make_type("Expression", mod_type, Expression_fields, 1); if (!Expression_type) return 0; FunctionType_type = make_type("FunctionType", mod_type, FunctionType_fields, 2); if (!FunctionType_type) return 0; Suite_type = make_type("Suite", mod_type, Suite_fields, 1); if (!Suite_type) return 0; stmt_type = make_type("stmt", &AST_type, NULL, 0); if (!stmt_type) return 0; if (!add_attributes(stmt_type, stmt_attributes, 2)) return 0; FunctionDef_type = make_type("FunctionDef", stmt_type, FunctionDef_fields, 6); if (!FunctionDef_type) return 0; AsyncFunctionDef_type = make_type("AsyncFunctionDef", stmt_type, AsyncFunctionDef_fields, 6); if (!AsyncFunctionDef_type) return 0; ClassDef_type = make_type("ClassDef", stmt_type, ClassDef_fields, 5); if (!ClassDef_type) return 0; Return_type = make_type("Return", stmt_type, Return_fields, 1); if (!Return_type) return
0; Delete_type = make_type("Delete", stmt_type, Delete_fields, 1); if (!Delete_type) return 0; Assign_type = make_type("Assign", stmt_type, Assign_fields, 3); if (!Assign_type) return 0; AugAssign_type = make_type("AugAssign", stmt_type, AugAssign_fields, 3); if (!AugAssign_type) return 0; AnnAssign_type = make_type("AnnAssign", stmt_type, AnnAssign_fields, 4); if (!AnnAssign_type) return 0; For_type = make_type("For", stmt_type, For_fields, 5); if (!For_type) return 0; AsyncFor_type = make_type("AsyncFor", stmt_type, AsyncFor_fields, 5); if (!AsyncFor_type) return 0; While_type = make_type("While", stmt_type, While_fields, 3); if (!While_type) return 0; If_type = make_type("If", stmt_type, If_fields, 3); if (!If_type) return 0; With_type = make_type("With", stmt_type, With_fields, 3); if (!With_type) return 0; AsyncWith_type = make_type("AsyncWith", stmt_type, AsyncWith_fields, 3); if (!AsyncWith_type) return 0; Raise_type = make_type("Raise", stmt_type, Raise_fields, 2); if (!Raise_type) return 0; Try_type = make_type("Try", stmt_type, Try_fields, 4); if (!Try_type) return 0; Assert_type = make_type("Assert", stmt_type, Assert_fields, 2); if (!Assert_type) return 0; Import_type = make_type("Import", stmt_type, Import_fields, 1); if (!Import_type) return 0; ImportFrom_type = make_type("ImportFrom", stmt_type, ImportFrom_fields, 3); if (!ImportFrom_type) return 0; Global_type = make_type("Global", stmt_type, Global_fields, 1); if (!Global_type) return 0; Nonlocal_type = make_type("Nonlocal", stmt_type, Nonlocal_fields, 1); if (!Nonlocal_type) return 0; Expr_type = make_type("Expr", stmt_type, Expr_fields, 1); if (!Expr_type) return 0; Pass_type = make_type("Pass", stmt_type, NULL, 0); if (!Pass_type) return 0; Break_type = make_type("Break", stmt_type, NULL, 0); if (!Break_type) return 0; Continue_type = make_type("Continue", stmt_type, NULL, 0); if (!Continue_type) return 0; expr_type = make_type("expr", &AST_type, NULL, 0); if (!expr_type) return 0; if
(!add_attributes(expr_type, expr_attributes, 2)) return 0; BoolOp_type = make_type("BoolOp", expr_type, BoolOp_fields, 2); if (!BoolOp_type) return 0; BinOp_type = make_type("BinOp", expr_type, BinOp_fields, 3); if (!BinOp_type) return 0; UnaryOp_type = make_type("UnaryOp", expr_type, UnaryOp_fields, 2); if (!UnaryOp_type) return 0; Lambda_type = make_type("Lambda", expr_type, Lambda_fields, 2); if (!Lambda_type) return 0; IfExp_type = make_type("IfExp", expr_type, IfExp_fields, 3); if (!IfExp_type) return 0; Dict_type = make_type("Dict", expr_type, Dict_fields, 2); if (!Dict_type) return 0; Set_type = make_type("Set", expr_type, Set_fields, 1); if (!Set_type) return 0; ListComp_type = make_type("ListComp", expr_type, ListComp_fields, 2); if (!ListComp_type) return 0; SetComp_type = make_type("SetComp", expr_type, SetComp_fields, 2); if (!SetComp_type) return 0; DictComp_type = make_type("DictComp", expr_type, DictComp_fields, 3); if (!DictComp_type) return 0; GeneratorExp_type = make_type("GeneratorExp", expr_type, GeneratorExp_fields, 2); if (!GeneratorExp_type) return 0; Await_type = make_type("Await", expr_type, Await_fields, 1); if (!Await_type) return 0; Yield_type = make_type("Yield", expr_type, Yield_fields, 1); if (!Yield_type) return 0; YieldFrom_type = make_type("YieldFrom", expr_type, YieldFrom_fields, 1); if (!YieldFrom_type) return 0; Compare_type = make_type("Compare", expr_type, Compare_fields, 3); if (!Compare_type) return 0; Call_type = make_type("Call", expr_type, Call_fields, 3); if (!Call_type) return 0; Num_type = make_type("Num", expr_type, Num_fields, 1); if (!Num_type) return 0; Str_type = make_type("Str", expr_type, Str_fields, 2); if (!Str_type) return 0; FormattedValue_type = make_type("FormattedValue", expr_type, FormattedValue_fields, 3); if (!FormattedValue_type) return 0; JoinedStr_type = make_type("JoinedStr", expr_type, JoinedStr_fields, 1); if (!JoinedStr_type) return 0; Bytes_type = make_type("Bytes", expr_type, Bytes_fields, 1);
if (!Bytes_type) return 0; NameConstant_type = make_type("NameConstant", expr_type, NameConstant_fields, 1); if (!NameConstant_type) return 0; Ellipsis_type = make_type("Ellipsis", expr_type, NULL, 0); if (!Ellipsis_type) return 0; Constant_type = make_type("Constant", expr_type, Constant_fields, 1); if (!Constant_type) return 0; Attribute_type = make_type("Attribute", expr_type, Attribute_fields, 3); if (!Attribute_type) return 0; Subscript_type = make_type("Subscript", expr_type, Subscript_fields, 3); if (!Subscript_type) return 0; Starred_type = make_type("Starred", expr_type, Starred_fields, 2); if (!Starred_type) return 0; Name_type = make_type("Name", expr_type, Name_fields, 2); if (!Name_type) return 0; List_type = make_type("List", expr_type, List_fields, 2); if (!List_type) return 0; Tuple_type = make_type("Tuple", expr_type, Tuple_fields, 2); if (!Tuple_type) return 0; expr_context_type = make_type("expr_context", &AST_type, NULL, 0); if (!expr_context_type) return 0; if (!add_attributes(expr_context_type, NULL, 0)) return 0; Load_type = make_type("Load", expr_context_type, NULL, 0); if (!Load_type) return 0; Load_singleton = PyType_GenericNew(Load_type, NULL, NULL); if (!Load_singleton) return 0; Store_type = make_type("Store", expr_context_type, NULL, 0); if (!Store_type) return 0; Store_singleton = PyType_GenericNew(Store_type, NULL, NULL); if (!Store_singleton) return 0; Del_type = make_type("Del", expr_context_type, NULL, 0); if (!Del_type) return 0; Del_singleton = PyType_GenericNew(Del_type, NULL, NULL); if (!Del_singleton) return 0; AugLoad_type = make_type("AugLoad", expr_context_type, NULL, 0); if (!AugLoad_type) return 0; AugLoad_singleton = PyType_GenericNew(AugLoad_type, NULL, NULL); if (!AugLoad_singleton) return 0; AugStore_type = make_type("AugStore", expr_context_type, NULL, 0); if (!AugStore_type) return 0; AugStore_singleton = PyType_GenericNew(AugStore_type, NULL, NULL); if (!AugStore_singleton) return 0; Param_type =
make_type("Param", expr_context_type, NULL, 0); if (!Param_type) return 0; Param_singleton = PyType_GenericNew(Param_type, NULL, NULL); if (!Param_singleton) return 0; slice_type = make_type("slice", &AST_type, NULL, 0); if (!slice_type) return 0; if (!add_attributes(slice_type, NULL, 0)) return 0; Slice_type = make_type("Slice", slice_type, Slice_fields, 3); if (!Slice_type) return 0; ExtSlice_type = make_type("ExtSlice", slice_type, ExtSlice_fields, 1); if (!ExtSlice_type) return 0; Index_type = make_type("Index", slice_type, Index_fields, 1); if (!Index_type) return 0; boolop_type = make_type("boolop", &AST_type, NULL, 0); if (!boolop_type) return 0; if (!add_attributes(boolop_type, NULL, 0)) return 0; And_type = make_type("And", boolop_type, NULL, 0); if (!And_type) return 0; And_singleton = PyType_GenericNew(And_type, NULL, NULL); if (!And_singleton) return 0; Or_type = make_type("Or", boolop_type, NULL, 0); if (!Or_type) return 0; Or_singleton = PyType_GenericNew(Or_type, NULL, NULL); if (!Or_singleton) return 0; operator_type = make_type("operator", &AST_type, NULL, 0); if (!operator_type) return 0; if (!add_attributes(operator_type, NULL, 0)) return 0; Add_type = make_type("Add", operator_type, NULL, 0); if (!Add_type) return 0; Add_singleton = PyType_GenericNew(Add_type, NULL, NULL); if (!Add_singleton) return 0; Sub_type = make_type("Sub", operator_type, NULL, 0); if (!Sub_type) return 0; Sub_singleton = PyType_GenericNew(Sub_type, NULL, NULL); if (!Sub_singleton) return 0; Mult_type = make_type("Mult", operator_type, NULL, 0); if (!Mult_type) return 0; Mult_singleton = PyType_GenericNew(Mult_type, NULL, NULL); if (!Mult_singleton) return 0; MatMult_type = make_type("MatMult", operator_type, NULL, 0); if (!MatMult_type) return 0; MatMult_singleton = PyType_GenericNew(MatMult_type, NULL, NULL); if (!MatMult_singleton) return 0; Div_type = make_type("Div", operator_type, NULL, 0); if (!Div_type) return 0; Div_singleton = PyType_GenericNew(Div_type, NULL,
NULL); if (!Div_singleton) return 0; Mod_type = make_type("Mod", operator_type, NULL, 0); if (!Mod_type) return 0; Mod_singleton = PyType_GenericNew(Mod_type, NULL, NULL); if (!Mod_singleton) return 0; Pow_type = make_type("Pow", operator_type, NULL, 0); if (!Pow_type) return 0; Pow_singleton = PyType_GenericNew(Pow_type, NULL, NULL); if (!Pow_singleton) return 0; LShift_type = make_type("LShift", operator_type, NULL, 0); if (!LShift_type) return 0; LShift_singleton = PyType_GenericNew(LShift_type, NULL, NULL); if (!LShift_singleton) return 0; RShift_type = make_type("RShift", operator_type, NULL, 0); if (!RShift_type) return 0; RShift_singleton = PyType_GenericNew(RShift_type, NULL, NULL); if (!RShift_singleton) return 0; BitOr_type = make_type("BitOr", operator_type, NULL, 0); if (!BitOr_type) return 0; BitOr_singleton = PyType_GenericNew(BitOr_type, NULL, NULL); if (!BitOr_singleton) return 0; BitXor_type = make_type("BitXor", operator_type, NULL, 0); if (!BitXor_type) return 0; BitXor_singleton = PyType_GenericNew(BitXor_type, NULL, NULL); if (!BitXor_singleton) return 0; BitAnd_type = make_type("BitAnd", operator_type, NULL, 0); if (!BitAnd_type) return 0; BitAnd_singleton = PyType_GenericNew(BitAnd_type, NULL, NULL); if (!BitAnd_singleton) return 0; FloorDiv_type = make_type("FloorDiv", operator_type, NULL, 0); if (!FloorDiv_type) return 0; FloorDiv_singleton = PyType_GenericNew(FloorDiv_type, NULL, NULL); if (!FloorDiv_singleton) return 0; unaryop_type = make_type("unaryop", &AST_type, NULL, 0); if (!unaryop_type) return 0; if (!add_attributes(unaryop_type, NULL, 0)) return 0; Invert_type = make_type("Invert", unaryop_type, NULL, 0); if (!Invert_type) return 0; Invert_singleton = PyType_GenericNew(Invert_type, NULL, NULL); if (!Invert_singleton) return 0; Not_type = make_type("Not", unaryop_type, NULL, 0); if (!Not_type) return 0; Not_singleton = PyType_GenericNew(Not_type, NULL, NULL); if (!Not_singleton) return 0; UAdd_type = make_type("UAdd",
unaryop_type, NULL, 0); if (!UAdd_type) return 0; UAdd_singleton = PyType_GenericNew(UAdd_type, NULL, NULL); if (!UAdd_singleton) return 0; USub_type = make_type("USub", unaryop_type, NULL, 0); if (!USub_type) return 0; USub_singleton = PyType_GenericNew(USub_type, NULL, NULL); if (!USub_singleton) return 0; cmpop_type = make_type("cmpop", &AST_type, NULL, 0); if (!cmpop_type) return 0; if (!add_attributes(cmpop_type, NULL, 0)) return 0; Eq_type = make_type("Eq", cmpop_type, NULL, 0); if (!Eq_type) return 0; Eq_singleton = PyType_GenericNew(Eq_type, NULL, NULL); if (!Eq_singleton) return 0; NotEq_type = make_type("NotEq", cmpop_type, NULL, 0); if (!NotEq_type) return 0; NotEq_singleton = PyType_GenericNew(NotEq_type, NULL, NULL); if (!NotEq_singleton) return 0; Lt_type = make_type("Lt", cmpop_type, NULL, 0); if (!Lt_type) return 0; Lt_singleton = PyType_GenericNew(Lt_type, NULL, NULL); if (!Lt_singleton) return 0; LtE_type = make_type("LtE", cmpop_type, NULL, 0); if (!LtE_type) return 0; LtE_singleton = PyType_GenericNew(LtE_type, NULL, NULL); if (!LtE_singleton) return 0; Gt_type = make_type("Gt", cmpop_type, NULL, 0); if (!Gt_type) return 0; Gt_singleton = PyType_GenericNew(Gt_type, NULL, NULL); if (!Gt_singleton) return 0; GtE_type = make_type("GtE", cmpop_type, NULL, 0); if (!GtE_type) return 0; GtE_singleton = PyType_GenericNew(GtE_type, NULL, NULL); if (!GtE_singleton) return 0; Is_type = make_type("Is", cmpop_type, NULL, 0); if (!Is_type) return 0; Is_singleton = PyType_GenericNew(Is_type, NULL, NULL); if (!Is_singleton) return 0; IsNot_type = make_type("IsNot", cmpop_type, NULL, 0); if (!IsNot_type) return 0; IsNot_singleton = PyType_GenericNew(IsNot_type, NULL, NULL); if (!IsNot_singleton) return 0; In_type = make_type("In", cmpop_type, NULL, 0); if (!In_type) return 0; In_singleton = PyType_GenericNew(In_type, NULL, NULL); if (!In_singleton) return 0; NotIn_type = make_type("NotIn", cmpop_type, NULL, 0); if (!NotIn_type) return 0; NotIn_singleton =
PyType_GenericNew(NotIn_type, NULL, NULL); if (!NotIn_singleton) return 0; comprehension_type = make_type("comprehension", &AST_type, comprehension_fields, 4); if (!comprehension_type) return 0; if (!add_attributes(comprehension_type, NULL, 0)) return 0; excepthandler_type = make_type("excepthandler", &AST_type, NULL, 0); if (!excepthandler_type) return 0; if (!add_attributes(excepthandler_type, excepthandler_attributes, 2)) return 0; ExceptHandler_type = make_type("ExceptHandler", excepthandler_type, ExceptHandler_fields, 3); if (!ExceptHandler_type) return 0; arguments_type = make_type("arguments", &AST_type, arguments_fields, 6); if (!arguments_type) return 0; if (!add_attributes(arguments_type, NULL, 0)) return 0; arg_type = make_type("arg", &AST_type, arg_fields, 3); if (!arg_type) return 0; if (!add_attributes(arg_type, arg_attributes, 2)) return 0; keyword_type = make_type("keyword", &AST_type, keyword_fields, 2); if (!keyword_type) return 0; if (!add_attributes(keyword_type, NULL, 0)) return 0; alias_type = make_type("alias", &AST_type, alias_fields, 2); if (!alias_type) return 0; if (!add_attributes(alias_type, NULL, 0)) return 0; withitem_type = make_type("withitem", &AST_type, withitem_fields, 2); if (!withitem_type) return 0; if (!add_attributes(withitem_type, NULL, 0)) return 0; type_ignore_type = make_type("type_ignore", &AST_type, NULL, 0); if (!type_ignore_type) return 0; if (!add_attributes(type_ignore_type, NULL, 0)) return 0; TypeIgnore_type = make_type("TypeIgnore", type_ignore_type, TypeIgnore_fields, 1); if (!TypeIgnore_type) return 0; initialized = 1; return 1; } static int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena); static int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena); static int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena); static int obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena); static int obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena); static int
/* Arena-backed AST node constructors (Module ... Raise, continuing past this
 * view). Common shape: validate that ASDL-required (non-optional) fields are
 * non-NULL, allocate the node from the PyArena (so the arena, not the caller,
 * owns the memory), set the kind tag and per-kind union fields, record
 * lineno/col_offset for located nodes, and return the node or NULL with a
 * ValueError set. Sequence fields (asdl_seq*) and optional fields may be NULL
 * without error. Preceded by the remaining obj2ast_* forward prototypes; the
 * first line completes `static int` from the previous line, and Raise at the
 * end is continued beyond this view. */
obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena); static int obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena); static int obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena); static int obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena); static int obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena); static int obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena); static int obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena); static int obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena); static int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena); static int obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena); static int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena); static int obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena* arena); mod_ty Module(asdl_seq * body, asdl_seq * type_ignores, PyArena *arena) { mod_ty p; p = (mod_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Module_kind; p->v.Module.body = body; p->v.Module.type_ignores = type_ignores; return p; } mod_ty Interactive(asdl_seq * body, PyArena *arena) { mod_ty p; p = (mod_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Interactive_kind; p->v.Interactive.body = body; return p; } mod_ty Expression(expr_ty body, PyArena *arena) { mod_ty p; if (!body) { PyErr_SetString(PyExc_ValueError, "field body is required for Expression"); return NULL; } p = (mod_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Expression_kind; p->v.Expression.body = body; return p; } mod_ty FunctionType(asdl_seq * argtypes, expr_ty returns, PyArena *arena) { mod_ty p; if (!returns) { PyErr_SetString(PyExc_ValueError, "field returns is required for FunctionType"); return NULL; } p = (mod_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = FunctionType_kind; p->v.FunctionType.argtypes
= argtypes; p->v.FunctionType.returns = returns; return p; } mod_ty Suite(asdl_seq * body, PyArena *arena) { mod_ty p; p = (mod_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Suite_kind; p->v.Suite.body = body; return p; } stmt_ty FunctionDef(identifier name, arguments_ty args, asdl_seq * body, asdl_seq * decorator_list, expr_ty returns, string type_comment, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for FunctionDef"); return NULL; } if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for FunctionDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = FunctionDef_kind; p->v.FunctionDef.name = name; p->v.FunctionDef.args = args; p->v.FunctionDef.body = body; p->v.FunctionDef.decorator_list = decorator_list; p->v.FunctionDef.returns = returns; p->v.FunctionDef.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty AsyncFunctionDef(identifier name, arguments_ty args, asdl_seq * body, asdl_seq * decorator_list, expr_ty returns, string type_comment, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for AsyncFunctionDef"); return NULL; } if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for AsyncFunctionDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncFunctionDef_kind; p->v.AsyncFunctionDef.name = name; p->v.AsyncFunctionDef.args = args; p->v.AsyncFunctionDef.body = body; p->v.AsyncFunctionDef.decorator_list = decorator_list; p->v.AsyncFunctionDef.returns = returns; p->v.AsyncFunctionDef.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty ClassDef(identifier name, asdl_seq * bases, asdl_seq * keywords, asdl_seq * body, asdl_seq * decorator_list, int lineno, int
col_offset, PyArena *arena) { stmt_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for ClassDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ClassDef_kind; p->v.ClassDef.name = name; p->v.ClassDef.bases = bases; p->v.ClassDef.keywords = keywords; p->v.ClassDef.body = body; p->v.ClassDef.decorator_list = decorator_list; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Return(expr_ty value, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Return_kind; p->v.Return.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Delete(asdl_seq * targets, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Delete_kind; p->v.Delete.targets = targets; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Assign(asdl_seq * targets, expr_ty value, string type_comment, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Assign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Assign_kind; p->v.Assign.targets = targets; p->v.Assign.value = value; p->v.Assign.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty AugAssign(expr_ty target, operator_ty op, expr_ty value, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AugAssign"); return NULL; } if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for AugAssign"); return NULL; } if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for AugAssign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind =
AugAssign_kind; p->v.AugAssign.target = target; p->v.AugAssign.op = op; p->v.AugAssign.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int simple, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AnnAssign"); return NULL; } if (!annotation) { PyErr_SetString(PyExc_ValueError, "field annotation is required for AnnAssign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AnnAssign_kind; p->v.AnnAssign.target = target; p->v.AnnAssign.annotation = annotation; p->v.AnnAssign.value = value; p->v.AnnAssign.simple = simple; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, string type_comment, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for For"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for For"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = For_kind; p->v.For.target = target; p->v.For.iter = iter; p->v.For.body = body; p->v.For.orelse = orelse; p->v.For.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty AsyncFor(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, string type_comment, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AsyncFor"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for AsyncFor"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncFor_kind; p->v.AsyncFor.target = target; p->v.AsyncFor.iter = iter; p->v.AsyncFor.body = body;
p->v.AsyncFor.orelse = orelse; p->v.AsyncFor.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty While(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for While"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = While_kind; p->v.While.test = test; p->v.While.body = body; p->v.While.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty If(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for If"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = If_kind; p->v.If.test = test; p->v.If.body = body; p->v.If.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty With(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = With_kind; p->v.With.items = items; p->v.With.body = body; p->v.With.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty AsyncWith(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncWith_kind; p->v.AsyncWith.items = items; p->v.AsyncWith.body = body; p->v.AsyncWith.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Raise_kind; p->v.Raise.exc = exc; p->v.Raise.cause = cause;
p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Try(asdl_seq * body, asdl_seq * handlers, asdl_seq * orelse, asdl_seq * finalbody, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Try_kind; p->v.Try.body = body; p->v.Try.handlers = handlers; p->v.Try.orelse = orelse; p->v.Try.finalbody = finalbody; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Assert(expr_ty test, expr_ty msg, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for Assert"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Assert_kind; p->v.Assert.test = test; p->v.Assert.msg = msg; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Import(asdl_seq * names, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Import_kind; p->v.Import.names = names; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty ImportFrom(identifier module, asdl_seq * names, int level, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ImportFrom_kind; p->v.ImportFrom.module = module; p->v.ImportFrom.names = names; p->v.ImportFrom.level = level; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Global(asdl_seq * names, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Global_kind; p->v.Global.names = names; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Nonlocal(asdl_seq * names, int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Nonlocal_kind; p->v.Nonlocal.names = names; 
p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Expr(expr_ty value, int lineno, int col_offset, PyArena *arena) { stmt_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Expr"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Expr_kind; p->v.Expr.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Pass(int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Pass_kind; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Break(int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Break_kind; p->lineno = lineno; p->col_offset = col_offset; return p; } stmt_ty Continue(int lineno, int col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Continue_kind; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty BoolOp(boolop_ty op, asdl_seq * values, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for BoolOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = BoolOp_kind; p->v.BoolOp.op = op; p->v.BoolOp.values = values; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!left) { PyErr_SetString(PyExc_ValueError, "field left is required for BinOp"); return NULL; } if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for BinOp"); return NULL; } if (!right) { PyErr_SetString(PyExc_ValueError, "field right is required for BinOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = BinOp_kind; 
p->v.BinOp.left = left; p->v.BinOp.op = op; p->v.BinOp.right = right; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for UnaryOp"); return NULL; } if (!operand) { PyErr_SetString(PyExc_ValueError, "field operand is required for UnaryOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = UnaryOp_kind; p->v.UnaryOp.op = op; p->v.UnaryOp.operand = operand; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Lambda(arguments_ty args, expr_ty body, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for Lambda"); return NULL; } if (!body) { PyErr_SetString(PyExc_ValueError, "field body is required for Lambda"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Lambda_kind; p->v.Lambda.args = args; p->v.Lambda.body = body; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for IfExp"); return NULL; } if (!body) { PyErr_SetString(PyExc_ValueError, "field body is required for IfExp"); return NULL; } if (!orelse) { PyErr_SetString(PyExc_ValueError, "field orelse is required for IfExp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = IfExp_kind; p->v.IfExp.test = test; p->v.IfExp.body = body; p->v.IfExp.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Dict(asdl_seq * keys, asdl_seq * values, int lineno, int col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Dict_kind; 
p->v.Dict.keys = keys; p->v.Dict.values = values; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Set(asdl_seq * elts, int lineno, int col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Set_kind; p->v.Set.elts = elts; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty ListComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for ListComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ListComp_kind; p->v.ListComp.elt = elt; p->v.ListComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty SetComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for SetComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = SetComp_kind; p->v.SetComp.elt = elt; p->v.SetComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty DictComp(expr_ty key, expr_ty value, asdl_seq * generators, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!key) { PyErr_SetString(PyExc_ValueError, "field key is required for DictComp"); return NULL; } if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for DictComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = DictComp_kind; p->v.DictComp.key = key; p->v.DictComp.value = value; p->v.DictComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty GeneratorExp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for GeneratorExp"); 
return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = GeneratorExp_kind; p->v.GeneratorExp.elt = elt; p->v.GeneratorExp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Await(expr_ty value, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Await"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Await_kind; p->v.Await.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Yield(expr_ty value, int lineno, int col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Yield_kind; p->v.Yield.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty YieldFrom(expr_ty value, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for YieldFrom"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = YieldFrom_kind; p->v.YieldFrom.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Compare(expr_ty left, asdl_int_seq * ops, asdl_seq * comparators, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!left) { PyErr_SetString(PyExc_ValueError, "field left is required for Compare"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Compare_kind; p->v.Compare.left = left; p->v.Compare.ops = ops; p->v.Compare.comparators = comparators; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Call(expr_ty func, asdl_seq * args, asdl_seq * keywords, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!func) { PyErr_SetString(PyExc_ValueError, "field func is required for Call"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if 
(!p) return NULL; p->kind = Call_kind; p->v.Call.func = func; p->v.Call.args = args; p->v.Call.keywords = keywords; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Num(object n, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!n) { PyErr_SetString(PyExc_ValueError, "field n is required for Num"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Num_kind; p->v.Num.n = n; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Str(string s, string kind, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!s) { PyErr_SetString(PyExc_ValueError, "field s is required for Str"); return NULL; } if (!kind) { PyErr_SetString(PyExc_ValueError, "field kind is required for Str"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Str_kind; p->v.Str.s = s; p->v.Str.kind = kind; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty FormattedValue(expr_ty value, int conversion, expr_ty format_spec, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for FormattedValue"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = FormattedValue_kind; p->v.FormattedValue.value = value; p->v.FormattedValue.conversion = conversion; p->v.FormattedValue.format_spec = format_spec; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty JoinedStr(asdl_seq * values, int lineno, int col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = JoinedStr_kind; p->v.JoinedStr.values = values; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Bytes(bytes s, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!s) { PyErr_SetString(PyExc_ValueError, "field s is required for Bytes"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, 
sizeof(*p)); if (!p) return NULL; p->kind = Bytes_kind; p->v.Bytes.s = s; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty NameConstant(singleton value, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for NameConstant"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = NameConstant_kind; p->v.NameConstant.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Ellipsis(int lineno, int col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Ellipsis_kind; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Constant(constant value, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Constant"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Constant_kind; p->v.Constant.value = value; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Attribute(expr_ty value, identifier attr, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Attribute"); return NULL; } if (!attr) { PyErr_SetString(PyExc_ValueError, "field attr is required for Attribute"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Attribute"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Attribute_kind; p->v.Attribute.value = value; p->v.Attribute.attr = attr; p->v.Attribute.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Subscript(expr_ty value, slice_ty slice, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is 
required for Subscript"); return NULL; } if (!slice) { PyErr_SetString(PyExc_ValueError, "field slice is required for Subscript"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Subscript"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Subscript_kind; p->v.Subscript.value = value; p->v.Subscript.slice = slice; p->v.Subscript.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Starred(expr_ty value, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Starred"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Starred"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Starred_kind; p->v.Starred.value = value; p->v.Starred.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Name(identifier id, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!id) { PyErr_SetString(PyExc_ValueError, "field id is required for Name"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Name"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Name_kind; p->v.Name.id = id; p->v.Name.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty List(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for List"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = List_kind; p->v.List.elts = elts; p->v.List.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Tuple(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena) { expr_ty p; if 
(!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Tuple"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Tuple_kind; p->v.Tuple.elts = elts; p->v.Tuple.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } slice_ty Slice(expr_ty lower, expr_ty upper, expr_ty step, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Slice_kind; p->v.Slice.lower = lower; p->v.Slice.upper = upper; p->v.Slice.step = step; return p; } slice_ty ExtSlice(asdl_seq * dims, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ExtSlice_kind; p->v.ExtSlice.dims = dims; return p; } slice_ty Index(expr_ty value, PyArena *arena) { slice_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Index"); return NULL; } p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Index_kind; p->v.Index.value = value; return p; } comprehension_ty comprehension(expr_ty target, expr_ty iter, asdl_seq * ifs, int is_async, PyArena *arena) { comprehension_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for comprehension"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for comprehension"); return NULL; } p = (comprehension_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->target = target; p->iter = iter; p->ifs = ifs; p->is_async = is_async; return p; } excepthandler_ty ExceptHandler(expr_ty type, identifier name, asdl_seq * body, int lineno, int col_offset, PyArena *arena) { excepthandler_ty p; p = (excepthandler_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ExceptHandler_kind; p->v.ExceptHandler.type = type; p->v.ExceptHandler.name = name; p->v.ExceptHandler.body = body; p->lineno = lineno; p->col_offset = col_offset; return p; } arguments_ty 
arguments(asdl_seq * args, arg_ty vararg, asdl_seq * kwonlyargs, asdl_seq * kw_defaults, arg_ty kwarg, asdl_seq * defaults, PyArena *arena) { arguments_ty p; p = (arguments_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->args = args; p->vararg = vararg; p->kwonlyargs = kwonlyargs; p->kw_defaults = kw_defaults; p->kwarg = kwarg; p->defaults = defaults; return p; } arg_ty arg(identifier arg, expr_ty annotation, string type_comment, int lineno, int col_offset, PyArena *arena) { arg_ty p; if (!arg) { PyErr_SetString(PyExc_ValueError, "field arg is required for arg"); return NULL; } p = (arg_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->arg = arg; p->annotation = annotation; p->type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } keyword_ty keyword(identifier arg, expr_ty value, PyArena *arena) { keyword_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for keyword"); return NULL; } p = (keyword_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->arg = arg; p->value = value; return p; } alias_ty alias(identifier name, identifier asname, PyArena *arena) { alias_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for alias"); return NULL; } p = (alias_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->name = name; p->asname = asname; return p; } withitem_ty withitem(expr_ty context_expr, expr_ty optional_vars, PyArena *arena) { withitem_ty p; if (!context_expr) { PyErr_SetString(PyExc_ValueError, "field context_expr is required for withitem"); return NULL; } p = (withitem_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->context_expr = context_expr; p->optional_vars = optional_vars; return p; } type_ignore_ty TypeIgnore(int lineno, PyArena *arena) { type_ignore_ty p; p = (type_ignore_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = TypeIgnore_kind; p->v.TypeIgnore.lineno = lineno; return p; } PyObject* 
ast2obj_mod(void* _o) { mod_ty o = (mod_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_INCREF(Py_None); return Py_None; } switch (o->kind) { case Module_kind: result = PyType_GenericNew(Module_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Module.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Module.type_ignores, ast2obj_type_ignore); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_ignores, value) == -1) goto failed; Py_DECREF(value); break; case Interactive_kind: result = PyType_GenericNew(Interactive_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Interactive.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case Expression_kind: result = PyType_GenericNew(Expression_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expression.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case FunctionType_kind: result = PyType_GenericNew(FunctionType_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.FunctionType.argtypes, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_argtypes, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FunctionType.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); break; case Suite_kind: result = PyType_GenericNew(Suite_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Suite.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* 
ast2obj_stmt(void* _o) { stmt_ty o = (stmt_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_INCREF(Py_None); return Py_None; } switch (o->kind) { case FunctionDef_kind: result = PyType_GenericNew(FunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.FunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.FunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.FunctionDef.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFunctionDef_kind: result = PyType_GenericNew(AsyncFunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.AsyncFunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.AsyncFunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFunctionDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_list(o->v.AsyncFunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncFunctionDef.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case ClassDef_kind: result = PyType_GenericNew(ClassDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ClassDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.bases, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_bases, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.keywords, ast2obj_keyword); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); break; case Return_kind: result = PyType_GenericNew(Return_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Return.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Delete_kind: result = PyType_GenericNew(Delete_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Delete.targets, ast2obj_expr); if (!value) goto failed; if 
(_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); break; case Assign_kind: result = PyType_GenericNew(Assign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Assign.targets, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.Assign.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AugAssign_kind: result = PyType_GenericNew(AugAssign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AugAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_operator(o->v.AugAssign.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AugAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case AnnAssign_kind: result = PyType_GenericNew(AnnAssign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AnnAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.annotation); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.AnnAssign.simple); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_simple, 
value) == -1) goto failed; Py_DECREF(value); break; case For_kind: result = PyType_GenericNew(For_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.For.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.For.iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.For.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFor_kind: result = PyType_GenericNew(AsyncFor_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AsyncFor.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFor.iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncFor.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case While_kind: result = PyType_GenericNew(While_type, NULL, NULL); if (!result) goto failed; 
value = ast2obj_expr(o->v.While.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case If_kind: result = PyType_GenericNew(If_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.If.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case With_kind: result = PyType_GenericNew(With_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.With.items, ast2obj_withitem); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.With.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.With.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncWith_kind: result = PyType_GenericNew(AsyncWith_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.AsyncWith.items, ast2obj_withitem); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_list(o->v.AsyncWith.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncWith.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case Raise_kind: result = PyType_GenericNew(Raise_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Raise.exc); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_exc, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Raise.cause); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_cause, value) == -1) goto failed; Py_DECREF(value); break; case Try_kind: result = PyType_GenericNew(Try_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Try.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.handlers, ast2obj_excepthandler); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_handlers, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.finalbody, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_finalbody, value) == -1) goto failed; Py_DECREF(value); break; case Assert_kind: result = PyType_GenericNew(Assert_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Assert.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assert.msg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_msg, value) == -1) goto failed; Py_DECREF(value); break; case Import_kind: result = 
PyType_GenericNew(Import_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Import.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case ImportFrom_kind: result = PyType_GenericNew(ImportFrom_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ImportFrom.module); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_module, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ImportFrom.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.ImportFrom.level); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_level, value) == -1) goto failed; Py_DECREF(value); break; case Global_kind: result = PyType_GenericNew(Global_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Global.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case Nonlocal_kind: result = PyType_GenericNew(Nonlocal_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Nonlocal.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case Expr_kind: result = PyType_GenericNew(Expr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expr.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Pass_kind: result = PyType_GenericNew(Pass_type, NULL, NULL); if (!result) goto failed; break; case Break_kind: result = PyType_GenericNew(Break_type, NULL, NULL); if (!result) goto failed; break; case Continue_kind: result = PyType_GenericNew(Continue_type, NULL, NULL); if (!result) goto failed; break; } value 
/* --- epilogue of ast2obj_stmt (body continues from the previous chunk):
 * attach the location attributes shared by all stmt kinds, then the common
 * success/failure exits.  NOTE(review): generated by asdl_c.py — do not
 * hand-edit logic; regenerate from the ASDL grammar instead. --- */
= ast2obj_int(o->lineno);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed;
Py_DECREF(value);
value = ast2obj_int(o->col_offset);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed;
Py_DECREF(value);
return result;
failed:
/* On any error, drop whatever was built so far; 'value' may be NULL here. */
Py_XDECREF(value);
Py_XDECREF(result);
return NULL;
}

/*
 * Convert a C-level expr_ty AST node into the corresponding Python AST
 * object (an instance of the matching _ast3 type).  A NULL node (an absent
 * optional field) maps to Py_None.  Returns a new reference, or NULL with
 * an exception set on failure.  Generated code: one switch case per expr
 * constructor, one SetAttr per ASDL field.
 */
PyObject* ast2obj_expr(void* _o)
{
    expr_ty o = (expr_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    switch (o->kind) {
    case BoolOp_kind:
        result = PyType_GenericNew(BoolOp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_boolop(o->v.BoolOp.op);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.BoolOp.values, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case BinOp_kind:
        result = PyType_GenericNew(BinOp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.BinOp.left);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_operator(o->v.BinOp.op);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.BinOp.right);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_right, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case UnaryOp_kind:
        result = PyType_GenericNew(UnaryOp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_unaryop(o->v.UnaryOp.op);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.UnaryOp.operand);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_operand, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Lambda_kind:
        result = PyType_GenericNew(Lambda_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_arguments(o->v.Lambda.args);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.Lambda.body);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case IfExp_kind:
        result = PyType_GenericNew(IfExp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.IfExp.test);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.IfExp.body);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.IfExp.orelse);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Dict_kind:
        result = PyType_GenericNew(Dict_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_list(o->v.Dict.keys, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_keys, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.Dict.values, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Set_kind:
        result = PyType_GenericNew(Set_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_list(o->v.Set.elts, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case ListComp_kind:
        result = PyType_GenericNew(ListComp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.ListComp.elt);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.ListComp.generators, ast2obj_comprehension);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case SetComp_kind:
        result = PyType_GenericNew(SetComp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.SetComp.elt);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.SetComp.generators, ast2obj_comprehension);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case DictComp_kind:
        result = PyType_GenericNew(DictComp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.DictComp.key);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_key, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.DictComp.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.DictComp.generators, ast2obj_comprehension);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case GeneratorExp_kind:
        result = PyType_GenericNew(GeneratorExp_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.GeneratorExp.elt);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.GeneratorExp.generators, ast2obj_comprehension);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Await_kind:
        result = PyType_GenericNew(Await_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Await.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Yield_kind:
        result = PyType_GenericNew(Yield_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Yield.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case YieldFrom_kind:
        result = PyType_GenericNew(YieldFrom_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.YieldFrom.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Compare_kind:
        result = PyType_GenericNew(Compare_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Compare.left);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed;
        Py_DECREF(value);
        /* 'ops' holds raw enum values, so it cannot go through ast2obj_list:
         * build the Python list by hand from the cmpop singletons. */
        {
            Py_ssize_t i, n = asdl_seq_LEN(o->v.Compare.ops);
            value = PyList_New(n);
            if (!value) goto failed;
            for(i = 0; i < n; i++)
                PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(o->v.Compare.ops, i)));
        }
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_ops, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.Compare.comparators, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_comparators, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Call_kind:
        result = PyType_GenericNew(Call_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Call.func);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_func, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.Call.args, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.Call.keywords, ast2obj_keyword);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Num_kind:
        result = PyType_GenericNew(Num_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_object(o->v.Num.n);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_n, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Str_kind:
        result = PyType_GenericNew(Str_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_string(o->v.Str.s);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_s, value) == -1) goto failed;
        Py_DECREF(value);
        /* typed_ast extension: Str carries a 'kind' (string prefix) field. */
        value = ast2obj_string(o->v.Str.kind);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_kind, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case FormattedValue_kind:
        result = PyType_GenericNew(FormattedValue_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.FormattedValue.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_int(o->v.FormattedValue.conversion);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_conversion, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.FormattedValue.format_spec);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_format_spec, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case JoinedStr_kind:
        result = PyType_GenericNew(JoinedStr_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_list(o->v.JoinedStr.values, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Bytes_kind:
        result = PyType_GenericNew(Bytes_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_bytes(o->v.Bytes.s);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_s, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case NameConstant_kind:
        result = PyType_GenericNew(NameConstant_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_singleton(o->v.NameConstant.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Ellipsis_kind:
        /* Ellipsis has no fields beyond the shared location attributes. */
        result = PyType_GenericNew(Ellipsis_type, NULL, NULL);
        if (!result) goto failed;
        break;
    case Constant_kind:
        result = PyType_GenericNew(Constant_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_constant(o->v.Constant.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Attribute_kind:
        result = PyType_GenericNew(Attribute_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Attribute.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_identifier(o->v.Attribute.attr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_attr, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr_context(o->v.Attribute.ctx);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Subscript_kind:
        result = PyType_GenericNew(Subscript_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Subscript.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_slice(o->v.Subscript.slice);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_slice, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr_context(o->v.Subscript.ctx);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Starred_kind:
        result = PyType_GenericNew(Starred_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Starred.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr_context(o->v.Starred.ctx);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Name_kind:
        result = PyType_GenericNew(Name_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_identifier(o->v.Name.id);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_id, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr_context(o->v.Name.ctx);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case List_kind:
        result = PyType_GenericNew(List_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_list(o->v.List.elts, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr_context(o->v.List.ctx);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Tuple_kind:
        result = PyType_GenericNew(Tuple_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_list(o->v.Tuple.elts, ast2obj_expr);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr_context(o->v.Tuple.ctx);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    }
    /* Shared epilogue: every expr carries lineno/col_offset. */
    value = ast2obj_int(o->lineno);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed;
    Py_DECREF(value);
    value = ast2obj_int(o->col_offset);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/*
 * Map an expr_context_ty enum value to the corresponding shared singleton
 * (new reference).  The function's default case continues past this chunk
 * boundary.
 */
PyObject* ast2obj_expr_context(expr_context_ty o)
{
    switch(o) {
        case Load:
            Py_INCREF(Load_singleton);
            return Load_singleton;
        case Store:
            Py_INCREF(Store_singleton);
            return Store_singleton;
        case Del:
            Py_INCREF(Del_singleton);
            return Del_singleton;
        case AugLoad:
            Py_INCREF(AugLoad_singleton);
            return AugLoad_singleton;
        case AugStore:
            Py_INCREF(AugStore_singleton);
            return AugStore_singleton;
        case Param:
            Py_INCREF(Param_singleton);
            return Param_singleton;
        default:
            /* should never happen, but just in case ... */
            PyErr_Format(PyExc_SystemError, "unknown expr_context found");
            return NULL;
    }
}

/*
 * Convert a slice_ty node (Slice / ExtSlice / Index) into its Python AST
 * object.  NULL maps to Py_None; returns a new reference or NULL on error.
 * Generated code (asdl_c.py) — regenerate rather than hand-edit.
 */
PyObject* ast2obj_slice(void* _o)
{
    slice_ty o = (slice_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    switch (o->kind) {
    case Slice_kind:
        result = PyType_GenericNew(Slice_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Slice.lower);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_lower, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.Slice.upper);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_upper, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_expr(o->v.Slice.step);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_step, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case ExtSlice_kind:
        result = PyType_GenericNew(ExtSlice_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_list(o->v.ExtSlice.dims, ast2obj_slice);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_dims, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    case Index_kind:
        result = PyType_GenericNew(Index_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.Index.value);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    }
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/* Map a boolop_ty enum value to its shared singleton (new reference). */
PyObject* ast2obj_boolop(boolop_ty o)
{
    switch(o) {
        case And:
            Py_INCREF(And_singleton);
            return And_singleton;
        case Or:
            Py_INCREF(Or_singleton);
            return Or_singleton;
        default:
            /* should never happen, but just in case ... */
            PyErr_Format(PyExc_SystemError, "unknown boolop found");
            return NULL;
    }
}

/* Map an operator_ty enum value to its shared singleton (new reference). */
PyObject* ast2obj_operator(operator_ty o)
{
    switch(o) {
        case Add:
            Py_INCREF(Add_singleton);
            return Add_singleton;
        case Sub:
            Py_INCREF(Sub_singleton);
            return Sub_singleton;
        case Mult:
            Py_INCREF(Mult_singleton);
            return Mult_singleton;
        case MatMult:
            Py_INCREF(MatMult_singleton);
            return MatMult_singleton;
        case Div:
            Py_INCREF(Div_singleton);
            return Div_singleton;
        case Mod:
            Py_INCREF(Mod_singleton);
            return Mod_singleton;
        case Pow:
            Py_INCREF(Pow_singleton);
            return Pow_singleton;
        case LShift:
            Py_INCREF(LShift_singleton);
            return LShift_singleton;
        case RShift:
            Py_INCREF(RShift_singleton);
            return RShift_singleton;
        case BitOr:
            Py_INCREF(BitOr_singleton);
            return BitOr_singleton;
        case BitXor:
            Py_INCREF(BitXor_singleton);
            return BitXor_singleton;
        case BitAnd:
            Py_INCREF(BitAnd_singleton);
            return BitAnd_singleton;
        case FloorDiv:
            Py_INCREF(FloorDiv_singleton);
            return FloorDiv_singleton;
        default:
            /* should never happen, but just in case ... */
            PyErr_Format(PyExc_SystemError, "unknown operator found");
            return NULL;
    }
}

/* Map a unaryop_ty enum value to its shared singleton (new reference). */
PyObject* ast2obj_unaryop(unaryop_ty o)
{
    switch(o) {
        case Invert:
            Py_INCREF(Invert_singleton);
            return Invert_singleton;
        case Not:
            Py_INCREF(Not_singleton);
            return Not_singleton;
        case UAdd:
            Py_INCREF(UAdd_singleton);
            return UAdd_singleton;
        case USub:
            Py_INCREF(USub_singleton);
            return USub_singleton;
        default:
            /* should never happen, but just in case ... */
            PyErr_Format(PyExc_SystemError, "unknown unaryop found");
            return NULL;
    }
}

/* Map a cmpop_ty enum value to its shared singleton (new reference). */
PyObject* ast2obj_cmpop(cmpop_ty o)
{
    switch(o) {
        case Eq:
            Py_INCREF(Eq_singleton);
            return Eq_singleton;
        case NotEq:
            Py_INCREF(NotEq_singleton);
            return NotEq_singleton;
        case Lt:
            Py_INCREF(Lt_singleton);
            return Lt_singleton;
        case LtE:
            Py_INCREF(LtE_singleton);
            return LtE_singleton;
        case Gt:
            Py_INCREF(Gt_singleton);
            return Gt_singleton;
        case GtE:
            Py_INCREF(GtE_singleton);
            return GtE_singleton;
        case Is:
            Py_INCREF(Is_singleton);
            return Is_singleton;
        case IsNot:
            Py_INCREF(IsNot_singleton);
            return IsNot_singleton;
        case In:
            Py_INCREF(In_singleton);
            return In_singleton;
        case NotIn:
            Py_INCREF(NotIn_singleton);
            return NotIn_singleton;
        default:
            /* should never happen, but just in case ... */
            PyErr_Format(PyExc_SystemError, "unknown cmpop found");
            return NULL;
    }
}

/*
 * Convert a comprehension_ty node (target, iter, ifs, is_async) into its
 * Python AST object.  NULL maps to Py_None; new reference or NULL on error.
 */
PyObject* ast2obj_comprehension(void* _o)
{
    comprehension_ty o = (comprehension_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    result = PyType_GenericNew(comprehension_type, NULL, NULL);
    if (!result) return NULL;
    value = ast2obj_expr(o->target);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_expr(o->iter);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_list(o->ifs, ast2obj_expr);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_ifs, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_int(o->is_async);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_is_async, value) == -1) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/*
 * Convert an excepthandler_ty node into its Python AST object.
 * (The switch body continues past this chunk boundary.)
 */
PyObject* ast2obj_excepthandler(void* _o)
{
    excepthandler_ty o = (excepthandler_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    switch (o->kind) {
    case
    /* (continuation of ast2obj_excepthandler's switch from the previous
     * chunk: the dangling 'case' label completes here) */
    ExceptHandler_kind:
        result = PyType_GenericNew(ExceptHandler_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_expr(o->v.ExceptHandler.type);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_type, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_identifier(o->v.ExceptHandler.name);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.ExceptHandler.body, ast2obj_stmt);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    }
    /* Shared epilogue: excepthandler carries lineno/col_offset. */
    value = ast2obj_int(o->lineno);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed;
    Py_DECREF(value);
    value = ast2obj_int(o->col_offset);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/*
 * Convert an arguments_ty node (args, vararg, kwonlyargs, kw_defaults,
 * kwarg, defaults) into its Python AST object.  NULL maps to Py_None;
 * new reference or NULL on error.  Generated code (asdl_c.py).
 */
PyObject* ast2obj_arguments(void* _o)
{
    arguments_ty o = (arguments_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    result = PyType_GenericNew(arguments_type, NULL, NULL);
    if (!result) return NULL;
    value = ast2obj_list(o->args, ast2obj_arg);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_arg(o->vararg);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_vararg, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_list(o->kwonlyargs, ast2obj_arg);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_kwonlyargs, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_list(o->kw_defaults, ast2obj_expr);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_kw_defaults, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_arg(o->kwarg);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_kwarg, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_list(o->defaults, ast2obj_expr);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_defaults, value) == -1) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/*
 * Convert an arg_ty node (arg, annotation, type_comment + location) into
 * its Python AST object.  NULL maps to Py_None.
 */
PyObject* ast2obj_arg(void* _o)
{
    arg_ty o = (arg_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    result = PyType_GenericNew(arg_type, NULL, NULL);
    if (!result) return NULL;
    value = ast2obj_identifier(o->arg);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_expr(o->annotation);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_string(o->type_comment);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_int(o->lineno);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed;
    Py_DECREF(value);
    value = ast2obj_int(o->col_offset);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/* Convert a keyword_ty node (arg, value) into its Python AST object. */
PyObject* ast2obj_keyword(void* _o)
{
    keyword_ty o = (keyword_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    result = PyType_GenericNew(keyword_type, NULL, NULL);
    if (!result) return NULL;
    value = ast2obj_identifier(o->arg);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_expr(o->value);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/* Convert an alias_ty node (name, asname) into its Python AST object. */
PyObject* ast2obj_alias(void* _o)
{
    alias_ty o = (alias_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    result = PyType_GenericNew(alias_type, NULL, NULL);
    if (!result) return NULL;
    value = ast2obj_identifier(o->name);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_identifier(o->asname);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_asname, value) == -1) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/*
 * Convert a withitem_ty node (context_expr, optional_vars) into its Python
 * AST object.
 */
PyObject* ast2obj_withitem(void* _o)
{
    withitem_ty o = (withitem_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    result = PyType_GenericNew(withitem_type, NULL, NULL);
    if (!result) return NULL;
    value = ast2obj_expr(o->context_expr);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_context_expr, value) == -1) goto failed;
    Py_DECREF(value);
    value = ast2obj_expr(o->optional_vars);
    if (!value) goto failed;
    if (_PyObject_SetAttrId(result, &PyId_optional_vars, value) == -1) goto failed;
    Py_DECREF(value);
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/*
 * Convert a type_ignore_ty node (typed_ast extension: a "# type: ignore"
 * comment with its line number) into its Python AST object.
 */
PyObject* ast2obj_type_ignore(void* _o)
{
    type_ignore_ty o = (type_ignore_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    switch (o->kind) {
    case TypeIgnore_kind:
        result = PyType_GenericNew(TypeIgnore_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_int(o->v.TypeIgnore.lineno);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_lineno, value) == -1) goto failed;
        Py_DECREF(value);
        break;
    }
    return result;
failed:
    Py_XDECREF(value);
    Py_XDECREF(result);
    return NULL;
}

/*
 * Inverse direction: build a C mod_ty from a Python AST object.
 * Returns 0 on success, 1 on failure (with an exception set); all C-side
 * allocations go into 'arena'.  Py_None maps to a NULL *out.
 * (This function continues past the chunk boundary below.)
 */
int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena)
{
    int isinstance;
    PyObject *tmp = NULL;
    if (obj == Py_None) {
        *out = NULL;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Module_type);
    if (isinstance == -1) {
        return
    /* (completes the dangling 'return' from the previous chunk) */
    1;
    }
    if (isinstance) {
        /* Module(stmt* body, type_ignore* type_ignores) */
        asdl_seq* body;
        asdl_seq* type_ignores;
        if (_PyObject_HasAttrId(obj, &PyId_body)) {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            tmp = _PyObject_GetAttrId(obj, &PyId_body);
            if (tmp == NULL) goto failed;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "Module field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            body = _Ta3_asdl_seq_new(len, arena);
            if (body == NULL) goto failed;
            for (i = 0; i < len; i++) {
                stmt_ty value;
                res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);
                if (res != 0) goto failed;
                /* re-check length: conversion runs arbitrary Python code
                 * that could mutate the list under us */
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "Module field \"body\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(body, i, value);
            }
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Module");
            return 1;
        }
        if (_PyObject_HasAttrId(obj, &PyId_type_ignores)) {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            tmp = _PyObject_GetAttrId(obj, &PyId_type_ignores);
            if (tmp == NULL) goto failed;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "Module field \"type_ignores\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            type_ignores = _Ta3_asdl_seq_new(len, arena);
            if (type_ignores == NULL) goto failed;
            for (i = 0; i < len; i++) {
                type_ignore_ty value;
                res = obj2ast_type_ignore(PyList_GET_ITEM(tmp, i), &value, arena);
                if (res != 0) goto failed;
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "Module field \"type_ignores\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(type_ignores, i, value);
            }
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"type_ignores\" missing from Module");
            return 1;
        }
        *out = Module(body, type_ignores, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Interactive_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        /* Interactive(stmt* body) */
        asdl_seq* body;
        if (_PyObject_HasAttrId(obj, &PyId_body)) {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            tmp = _PyObject_GetAttrId(obj, &PyId_body);
            if (tmp == NULL) goto failed;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "Interactive field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            body = _Ta3_asdl_seq_new(len, arena);
            if (body == NULL) goto failed;
            for (i = 0; i < len; i++) {
                stmt_ty value;
                res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);
                if (res != 0) goto failed;
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "Interactive field \"body\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(body, i, value);
            }
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Interactive");
            return 1;
        }
        *out = Interactive(body, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Expression_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        /* Expression(expr body) */
        expr_ty body;
        if (_PyObject_HasAttrId(obj, &PyId_body)) {
            int res;
            tmp = _PyObject_GetAttrId(obj, &PyId_body);
            if (tmp == NULL) goto failed;
            res = obj2ast_expr(tmp, &body, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Expression");
            return 1;
        }
        *out = Expression(body, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionType_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        /* FunctionType(expr* argtypes, expr returns) — typed_ast extension
         * for function-signature type comments */
        asdl_seq* argtypes;
        expr_ty returns;
        if (_PyObject_HasAttrId(obj, &PyId_argtypes)) {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            tmp = _PyObject_GetAttrId(obj, &PyId_argtypes);
            if (tmp == NULL) goto failed;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "FunctionType field \"argtypes\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            argtypes = _Ta3_asdl_seq_new(len, arena);
            if (argtypes == NULL) goto failed;
            for (i = 0; i < len; i++) {
                expr_ty value;
                res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);
                if (res != 0) goto failed;
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "FunctionType field \"argtypes\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(argtypes, i, value);
            }
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"argtypes\" missing from FunctionType");
            return 1;
        }
        if (_PyObject_HasAttrId(obj, &PyId_returns)) {
            int res;
            tmp = _PyObject_GetAttrId(obj, &PyId_returns);
            if (tmp == NULL) goto failed;
            res = obj2ast_expr(tmp, &returns, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"returns\" missing from FunctionType");
            return 1;
        }
        *out = FunctionType(argtypes, returns, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Suite_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        /* Suite(stmt* body) */
        asdl_seq* body;
        if (_PyObject_HasAttrId(obj, &PyId_body)) {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            tmp = _PyObject_GetAttrId(obj, &PyId_body);
            if (tmp == NULL) goto failed;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "Suite field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            body = _Ta3_asdl_seq_new(len, arena);
            if (body == NULL) goto failed;
            for (i = 0; i < len; i++) {
                stmt_ty value;
                res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);
                if (res != 0) goto failed;
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "Suite field \"body\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(body, i, value);
            }
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Suite");
            return 1;
        }
        *out = Suite(body, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    /* none of the known mod subclasses matched */
    PyErr_Format(PyExc_TypeError, "expected some sort of mod, but got %R", obj);
failed:
    Py_XDECREF(tmp);
    return 1;
}

/*
 * Build a C stmt_ty from a Python AST object; 0 on success, 1 on failure.
 * NOTE(review): this function runs past the end of this chunk — the final
 * line below is intentionally cut mid string literal so it joins with the
 * continuation in the next chunk; append nothing after it.
 */
int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena)
{
    int isinstance;
    PyObject *tmp = NULL;
    int lineno;
    int col_offset;
    if (obj == Py_None) {
        *out = NULL;
        return 0;
    }
    if (_PyObject_HasAttrId(obj, &PyId_lineno)) {
        int res;
        tmp = _PyObject_GetAttrId(obj, &PyId_lineno);
        if (tmp == NULL) goto failed;
        res = obj2ast_int(tmp, &lineno, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    } else {
        PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from stmt");
        return 1;
    }
    if (_PyObject_HasAttrId(obj, &PyId_col_offset)) {
        int res;
        tmp = _PyObject_GetAttrId(obj, &PyId_col_offset);
        if (tmp == NULL) goto failed;
        res = obj2ast_int(tmp, &col_offset, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    } else {
        PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from stmt");
        return 1;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionDef_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        identifier name;
        arguments_ty args;
        asdl_seq* body;
        asdl_seq* decorator_list;
        expr_ty returns;
        string type_comment;
        if (_PyObject_HasAttrId(obj, &PyId_name)) {
            int res;
            tmp = _PyObject_GetAttrId(obj, &PyId_name);
            if (tmp == NULL) goto failed;
            res = obj2ast_identifier(tmp, &name, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from FunctionDef");
            return 1;
        }
        if (_PyObject_HasAttrId(obj, &PyId_args)) {
            int res;
            tmp = _PyObject_GetAttrId(obj, &PyId_args);
            if (tmp == NULL) goto failed;
            res = obj2ast_arguments(tmp, &args, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        } else {
            PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from FunctionDef");
            return 1;
        }
        if (_PyObject_HasAttrId(obj, &PyId_body)) {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            tmp = _PyObject_GetAttrId(obj, &PyId_body);
            if (tmp == NULL) goto failed;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "FunctionDef 
field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from FunctionDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_decorator_list)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_decorator_list); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Ta3_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from FunctionDef"); return 1; } if (exists_not_none(obj, &PyId_returns)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_returns); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { returns = NULL; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; 
Py_CLEAR(tmp); } else { type_comment = NULL; } *out = FunctionDef(name, args, body, decorator_list, returns, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFunctionDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; arguments_ty args; asdl_seq* body; asdl_seq* decorator_list; expr_ty returns; string type_comment; if (_PyObject_HasAttrId(obj, &PyId_name)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_name); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from AsyncFunctionDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_args)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_args); if (tmp == NULL) goto failed; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from AsyncFunctionDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFunctionDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_decorator_list)) { int 
res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_decorator_list); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Ta3_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from AsyncFunctionDef"); return 1; } if (exists_not_none(obj, &PyId_returns)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_returns); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { returns = NULL; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { type_comment = NULL; } *out = AsyncFunctionDef(name, args, body, decorator_list, returns, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ClassDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; asdl_seq* bases; asdl_seq* keywords; asdl_seq* body; asdl_seq* decorator_list; if (_PyObject_HasAttrId(obj, &PyId_name)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_name); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { 
PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from ClassDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_bases)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_bases); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"bases\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); bases = _Ta3_asdl_seq_new(len, arena); if (bases == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"bases\" changed size during iteration"); goto failed; } asdl_seq_SET(bases, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"bases\" missing from ClassDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_keywords)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_keywords); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = _Ta3_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty value; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from ClassDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { 
PyErr_Format(PyExc_TypeError, "ClassDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ClassDef"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_decorator_list)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_decorator_list); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Ta3_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from ClassDef"); return 1; } *out = ClassDef(name, bases, keywords, body, decorator_list, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Return_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (exists_not_none(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) 
goto failed; Py_CLEAR(tmp); } else { value = NULL; } *out = Return(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Delete_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; if (_PyObject_HasAttrId(obj, &PyId_targets)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_targets); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Delete field \"targets\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Ta3_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Delete field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Delete"); return 1; } *out = Delete(targets, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assign_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; expr_ty value; string type_comment; if (_PyObject_HasAttrId(obj, &PyId_targets)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_targets); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Assign field \"targets\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Ta3_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { 
PyErr_SetString(PyExc_RuntimeError, "Assign field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Assign"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Assign"); return 1; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { type_comment = NULL; } *out = Assign(targets, value, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AugAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; operator_ty op; expr_ty value; if (_PyObject_HasAttrId(obj, &PyId_target)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_target); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AugAssign"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_op)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_op); if (tmp == NULL) goto failed; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from AugAssign"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { 
PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from AugAssign"); return 1; } *out = AugAssign(target, op, value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AnnAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty annotation; expr_ty value; int simple; if (_PyObject_HasAttrId(obj, &PyId_target)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_target); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AnnAssign"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_annotation)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_annotation); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"annotation\" missing from AnnAssign"); return 1; } if (exists_not_none(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { value = NULL; } if (_PyObject_HasAttrId(obj, &PyId_simple)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_simple); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &simple, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"simple\" missing from AnnAssign"); return 1; } *out = AnnAssign(target, annotation, value, simple, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)For_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; string type_comment; if (_PyObject_HasAttrId(obj, &PyId_target)) { int res; tmp = 
_PyObject_GetAttrId(obj, &PyId_target); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from For"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_iter)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_iter); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from For"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "For field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from For"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_orelse)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_orelse); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "For field 
\"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from For"); return 1; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { type_comment = NULL; } *out = For(target, iter, body, orelse, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFor_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; string type_comment; if (_PyObject_HasAttrId(obj, &PyId_target)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_target); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AsyncFor"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_iter)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_iter); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from AsyncFor"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) 
{ PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFor"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_orelse)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_orelse); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from AsyncFor"); return 1; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { type_comment = NULL; } *out = AsyncFor(target, iter, body, orelse, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)While_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (_PyObject_HasAttrId(obj, &PyId_test)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_test); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from While"); return 1; } if 
(_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from While"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_orelse)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_orelse); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from While"); return 1; } *out = While(test, body, orelse, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)If_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (_PyObject_HasAttrId(obj, &PyId_test)) { int res; tmp = 
_PyObject_GetAttrId(obj, &PyId_test); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from If"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from If"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_orelse)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_orelse); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from If"); return 1; } *out = If(test, body, orelse, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance 
= PyObject_IsInstance(obj, (PyObject*)With_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; string type_comment; if (_PyObject_HasAttrId(obj, &PyId_items)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_items); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Ta3_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty value; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"items\" changed size during iteration"); goto failed; } asdl_seq_SET(items, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"items\" missing from With"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from With"); return 1; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); 
if (res != 0) goto failed; Py_CLEAR(tmp); } else { type_comment = NULL; } *out = With(items, body, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncWith_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; string type_comment; if (_PyObject_HasAttrId(obj, &PyId_items)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_items); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Ta3_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty value; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"items\" changed size during iteration"); goto failed; } asdl_seq_SET(items, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"items\" missing from AsyncWith"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field 
\"body\" missing from AsyncWith"); return 1; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { type_comment = NULL; } *out = AsyncWith(items, body, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Raise_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty exc; expr_ty cause; if (exists_not_none(obj, &PyId_exc)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_exc); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &exc, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { exc = NULL; } if (exists_not_none(obj, &PyId_cause)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_cause); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &cause, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { cause = NULL; } *out = Raise(exc, cause, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Try_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; asdl_seq* handlers; asdl_seq* orelse; asdl_seq* finalbody; if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, 
value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Try"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_handlers)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_handlers); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"handlers\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); handlers = _Ta3_asdl_seq_new(len, arena); if (handlers == NULL) goto failed; for (i = 0; i < len; i++) { excepthandler_ty value; res = obj2ast_excepthandler(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"handlers\" changed size during iteration"); goto failed; } asdl_seq_SET(handlers, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"handlers\" missing from Try"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_orelse)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_orelse); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from Try"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_finalbody)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_finalbody); if (tmp == NULL) goto failed; if 
(!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"finalbody\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); finalbody = _Ta3_asdl_seq_new(len, arena); if (finalbody == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"finalbody\" changed size during iteration"); goto failed; } asdl_seq_SET(finalbody, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"finalbody\" missing from Try"); return 1; } *out = Try(body, handlers, orelse, finalbody, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assert_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty msg; if (_PyObject_HasAttrId(obj, &PyId_test)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_test); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from Assert"); return 1; } if (exists_not_none(obj, &PyId_msg)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_msg); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &msg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { msg = NULL; } *out = Assert(test, msg, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Import_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_HasAttrId(obj, &PyId_names)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_names); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Import field \"names\" must be a list, not a %.200s", 
tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { alias_ty value; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Import field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Import"); return 1; } *out = Import(names, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ImportFrom_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier module; asdl_seq* names; int level; if (exists_not_none(obj, &PyId_module)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_module); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &module, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { module = NULL; } if (_PyObject_HasAttrId(obj, &PyId_names)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_names); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ImportFrom field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { alias_ty value; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ImportFrom field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from ImportFrom"); return 1; } if (exists_not_none(obj, &PyId_level)) { int res; tmp = _PyObject_GetAttrId(obj, 
&PyId_level); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &level, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { level = 0; } *out = ImportFrom(module, names, level, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Global_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_HasAttrId(obj, &PyId_names)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_names); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Global field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier value; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Global field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Global"); return 1; } *out = Global(names, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Nonlocal_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_HasAttrId(obj, &PyId_names)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_names); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Nonlocal field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier value; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) 
goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Nonlocal field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Nonlocal"); return 1; } *out = Nonlocal(names, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Expr_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Expr"); return 1; } *out = Expr(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Pass_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Pass(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Break_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Break(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Continue_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Continue(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of stmt, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (_PyObject_HasAttrId(obj, &PyId_lineno)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_lineno); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &lineno, 
arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from expr"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_col_offset)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_col_offset); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from expr"); return 1; } isinstance = PyObject_IsInstance(obj, (PyObject*)BoolOp_type); if (isinstance == -1) { return 1; } if (isinstance) { boolop_ty op; asdl_seq* values; if (_PyObject_HasAttrId(obj, &PyId_op)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_op); if (tmp == NULL) goto failed; res = obj2ast_boolop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BoolOp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_values)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_values); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "BoolOp field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Ta3_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "BoolOp field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from BoolOp"); return 1; } *out = BoolOp(op, values, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)BinOp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty 
left; operator_ty op; expr_ty right; if (_PyObject_HasAttrId(obj, &PyId_left)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_left); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"left\" missing from BinOp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_op)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_op); if (tmp == NULL) goto failed; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BinOp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_right)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_right); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &right, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"right\" missing from BinOp"); return 1; } *out = BinOp(left, op, right, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)UnaryOp_type); if (isinstance == -1) { return 1; } if (isinstance) { unaryop_ty op; expr_ty operand; if (_PyObject_HasAttrId(obj, &PyId_op)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_op); if (tmp == NULL) goto failed; res = obj2ast_unaryop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from UnaryOp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_operand)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_operand); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &operand, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"operand\" missing from UnaryOp"); return 1; } *out = UnaryOp(op, operand, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, 
(PyObject*)Lambda_type); if (isinstance == -1) { return 1; } if (isinstance) { arguments_ty args; expr_ty body; if (_PyObject_HasAttrId(obj, &PyId_args)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_args); if (tmp == NULL) goto failed; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from Lambda"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Lambda"); return 1; } *out = Lambda(args, body, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)IfExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty body; expr_ty orelse; if (_PyObject_HasAttrId(obj, &PyId_test)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_test); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from IfExp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_body)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from IfExp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_orelse)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_orelse); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &orelse, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from IfExp"); return 1; } *out = IfExp(test, body, orelse, lineno, col_offset, arena); if (*out 
== NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Dict_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* keys; asdl_seq* values; if (_PyObject_HasAttrId(obj, &PyId_keys)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_keys); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"keys\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keys = _Ta3_asdl_seq_new(len, arena); if (keys == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"keys\" changed size during iteration"); goto failed; } asdl_seq_SET(keys, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"keys\" missing from Dict"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_values)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_values); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Ta3_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from Dict"); return 1; } *out = Dict(keys, values, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Set_type); if 
(isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; if (_PyObject_HasAttrId(obj, &PyId_elts)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_elts); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Set field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Ta3_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Set field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Set"); return 1; } *out = Set(elts, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ListComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_HasAttrId(obj, &PyId_elt)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_elt); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from ListComp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_generators)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_generators); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ListComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty value; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 
0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ListComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from ListComp"); return 1; } *out = ListComp(elt, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)SetComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_HasAttrId(obj, &PyId_elt)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_elt); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from SetComp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_generators)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_generators); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "SetComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty value; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "SetComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from SetComp"); return 1; } *out = SetComp(elt, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)DictComp_type); if (isinstance == -1) { return 1; } if 
(isinstance) { expr_ty key; expr_ty value; asdl_seq* generators; if (_PyObject_HasAttrId(obj, &PyId_key)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_key); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &key, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"key\" missing from DictComp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from DictComp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_generators)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_generators); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "DictComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty value; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "DictComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from DictComp"); return 1; } *out = DictComp(key, value, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)GeneratorExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_HasAttrId(obj, &PyId_elt)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_elt); if (tmp == NULL) goto failed; res = 
obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from GeneratorExp"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_generators)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_generators); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "GeneratorExp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty value; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "GeneratorExp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from GeneratorExp"); return 1; } *out = GeneratorExp(elt, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Await_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Await"); return 1; } *out = Await(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Yield_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (exists_not_none(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = 
obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { value = NULL; } *out = Yield(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)YieldFrom_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from YieldFrom"); return 1; } *out = YieldFrom(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Compare_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty left; asdl_int_seq* ops; asdl_seq* comparators; if (_PyObject_HasAttrId(obj, &PyId_left)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_left); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"left\" missing from Compare"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ops)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_ops); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"ops\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); ops = _Ta3_asdl_int_seq_new(len, arena); if (ops == NULL) goto failed; for (i = 0; i < len; i++) { cmpop_ty value; res = obj2ast_cmpop(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"ops\" changed size during iteration"); goto failed; } asdl_seq_SET(ops, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required 
field \"ops\" missing from Compare"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_comparators)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_comparators); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"comparators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); comparators = _Ta3_asdl_seq_new(len, arena); if (comparators == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"comparators\" changed size during iteration"); goto failed; } asdl_seq_SET(comparators, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"comparators\" missing from Compare"); return 1; } *out = Compare(left, ops, comparators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Call_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty func; asdl_seq* args; asdl_seq* keywords; if (_PyObject_HasAttrId(obj, &PyId_func)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_func); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &func, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"func\" missing from Call"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_args)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_args); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); args = _Ta3_asdl_seq_new(len, arena); if (args == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, 
i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"args\" changed size during iteration"); goto failed; } asdl_seq_SET(args, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from Call"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_keywords)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_keywords); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = _Ta3_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty value; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from Call"); return 1; } *out = Call(func, args, keywords, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Num_type); if (isinstance == -1) { return 1; } if (isinstance) { object n; if (_PyObject_HasAttrId(obj, &PyId_n)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_n); if (tmp == NULL) goto failed; res = obj2ast_object(tmp, &n, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"n\" missing from Num"); return 1; } *out = Num(n, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Str_type); if (isinstance == -1) { return 1; } if (isinstance) { string s; string kind; if (_PyObject_HasAttrId(obj, &PyId_s)) { int res; tmp = 
_PyObject_GetAttrId(obj, &PyId_s); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &s, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"s\" missing from Str"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_kind)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_kind); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &kind, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"kind\" missing from Str"); return 1; } *out = Str(s, kind, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)FormattedValue_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; int conversion; expr_ty format_spec; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from FormattedValue"); return 1; } if (exists_not_none(obj, &PyId_conversion)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_conversion); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &conversion, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { conversion = 0; } if (exists_not_none(obj, &PyId_format_spec)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_format_spec); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &format_spec, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { format_spec = NULL; } *out = FormattedValue(value, conversion, format_spec, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)JoinedStr_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* values; if (_PyObject_HasAttrId(obj, &PyId_values)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = 
_PyObject_GetAttrId(obj, &PyId_values); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "JoinedStr field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Ta3_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "JoinedStr field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from JoinedStr"); return 1; } *out = JoinedStr(values, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Bytes_type); if (isinstance == -1) { return 1; } if (isinstance) { bytes s; if (_PyObject_HasAttrId(obj, &PyId_s)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_s); if (tmp == NULL) goto failed; res = obj2ast_bytes(tmp, &s, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"s\" missing from Bytes"); return 1; } *out = Bytes(s, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)NameConstant_type); if (isinstance == -1) { return 1; } if (isinstance) { singleton value; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_singleton(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from NameConstant"); return 1; } *out = NameConstant(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Ellipsis_type); if 
(isinstance == -1) { return 1; } if (isinstance) { *out = Ellipsis(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Constant_type); if (isinstance == -1) { return 1; } if (isinstance) { constant value; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_constant(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Constant"); return 1; } *out = Constant(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Attribute_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; identifier attr; expr_context_ty ctx; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Attribute"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_attr)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_attr); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &attr, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"attr\" missing from Attribute"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ctx)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_ctx); if (tmp == NULL) goto failed; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Attribute"); return 1; } *out = Attribute(value, attr, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Subscript_type); if (isinstance 
== -1) { return 1; } if (isinstance) { expr_ty value; slice_ty slice; expr_context_ty ctx; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Subscript"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_slice)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_slice); if (tmp == NULL) goto failed; res = obj2ast_slice(tmp, &slice, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"slice\" missing from Subscript"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ctx)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_ctx); if (tmp == NULL) goto failed; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Subscript"); return 1; } *out = Subscript(value, slice, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Starred_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; expr_context_ty ctx; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Starred"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ctx)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_ctx); if (tmp == NULL) goto failed; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Starred"); return 1; } *out = Starred(value, ctx, lineno, col_offset, arena); if (*out 
== NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Name_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier id; expr_context_ty ctx; if (_PyObject_HasAttrId(obj, &PyId_id)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_id); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &id, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"id\" missing from Name"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ctx)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_ctx); if (tmp == NULL) goto failed; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Name"); return 1; } *out = Name(id, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)List_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (_PyObject_HasAttrId(obj, &PyId_elts)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_elts); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "List field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Ta3_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "List field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from List"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ctx)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_ctx); if (tmp == NULL) goto failed; res = 
obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from List"); return 1; } *out = List(elts, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Tuple_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (_PyObject_HasAttrId(obj, &PyId_elts)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_elts); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Tuple field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Ta3_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Tuple field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Tuple"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ctx)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_ctx); if (tmp == NULL) goto failed; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Tuple"); return 1; } *out = Tuple(elts, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of expr, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena) { int isinstance; isinstance = PyObject_IsInstance(obj, (PyObject *)Load_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Load; 
/* (continuation of obj2ast_expr_context: remaining singleton-enum checks) */ return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Store_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Store; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Del_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Del; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)AugLoad_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = AugLoad; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)AugStore_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = AugStore; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Param_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Param; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of expr_context, but got %R", obj); return 1; }
/* obj2ast_slice: convert a Python-level slice node (a Slice, ExtSlice or
 * Index instance, or None) into a C slice_ty allocated in 'arena'.
 * Returns 0 on success and stores the result through 'out'; returns 1 with
 * a Python exception set on failure.  Py_None maps to *out == NULL.
 * NOTE(review): this reads as asdl_c.py-generated code -- confirm before
 * editing by hand. */
int obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; if (obj == Py_None) { *out = NULL; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Slice_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty lower; expr_ty upper; expr_ty step; if (exists_not_none(obj, &PyId_lower)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_lower); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &lower, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { lower = NULL; } if (exists_not_none(obj, &PyId_upper)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_upper); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &upper, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { upper = NULL; } if (exists_not_none(obj, &PyId_step)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_step); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &step, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { step = NULL; } *out = Slice(lower, upper, step, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ExtSlice_type); if (isinstance
/* ExtSlice branch: 'dims' must be a Python list; it is re-checked for size
 * changes on every iteration because obj2ast_slice can run arbitrary code. */
== -1) { return 1; } if (isinstance) { asdl_seq* dims; if (_PyObject_HasAttrId(obj, &PyId_dims)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_dims); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ExtSlice field \"dims\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); dims = _Ta3_asdl_seq_new(len, arena); if (dims == NULL) goto failed; for (i = 0; i < len; i++) { slice_ty value; res = obj2ast_slice(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ExtSlice field \"dims\" changed size during iteration"); goto failed; } asdl_seq_SET(dims, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"dims\" missing from ExtSlice"); return 1; } *out = ExtSlice(dims, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Index_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Index"); return 1; } *out = Index(value, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of slice, but got %R", obj); failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_boolop: map a Python And/Or instance to the boolop_ty enum.
 * Returns 0 on success, 1 with TypeError set for any other object. */
int obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena) { int isinstance; isinstance = PyObject_IsInstance(obj, (PyObject *)And_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = And; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Or_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Or; return 0; } PyErr_Format(PyExc_TypeError, "expected some
sort of boolop, but got %R", obj); return 1; }
/* obj2ast_operator: map a Python binary-operator node instance (Add, Sub,
 * Mult, MatMult, Div, Mod, Pow, LShift, RShift, BitOr, BitXor, BitAnd,
 * FloorDiv) to the corresponding operator_ty enum value.
 * Returns 0 on success; returns 1 (with TypeError set for a non-operator
 * object, or with the error from PyObject_IsInstance) on failure. */
int obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena) { int isinstance; isinstance = PyObject_IsInstance(obj, (PyObject *)Add_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Add; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Sub_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Sub; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Mult_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Mult; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)MatMult_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = MatMult; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Div_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Div; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Mod_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Mod; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Pow_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Pow; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)LShift_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = LShift; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)RShift_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = RShift; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)BitOr_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = BitOr; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)BitXor_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = BitXor; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)BitAnd_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = BitAnd; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)FloorDiv_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = FloorDiv; return 0; }
PyErr_Format(PyExc_TypeError, "expected some sort of operator, but got %R", obj); return 1; }
/* obj2ast_unaryop: map a Python Invert/Not/UAdd/USub instance to the
 * unaryop_ty enum.  0 on success, 1 with TypeError set otherwise. */
int obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena) { int isinstance; isinstance = PyObject_IsInstance(obj, (PyObject *)Invert_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Invert; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Not_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Not; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)UAdd_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = UAdd; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)USub_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = USub; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of unaryop, but got %R", obj); return 1; }
/* obj2ast_cmpop: map a Python comparison-operator instance (Eq, NotEq, Lt,
 * LtE, Gt, GtE, Is, IsNot, In, NotIn) to the cmpop_ty enum. */
int obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena) { int isinstance; isinstance = PyObject_IsInstance(obj, (PyObject *)Eq_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Eq; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)NotEq_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = NotEq; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Lt_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Lt; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)LtE_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = LtE; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Gt_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Gt; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)GtE_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = GtE; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Is_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Is; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)IsNot_type); if (isinstance == -1) { return 1; } if
/* (continuation of obj2ast_cmpop: IsNot/In/NotIn checks and error exit) */ (isinstance) { *out = IsNot; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)In_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = In; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)NotIn_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = NotIn; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of cmpop, but got %R", obj); return 1; }
/* obj2ast_comprehension: convert a Python comprehension object into a
 * comprehension_ty allocated in 'arena'.  Required fields: target (expr),
 * iter (expr), ifs (list of expr), is_async (int).  Returns 0 on success;
 * 1 with an exception set on failure. */
int obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena) { PyObject* tmp = NULL; expr_ty target; expr_ty iter; asdl_seq* ifs; int is_async; if (_PyObject_HasAttrId(obj, &PyId_target)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_target); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from comprehension"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_iter)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_iter); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from comprehension"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_ifs)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_ifs); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "comprehension field \"ifs\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); ifs = _Ta3_asdl_seq_new(len, arena); if (ifs == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "comprehension field \"ifs\" changed size during iteration"); goto failed; } asdl_seq_SET(ifs, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required
field \"ifs\" missing from comprehension"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_is_async)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_is_async); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &is_async, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"is_async\" missing from comprehension"); return 1; } *out = comprehension(target, iter, ifs, is_async, arena); return 0; failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_excepthandler: convert a Python ExceptHandler object (or None)
 * into an excepthandler_ty in 'arena'.  lineno and col_offset are required;
 * type and name are optional (may be None); body is a required stmt list.
 * Returns 0 on success; 1 with an exception set on failure. */
int obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (_PyObject_HasAttrId(obj, &PyId_lineno)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_lineno); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from excepthandler"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_col_offset)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_col_offset); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from excepthandler"); return 1; } isinstance = PyObject_IsInstance(obj, (PyObject*)ExceptHandler_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty type; identifier name; asdl_seq* body; if (exists_not_none(obj, &PyId_type)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &type, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { type = NULL; } if (exists_not_none(obj, &PyId_name)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_name); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { name = NULL; } if
/* (continuation of obj2ast_excepthandler: 'body' stmt-list conversion) */ (_PyObject_HasAttrId(obj, &PyId_body)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_body); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ExceptHandler field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty value; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ExceptHandler field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ExceptHandler"); return 1; } *out = ExceptHandler(type, name, body, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of excepthandler, but got %R", obj); failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_arguments: convert a Python arguments object into an
 * arguments_ty in 'arena'.  Required: args, kwonlyargs (arg lists),
 * kw_defaults, defaults (expr lists).  Optional (may be None): vararg,
 * kwarg (single arg each).  Returns 0 on success; 1 with an exception set
 * on failure. */
int obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena) { PyObject* tmp = NULL; asdl_seq* args; arg_ty vararg; asdl_seq* kwonlyargs; asdl_seq* kw_defaults; arg_ty kwarg; asdl_seq* defaults; if (_PyObject_HasAttrId(obj, &PyId_args)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_args); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); args = _Ta3_asdl_seq_new(len, arena); if (args == NULL) goto failed; for (i = 0; i < len; i++) { arg_ty value; res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"args\" changed size during iteration"); goto failed; } asdl_seq_SET(args, i, value); }
Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from arguments"); return 1; } if (exists_not_none(obj, &PyId_vararg)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_vararg); if (tmp == NULL) goto failed; res = obj2ast_arg(tmp, &vararg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { vararg = NULL; } if (_PyObject_HasAttrId(obj, &PyId_kwonlyargs)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_kwonlyargs); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kwonlyargs\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kwonlyargs = _Ta3_asdl_seq_new(len, arena); if (kwonlyargs == NULL) goto failed; for (i = 0; i < len; i++) { arg_ty value; res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kwonlyargs\" changed size during iteration"); goto failed; } asdl_seq_SET(kwonlyargs, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"kwonlyargs\" missing from arguments"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_kw_defaults)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_kw_defaults); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kw_defaults\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kw_defaults = _Ta3_asdl_seq_new(len, arena); if (kw_defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kw_defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(kw_defaults, i, value); }
Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"kw_defaults\" missing from arguments"); return 1; } if (exists_not_none(obj, &PyId_kwarg)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_kwarg); if (tmp == NULL) goto failed; res = obj2ast_arg(tmp, &kwarg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { kwarg = NULL; } if (_PyObject_HasAttrId(obj, &PyId_defaults)) { int res; Py_ssize_t len; Py_ssize_t i; tmp = _PyObject_GetAttrId(obj, &PyId_defaults); if (tmp == NULL) goto failed; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"defaults\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); defaults = _Ta3_asdl_seq_new(len, arena); if (defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty value; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(defaults, i, value); } Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"defaults\" missing from arguments"); return 1; } *out = arguments(args, vararg, kwonlyargs, kw_defaults, kwarg, defaults, arena); return 0; failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_arg: convert a Python arg object into an arg_ty in 'arena'.
 * Required: arg (identifier), lineno, col_offset.  Optional (may be None):
 * annotation (expr), type_comment (string).  Returns 0 on success; 1 with
 * an exception set on failure. */
int obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier arg; expr_ty annotation; string type_comment; int lineno; int col_offset; if (_PyObject_HasAttrId(obj, &PyId_arg)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_arg); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"arg\" missing from arg"); return 1; } if (exists_not_none(obj, &PyId_annotation)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_annotation); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp,
&annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { annotation = NULL; } if (exists_not_none(obj, &PyId_type_comment)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_type_comment); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { type_comment = NULL; } if (_PyObject_HasAttrId(obj, &PyId_lineno)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_lineno); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from arg"); return 1; } if (_PyObject_HasAttrId(obj, &PyId_col_offset)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_col_offset); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from arg"); return 1; } *out = arg(arg, annotation, type_comment, lineno, col_offset, arena); return 0; failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_keyword: convert a Python keyword object into a keyword_ty in
 * 'arena'.  arg is optional (None for a **kwargs-style keyword); value is
 * required.  Returns 0 on success; 1 with an exception set on failure. */
int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier arg; expr_ty value; if (exists_not_none(obj, &PyId_arg)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_arg); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { arg = NULL; } if (_PyObject_HasAttrId(obj, &PyId_value)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_value); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from keyword"); return 1; } *out = keyword(arg, value, arena); return 0; failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_alias: convert a Python alias object (import name) into an
 * alias_ty in 'arena'.  name is required; asname is optional (None when no
 * 'as' clause).  Returns 0 on success; 1 with an exception set on failure. */
int obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier name; identifier asname; if
(_PyObject_HasAttrId(obj, &PyId_name)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_name); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from alias"); return 1; } if (exists_not_none(obj, &PyId_asname)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_asname); if (tmp == NULL) goto failed; res = obj2ast_identifier(tmp, &asname, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { asname = NULL; } *out = alias(name, asname, arena); return 0; failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_withitem: convert a Python withitem object into a withitem_ty in
 * 'arena'.  context_expr is required; optional_vars may be None.
 * Returns 0 on success; 1 with an exception set on failure. */
int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena) { PyObject* tmp = NULL; expr_ty context_expr; expr_ty optional_vars; if (_PyObject_HasAttrId(obj, &PyId_context_expr)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_context_expr); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &context_expr, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"context_expr\" missing from withitem"); return 1; } if (exists_not_none(obj, &PyId_optional_vars)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_optional_vars); if (tmp == NULL) goto failed; res = obj2ast_expr(tmp, &optional_vars, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else { optional_vars = NULL; } *out = withitem(context_expr, optional_vars, arena); return 0; failed: Py_XDECREF(tmp); return 1; }
/* obj2ast_type_ignore: convert a Python TypeIgnore object (or None) into a
 * type_ignore_ty in 'arena'.  lineno is required.  Returns 0 on success;
 * 1 with an exception set on failure. */
int obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; if (obj == Py_None) { *out = NULL; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)TypeIgnore_type); if (isinstance == -1) { return 1; } if (isinstance) { int lineno; if (_PyObject_HasAttrId(obj, &PyId_lineno)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_lineno); if (tmp == NULL) goto failed; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } else
{ PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from TypeIgnore"); return 1; } *out = TypeIgnore(lineno, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of type_ignore, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } PyObject *ast3_parse(PyObject *self, PyObject *args); static PyMethodDef ast3_methods[] = { {"_parse", ast3_parse, METH_VARARGS, "Parse string into typed AST."}, {NULL, NULL, 0, NULL} }; static struct PyModuleDef _astmodule3 = { PyModuleDef_HEAD_INIT, "_ast3", NULL, 0, ast3_methods }; PyMODINIT_FUNC PyInit__ast3(void) { PyObject *m, *d; if (!init_types()) return NULL; m = PyModule_Create(&_astmodule3); if (!m) return NULL; d = PyModule_GetDict(m); if (PyDict_SetItemString(d, "AST", (PyObject*)&AST_type) < 0) return NULL; if (PyModule_AddIntMacro(m, PyCF_ONLY_AST) < 0) return NULL; if (PyDict_SetItemString(d, "mod", (PyObject*)mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Module", (PyObject*)Module_type) < 0) return NULL; if (PyDict_SetItemString(d, "Interactive", (PyObject*)Interactive_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expression", (PyObject*)Expression_type) < 0) return NULL; if (PyDict_SetItemString(d, "FunctionType", (PyObject*)FunctionType_type) < 0) return NULL; if (PyDict_SetItemString(d, "Suite", (PyObject*)Suite_type) < 0) return NULL; if (PyDict_SetItemString(d, "stmt", (PyObject*)stmt_type) < 0) return NULL; if (PyDict_SetItemString(d, "FunctionDef", (PyObject*)FunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFunctionDef", (PyObject*)AsyncFunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "ClassDef", (PyObject*)ClassDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "Return", (PyObject*)Return_type) < 0) return NULL; if (PyDict_SetItemString(d, "Delete", (PyObject*)Delete_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assign", (PyObject*)Assign_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "AugAssign", (PyObject*)AugAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "AnnAssign", (PyObject*)AnnAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "For", (PyObject*)For_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFor", (PyObject*)AsyncFor_type) < 0) return NULL; if (PyDict_SetItemString(d, "While", (PyObject*)While_type) < 0) return NULL; if (PyDict_SetItemString(d, "If", (PyObject*)If_type) < 0) return NULL; if (PyDict_SetItemString(d, "With", (PyObject*)With_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncWith", (PyObject*)AsyncWith_type) < 0) return NULL; if (PyDict_SetItemString(d, "Raise", (PyObject*)Raise_type) < 0) return NULL; if (PyDict_SetItemString(d, "Try", (PyObject*)Try_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assert", (PyObject*)Assert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Import", (PyObject*)Import_type) < 0) return NULL; if (PyDict_SetItemString(d, "ImportFrom", (PyObject*)ImportFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Global", (PyObject*)Global_type) < 0) return NULL; if (PyDict_SetItemString(d, "Nonlocal", (PyObject*)Nonlocal_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expr", (PyObject*)Expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pass", (PyObject*)Pass_type) < 0) return NULL; if (PyDict_SetItemString(d, "Break", (PyObject*)Break_type) < 0) return NULL; if (PyDict_SetItemString(d, "Continue", (PyObject*)Continue_type) < 0) return NULL; if (PyDict_SetItemString(d, "expr", (PyObject*)expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BoolOp", (PyObject*)BoolOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "BinOp", (PyObject*)BinOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "UnaryOp", (PyObject*)UnaryOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lambda", (PyObject*)Lambda_type) < 0) return NULL; if (PyDict_SetItemString(d, "IfExp", (PyObject*)IfExp_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "Dict", (PyObject*)Dict_type) < 0) return NULL; if (PyDict_SetItemString(d, "Set", (PyObject*)Set_type) < 0) return NULL; if (PyDict_SetItemString(d, "ListComp", (PyObject*)ListComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "SetComp", (PyObject*)SetComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "DictComp", (PyObject*)DictComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "GeneratorExp", (PyObject*)GeneratorExp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Await", (PyObject*)Await_type) < 0) return NULL; if (PyDict_SetItemString(d, "Yield", (PyObject*)Yield_type) < 0) return NULL; if (PyDict_SetItemString(d, "YieldFrom", (PyObject*)YieldFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Compare", (PyObject*)Compare_type) < 0) return NULL; if (PyDict_SetItemString(d, "Call", (PyObject*)Call_type) < 0) return NULL; if (PyDict_SetItemString(d, "Num", (PyObject*)Num_type) < 0) return NULL; if (PyDict_SetItemString(d, "Str", (PyObject*)Str_type) < 0) return NULL; if (PyDict_SetItemString(d, "FormattedValue", (PyObject*)FormattedValue_type) < 0) return NULL; if (PyDict_SetItemString(d, "JoinedStr", (PyObject*)JoinedStr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Bytes", (PyObject*)Bytes_type) < 0) return NULL; if (PyDict_SetItemString(d, "NameConstant", (PyObject*)NameConstant_type) < 0) return NULL; if (PyDict_SetItemString(d, "Ellipsis", (PyObject*)Ellipsis_type) < 0) return NULL; if (PyDict_SetItemString(d, "Constant", (PyObject*)Constant_type) < 0) return NULL; if (PyDict_SetItemString(d, "Attribute", (PyObject*)Attribute_type) < 0) return NULL; if (PyDict_SetItemString(d, "Subscript", (PyObject*)Subscript_type) < 0) return NULL; if (PyDict_SetItemString(d, "Starred", (PyObject*)Starred_type) < 0) return NULL; if (PyDict_SetItemString(d, "Name", (PyObject*)Name_type) < 0) return NULL; if (PyDict_SetItemString(d, "List", (PyObject*)List_type) < 0) return NULL; if (PyDict_SetItemString(d, "Tuple", 
(PyObject*)Tuple_type) < 0) return NULL; if (PyDict_SetItemString(d, "expr_context", (PyObject*)expr_context_type) < 0) return NULL; if (PyDict_SetItemString(d, "Load", (PyObject*)Load_type) < 0) return NULL; if (PyDict_SetItemString(d, "Store", (PyObject*)Store_type) < 0) return NULL; if (PyDict_SetItemString(d, "Del", (PyObject*)Del_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugLoad", (PyObject*)AugLoad_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugStore", (PyObject*)AugStore_type) < 0) return NULL; if (PyDict_SetItemString(d, "Param", (PyObject*)Param_type) < 0) return NULL; if (PyDict_SetItemString(d, "slice", (PyObject*)slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Slice", (PyObject*)Slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExtSlice", (PyObject*)ExtSlice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Index", (PyObject*)Index_type) < 0) return NULL; if (PyDict_SetItemString(d, "boolop", (PyObject*)boolop_type) < 0) return NULL; if (PyDict_SetItemString(d, "And", (PyObject*)And_type) < 0) return NULL; if (PyDict_SetItemString(d, "Or", (PyObject*)Or_type) < 0) return NULL; if (PyDict_SetItemString(d, "operator", (PyObject*)operator_type) < 0) return NULL; if (PyDict_SetItemString(d, "Add", (PyObject*)Add_type) < 0) return NULL; if (PyDict_SetItemString(d, "Sub", (PyObject*)Sub_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mult", (PyObject*)Mult_type) < 0) return NULL; if (PyDict_SetItemString(d, "MatMult", (PyObject*)MatMult_type) < 0) return NULL; if (PyDict_SetItemString(d, "Div", (PyObject*)Div_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mod", (PyObject*)Mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pow", (PyObject*)Pow_type) < 0) return NULL; if (PyDict_SetItemString(d, "LShift", (PyObject*)LShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "RShift", (PyObject*)RShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitOr", (PyObject*)BitOr_type) < 0) return NULL; 
if (PyDict_SetItemString(d, "BitXor", (PyObject*)BitXor_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitAnd", (PyObject*)BitAnd_type) < 0) return NULL; if (PyDict_SetItemString(d, "FloorDiv", (PyObject*)FloorDiv_type) < 0) return NULL; if (PyDict_SetItemString(d, "unaryop", (PyObject*)unaryop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Invert", (PyObject*)Invert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Not", (PyObject*)Not_type) < 0) return NULL; if (PyDict_SetItemString(d, "UAdd", (PyObject*)UAdd_type) < 0) return NULL; if (PyDict_SetItemString(d, "USub", (PyObject*)USub_type) < 0) return NULL; if (PyDict_SetItemString(d, "cmpop", (PyObject*)cmpop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Eq", (PyObject*)Eq_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotEq", (PyObject*)NotEq_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lt", (PyObject*)Lt_type) < 0) return NULL; if (PyDict_SetItemString(d, "LtE", (PyObject*)LtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Gt", (PyObject*)Gt_type) < 0) return NULL; if (PyDict_SetItemString(d, "GtE", (PyObject*)GtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Is", (PyObject*)Is_type) < 0) return NULL; if (PyDict_SetItemString(d, "IsNot", (PyObject*)IsNot_type) < 0) return NULL; if (PyDict_SetItemString(d, "In", (PyObject*)In_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotIn", (PyObject*)NotIn_type) < 0) return NULL; if (PyDict_SetItemString(d, "comprehension", (PyObject*)comprehension_type) < 0) return NULL; if (PyDict_SetItemString(d, "excepthandler", (PyObject*)excepthandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExceptHandler", (PyObject*)ExceptHandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "arguments", (PyObject*)arguments_type) < 0) return NULL; if (PyDict_SetItemString(d, "arg", (PyObject*)arg_type) < 0) return NULL; if (PyDict_SetItemString(d, "keyword", (PyObject*)keyword_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "alias", (PyObject*)alias_type) < 0) return NULL; if (PyDict_SetItemString(d, "withitem", (PyObject*)withitem_type) < 0) return NULL; if (PyDict_SetItemString(d, "type_ignore", (PyObject*)type_ignore_type) < 0) return NULL; if (PyDict_SetItemString(d, "TypeIgnore", (PyObject*)TypeIgnore_type) < 0) return NULL; return m; } PyObject* Ta3AST_mod2obj(mod_ty t) { if (!init_types()) return NULL; return ast2obj_mod(t); } /* mode is 0 for "exec", 1 for "eval" and 2 for "single" input */ mod_ty Ta3AST_obj2mod(PyObject* ast, PyArena* arena, int mode) { mod_ty res; PyObject *req_type[3]; char *req_name[] = {"Module", "Expression", "Interactive"}; int isinstance; req_type[0] = (PyObject*)Module_type; req_type[1] = (PyObject*)Expression_type; req_type[2] = (PyObject*)Interactive_type; assert(0 <= mode && mode <= 2); if (!init_types()) return NULL; isinstance = PyObject_IsInstance(ast, req_type[mode]); if (isinstance == -1) return NULL; if (!isinstance) { PyErr_Format(PyExc_TypeError, "expected %s node, got %.400s", req_name[mode], Py_TYPE(ast)->tp_name); return NULL; } if (obj2ast_mod(ast, &res, arena) != 0) return NULL; else return res; } int Ta3AST_Check(PyObject* obj) { if (!init_types()) return -1; return PyObject_IsInstance(obj, (PyObject*)&AST_type); }
/* File automatically generated by Parser/asdl_c.py. */ #include <stddef.h> #include "Python.h" #include "Python-ast.h" static PyTypeObject AST_type; static PyTypeObject *mod_type; static PyObject* ast2obj_mod(void*); static PyTypeObject *Module_type; _Py_IDENTIFIER(body); _Py_IDENTIFIER(type_ignores); static char *Module_fields[]={ "body", "type_ignores", }; static PyTypeObject *Interactive_type; static char *Interactive_fields[]={ "body", }; static PyTypeObject *Expression_type; static char *Expression_fields[]={ "body", }; static PyTypeObject *FunctionType_type; _Py_IDENTIFIER(argtypes); _Py_IDENTIFIER(returns); static char *FunctionType_fields[]={ "argtypes", "returns", }; static PyTypeObject *Suite_type; static char *Suite_fields[]={ "body", }; static PyTypeObject *stmt_type; _Py_IDENTIFIER(lineno); _Py_IDENTIFIER(col_offset); static char *stmt_attributes[] = { "lineno", "col_offset", }; static PyObject* ast2obj_stmt(void*); static PyTypeObject *FunctionDef_type; _Py_IDENTIFIER(name); _Py_IDENTIFIER(args); _Py_IDENTIFIER(decorator_list); _Py_IDENTIFIER(type_comment); static char *FunctionDef_fields[]={ "name", "args", "body", "decorator_list", "returns", "type_comment", }; static PyTypeObject *AsyncFunctionDef_type; static char *AsyncFunctionDef_fields[]={ "name", "args", "body", "decorator_list", "returns", "type_comment", }; static PyTypeObject *ClassDef_type; _Py_IDENTIFIER(bases); _Py_IDENTIFIER(keywords); static char *ClassDef_fields[]={ "name", "bases", "keywords", "body", "decorator_list", }; static PyTypeObject *Return_type; _Py_IDENTIFIER(value); static char *Return_fields[]={ "value", }; static PyTypeObject *Delete_type; _Py_IDENTIFIER(targets); static char *Delete_fields[]={ "targets", }; static PyTypeObject *Assign_type; static char *Assign_fields[]={ "targets", "value", "type_comment", }; static PyTypeObject *AugAssign_type; _Py_IDENTIFIER(target); _Py_IDENTIFIER(op); static char *AugAssign_fields[]={ "target", "op", "value", }; static 
PyTypeObject *AnnAssign_type; _Py_IDENTIFIER(annotation); _Py_IDENTIFIER(simple); static char *AnnAssign_fields[]={ "target", "annotation", "value", "simple", }; static PyTypeObject *For_type; _Py_IDENTIFIER(iter); _Py_IDENTIFIER(orelse); static char *For_fields[]={ "target", "iter", "body", "orelse", "type_comment", }; static PyTypeObject *AsyncFor_type; static char *AsyncFor_fields[]={ "target", "iter", "body", "orelse", "type_comment", }; static PyTypeObject *While_type; _Py_IDENTIFIER(test); static char *While_fields[]={ "test", "body", "orelse", }; static PyTypeObject *If_type; static char *If_fields[]={ "test", "body", "orelse", }; static PyTypeObject *With_type; _Py_IDENTIFIER(items); static char *With_fields[]={ "items", "body", "type_comment", }; static PyTypeObject *AsyncWith_type; static char *AsyncWith_fields[]={ "items", "body", "type_comment", }; static PyTypeObject *Raise_type; _Py_IDENTIFIER(exc); _Py_IDENTIFIER(cause); static char *Raise_fields[]={ "exc", "cause", }; static PyTypeObject *Try_type; _Py_IDENTIFIER(handlers); _Py_IDENTIFIER(finalbody); static char *Try_fields[]={ "body", "handlers", "orelse", "finalbody", }; static PyTypeObject *Assert_type; _Py_IDENTIFIER(msg); static char *Assert_fields[]={ "test", "msg", }; static PyTypeObject *Import_type; _Py_IDENTIFIER(names); static char *Import_fields[]={ "names", }; static PyTypeObject *ImportFrom_type; _Py_IDENTIFIER(module); _Py_IDENTIFIER(level); static char *ImportFrom_fields[]={ "module", "names", "level", }; static PyTypeObject *Global_type; static char *Global_fields[]={ "names", }; static PyTypeObject *Nonlocal_type; static char *Nonlocal_fields[]={ "names", }; static PyTypeObject *Expr_type; static char *Expr_fields[]={ "value", }; static PyTypeObject *Pass_type; static PyTypeObject *Break_type; static PyTypeObject *Continue_type; static PyTypeObject *expr_type; static char *expr_attributes[] = { "lineno", "col_offset", }; static PyObject* ast2obj_expr(void*); static PyTypeObject 
*BoolOp_type; _Py_IDENTIFIER(values); static char *BoolOp_fields[]={ "op", "values", }; static PyTypeObject *BinOp_type; _Py_IDENTIFIER(left); _Py_IDENTIFIER(right); static char *BinOp_fields[]={ "left", "op", "right", }; static PyTypeObject *UnaryOp_type; _Py_IDENTIFIER(operand); static char *UnaryOp_fields[]={ "op", "operand", }; static PyTypeObject *Lambda_type; static char *Lambda_fields[]={ "args", "body", }; static PyTypeObject *IfExp_type; static char *IfExp_fields[]={ "test", "body", "orelse", }; static PyTypeObject *Dict_type; _Py_IDENTIFIER(keys); static char *Dict_fields[]={ "keys", "values", }; static PyTypeObject *Set_type; _Py_IDENTIFIER(elts); static char *Set_fields[]={ "elts", }; static PyTypeObject *ListComp_type; _Py_IDENTIFIER(elt); _Py_IDENTIFIER(generators); static char *ListComp_fields[]={ "elt", "generators", }; static PyTypeObject *SetComp_type; static char *SetComp_fields[]={ "elt", "generators", }; static PyTypeObject *DictComp_type; _Py_IDENTIFIER(key); static char *DictComp_fields[]={ "key", "value", "generators", }; static PyTypeObject *GeneratorExp_type; static char *GeneratorExp_fields[]={ "elt", "generators", }; static PyTypeObject *Await_type; static char *Await_fields[]={ "value", }; static PyTypeObject *Yield_type; static char *Yield_fields[]={ "value", }; static PyTypeObject *YieldFrom_type; static char *YieldFrom_fields[]={ "value", }; static PyTypeObject *Compare_type; _Py_IDENTIFIER(ops); _Py_IDENTIFIER(comparators); static char *Compare_fields[]={ "left", "ops", "comparators", }; static PyTypeObject *Call_type; _Py_IDENTIFIER(func); static char *Call_fields[]={ "func", "args", "keywords", }; static PyTypeObject *Num_type; _Py_IDENTIFIER(n); static char *Num_fields[]={ "n", }; static PyTypeObject *Str_type; _Py_IDENTIFIER(s); _Py_IDENTIFIER(kind); static char *Str_fields[]={ "s", "kind", }; static PyTypeObject *FormattedValue_type; _Py_IDENTIFIER(conversion); _Py_IDENTIFIER(format_spec); static char *FormattedValue_fields[]={ 
"value", "conversion", "format_spec", }; static PyTypeObject *JoinedStr_type; static char *JoinedStr_fields[]={ "values", }; static PyTypeObject *Bytes_type; static char *Bytes_fields[]={ "s", }; static PyTypeObject *NameConstant_type; static char *NameConstant_fields[]={ "value", }; static PyTypeObject *Ellipsis_type; static PyTypeObject *Constant_type; static char *Constant_fields[]={ "value", }; static PyTypeObject *Attribute_type; _Py_IDENTIFIER(attr); _Py_IDENTIFIER(ctx); static char *Attribute_fields[]={ "value", "attr", "ctx", }; static PyTypeObject *Subscript_type; _Py_IDENTIFIER(slice); static char *Subscript_fields[]={ "value", "slice", "ctx", }; static PyTypeObject *Starred_type; static char *Starred_fields[]={ "value", "ctx", }; static PyTypeObject *Name_type; _Py_IDENTIFIER(id); static char *Name_fields[]={ "id", "ctx", }; static PyTypeObject *List_type; static char *List_fields[]={ "elts", "ctx", }; static PyTypeObject *Tuple_type; static char *Tuple_fields[]={ "elts", "ctx", }; static PyTypeObject *expr_context_type; static PyObject *Load_singleton, *Store_singleton, *Del_singleton, *AugLoad_singleton, *AugStore_singleton, *Param_singleton; static PyObject* ast2obj_expr_context(expr_context_ty); static PyTypeObject *Load_type; static PyTypeObject *Store_type; static PyTypeObject *Del_type; static PyTypeObject *AugLoad_type; static PyTypeObject *AugStore_type; static PyTypeObject *Param_type; static PyTypeObject *slice_type; static PyObject* ast2obj_slice(void*); static PyTypeObject *Slice_type; _Py_IDENTIFIER(lower); _Py_IDENTIFIER(upper); _Py_IDENTIFIER(step); static char *Slice_fields[]={ "lower", "upper", "step", }; static PyTypeObject *ExtSlice_type; _Py_IDENTIFIER(dims); static char *ExtSlice_fields[]={ "dims", }; static PyTypeObject *Index_type; static char *Index_fields[]={ "value", }; static PyTypeObject *boolop_type; static PyObject *And_singleton, *Or_singleton; static PyObject* ast2obj_boolop(boolop_ty); static PyTypeObject *And_type; 
static PyTypeObject *Or_type; static PyTypeObject *operator_type; static PyObject *Add_singleton, *Sub_singleton, *Mult_singleton, *MatMult_singleton, *Div_singleton, *Mod_singleton, *Pow_singleton, *LShift_singleton, *RShift_singleton, *BitOr_singleton, *BitXor_singleton, *BitAnd_singleton, *FloorDiv_singleton; static PyObject* ast2obj_operator(operator_ty); static PyTypeObject *Add_type; static PyTypeObject *Sub_type; static PyTypeObject *Mult_type; static PyTypeObject *MatMult_type; static PyTypeObject *Div_type; static PyTypeObject *Mod_type; static PyTypeObject *Pow_type; static PyTypeObject *LShift_type; static PyTypeObject *RShift_type; static PyTypeObject *BitOr_type; static PyTypeObject *BitXor_type; static PyTypeObject *BitAnd_type; static PyTypeObject *FloorDiv_type; static PyTypeObject *unaryop_type; static PyObject *Invert_singleton, *Not_singleton, *UAdd_singleton, *USub_singleton; static PyObject* ast2obj_unaryop(unaryop_ty); static PyTypeObject *Invert_type; static PyTypeObject *Not_type; static PyTypeObject *UAdd_type; static PyTypeObject *USub_type; static PyTypeObject *cmpop_type; static PyObject *Eq_singleton, *NotEq_singleton, *Lt_singleton, *LtE_singleton, *Gt_singleton, *GtE_singleton, *Is_singleton, *IsNot_singleton, *In_singleton, *NotIn_singleton; static PyObject* ast2obj_cmpop(cmpop_ty); static PyTypeObject *Eq_type; static PyTypeObject *NotEq_type; static PyTypeObject *Lt_type; static PyTypeObject *LtE_type; static PyTypeObject *Gt_type; static PyTypeObject *GtE_type; static PyTypeObject *Is_type; static PyTypeObject *IsNot_type; static PyTypeObject *In_type; static PyTypeObject *NotIn_type; static PyTypeObject *comprehension_type; static PyObject* ast2obj_comprehension(void*); _Py_IDENTIFIER(ifs); _Py_IDENTIFIER(is_async); static char *comprehension_fields[]={ "target", "iter", "ifs", "is_async", }; static PyTypeObject *excepthandler_type; static char *excepthandler_attributes[] = { "lineno", "col_offset", }; static PyObject* 
ast2obj_excepthandler(void*); static PyTypeObject *ExceptHandler_type; _Py_IDENTIFIER(type); static char *ExceptHandler_fields[]={ "type", "name", "body", }; static PyTypeObject *arguments_type; static PyObject* ast2obj_arguments(void*); _Py_IDENTIFIER(vararg); _Py_IDENTIFIER(kwonlyargs); _Py_IDENTIFIER(kw_defaults); _Py_IDENTIFIER(kwarg); _Py_IDENTIFIER(defaults); static char *arguments_fields[]={ "args", "vararg", "kwonlyargs", "kw_defaults", "kwarg", "defaults", }; static PyTypeObject *arg_type; static PyObject* ast2obj_arg(void*); static char *arg_attributes[] = { "lineno", "col_offset", }; _Py_IDENTIFIER(arg); static char *arg_fields[]={ "arg", "annotation", "type_comment", }; static PyTypeObject *keyword_type; static PyObject* ast2obj_keyword(void*); static char *keyword_fields[]={ "arg", "value", }; static PyTypeObject *alias_type; static PyObject* ast2obj_alias(void*); _Py_IDENTIFIER(asname); static char *alias_fields[]={ "name", "asname", }; static PyTypeObject *withitem_type; static PyObject* ast2obj_withitem(void*); _Py_IDENTIFIER(context_expr); _Py_IDENTIFIER(optional_vars); static char *withitem_fields[]={ "context_expr", "optional_vars", }; static PyTypeObject *type_ignore_type; static PyObject* ast2obj_type_ignore(void*); static PyTypeObject *TypeIgnore_type; static char *TypeIgnore_fields[]={ "lineno", }; _Py_IDENTIFIER(_fields); _Py_IDENTIFIER(_attributes); typedef struct { PyObject_HEAD PyObject *dict; } AST_object; static void ast_dealloc(AST_object *self) { /* bpo-31095: UnTrack is needed before calling any callbacks */ PyObject_GC_UnTrack(self); Py_CLEAR(self->dict); Py_TYPE(self)->tp_free(self); } static int ast_traverse(AST_object *self, visitproc visit, void *arg) { Py_VISIT(self->dict); return 0; } static int ast_clear(AST_object *self) { Py_CLEAR(self->dict); return 0; } static int lookup_attr_id(PyObject *v, _Py_Identifier *name, PyObject **result) { PyObject *oname = _PyUnicode_FromId(name); /* borrowed */ if (!oname) { *result = NULL; 
return -1; } *result = PyObject_GetAttr(v, oname); if (*result == NULL) { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { return -1; } PyErr_Clear(); } return 0; } static int ast_type_init(PyObject *self, PyObject *args, PyObject *kw) { Py_ssize_t i, numfields = 0; int res = -1; PyObject *key, *value, *fields; if (lookup_attr_id((PyObject*)Py_TYPE(self), &PyId__fields, &fields) < 0) { goto cleanup; } if (fields) { numfields = PySequence_Size(fields); if (numfields == -1) goto cleanup; } res = 0; /* if no error occurs, this stays 0 to the end */ if (numfields < PyTuple_GET_SIZE(args)) { PyErr_Format(PyExc_TypeError, "%.400s constructor takes at most " "%zd positional argument%s", Py_TYPE(self)->tp_name, numfields, numfields == 1 ? "" : "s"); res = -1; goto cleanup; } for (i = 0; i < PyTuple_GET_SIZE(args); i++) { /* cannot be reached when fields is NULL */ PyObject *name = PySequence_GetItem(fields, i); if (!name) { res = -1; goto cleanup; } res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i)); Py_DECREF(name); if (res < 0) goto cleanup; } if (kw) { i = 0; /* needed by PyDict_Next */ while (PyDict_Next(kw, &i, &key, &value)) { res = PyObject_SetAttr(self, key, value); if (res < 0) goto cleanup; } } cleanup: Py_XDECREF(fields); return res; } /* Pickling support */ static PyObject * ast_type_reduce(PyObject *self, PyObject *unused) { _Py_IDENTIFIER(__dict__); PyObject *dict; if (lookup_attr_id(self, &PyId___dict__, &dict) < 0) { return NULL; } if (dict) { return Py_BuildValue("O()N", Py_TYPE(self), dict); } return Py_BuildValue("O()", Py_TYPE(self)); } static PyMethodDef ast_type_methods[] = { {"__reduce__", ast_type_reduce, METH_NOARGS, NULL}, {NULL} }; static PyGetSetDef ast_type_getsets[] = { {"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict}, {NULL} }; static PyTypeObject AST_type = { PyVarObject_HEAD_INIT(NULL, 0) "_ast3.AST", sizeof(AST_object), 0, (destructor)ast_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* 
tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */ 0, /* tp_doc */ (traverseproc)ast_traverse, /* tp_traverse */ (inquiry)ast_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ ast_type_methods, /* tp_methods */ 0, /* tp_members */ ast_type_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ offsetof(AST_object, dict),/* tp_dictoffset */ (initproc)ast_type_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ PyType_GenericNew, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields) { _Py_IDENTIFIER(__module__); _Py_IDENTIFIER(_ast3); PyObject *fnames, *result; int i; fnames = PyTuple_New(num_fields); if (!fnames) return NULL; for (i = 0; i < num_fields; i++) { PyObject *field = PyUnicode_FromString(fields[i]); if (!field) { Py_DECREF(fnames); return NULL; } PyTuple_SET_ITEM(fnames, i, field); } result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){OOOO}", type, base, _PyUnicode_FromId(&PyId__fields), fnames, _PyUnicode_FromId(&PyId___module__), _PyUnicode_FromId(&PyId__ast3)); Py_DECREF(fnames); return (PyTypeObject*)result; } static int add_attributes(PyTypeObject* type, char**attrs, int num_fields) { int i, result; PyObject *s, *l = PyTuple_New(num_fields); if (!l) return 0; for (i = 0; i < num_fields; i++) { s = PyUnicode_FromString(attrs[i]); if (!s) { Py_DECREF(l); return 0; } PyTuple_SET_ITEM(l, i, s); } result = _PyObject_SetAttrId((PyObject*)type, &PyId__attributes, l) >= 0; Py_DECREF(l); return result; } /* Conversion AST -> Python */ static PyObject* 
ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*)) { Py_ssize_t i, n = asdl_seq_LEN(seq); PyObject *result = PyList_New(n); PyObject *value; if (!result) return NULL; for (i = 0; i < n; i++) { value = func(asdl_seq_GET(seq, i)); if (!value) { Py_DECREF(result); return NULL; } PyList_SET_ITEM(result, i, value); } return result; } static PyObject* ast2obj_object(void *o) { if (!o) o = Py_None; Py_INCREF((PyObject*)o); return (PyObject*)o; } #define ast2obj_singleton ast2obj_object #define ast2obj_constant ast2obj_object #define ast2obj_identifier ast2obj_object #define ast2obj_string ast2obj_object #define ast2obj_bytes ast2obj_object static PyObject* ast2obj_int(long b) { return PyLong_FromLong(b); } /* Conversion Python -> AST */ static int obj2ast_singleton(PyObject *obj, PyObject** out, PyArena* arena) { if (obj != Py_None && obj != Py_True && obj != Py_False) { PyErr_SetString(PyExc_ValueError, "AST singleton must be True, False, or None"); return 1; } *out = obj; return 0; } static int obj2ast_object(PyObject* obj, PyObject** out, PyArena* arena) { if (obj == Py_None) obj = NULL; if (obj) { if (PyArena_AddPyObject(arena, obj) < 0) { *out = NULL; return -1; } Py_INCREF(obj); } *out = obj; return 0; } static int obj2ast_constant(PyObject* obj, PyObject** out, PyArena* arena) { if (obj) { if (PyArena_AddPyObject(arena, obj) < 0) { *out = NULL; return -1; } Py_INCREF(obj); } *out = obj; return 0; } static int obj2ast_identifier(PyObject* obj, PyObject** out, PyArena* arena) { if (!PyUnicode_CheckExact(obj) && obj != Py_None) { PyErr_SetString(PyExc_TypeError, "AST identifier must be of type str"); return 1; } return obj2ast_object(obj, out, arena); } static int obj2ast_string(PyObject* obj, PyObject** out, PyArena* arena) { if (!PyUnicode_CheckExact(obj) && !PyBytes_CheckExact(obj)) { PyErr_SetString(PyExc_TypeError, "AST string must be of type str"); return 1; } return obj2ast_object(obj, out, arena); } static int obj2ast_bytes(PyObject* obj, PyObject** out, 
PyArena* arena)
{
    /* NOTE(review): this chunk looks like CPython's machine-generated
       Python-ast.c (output of Parser/asdl_c.py from the ASDL grammar) --
       confirm; changes should normally be made in the generator/grammar. */
    /* Tail of obj2ast_bytes: reject anything that is not exactly a bytes
       object, then stash it in the arena via obj2ast_object.
       Returns 0 on success, 1 with an exception set on failure. */
    if (!PyBytes_CheckExact(obj)) {
        PyErr_SetString(PyExc_TypeError, "AST bytes must be of type bytes");
        return 1;
    }
    return obj2ast_object(obj, out, arena);
}

/* Convert a Python int object to a C int.
   Returns 0 on success; 1 with an exception set on failure. */
static int
obj2ast_int(PyObject* obj, int* out, PyArena* arena)
{
    int i;
    if (!PyLong_Check(obj)) {
        PyErr_Format(PyExc_ValueError, "invalid integer value: %R", obj);
        return 1;
    }
    i = _PyLong_AsInt(obj);
    if (i == -1 && PyErr_Occurred())
        return 1;
    *out = i;
    return 0;
}

/* Install empty _fields/_attributes tuples on the base AST type.
   Returns 0 on success, -1 on failure. */
static int
add_ast_fields(void)
{
    PyObject *empty_tuple, *d;
    if (PyType_Ready(&AST_type) < 0)
        return -1;
    d = AST_type.tp_dict;
    empty_tuple = PyTuple_New(0);
    if (!empty_tuple ||
        _PyDict_SetItemId(d, &PyId__fields, empty_tuple) < 0 ||
        _PyDict_SetItemId(d, &PyId__attributes, empty_tuple) < 0) {
        Py_XDECREF(empty_tuple);
        return -1;
    }
    Py_DECREF(empty_tuple);
    return 0;
}

/* Create every AST node type, plus the singleton instances for the
   parameter-less enum-like types (contexts, operators, comparisons).
   Runs at most once per process (guarded by 'initialized').
   Returns 1 on success, 0 on failure. */
static int
init_types(void)
{
    static int initialized;
    if (initialized) return 1;
    if (add_ast_fields() < 0) return 0;
    /* mod and its alternatives */
    mod_type = make_type("mod", &AST_type, NULL, 0);
    if (!mod_type) return 0;
    if (!add_attributes(mod_type, NULL, 0)) return 0;
    Module_type = make_type("Module", mod_type, Module_fields, 2);
    if (!Module_type) return 0;
    Interactive_type = make_type("Interactive", mod_type, Interactive_fields, 1);
    if (!Interactive_type) return 0;
    Expression_type = make_type("Expression", mod_type, Expression_fields, 1);
    if (!Expression_type) return 0;
    FunctionType_type = make_type("FunctionType", mod_type, FunctionType_fields, 2);
    if (!FunctionType_type) return 0;
    Suite_type = make_type("Suite", mod_type, Suite_fields, 1);
    if (!Suite_type) return 0;
    /* stmt and its alternatives */
    stmt_type = make_type("stmt", &AST_type, NULL, 0);
    if (!stmt_type) return 0;
    if (!add_attributes(stmt_type, stmt_attributes, 2)) return 0;
    FunctionDef_type = make_type("FunctionDef", stmt_type, FunctionDef_fields, 6);
    if (!FunctionDef_type) return 0;
    AsyncFunctionDef_type = make_type("AsyncFunctionDef", stmt_type, AsyncFunctionDef_fields, 6);
    if (!AsyncFunctionDef_type) return 0;
    ClassDef_type = make_type("ClassDef", stmt_type, ClassDef_fields, 5);
    if (!ClassDef_type) return 0;
    Return_type = make_type("Return", stmt_type, Return_fields, 1);
    if (!Return_type) return 0;
    Delete_type = make_type("Delete", stmt_type, Delete_fields, 1);
    if (!Delete_type) return 0;
    Assign_type = make_type("Assign", stmt_type, Assign_fields, 3);
    if (!Assign_type) return 0;
    AugAssign_type = make_type("AugAssign", stmt_type, AugAssign_fields, 3);
    if (!AugAssign_type) return 0;
    AnnAssign_type = make_type("AnnAssign", stmt_type, AnnAssign_fields, 4);
    if (!AnnAssign_type) return 0;
    For_type = make_type("For", stmt_type, For_fields, 5);
    if (!For_type) return 0;
    AsyncFor_type = make_type("AsyncFor", stmt_type, AsyncFor_fields, 5);
    if (!AsyncFor_type) return 0;
    While_type = make_type("While", stmt_type, While_fields, 3);
    if (!While_type) return 0;
    If_type = make_type("If", stmt_type, If_fields, 3);
    if (!If_type) return 0;
    With_type = make_type("With", stmt_type, With_fields, 3);
    if (!With_type) return 0;
    AsyncWith_type = make_type("AsyncWith", stmt_type, AsyncWith_fields, 3);
    if (!AsyncWith_type) return 0;
    Raise_type = make_type("Raise", stmt_type, Raise_fields, 2);
    if (!Raise_type) return 0;
    Try_type = make_type("Try", stmt_type, Try_fields, 4);
    if (!Try_type) return 0;
    Assert_type = make_type("Assert", stmt_type, Assert_fields, 2);
    if (!Assert_type) return 0;
    Import_type = make_type("Import", stmt_type, Import_fields, 1);
    if (!Import_type) return 0;
    ImportFrom_type = make_type("ImportFrom", stmt_type, ImportFrom_fields, 3);
    if (!ImportFrom_type) return 0;
    Global_type = make_type("Global", stmt_type, Global_fields, 1);
    if (!Global_type) return 0;
    Nonlocal_type = make_type("Nonlocal", stmt_type, Nonlocal_fields, 1);
    if (!Nonlocal_type) return 0;
    Expr_type = make_type("Expr", stmt_type, Expr_fields, 1);
    if (!Expr_type) return 0;
    Pass_type = make_type("Pass", stmt_type, NULL, 0);
    if (!Pass_type) return 0;
    Break_type = make_type("Break", stmt_type, NULL, 0);
    if (!Break_type) return 0;
    Continue_type = make_type("Continue", stmt_type, NULL, 0);
    if (!Continue_type) return 0;
    /* expr and its alternatives */
    expr_type = make_type("expr", &AST_type, NULL, 0);
    if (!expr_type) return 0;
    if (!add_attributes(expr_type, expr_attributes, 2)) return 0;
    BoolOp_type = make_type("BoolOp", expr_type, BoolOp_fields, 2);
    if (!BoolOp_type) return 0;
    BinOp_type = make_type("BinOp", expr_type, BinOp_fields, 3);
    if (!BinOp_type) return 0;
    UnaryOp_type = make_type("UnaryOp", expr_type, UnaryOp_fields, 2);
    if (!UnaryOp_type) return 0;
    Lambda_type = make_type("Lambda", expr_type, Lambda_fields, 2);
    if (!Lambda_type) return 0;
    IfExp_type = make_type("IfExp", expr_type, IfExp_fields, 3);
    if (!IfExp_type) return 0;
    Dict_type = make_type("Dict", expr_type, Dict_fields, 2);
    if (!Dict_type) return 0;
    Set_type = make_type("Set", expr_type, Set_fields, 1);
    if (!Set_type) return 0;
    ListComp_type = make_type("ListComp", expr_type, ListComp_fields, 2);
    if (!ListComp_type) return 0;
    SetComp_type = make_type("SetComp", expr_type, SetComp_fields, 2);
    if (!SetComp_type) return 0;
    DictComp_type = make_type("DictComp", expr_type, DictComp_fields, 3);
    if (!DictComp_type) return 0;
    GeneratorExp_type = make_type("GeneratorExp", expr_type, GeneratorExp_fields, 2);
    if (!GeneratorExp_type) return 0;
    Await_type = make_type("Await", expr_type, Await_fields, 1);
    if (!Await_type) return 0;
    Yield_type = make_type("Yield", expr_type, Yield_fields, 1);
    if (!Yield_type) return 0;
    YieldFrom_type = make_type("YieldFrom", expr_type, YieldFrom_fields, 1);
    if (!YieldFrom_type) return 0;
    Compare_type = make_type("Compare", expr_type, Compare_fields, 3);
    if (!Compare_type) return 0;
    Call_type = make_type("Call", expr_type, Call_fields, 3);
    if (!Call_type) return 0;
    Num_type = make_type("Num", expr_type, Num_fields, 1);
    if (!Num_type) return 0;
    Str_type = make_type("Str", expr_type, Str_fields, 2);
    if (!Str_type) return 0;
    FormattedValue_type = make_type("FormattedValue", expr_type, FormattedValue_fields, 3);
    if (!FormattedValue_type) return 0;
    JoinedStr_type = make_type("JoinedStr", expr_type, JoinedStr_fields, 1);
    if (!JoinedStr_type) return 0;
    Bytes_type = make_type("Bytes", expr_type, Bytes_fields, 1);
    if (!Bytes_type) return 0;
    NameConstant_type = make_type("NameConstant", expr_type, NameConstant_fields, 1);
    if (!NameConstant_type) return 0;
    Ellipsis_type = make_type("Ellipsis", expr_type, NULL, 0);
    if (!Ellipsis_type) return 0;
    Constant_type = make_type("Constant", expr_type, Constant_fields, 1);
    if (!Constant_type) return 0;
    Attribute_type = make_type("Attribute", expr_type, Attribute_fields, 3);
    if (!Attribute_type) return 0;
    Subscript_type = make_type("Subscript", expr_type, Subscript_fields, 3);
    if (!Subscript_type) return 0;
    Starred_type = make_type("Starred", expr_type, Starred_fields, 2);
    if (!Starred_type) return 0;
    Name_type = make_type("Name", expr_type, Name_fields, 2);
    if (!Name_type) return 0;
    List_type = make_type("List", expr_type, List_fields, 2);
    if (!List_type) return 0;
    Tuple_type = make_type("Tuple", expr_type, Tuple_fields, 2);
    if (!Tuple_type) return 0;
    /* expr_context: parameter-less alternatives, one shared singleton each */
    expr_context_type = make_type("expr_context", &AST_type, NULL, 0);
    if (!expr_context_type) return 0;
    if (!add_attributes(expr_context_type, NULL, 0)) return 0;
    Load_type = make_type("Load", expr_context_type, NULL, 0);
    if (!Load_type) return 0;
    Load_singleton = PyType_GenericNew(Load_type, NULL, NULL);
    if (!Load_singleton) return 0;
    Store_type = make_type("Store", expr_context_type, NULL, 0);
    if (!Store_type) return 0;
    Store_singleton = PyType_GenericNew(Store_type, NULL, NULL);
    if (!Store_singleton) return 0;
    Del_type = make_type("Del", expr_context_type, NULL, 0);
    if (!Del_type) return 0;
    Del_singleton = PyType_GenericNew(Del_type, NULL, NULL);
    if (!Del_singleton) return 0;
    AugLoad_type = make_type("AugLoad", expr_context_type, NULL, 0);
    if (!AugLoad_type) return 0;
    AugLoad_singleton = PyType_GenericNew(AugLoad_type, NULL, NULL);
    if (!AugLoad_singleton) return 0;
    AugStore_type = make_type("AugStore", expr_context_type, NULL, 0);
    if (!AugStore_type) return 0;
    AugStore_singleton = PyType_GenericNew(AugStore_type, NULL, NULL);
    if (!AugStore_singleton) return 0;
    Param_type = make_type("Param", expr_context_type, NULL, 0);
    if (!Param_type) return 0;
    Param_singleton = PyType_GenericNew(Param_type, NULL, NULL);
    if (!Param_singleton) return 0;
    /* slice and its alternatives */
    slice_type = make_type("slice", &AST_type, NULL, 0);
    if (!slice_type) return 0;
    if (!add_attributes(slice_type, NULL, 0)) return 0;
    Slice_type = make_type("Slice", slice_type, Slice_fields, 3);
    if (!Slice_type) return 0;
    ExtSlice_type = make_type("ExtSlice", slice_type, ExtSlice_fields, 1);
    if (!ExtSlice_type) return 0;
    Index_type = make_type("Index", slice_type, Index_fields, 1);
    if (!Index_type) return 0;
    /* boolop */
    boolop_type = make_type("boolop", &AST_type, NULL, 0);
    if (!boolop_type) return 0;
    if (!add_attributes(boolop_type, NULL, 0)) return 0;
    And_type = make_type("And", boolop_type, NULL, 0);
    if (!And_type) return 0;
    And_singleton = PyType_GenericNew(And_type, NULL, NULL);
    if (!And_singleton) return 0;
    Or_type = make_type("Or", boolop_type, NULL, 0);
    if (!Or_type) return 0;
    Or_singleton = PyType_GenericNew(Or_type, NULL, NULL);
    if (!Or_singleton) return 0;
    /* operator */
    operator_type = make_type("operator", &AST_type, NULL, 0);
    if (!operator_type) return 0;
    if (!add_attributes(operator_type, NULL, 0)) return 0;
    Add_type = make_type("Add", operator_type, NULL, 0);
    if (!Add_type) return 0;
    Add_singleton = PyType_GenericNew(Add_type, NULL, NULL);
    if (!Add_singleton) return 0;
    Sub_type = make_type("Sub", operator_type, NULL, 0);
    if (!Sub_type) return 0;
    Sub_singleton = PyType_GenericNew(Sub_type, NULL, NULL);
    if (!Sub_singleton) return 0;
    Mult_type = make_type("Mult", operator_type, NULL, 0);
    if (!Mult_type) return 0;
    Mult_singleton = PyType_GenericNew(Mult_type, NULL, NULL);
    if (!Mult_singleton) return 0;
    MatMult_type = make_type("MatMult", operator_type, NULL, 0);
    if (!MatMult_type) return 0;
    MatMult_singleton = PyType_GenericNew(MatMult_type, NULL, NULL);
    if (!MatMult_singleton) return 0;
    Div_type = make_type("Div", operator_type, NULL, 0);
    if (!Div_type) return 0;
    Div_singleton = PyType_GenericNew(Div_type, NULL, NULL);
    if (!Div_singleton) return 0;
    Mod_type = make_type("Mod", operator_type, NULL, 0);
    if (!Mod_type) return 0;
    Mod_singleton = PyType_GenericNew(Mod_type, NULL, NULL);
    if (!Mod_singleton) return 0;
    Pow_type = make_type("Pow", operator_type, NULL, 0);
    if (!Pow_type) return 0;
    Pow_singleton = PyType_GenericNew(Pow_type, NULL, NULL);
    if (!Pow_singleton) return 0;
    LShift_type = make_type("LShift", operator_type, NULL, 0);
    if (!LShift_type) return 0;
    LShift_singleton = PyType_GenericNew(LShift_type, NULL, NULL);
    if (!LShift_singleton) return 0;
    RShift_type = make_type("RShift", operator_type, NULL, 0);
    if (!RShift_type) return 0;
    RShift_singleton = PyType_GenericNew(RShift_type, NULL, NULL);
    if (!RShift_singleton) return 0;
    BitOr_type = make_type("BitOr", operator_type, NULL, 0);
    if (!BitOr_type) return 0;
    BitOr_singleton = PyType_GenericNew(BitOr_type, NULL, NULL);
    if (!BitOr_singleton) return 0;
    BitXor_type = make_type("BitXor", operator_type, NULL, 0);
    if (!BitXor_type) return 0;
    BitXor_singleton = PyType_GenericNew(BitXor_type, NULL, NULL);
    if (!BitXor_singleton) return 0;
    BitAnd_type = make_type("BitAnd", operator_type, NULL, 0);
    if (!BitAnd_type) return 0;
    BitAnd_singleton = PyType_GenericNew(BitAnd_type, NULL, NULL);
    if (!BitAnd_singleton) return 0;
    FloorDiv_type = make_type("FloorDiv", operator_type, NULL, 0);
    if (!FloorDiv_type) return 0;
    FloorDiv_singleton = PyType_GenericNew(FloorDiv_type, NULL, NULL);
    if (!FloorDiv_singleton) return 0;
    /* unaryop */
    unaryop_type = make_type("unaryop", &AST_type, NULL, 0);
    if (!unaryop_type) return 0;
    if (!add_attributes(unaryop_type, NULL, 0)) return 0;
    Invert_type = make_type("Invert", unaryop_type, NULL, 0);
    if (!Invert_type) return 0;
    Invert_singleton = PyType_GenericNew(Invert_type, NULL, NULL);
    if (!Invert_singleton) return 0;
    Not_type = make_type("Not", unaryop_type, NULL, 0);
    if (!Not_type) return 0;
    Not_singleton = PyType_GenericNew(Not_type, NULL, NULL);
    if (!Not_singleton) return 0;
    UAdd_type = make_type("UAdd", unaryop_type, NULL, 0);
    if (!UAdd_type) return 0;
    UAdd_singleton = PyType_GenericNew(UAdd_type, NULL, NULL);
    if (!UAdd_singleton) return 0;
    USub_type = make_type("USub", unaryop_type, NULL, 0);
    if (!USub_type) return 0;
    USub_singleton = PyType_GenericNew(USub_type, NULL, NULL);
    if (!USub_singleton) return 0;
    /* cmpop */
    cmpop_type = make_type("cmpop", &AST_type, NULL, 0);
    if (!cmpop_type) return 0;
    if (!add_attributes(cmpop_type, NULL, 0)) return 0;
    Eq_type = make_type("Eq", cmpop_type, NULL, 0);
    if (!Eq_type) return 0;
    Eq_singleton = PyType_GenericNew(Eq_type, NULL, NULL);
    if (!Eq_singleton) return 0;
    NotEq_type = make_type("NotEq", cmpop_type, NULL, 0);
    if (!NotEq_type) return 0;
    NotEq_singleton = PyType_GenericNew(NotEq_type, NULL, NULL);
    if (!NotEq_singleton) return 0;
    Lt_type = make_type("Lt", cmpop_type, NULL, 0);
    if (!Lt_type) return 0;
    Lt_singleton = PyType_GenericNew(Lt_type, NULL, NULL);
    if (!Lt_singleton) return 0;
    LtE_type = make_type("LtE", cmpop_type, NULL, 0);
    if (!LtE_type) return 0;
    LtE_singleton = PyType_GenericNew(LtE_type, NULL, NULL);
    if (!LtE_singleton) return 0;
    Gt_type = make_type("Gt", cmpop_type, NULL, 0);
    if (!Gt_type) return 0;
    Gt_singleton = PyType_GenericNew(Gt_type, NULL, NULL);
    if (!Gt_singleton) return 0;
    GtE_type = make_type("GtE", cmpop_type, NULL, 0);
    if (!GtE_type) return 0;
    GtE_singleton = PyType_GenericNew(GtE_type, NULL, NULL);
    if (!GtE_singleton) return 0;
    Is_type = make_type("Is", cmpop_type, NULL, 0);
    if (!Is_type) return 0;
    Is_singleton = PyType_GenericNew(Is_type, NULL, NULL);
    if (!Is_singleton) return 0;
    IsNot_type = make_type("IsNot", cmpop_type, NULL, 0);
    if (!IsNot_type) return 0;
    IsNot_singleton = PyType_GenericNew(IsNot_type, NULL, NULL);
    if (!IsNot_singleton) return 0;
    In_type = make_type("In", cmpop_type, NULL, 0);
    if (!In_type) return 0;
    In_singleton = PyType_GenericNew(In_type, NULL, NULL);
    if (!In_singleton) return 0;
    NotIn_type = make_type("NotIn", cmpop_type, NULL, 0);
    if (!NotIn_type) return 0;
    NotIn_singleton = PyType_GenericNew(NotIn_type, NULL, NULL);
    if (!NotIn_singleton) return 0;
    /* product types */
    comprehension_type = make_type("comprehension", &AST_type, comprehension_fields, 4);
    if (!comprehension_type) return 0;
    if (!add_attributes(comprehension_type, NULL, 0)) return 0;
    excepthandler_type = make_type("excepthandler", &AST_type, NULL, 0);
    if (!excepthandler_type) return 0;
    if (!add_attributes(excepthandler_type, excepthandler_attributes, 2)) return 0;
    ExceptHandler_type = make_type("ExceptHandler", excepthandler_type, ExceptHandler_fields, 3);
    if (!ExceptHandler_type) return 0;
    arguments_type = make_type("arguments", &AST_type, arguments_fields, 6);
    if (!arguments_type) return 0;
    if (!add_attributes(arguments_type, NULL, 0)) return 0;
    arg_type = make_type("arg", &AST_type, arg_fields, 3);
    if (!arg_type) return 0;
    if (!add_attributes(arg_type, arg_attributes, 2)) return 0;
    keyword_type = make_type("keyword", &AST_type, keyword_fields, 2);
    if (!keyword_type) return 0;
    if (!add_attributes(keyword_type, NULL, 0)) return 0;
    alias_type = make_type("alias", &AST_type, alias_fields, 2);
    if (!alias_type) return 0;
    if (!add_attributes(alias_type, NULL, 0)) return 0;
    withitem_type = make_type("withitem", &AST_type, withitem_fields, 2);
    if (!withitem_type) return 0;
    if (!add_attributes(withitem_type, NULL, 0)) return 0;
    type_ignore_type = make_type("type_ignore", &AST_type, NULL, 0);
    if (!type_ignore_type) return 0;
    if (!add_attributes(type_ignore_type, NULL, 0)) return 0;
    TypeIgnore_type = make_type("TypeIgnore", type_ignore_type, TypeIgnore_fields, 1);
    if (!TypeIgnore_type) return 0;
    initialized = 1;
    return 1;
}

/* Forward declarations for the PyObject -> C AST converters. */
static int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena);
static int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena);
static int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena);
static int
obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena);
static int obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena);
static int obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena);
static int obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena);
static int obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena);
static int obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena);
static int obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena);
static int obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena);
static int obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena);
static int obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena);
static int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena);
static int obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena);
static int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena);
static int obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena* arena);

/* Arena-backed AST node constructors (machine-generated pattern):
   each allocates the node from 'arena', rejects NULL for required
   fields with a ValueError, fills the tagged union, and returns the
   node -- or NULL with an exception set. */
mod_ty
Module(asdl_seq * body, asdl_seq * type_ignores, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Module_kind;
    p->v.Module.body = body;
    p->v.Module.type_ignores = type_ignores;
    return p;
}

mod_ty
Interactive(asdl_seq * body, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Interactive_kind;
    p->v.Interactive.body = body;
    return p;
}

mod_ty
Expression(expr_ty body, PyArena *arena)
{
    mod_ty p;
    if (!body) {
        PyErr_SetString(PyExc_ValueError, "field body is required for Expression");
        return NULL;
    }
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Expression_kind;
    p->v.Expression.body = body;
    return p;
}

mod_ty
FunctionType(asdl_seq * argtypes, expr_ty returns, PyArena *arena)
{
    mod_ty p;
    if (!returns) {
        PyErr_SetString(PyExc_ValueError, "field returns is required for FunctionType");
        return NULL;
    }
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = FunctionType_kind;
    p->v.FunctionType.argtypes = argtypes;
    p->v.FunctionType.returns = returns;
    return p;
}

mod_ty
Suite(asdl_seq * body, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Suite_kind;
    p->v.Suite.body = body;
    return p;
}

stmt_ty
FunctionDef(identifier name, arguments_ty args, asdl_seq * body, asdl_seq * decorator_list, expr_ty returns, string type_comment, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!name) {
        PyErr_SetString(PyExc_ValueError, "field name is required for FunctionDef");
        return NULL;
    }
    if (!args) {
        PyErr_SetString(PyExc_ValueError, "field args is required for FunctionDef");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = FunctionDef_kind;
    p->v.FunctionDef.name = name;
    p->v.FunctionDef.args = args;
    p->v.FunctionDef.body = body;
    p->v.FunctionDef.decorator_list = decorator_list;
    p->v.FunctionDef.returns = returns;
    p->v.FunctionDef.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
AsyncFunctionDef(identifier name, arguments_ty args, asdl_seq * body, asdl_seq * decorator_list, expr_ty returns, string type_comment, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!name) {
        PyErr_SetString(PyExc_ValueError, "field name is required for AsyncFunctionDef");
        return NULL;
    }
    if (!args) {
        PyErr_SetString(PyExc_ValueError, "field args is required for AsyncFunctionDef");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = AsyncFunctionDef_kind;
    p->v.AsyncFunctionDef.name = name;
    p->v.AsyncFunctionDef.args = args;
    p->v.AsyncFunctionDef.body = body;
    p->v.AsyncFunctionDef.decorator_list = decorator_list;
    p->v.AsyncFunctionDef.returns = returns;
    p->v.AsyncFunctionDef.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset =
col_offset;
    return p;
}

/* Statement constructors (machine-generated pattern): allocate from the
   arena, validate required fields, set kind/fields/location, return the
   node or NULL with an exception set. */
stmt_ty
ClassDef(identifier name, asdl_seq * bases, asdl_seq * keywords, asdl_seq * body, asdl_seq * decorator_list, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!name) {
        PyErr_SetString(PyExc_ValueError, "field name is required for ClassDef");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = ClassDef_kind;
    p->v.ClassDef.name = name;
    p->v.ClassDef.bases = bases;
    p->v.ClassDef.keywords = keywords;
    p->v.ClassDef.body = body;
    p->v.ClassDef.decorator_list = decorator_list;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Return(expr_ty value, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Return_kind;
    p->v.Return.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Delete(asdl_seq * targets, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Delete_kind;
    p->v.Delete.targets = targets;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Assign(asdl_seq * targets, expr_ty value, string type_comment, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for Assign");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Assign_kind;
    p->v.Assign.targets = targets;
    p->v.Assign.value = value;
    p->v.Assign.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
AugAssign(expr_ty target, operator_ty op, expr_ty value, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!target) {
        PyErr_SetString(PyExc_ValueError, "field target is required for AugAssign");
        return NULL;
    }
    if (!op) {
        PyErr_SetString(PyExc_ValueError, "field op is required for AugAssign");
        return NULL;
    }
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for AugAssign");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = AugAssign_kind;
    p->v.AugAssign.target = target;
    p->v.AugAssign.op = op;
    p->v.AugAssign.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int simple, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!target) {
        PyErr_SetString(PyExc_ValueError, "field target is required for AnnAssign");
        return NULL;
    }
    if (!annotation) {
        PyErr_SetString(PyExc_ValueError, "field annotation is required for AnnAssign");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = AnnAssign_kind;
    p->v.AnnAssign.target = target;
    p->v.AnnAssign.annotation = annotation;
    p->v.AnnAssign.value = value;
    p->v.AnnAssign.simple = simple;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, string type_comment, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!target) {
        PyErr_SetString(PyExc_ValueError, "field target is required for For");
        return NULL;
    }
    if (!iter) {
        PyErr_SetString(PyExc_ValueError, "field iter is required for For");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = For_kind;
    p->v.For.target = target;
    p->v.For.iter = iter;
    p->v.For.body = body;
    p->v.For.orelse = orelse;
    p->v.For.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
AsyncFor(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, string type_comment, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!target) {
        PyErr_SetString(PyExc_ValueError, "field target is required for AsyncFor");
        return NULL;
    }
    if (!iter) {
        PyErr_SetString(PyExc_ValueError, "field iter is required for AsyncFor");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = AsyncFor_kind;
    p->v.AsyncFor.target = target;
    p->v.AsyncFor.iter = iter;
    p->v.AsyncFor.body = body;
    p->v.AsyncFor.orelse = orelse;
    p->v.AsyncFor.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
While(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!test) {
        PyErr_SetString(PyExc_ValueError, "field test is required for While");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = While_kind;
    p->v.While.test = test;
    p->v.While.body = body;
    p->v.While.orelse = orelse;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
If(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!test) {
        PyErr_SetString(PyExc_ValueError, "field test is required for If");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = If_kind;
    p->v.If.test = test;
    p->v.If.body = body;
    p->v.If.orelse = orelse;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
With(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = With_kind;
    p->v.With.items = items;
    p->v.With.body = body;
    p->v.With.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
AsyncWith(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = AsyncWith_kind;
    p->v.AsyncWith.items = items;
    p->v.AsyncWith.body = body;
    p->v.AsyncWith.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Raise(expr_ty exc, expr_ty cause, int lineno, int
col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Raise_kind;
    p->v.Raise.exc = exc;
    p->v.Raise.cause = cause;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Try(asdl_seq * body, asdl_seq * handlers, asdl_seq * orelse, asdl_seq * finalbody, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Try_kind;
    p->v.Try.body = body;
    p->v.Try.handlers = handlers;
    p->v.Try.orelse = orelse;
    p->v.Try.finalbody = finalbody;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Assert(expr_ty test, expr_ty msg, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!test) {
        PyErr_SetString(PyExc_ValueError, "field test is required for Assert");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Assert_kind;
    p->v.Assert.test = test;
    p->v.Assert.msg = msg;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Import(asdl_seq * names, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Import_kind;
    p->v.Import.names = names;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
ImportFrom(identifier module, asdl_seq * names, int level, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = ImportFrom_kind;
    p->v.ImportFrom.module = module;
    p->v.ImportFrom.names = names;
    p->v.ImportFrom.level = level;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Global(asdl_seq * names, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Global_kind;
    p->v.Global.names = names;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Nonlocal(asdl_seq * names, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Nonlocal_kind;
    p->v.Nonlocal.names = names;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Expr(expr_ty value, int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for Expr");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Expr_kind;
    p->v.Expr.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

/* Pass/Break/Continue carry no fields, only kind and location. */
stmt_ty
Pass(int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Pass_kind;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Break(int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Break_kind;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

stmt_ty
Continue(int lineno, int col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Continue_kind;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

/* Expression constructors follow the same generated pattern. */
expr_ty
BoolOp(boolop_ty op, asdl_seq * values, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!op) {
        PyErr_SetString(PyExc_ValueError, "field op is required for BoolOp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = BoolOp_kind;
    p->v.BoolOp.op = op;
    p->v.BoolOp.values = values;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!left) {
        PyErr_SetString(PyExc_ValueError, "field left is required for BinOp");
        return NULL;
    }
    if (!op) {
        PyErr_SetString(PyExc_ValueError, "field op is required for BinOp");
        return NULL;
    }
    if (!right) {
        PyErr_SetString(PyExc_ValueError, "field right is required for BinOp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = BinOp_kind;
    p->v.BinOp.left = left;
    p->v.BinOp.op = op;
    p->v.BinOp.right = right;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!op) {
        PyErr_SetString(PyExc_ValueError, "field op is required for UnaryOp");
        return NULL;
    }
    if (!operand) {
        PyErr_SetString(PyExc_ValueError, "field operand is required for UnaryOp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = UnaryOp_kind;
    p->v.UnaryOp.op = op;
    p->v.UnaryOp.operand = operand;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Lambda(arguments_ty args, expr_ty body, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!args) {
        PyErr_SetString(PyExc_ValueError, "field args is required for Lambda");
        return NULL;
    }
    if (!body) {
        PyErr_SetString(PyExc_ValueError, "field body is required for Lambda");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Lambda_kind;
    p->v.Lambda.args = args;
    p->v.Lambda.body = body;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!test) {
        PyErr_SetString(PyExc_ValueError, "field test is required for IfExp");
        return NULL;
    }
    if (!body) {
        PyErr_SetString(PyExc_ValueError, "field body is required for IfExp");
        return NULL;
    }
    if (!orelse) {
        PyErr_SetString(PyExc_ValueError, "field orelse is required for IfExp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = IfExp_kind;
    p->v.IfExp.test = test;
    p->v.IfExp.body = body;
    p->v.IfExp.orelse = orelse;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Dict(asdl_seq * keys, asdl_seq * values, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Dict_kind;
    p->v.Dict.keys = keys;
    p->v.Dict.values = values;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Set(asdl_seq * elts, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Set_kind;
    p->v.Set.elts = elts;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
ListComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!elt) {
        PyErr_SetString(PyExc_ValueError, "field elt is required for ListComp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = ListComp_kind;
    p->v.ListComp.elt = elt;
    p->v.ListComp.generators = generators;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
SetComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!elt) {
        PyErr_SetString(PyExc_ValueError, "field elt is required for SetComp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = SetComp_kind;
    p->v.SetComp.elt = elt;
    p->v.SetComp.generators = generators;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
DictComp(expr_ty key, expr_ty value, asdl_seq * generators, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!key) {
        PyErr_SetString(PyExc_ValueError, "field key is required for DictComp");
        return NULL;
    }
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for DictComp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = DictComp_kind;
    p->v.DictComp.key = key;
    p->v.DictComp.value = value;
    p->v.DictComp.generators = generators;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
GeneratorExp(expr_ty elt,
asdl_seq * generators, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!elt) {
        PyErr_SetString(PyExc_ValueError, "field elt is required for GeneratorExp");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = GeneratorExp_kind;
    p->v.GeneratorExp.elt = elt;
    p->v.GeneratorExp.generators = generators;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Await(expr_ty value, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for Await");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Await_kind;
    p->v.Await.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Yield(expr_ty value, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Yield_kind;
    p->v.Yield.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
YieldFrom(expr_ty value, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for YieldFrom");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = YieldFrom_kind;
    p->v.YieldFrom.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Compare(expr_ty left, asdl_int_seq * ops, asdl_seq * comparators, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!left) {
        PyErr_SetString(PyExc_ValueError, "field left is required for Compare");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Compare_kind;
    p->v.Compare.left = left;
    p->v.Compare.ops = ops;
    p->v.Compare.comparators = comparators;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Call(expr_ty func, asdl_seq * args, asdl_seq * keywords, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!func) {
        PyErr_SetString(PyExc_ValueError, "field func is required for Call");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Call_kind;
    p->v.Call.func = func;
    p->v.Call.args = args;
    p->v.Call.keywords = keywords;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Num(object n, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!n) {
        PyErr_SetString(PyExc_ValueError, "field n is required for Num");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Num_kind;
    p->v.Num.n = n;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Str(string s, string kind, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!s) {
        PyErr_SetString(PyExc_ValueError, "field s is required for Str");
        return NULL;
    }
    if (!kind) {
        PyErr_SetString(PyExc_ValueError, "field kind is required for Str");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Str_kind;
    p->v.Str.s = s;
    p->v.Str.kind = kind;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
FormattedValue(expr_ty value, int conversion, expr_ty format_spec, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for FormattedValue");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = FormattedValue_kind;
    p->v.FormattedValue.value = value;
    p->v.FormattedValue.conversion = conversion;
    p->v.FormattedValue.format_spec = format_spec;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
JoinedStr(asdl_seq * values, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = JoinedStr_kind;
    p->v.JoinedStr.values = values;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Bytes(bytes s, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!s) {
        PyErr_SetString(PyExc_ValueError, "field s is required for Bytes");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Bytes_kind;
    p->v.Bytes.s = s;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
NameConstant(singleton value, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for NameConstant");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = NameConstant_kind;
    p->v.NameConstant.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Ellipsis(int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Ellipsis_kind;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Constant(constant value, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for Constant");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Constant_kind;
    p->v.Constant.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Attribute(expr_ty value, identifier attr, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for Attribute");
        return NULL;
    }
    if (!attr) {
        PyErr_SetString(PyExc_ValueError, "field attr is required for Attribute");
        return NULL;
    }
    if (!ctx) {
        PyErr_SetString(PyExc_ValueError, "field ctx is required for Attribute");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Attribute_kind;
    p->v.Attribute.value = value;
    p->v.Attribute.attr = attr;
    p->v.Attribute.ctx = ctx;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Subscript(expr_ty value, slice_ty slice, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for Subscript");
        return NULL;
    }
    if (!slice) {
        PyErr_SetString(PyExc_ValueError, "field slice is required for Subscript");
        return NULL;
    }
    if (!ctx) {
        PyErr_SetString(PyExc_ValueError, "field ctx is required for Subscript");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Subscript_kind;
    p->v.Subscript.value = value;
    p->v.Subscript.slice = slice;
    p->v.Subscript.ctx = ctx;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Starred(expr_ty value, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError, "field value is required for Starred");
        return NULL;
    }
    if (!ctx) {
        PyErr_SetString(PyExc_ValueError, "field ctx is required for Starred");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Starred_kind;
    p->v.Starred.value = value;
    p->v.Starred.ctx = ctx;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

expr_ty
Name(identifier id, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!id) {
        PyErr_SetString(PyExc_ValueError, "field id is required for Name");
        return NULL;
    }
    if (!ctx) {
        PyErr_SetString(PyExc_ValueError, "field ctx is required for Name");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Name_kind;
    p->v.Name.id = id;
    p->v.Name.ctx = ctx;
    p->lineno = lineno;
    p->col_offset = col_offset;
    return p;
}

/* List: body continues past the end of this chunk. */
expr_ty
List(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena)
{
    expr_ty p;
    if (!ctx) {
        PyErr_SetString(PyExc_ValueError, "field ctx is required for List");
        return NULL;
    }
    p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = List_kind;
    p->v.List.elts = elts;
p->v.List.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } expr_ty Tuple(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, PyArena *arena) { expr_ty p; if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Tuple"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Tuple_kind; p->v.Tuple.elts = elts; p->v.Tuple.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; return p; } slice_ty Slice(expr_ty lower, expr_ty upper, expr_ty step, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Slice_kind; p->v.Slice.lower = lower; p->v.Slice.upper = upper; p->v.Slice.step = step; return p; } slice_ty ExtSlice(asdl_seq * dims, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ExtSlice_kind; p->v.ExtSlice.dims = dims; return p; } slice_ty Index(expr_ty value, PyArena *arena) { slice_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Index"); return NULL; } p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Index_kind; p->v.Index.value = value; return p; } comprehension_ty comprehension(expr_ty target, expr_ty iter, asdl_seq * ifs, int is_async, PyArena *arena) { comprehension_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for comprehension"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for comprehension"); return NULL; } p = (comprehension_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->target = target; p->iter = iter; p->ifs = ifs; p->is_async = is_async; return p; } excepthandler_ty ExceptHandler(expr_ty type, identifier name, asdl_seq * body, int lineno, int col_offset, PyArena *arena) { excepthandler_ty p; p = (excepthandler_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = 
ExceptHandler_kind; p->v.ExceptHandler.type = type; p->v.ExceptHandler.name = name; p->v.ExceptHandler.body = body; p->lineno = lineno; p->col_offset = col_offset; return p; } arguments_ty arguments(asdl_seq * args, arg_ty vararg, asdl_seq * kwonlyargs, asdl_seq * kw_defaults, arg_ty kwarg, asdl_seq * defaults, PyArena *arena) { arguments_ty p; p = (arguments_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->args = args; p->vararg = vararg; p->kwonlyargs = kwonlyargs; p->kw_defaults = kw_defaults; p->kwarg = kwarg; p->defaults = defaults; return p; } arg_ty arg(identifier arg, expr_ty annotation, string type_comment, int lineno, int col_offset, PyArena *arena) { arg_ty p; if (!arg) { PyErr_SetString(PyExc_ValueError, "field arg is required for arg"); return NULL; } p = (arg_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->arg = arg; p->annotation = annotation; p->type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; return p; } keyword_ty keyword(identifier arg, expr_ty value, PyArena *arena) { keyword_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for keyword"); return NULL; } p = (keyword_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->arg = arg; p->value = value; return p; } alias_ty alias(identifier name, identifier asname, PyArena *arena) { alias_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for alias"); return NULL; } p = (alias_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->name = name; p->asname = asname; return p; } withitem_ty withitem(expr_ty context_expr, expr_ty optional_vars, PyArena *arena) { withitem_ty p; if (!context_expr) { PyErr_SetString(PyExc_ValueError, "field context_expr is required for withitem"); return NULL; } p = (withitem_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->context_expr = context_expr; p->optional_vars = optional_vars; return p; } type_ignore_ty TypeIgnore(int lineno, 
PyArena *arena) { type_ignore_ty p; p = (type_ignore_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = TypeIgnore_kind; p->v.TypeIgnore.lineno = lineno; return p; } PyObject* ast2obj_mod(void* _o) { mod_ty o = (mod_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case Module_kind: result = PyType_GenericNew(Module_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Module.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Module.type_ignores, ast2obj_type_ignore); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_ignores, value) == -1) goto failed; Py_DECREF(value); break; case Interactive_kind: result = PyType_GenericNew(Interactive_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Interactive.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case Expression_kind: result = PyType_GenericNew(Expression_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expression.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case FunctionType_kind: result = PyType_GenericNew(FunctionType_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.FunctionType.argtypes, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_argtypes, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FunctionType.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); break; case Suite_kind: result = PyType_GenericNew(Suite_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Suite.body, ast2obj_stmt); if (!value) goto failed; if 
(_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_stmt(void* _o) { stmt_ty o = (stmt_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case FunctionDef_kind: result = PyType_GenericNew(FunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.FunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.FunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.FunctionDef.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFunctionDef_kind: result = PyType_GenericNew(AsyncFunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.AsyncFunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.AsyncFunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFunctionDef.body, ast2obj_stmt); if 
(!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncFunctionDef.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case ClassDef_kind: result = PyType_GenericNew(ClassDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ClassDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.bases, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_bases, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.keywords, ast2obj_keyword); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); break; case Return_kind: result = PyType_GenericNew(Return_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Return.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Delete_kind: result = PyType_GenericNew(Delete_type, NULL, NULL); if (!result) goto 
failed; value = ast2obj_list(o->v.Delete.targets, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); break; case Assign_kind: result = PyType_GenericNew(Assign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Assign.targets, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.Assign.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AugAssign_kind: result = PyType_GenericNew(AugAssign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AugAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_operator(o->v.AugAssign.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AugAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case AnnAssign_kind: result = PyType_GenericNew(AnnAssign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AnnAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.annotation); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_int(o->v.AnnAssign.simple); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_simple, value) == -1) goto failed; Py_DECREF(value); break; case For_kind: result = PyType_GenericNew(For_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.For.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.For.iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.For.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFor_kind: result = PyType_GenericNew(AsyncFor_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AsyncFor.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFor.iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncFor.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; 
Py_DECREF(value); break; case While_kind: result = PyType_GenericNew(While_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.While.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case If_kind: result = PyType_GenericNew(If_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.If.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case With_kind: result = PyType_GenericNew(With_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.With.items, ast2obj_withitem); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.With.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.With.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncWith_kind: result = PyType_GenericNew(AsyncWith_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.AsyncWith.items, ast2obj_withitem); if (!value) goto failed; if 
(_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncWith.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncWith.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case Raise_kind: result = PyType_GenericNew(Raise_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Raise.exc); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_exc, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Raise.cause); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_cause, value) == -1) goto failed; Py_DECREF(value); break; case Try_kind: result = PyType_GenericNew(Try_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Try.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.handlers, ast2obj_excepthandler); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_handlers, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.finalbody, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_finalbody, value) == -1) goto failed; Py_DECREF(value); break; case Assert_kind: result = PyType_GenericNew(Assert_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Assert.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assert.msg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_msg, 
value) == -1) goto failed; Py_DECREF(value); break; case Import_kind: result = PyType_GenericNew(Import_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Import.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case ImportFrom_kind: result = PyType_GenericNew(ImportFrom_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ImportFrom.module); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_module, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ImportFrom.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.ImportFrom.level); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_level, value) == -1) goto failed; Py_DECREF(value); break; case Global_kind: result = PyType_GenericNew(Global_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Global.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case Nonlocal_kind: result = PyType_GenericNew(Nonlocal_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Nonlocal.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case Expr_kind: result = PyType_GenericNew(Expr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expr.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Pass_kind: result = PyType_GenericNew(Pass_type, NULL, NULL); if (!result) goto failed; break; case Break_kind: result = PyType_GenericNew(Break_type, NULL, NULL); if (!result) goto failed; break; case Continue_kind: result = 
PyType_GenericNew(Continue_type, NULL, NULL); if (!result) goto failed; break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_expr(void* _o) { expr_ty o = (expr_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case BoolOp_kind: result = PyType_GenericNew(BoolOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_boolop(o->v.BoolOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.BoolOp.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case BinOp_kind: result = PyType_GenericNew(BinOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.BinOp.left); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_operator(o->v.BinOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.BinOp.right); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_right, value) == -1) goto failed; Py_DECREF(value); break; case UnaryOp_kind: result = PyType_GenericNew(UnaryOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_unaryop(o->v.UnaryOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.UnaryOp.operand); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_operand, value) == -1) goto failed; 
Py_DECREF(value); break; case Lambda_kind: result = PyType_GenericNew(Lambda_type, NULL, NULL); if (!result) goto failed; value = ast2obj_arguments(o->v.Lambda.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Lambda.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case IfExp_kind: result = PyType_GenericNew(IfExp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.IfExp.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.IfExp.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.IfExp.orelse); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case Dict_kind: result = PyType_GenericNew(Dict_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Dict.keys, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keys, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Dict.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case Set_kind: result = PyType_GenericNew(Set_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Set.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); break; case ListComp_kind: result = PyType_GenericNew(ListComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.ListComp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_list(o->v.ListComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case SetComp_kind: result = PyType_GenericNew(SetComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.SetComp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.SetComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case DictComp_kind: result = PyType_GenericNew(DictComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.DictComp.key); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_key, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.DictComp.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.DictComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case GeneratorExp_kind: result = PyType_GenericNew(GeneratorExp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.GeneratorExp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.GeneratorExp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case Await_kind: result = PyType_GenericNew(Await_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Await.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Yield_kind: result 
= PyType_GenericNew(Yield_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Yield.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case YieldFrom_kind: result = PyType_GenericNew(YieldFrom_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.YieldFrom.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Compare_kind: result = PyType_GenericNew(Compare_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Compare.left); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed; Py_DECREF(value); { Py_ssize_t i, n = asdl_seq_LEN(o->v.Compare.ops); value = PyList_New(n); if (!value) goto failed; for(i = 0; i < n; i++) PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(o->v.Compare.ops, i))); } if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ops, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Compare.comparators, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_comparators, value) == -1) goto failed; Py_DECREF(value); break; case Call_kind: result = PyType_GenericNew(Call_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Call.func); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_func, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Call.args, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Call.keywords, ast2obj_keyword); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed; Py_DECREF(value); break; case Num_kind: result = PyType_GenericNew(Num_type, NULL, NULL); if (!result) goto failed; value = ast2obj_object(o->v.Num.n); if 
(!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_n, value) == -1) goto failed; Py_DECREF(value); break; case Str_kind: result = PyType_GenericNew(Str_type, NULL, NULL); if (!result) goto failed; value = ast2obj_string(o->v.Str.s); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_s, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.Str.kind); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kind, value) == -1) goto failed; Py_DECREF(value); break; case FormattedValue_kind: result = PyType_GenericNew(FormattedValue_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.FormattedValue.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.FormattedValue.conversion); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_conversion, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FormattedValue.format_spec); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_format_spec, value) == -1) goto failed; Py_DECREF(value); break; case JoinedStr_kind: result = PyType_GenericNew(JoinedStr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.JoinedStr.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case Bytes_kind: result = PyType_GenericNew(Bytes_type, NULL, NULL); if (!result) goto failed; value = ast2obj_bytes(o->v.Bytes.s); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_s, value) == -1) goto failed; Py_DECREF(value); break; case NameConstant_kind: result = PyType_GenericNew(NameConstant_type, NULL, NULL); if (!result) goto failed; value = ast2obj_singleton(o->v.NameConstant.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Ellipsis_kind: result = 
PyType_GenericNew(Ellipsis_type, NULL, NULL); if (!result) goto failed; break; case Constant_kind: result = PyType_GenericNew(Constant_type, NULL, NULL); if (!result) goto failed; value = ast2obj_constant(o->v.Constant.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Attribute_kind: result = PyType_GenericNew(Attribute_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Attribute.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->v.Attribute.attr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_attr, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Attribute.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Subscript_kind: result = PyType_GenericNew(Subscript_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Subscript.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_slice(o->v.Subscript.slice); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_slice, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Subscript.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Starred_kind: result = PyType_GenericNew(Starred_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Starred.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Starred.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Name_kind: result = 
PyType_GenericNew(Name_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.Name.id); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_id, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Name.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case List_kind: result = PyType_GenericNew(List_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.List.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.List.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Tuple_kind: result = PyType_GenericNew(Tuple_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Tuple.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Tuple.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_expr_context(expr_context_ty o) { switch(o) { case Load: Py_INCREF(Load_singleton); return Load_singleton; case Store: Py_INCREF(Store_singleton); return Store_singleton; case Del: Py_INCREF(Del_singleton); return Del_singleton; case AugLoad: Py_INCREF(AugLoad_singleton); return AugLoad_singleton; case AugStore: Py_INCREF(AugStore_singleton); return 
AugStore_singleton; case Param: Py_INCREF(Param_singleton); return Param_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown expr_context found"); return NULL; } } PyObject* ast2obj_slice(void* _o) { slice_ty o = (slice_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case Slice_kind: result = PyType_GenericNew(Slice_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Slice.lower); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lower, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Slice.upper); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_upper, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Slice.step); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_step, value) == -1) goto failed; Py_DECREF(value); break; case ExtSlice_kind: result = PyType_GenericNew(ExtSlice_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.ExtSlice.dims, ast2obj_slice); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_dims, value) == -1) goto failed; Py_DECREF(value); break; case Index_kind: result = PyType_GenericNew(Index_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Index.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_boolop(boolop_ty o) { switch(o) { case And: Py_INCREF(And_singleton); return And_singleton; case Or: Py_INCREF(Or_singleton); return Or_singleton; default: /* should never happen, but just in case ... 
*/ PyErr_Format(PyExc_SystemError, "unknown boolop found"); return NULL; } } PyObject* ast2obj_operator(operator_ty o) { switch(o) { case Add: Py_INCREF(Add_singleton); return Add_singleton; case Sub: Py_INCREF(Sub_singleton); return Sub_singleton; case Mult: Py_INCREF(Mult_singleton); return Mult_singleton; case MatMult: Py_INCREF(MatMult_singleton); return MatMult_singleton; case Div: Py_INCREF(Div_singleton); return Div_singleton; case Mod: Py_INCREF(Mod_singleton); return Mod_singleton; case Pow: Py_INCREF(Pow_singleton); return Pow_singleton; case LShift: Py_INCREF(LShift_singleton); return LShift_singleton; case RShift: Py_INCREF(RShift_singleton); return RShift_singleton; case BitOr: Py_INCREF(BitOr_singleton); return BitOr_singleton; case BitXor: Py_INCREF(BitXor_singleton); return BitXor_singleton; case BitAnd: Py_INCREF(BitAnd_singleton); return BitAnd_singleton; case FloorDiv: Py_INCREF(FloorDiv_singleton); return FloorDiv_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown operator found"); return NULL; } } PyObject* ast2obj_unaryop(unaryop_ty o) { switch(o) { case Invert: Py_INCREF(Invert_singleton); return Invert_singleton; case Not: Py_INCREF(Not_singleton); return Not_singleton; case UAdd: Py_INCREF(UAdd_singleton); return UAdd_singleton; case USub: Py_INCREF(USub_singleton); return USub_singleton; default: /* should never happen, but just in case ... 
*/ PyErr_Format(PyExc_SystemError, "unknown unaryop found"); return NULL; } } PyObject* ast2obj_cmpop(cmpop_ty o) { switch(o) { case Eq: Py_INCREF(Eq_singleton); return Eq_singleton; case NotEq: Py_INCREF(NotEq_singleton); return NotEq_singleton; case Lt: Py_INCREF(Lt_singleton); return Lt_singleton; case LtE: Py_INCREF(LtE_singleton); return LtE_singleton; case Gt: Py_INCREF(Gt_singleton); return Gt_singleton; case GtE: Py_INCREF(GtE_singleton); return GtE_singleton; case Is: Py_INCREF(Is_singleton); return Is_singleton; case IsNot: Py_INCREF(IsNot_singleton); return IsNot_singleton; case In: Py_INCREF(In_singleton); return In_singleton; case NotIn: Py_INCREF(NotIn_singleton); return NotIn_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown cmpop found"); return NULL; } } PyObject* ast2obj_comprehension(void* _o) { comprehension_ty o = (comprehension_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(comprehension_type, NULL, NULL); if (!result) return NULL; value = ast2obj_expr(o->target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->ifs, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ifs, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->is_async); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_is_async, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_excepthandler(void* _o) { excepthandler_ty o = (excepthandler_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case ExceptHandler_kind: result = 
PyType_GenericNew(ExceptHandler_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.ExceptHandler.type); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->v.ExceptHandler.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ExceptHandler.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_arguments(void* _o) { arguments_ty o = (arguments_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(arguments_type, NULL, NULL); if (!result) return NULL; value = ast2obj_list(o->args, ast2obj_arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arg(o->vararg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_vararg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->kwonlyargs, ast2obj_arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kwonlyargs, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->kw_defaults, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kw_defaults, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arg(o->kwarg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kwarg, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_list(o->defaults, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_defaults, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_arg(void* _o) { arg_ty o = (arg_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(arg_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->annotation); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_keyword(void* _o) { keyword_ty o = (keyword_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(keyword_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_alias(void* _o) { alias_ty o = (alias_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { 
Py_RETURN_NONE; } result = PyType_GenericNew(alias_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->asname); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_asname, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_withitem(void* _o) { withitem_ty o = (withitem_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(withitem_type, NULL, NULL); if (!result) return NULL; value = ast2obj_expr(o->context_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_context_expr, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->optional_vars); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_optional_vars, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_type_ignore(void* _o) { type_ignore_ty o = (type_ignore_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case TypeIgnore_kind: result = PyType_GenericNew(TypeIgnore_type, NULL, NULL); if (!result) goto failed; value = ast2obj_int(o->v.TypeIgnore.lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; if (obj == Py_None) { *out = NULL; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Module_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; asdl_seq* type_ignores; if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == 
NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Module"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Module field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Module field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_ignores, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"type_ignores\" missing from Module"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Module field \"type_ignores\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); type_ignores = _Ta3_asdl_seq_new(len, arena); if (type_ignores == NULL) goto failed; for (i = 0; i < len; i++) { type_ignore_ty val; res = obj2ast_type_ignore(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Module field \"type_ignores\" changed size during iteration"); goto failed; } asdl_seq_SET(type_ignores, i, val); } Py_CLEAR(tmp); } *out = Module(body, type_ignores, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Interactive_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Interactive"); return 1; } else { int res; Py_ssize_t len; 
Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Interactive field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Interactive field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = Interactive(body, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Expression_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty body; if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Expression"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Expression(body, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionType_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* argtypes; expr_ty returns; if (lookup_attr_id(obj, &PyId_argtypes, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"argtypes\" missing from FunctionType"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionType field \"argtypes\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); argtypes = _Ta3_asdl_seq_new(len, arena); if (argtypes == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { 
PyErr_SetString(PyExc_RuntimeError, "FunctionType field \"argtypes\" changed size during iteration"); goto failed; } asdl_seq_SET(argtypes, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"returns\" missing from FunctionType"); return 1; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FunctionType(argtypes, returns, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Suite_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Suite"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Suite field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Suite field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = Suite(body, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of mod, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from stmt"); return 1; } else { 
int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from stmt"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; arguments_ty args; asdl_seq* body; asdl_seq* decorator_list; expr_ty returns; string type_comment; if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from FunctionDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from FunctionDef"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from FunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if 
(lookup_attr_id(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from FunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Ta3_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); returns = NULL; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FunctionDef(name, args, body, decorator_list, returns, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFunctionDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; arguments_ty args; asdl_seq* body; asdl_seq* decorator_list; expr_ty returns; string type_comment; if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from AsyncFunctionDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) 
goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from AsyncFunctionDef"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from AsyncFunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Ta3_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, val); } 
Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); returns = NULL; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AsyncFunctionDef(name, args, body, decorator_list, returns, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ClassDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; asdl_seq* bases; asdl_seq* keywords; asdl_seq* body; asdl_seq* decorator_list; if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from ClassDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_bases, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"bases\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"bases\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); bases = _Ta3_asdl_seq_new(len, arena); if (bases == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"bases\" changed size during iteration"); goto failed; } asdl_seq_SET(bases, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_keywords, &tmp) < 0) { 
return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = _Ta3_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty val; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = 
_Ta3_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } *out = ClassDef(name, bases, keywords, body, decorator_list, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Return_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Return(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Delete_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; if (lookup_attr_id(obj, &PyId_targets, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Delete"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Delete field \"targets\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Ta3_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Delete field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, val); } Py_CLEAR(tmp); } *out = Delete(targets, lineno, col_offset, arena); if (*out == 
NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assign_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; expr_ty value; string type_comment; if (lookup_attr_id(obj, &PyId_targets, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Assign"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Assign field \"targets\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Ta3_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Assign field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Assign"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Assign(targets, value, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AugAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; operator_ty op; expr_ty value; if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AugAssign"); return 1; } else 
{ int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from AugAssign"); return 1; } else { int res; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from AugAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AugAssign(target, op, value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AnnAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty annotation; expr_ty value; int simple; if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_annotation, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"annotation\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_simple, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"simple\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_int(tmp, &simple, arena); if (res != 0) goto 
failed; Py_CLEAR(tmp); } *out = AnnAssign(target, annotation, value, simple, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)For_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; string type_comment; if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from For"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_iter, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from For"); return 1; } else { int res; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from For"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "For field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from For"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"orelse\" must be a list, not a %.200s", 
tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "For field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = For(target, iter, body, orelse, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFor_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; string type_comment; if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AsyncFor"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_iter, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from AsyncFor"); return 1; } else { int res; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFor"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = 
_Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from AsyncFor"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AsyncFor(target, iter, body, orelse, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)While_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from While"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if 
(lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from While"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from While"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = While(test, body, orelse, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)If_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from If"); return 1; } 
else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from If"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from If"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = If(test, body, orelse, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)With_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; string type_comment; if (lookup_attr_id(obj, &PyId_items, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"items\" missing from With"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Ta3_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty val; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"items\" changed size during iteration"); goto failed; } asdl_seq_SET(items, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from With"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = With(items, body, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncWith_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; 
string type_comment; if (lookup_attr_id(obj, &PyId_items, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"items\" missing from AsyncWith"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Ta3_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty val; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"items\" changed size during iteration"); goto failed; } asdl_seq_SET(items, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncWith"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AsyncWith(items, body, type_comment, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = 
PyObject_IsInstance(obj, (PyObject*)Raise_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty exc; expr_ty cause; if (lookup_attr_id(obj, &PyId_exc, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); exc = NULL; } else { int res; res = obj2ast_expr(tmp, &exc, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_cause, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); cause = NULL; } else { int res; res = obj2ast_expr(tmp, &cause, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Raise(exc, cause, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Try_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; asdl_seq* handlers; asdl_seq* orelse; asdl_seq* finalbody; if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_handlers, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"handlers\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"handlers\" must be a list, not a %.200s", tmp->ob_type->tp_name); 
goto failed; } len = PyList_GET_SIZE(tmp); handlers = _Ta3_asdl_seq_new(len, arena); if (handlers == NULL) goto failed; for (i = 0; i < len; i++) { excepthandler_ty val; res = obj2ast_excepthandler(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"handlers\" changed size during iteration"); goto failed; } asdl_seq_SET(handlers, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Ta3_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_finalbody, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"finalbody\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"finalbody\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); finalbody = _Ta3_asdl_seq_new(len, arena); if (finalbody == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"finalbody\" changed size during iteration"); goto failed; } 
asdl_seq_SET(finalbody, i, val); } Py_CLEAR(tmp); } *out = Try(body, handlers, orelse, finalbody, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assert_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty msg; if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from Assert"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_msg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); msg = NULL; } else { int res; res = obj2ast_expr(tmp, &msg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Assert(test, msg, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Import_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Import"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Import field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { alias_ty val; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Import field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Import(names, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ImportFrom_type); if 
(isinstance == -1) { return 1; } if (isinstance) { identifier module; asdl_seq* names; int level; if (lookup_attr_id(obj, &PyId_module, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); module = NULL; } else { int res; res = obj2ast_identifier(tmp, &module, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from ImportFrom"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ImportFrom field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { alias_ty val; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ImportFrom field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_level, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); level = 0; } else { int res; res = obj2ast_int(tmp, &level, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = ImportFrom(module, names, level, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Global_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Global"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Global field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = 
PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier val; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Global field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Global(names, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Nonlocal_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Nonlocal"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Nonlocal field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Ta3_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier val; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Nonlocal field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Nonlocal(names, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Expr_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Expr"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Expr(value, lineno, col_offset, arena); if 
(*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Pass_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Pass(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Break_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Break(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Continue_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Continue(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of stmt, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from expr"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from expr"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)BoolOp_type); if (isinstance == -1) { return 1; } if (isinstance) { boolop_ty op; asdl_seq* values; if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BoolOp"); return 1; } else { int res; res = obj2ast_boolop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from BoolOp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "BoolOp field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Ta3_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "BoolOp field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = BoolOp(op, values, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)BinOp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty left; operator_ty op; expr_ty right; if (lookup_attr_id(obj, &PyId_left, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"left\" missing from BinOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BinOp"); return 1; } else { int res; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_right, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"right\" missing from BinOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &right, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = BinOp(left, op, right, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)UnaryOp_type); if (isinstance == -1) { return 1; } if 
(isinstance) { unaryop_ty op; expr_ty operand; if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from UnaryOp"); return 1; } else { int res; res = obj2ast_unaryop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_operand, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"operand\" missing from UnaryOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &operand, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = UnaryOp(op, operand, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Lambda_type); if (isinstance == -1) { return 1; } if (isinstance) { arguments_ty args; expr_ty body; if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from Lambda"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Lambda"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Lambda(args, body, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)IfExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty body; expr_ty orelse; if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == 
NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &orelse, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = IfExp(test, body, orelse, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Dict_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* keys; asdl_seq* values; if (lookup_attr_id(obj, &PyId_keys, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keys\" missing from Dict"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"keys\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keys = _Ta3_asdl_seq_new(len, arena); if (keys == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"keys\" changed size during iteration"); goto failed; } asdl_seq_SET(keys, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from Dict"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Ta3_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) 
{ expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = Dict(keys, values, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Set_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; if (lookup_attr_id(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Set"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Set field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Ta3_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Set field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, val); } Py_CLEAR(tmp); } *out = Set(elts, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ListComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (lookup_attr_id(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from ListComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from ListComp"); return 1; } else { int 
res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ListComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ListComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = ListComp(elt, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)SetComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (lookup_attr_id(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from SetComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from SetComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "SetComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "SetComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); 
} Py_CLEAR(tmp); } *out = SetComp(elt, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)DictComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty key; expr_ty value; asdl_seq* generators; if (lookup_attr_id(obj, &PyId_key, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"key\" missing from DictComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &key, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from DictComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from DictComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "DictComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "DictComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = DictComp(key, value, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)GeneratorExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (lookup_attr_id(obj, &PyId_elt, &tmp) < 0) 
{ return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from GeneratorExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from GeneratorExp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "GeneratorExp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Ta3_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "GeneratorExp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = GeneratorExp(elt, generators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Await_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Await"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Await(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Yield_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, 
arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Yield(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)YieldFrom_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from YieldFrom"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = YieldFrom(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Compare_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty left; asdl_int_seq* ops; asdl_seq* comparators; if (lookup_attr_id(obj, &PyId_left, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"left\" missing from Compare"); return 1; } else { int res; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_ops, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ops\" missing from Compare"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"ops\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); ops = _Ta3_asdl_int_seq_new(len, arena); if (ops == NULL) goto failed; for (i = 0; i < len; i++) { cmpop_ty val; res = obj2ast_cmpop(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"ops\" changed size during iteration"); goto failed; } asdl_seq_SET(ops, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_comparators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, 
"required field \"comparators\" missing from Compare"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"comparators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); comparators = _Ta3_asdl_seq_new(len, arena); if (comparators == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"comparators\" changed size during iteration"); goto failed; } asdl_seq_SET(comparators, i, val); } Py_CLEAR(tmp); } *out = Compare(left, ops, comparators, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Call_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty func; asdl_seq* args; asdl_seq* keywords; if (lookup_attr_id(obj, &PyId_func, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"func\" missing from Call"); return 1; } else { int res; res = obj2ast_expr(tmp, &func, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from Call"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); args = _Ta3_asdl_seq_new(len, arena); if (args == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"args\" changed size during iteration"); goto failed; } 
asdl_seq_SET(args, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_keywords, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from Call"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = _Ta3_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty val; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, val); } Py_CLEAR(tmp); } *out = Call(func, args, keywords, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Num_type); if (isinstance == -1) { return 1; } if (isinstance) { object n; if (lookup_attr_id(obj, &PyId_n, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"n\" missing from Num"); return 1; } else { int res; res = obj2ast_object(tmp, &n, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Num(n, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Str_type); if (isinstance == -1) { return 1; } if (isinstance) { string s; string kind; if (lookup_attr_id(obj, &PyId_s, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"s\" missing from Str"); return 1; } else { int res; res = obj2ast_string(tmp, &s, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_HasAttrId(obj, &PyId_kind)) { int res; tmp = _PyObject_GetAttrId(obj, &PyId_kind); if (tmp == NULL) goto failed; res = obj2ast_string(tmp, &kind, arena); 
if (res != 0) goto failed; Py_CLEAR(tmp); } else { PyErr_SetString(PyExc_TypeError, "required field \"kind\" missing from Str"); return 1; } *out = Str(s, kind, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)FormattedValue_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; int conversion; expr_ty format_spec; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from FormattedValue"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_conversion, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); conversion = 0; } else { int res; res = obj2ast_int(tmp, &conversion, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_format_spec, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); format_spec = NULL; } else { int res; res = obj2ast_expr(tmp, &format_spec, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FormattedValue(value, conversion, format_spec, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)JoinedStr_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* values; if (lookup_attr_id(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from JoinedStr"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "JoinedStr field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Ta3_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = 
obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "JoinedStr field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = JoinedStr(values, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Bytes_type); if (isinstance == -1) { return 1; } if (isinstance) { bytes s; if (lookup_attr_id(obj, &PyId_s, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"s\" missing from Bytes"); return 1; } else { int res; res = obj2ast_bytes(tmp, &s, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Bytes(s, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)NameConstant_type); if (isinstance == -1) { return 1; } if (isinstance) { singleton value; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from NameConstant"); return 1; } else { int res; res = obj2ast_singleton(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = NameConstant(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Ellipsis_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Ellipsis(lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Constant_type); if (isinstance == -1) { return 1; } if (isinstance) { constant value; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Constant"); return 1; } else { int res; res = obj2ast_constant(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = 
Constant(value, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Attribute_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; identifier attr; expr_context_ty ctx; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Attribute"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_attr, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"attr\" missing from Attribute"); return 1; } else { int res; res = obj2ast_identifier(tmp, &attr, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Attribute"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Attribute(value, attr, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Subscript_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; slice_ty slice; expr_context_ty ctx; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Subscript"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_slice, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"slice\" missing from Subscript"); return 1; } else { int res; res = obj2ast_slice(tmp, &slice, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) { return 1; } if 
(tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Subscript"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Subscript(value, slice, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Starred_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; expr_context_ty ctx; if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Starred"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Starred"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Starred(value, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Name_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier id; expr_context_ty ctx; if (lookup_attr_id(obj, &PyId_id, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"id\" missing from Name"); return 1; } else { int res; res = obj2ast_identifier(tmp, &id, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Name"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Name(id, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)List_type); if (isinstance == 
-1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (lookup_attr_id(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from List"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "List field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Ta3_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "List field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from List"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = List(elts, ctx, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Tuple_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (lookup_attr_id(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Tuple"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Tuple field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Ta3_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { 
            /* (continuation of the Tuple case of obj2ast_expr) */
            PyErr_SetString(PyExc_RuntimeError, "Tuple field \"elts\" changed size during iteration");
            goto failed;
            }
            asdl_seq_SET(elts, i, val);
        }
        Py_CLEAR(tmp);
    }
    if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Tuple");
        return 1;
    }
    else {
        int res;
        res = obj2ast_expr_context(tmp, &ctx, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    *out = Tuple(elts, ctx, lineno, col_offset, arena);
    if (*out == NULL) goto failed;
    return 0;
    }
    /* Fell through every isinstance check: obj is not any expr subclass. */
    PyErr_Format(PyExc_TypeError, "expected some sort of expr, but got %R", obj);
 failed:
    Py_XDECREF(tmp);
    return 1;
}

/* Convert a Python-level expr_context instance (Load, Store, Del, AugLoad,
 * AugStore or Param) into the corresponding expr_context_ty enum value,
 * stored in *out.  Returns 0 on success; returns 1 with a Python exception
 * set when an isinstance check fails or no context type matches.
 * (Generated code: one isinstance probe per constructor, in ASDL order.) */
int
obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Load_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Load;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Store_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Store;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Del_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Del;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)AugLoad_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = AugLoad;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)AugStore_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = AugStore;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Param_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Param;
        return 0;
    }
    PyErr_Format(PyExc_TypeError, "expected some sort of expr_context, but got %R", obj);
    return 1;
}

/* Convert a Python-level slice object (Slice, ExtSlice, Index or None) into
 * a slice_ty allocated from *arena and stored in *out.  None maps to a NULL
 * slice_ty with a 0 (success) return.  Returns 0 on success, 1 on error with
 * a Python exception set; "tmp" holds the borrowed-then-owned attribute being
 * converted and is released via Py_CLEAR / the failed: label. */
int
obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena)
{
    int isinstance;
    PyObject *tmp = NULL;

    if (obj == Py_None) {
        *out = NULL;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Slice_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        expr_ty lower;
        expr_ty upper;
        expr_ty
        step;  /* all three Slice fields are optional: NULL when absent or None */

        if (lookup_attr_id(obj, &PyId_lower, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            lower = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &lower, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (lookup_attr_id(obj, &PyId_upper, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            upper = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &upper, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (lookup_attr_id(obj, &PyId_step, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            step = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &step, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        *out = Slice(lower, upper, step, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)ExtSlice_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        asdl_seq* dims;  /* ExtSlice holds a sequence of sub-slices */

        if (lookup_attr_id(obj, &PyId_dims, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL) {
            PyErr_SetString(PyExc_TypeError, "required field \"dims\" missing from ExtSlice");
            return 1;
        }
        else {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "ExtSlice field \"dims\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            dims = _Ta3_asdl_seq_new(len, arena);
            if (dims == NULL) goto failed;
            for (i = 0; i < len; i++) {
                slice_ty val;
                res = obj2ast_slice(PyList_GET_ITEM(tmp, i), &val, arena);
                if (res != 0) goto failed;
                /* Conversion may run arbitrary Python code; re-check that the
                   list was not mutated out from under the loop. */
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "ExtSlice field \"dims\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(dims, i, val);
            }
            Py_CLEAR(tmp);
        }
        *out = ExtSlice(dims, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Index_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        expr_ty value;

        if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL) {
            PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Index");
            return 1;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &value, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        *out = Index(value, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    /* obj matched none of Slice / ExtSlice / Index (and was not None). */
    PyErr_Format(PyExc_TypeError, "expected some sort of slice, but got %R", obj);
 failed:
    Py_XDECREF(tmp);
    return 1;
}

/* Map a Python-level boolop instance (And or Or) to the boolop_ty enum,
 * stored in *out.  Returns 0 on success, 1 with an exception set otherwise. */
int
obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)And_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = And;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Or_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Or;
        return 0;
    }
    PyErr_Format(PyExc_TypeError, "expected some sort of boolop, but got %R", obj);
    return 1;
}

/* Map a Python-level binary-operator instance to the operator_ty enum,
 * stored in *out.  Probes each constructor type in ASDL order.
 * Returns 0 on success, 1 with an exception set otherwise. */
int
obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Add_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Add;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Sub_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Sub;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Mult_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Mult;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)MatMult_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = MatMult;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Div_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Div;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Mod_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Mod;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Pow_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Pow;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)LShift_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = LShift;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)RShift_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = RShift;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitOr_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = BitOr;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitXor_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = BitXor;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitAnd_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = BitAnd;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)FloorDiv_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = FloorDiv;
        return 0;
    }
    PyErr_Format(PyExc_TypeError, "expected some sort of operator, but got %R", obj);
    return 1;
}

/* Map a Python-level unary-operator instance (Invert, Not, UAdd, USub) to
 * the unaryop_ty enum, stored in *out.  Returns 0 on success, 1 with an
 * exception set otherwise. */
int
obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Invert_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Invert;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Not_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Not;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)UAdd_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = UAdd;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)USub_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = USub;
        return 0;
    }
    PyErr_Format(PyExc_TypeError, "expected some sort of unaryop, but got %R", obj);
    return 1;
}

/* Map a Python-level comparison-operator instance to the cmpop_ty enum,
 * stored in *out.  Probes each constructor type in ASDL order.
 * Returns 0 on success, 1 with an exception set otherwise. */
int
obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Eq_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = Eq;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject *)NotEq_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        *out = NotEq;
        return 0;
    }
    isinstance =
PyObject_IsInstance(obj, (PyObject *)Lt_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Lt; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)LtE_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = LtE; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Gt_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Gt; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)GtE_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = GtE; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)Is_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Is; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)IsNot_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = IsNot; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)In_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = In; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject *)NotIn_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = NotIn; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of cmpop, but got %R", obj); return 1; } int obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena) { PyObject* tmp = NULL; expr_ty target; expr_ty iter; asdl_seq* ifs; int is_async; if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from comprehension"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_iter, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from comprehension"); return 1; } else { int res; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_ifs, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"ifs\" missing from comprehension"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "comprehension field \"ifs\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); ifs = _Ta3_asdl_seq_new(len, arena); if (ifs == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "comprehension field \"ifs\" changed size during iteration"); goto failed; } asdl_seq_SET(ifs, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_is_async, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"is_async\" missing from comprehension"); return 1; } else { int res; res = obj2ast_int(tmp, &is_async, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = comprehension(target, iter, ifs, is_async, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from excepthandler"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from excepthandler"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)ExceptHandler_type); if (isinstance == -1) { return 1; } if 
(isinstance) { expr_ty type; identifier name; asdl_seq* body; if (lookup_attr_id(obj, &PyId_type, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type = NULL; } else { int res; res = obj2ast_expr(tmp, &type, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); name = NULL; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ExceptHandler"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ExceptHandler field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Ta3_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ExceptHandler field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = ExceptHandler(type, name, body, lineno, col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of excepthandler, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena) { PyObject* tmp = NULL; asdl_seq* args; arg_ty vararg; asdl_seq* kwonlyargs; asdl_seq* kw_defaults; arg_ty kwarg; asdl_seq* defaults; if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if 
(!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); args = _Ta3_asdl_seq_new(len, arena); if (args == NULL) goto failed; for (i = 0; i < len; i++) { arg_ty val; res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"args\" changed size during iteration"); goto failed; } asdl_seq_SET(args, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_vararg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); vararg = NULL; } else { int res; res = obj2ast_arg(tmp, &vararg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_kwonlyargs, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"kwonlyargs\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kwonlyargs\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kwonlyargs = _Ta3_asdl_seq_new(len, arena); if (kwonlyargs == NULL) goto failed; for (i = 0; i < len; i++) { arg_ty val; res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kwonlyargs\" changed size during iteration"); goto failed; } asdl_seq_SET(kwonlyargs, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_kw_defaults, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"kw_defaults\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kw_defaults\" must be a list, not a %.200s", 
tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kw_defaults = _Ta3_asdl_seq_new(len, arena); if (kw_defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kw_defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(kw_defaults, i, val); } Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_kwarg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); kwarg = NULL; } else { int res; res = obj2ast_arg(tmp, &kwarg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_defaults, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"defaults\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"defaults\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); defaults = _Ta3_asdl_seq_new(len, arena); if (defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(defaults, i, val); } Py_CLEAR(tmp); } *out = arguments(args, vararg, kwonlyargs, kw_defaults, kwarg, defaults, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier arg; expr_ty annotation; string type_comment; int lineno; int col_offset; if (lookup_attr_id(obj, &PyId_arg, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"arg\" missing from arg"); return 1; } else { 
int res; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_annotation, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); annotation = NULL; } else { int res; res = obj2ast_expr(tmp, &annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from arg"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from arg"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = arg(arg, annotation, type_comment, lineno, col_offset, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier arg; expr_ty value; if (lookup_attr_id(obj, &PyId_arg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); arg = NULL; } else { int res; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from keyword"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = keyword(arg, value, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int 
obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier name; identifier asname; if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from alias"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_asname, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); asname = NULL; } else { int res; res = obj2ast_identifier(tmp, &asname, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = alias(name, asname, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena) { PyObject* tmp = NULL; expr_ty context_expr; expr_ty optional_vars; if (lookup_attr_id(obj, &PyId_context_expr, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"context_expr\" missing from withitem"); return 1; } else { int res; res = obj2ast_expr(tmp, &context_expr, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (lookup_attr_id(obj, &PyId_optional_vars, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); optional_vars = NULL; } else { int res; res = obj2ast_expr(tmp, &optional_vars, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = withitem(context_expr, optional_vars, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; if (obj == Py_None) { *out = NULL; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)TypeIgnore_type); if (isinstance == -1) { return 1; } if (isinstance) { int lineno; if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from TypeIgnore"); return 1; } else { int 
res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = TypeIgnore(lineno, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of type_ignore, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } PyObject *ast3_parse(PyObject *self, PyObject *args); static PyMethodDef ast3_methods[] = { {"_parse", ast3_parse, METH_VARARGS, "Parse string into typed AST."}, {NULL, NULL, 0, NULL} }; static struct PyModuleDef _astmodule = { PyModuleDef_HEAD_INIT, "_ast3", NULL, 0, ast3_methods }; PyMODINIT_FUNC PyInit__ast3(void) { PyObject *m, *d; if (!init_types()) return NULL; m = PyModule_Create(&_astmodule); if (!m) return NULL; d = PyModule_GetDict(m); if (PyDict_SetItemString(d, "AST", (PyObject*)&AST_type) < 0) return NULL; if (PyModule_AddIntMacro(m, PyCF_ONLY_AST) < 0) return NULL; if (PyDict_SetItemString(d, "mod", (PyObject*)mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Module", (PyObject*)Module_type) < 0) return NULL; if (PyDict_SetItemString(d, "Interactive", (PyObject*)Interactive_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expression", (PyObject*)Expression_type) < 0) return NULL; if (PyDict_SetItemString(d, "FunctionType", (PyObject*)FunctionType_type) < 0) return NULL; if (PyDict_SetItemString(d, "Suite", (PyObject*)Suite_type) < 0) return NULL; if (PyDict_SetItemString(d, "stmt", (PyObject*)stmt_type) < 0) return NULL; if (PyDict_SetItemString(d, "FunctionDef", (PyObject*)FunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFunctionDef", (PyObject*)AsyncFunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "ClassDef", (PyObject*)ClassDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "Return", (PyObject*)Return_type) < 0) return NULL; if (PyDict_SetItemString(d, "Delete", (PyObject*)Delete_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assign", (PyObject*)Assign_type) < 0) return NULL; if (PyDict_SetItemString(d, 
"AugAssign", (PyObject*)AugAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "AnnAssign", (PyObject*)AnnAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "For", (PyObject*)For_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFor", (PyObject*)AsyncFor_type) < 0) return NULL; if (PyDict_SetItemString(d, "While", (PyObject*)While_type) < 0) return NULL; if (PyDict_SetItemString(d, "If", (PyObject*)If_type) < 0) return NULL; if (PyDict_SetItemString(d, "With", (PyObject*)With_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncWith", (PyObject*)AsyncWith_type) < 0) return NULL; if (PyDict_SetItemString(d, "Raise", (PyObject*)Raise_type) < 0) return NULL; if (PyDict_SetItemString(d, "Try", (PyObject*)Try_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assert", (PyObject*)Assert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Import", (PyObject*)Import_type) < 0) return NULL; if (PyDict_SetItemString(d, "ImportFrom", (PyObject*)ImportFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Global", (PyObject*)Global_type) < 0) return NULL; if (PyDict_SetItemString(d, "Nonlocal", (PyObject*)Nonlocal_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expr", (PyObject*)Expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pass", (PyObject*)Pass_type) < 0) return NULL; if (PyDict_SetItemString(d, "Break", (PyObject*)Break_type) < 0) return NULL; if (PyDict_SetItemString(d, "Continue", (PyObject*)Continue_type) < 0) return NULL; if (PyDict_SetItemString(d, "expr", (PyObject*)expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BoolOp", (PyObject*)BoolOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "BinOp", (PyObject*)BinOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "UnaryOp", (PyObject*)UnaryOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lambda", (PyObject*)Lambda_type) < 0) return NULL; if (PyDict_SetItemString(d, "IfExp", (PyObject*)IfExp_type) < 0) return NULL; if (PyDict_SetItemString(d, 
"Dict", (PyObject*)Dict_type) < 0) return NULL; if (PyDict_SetItemString(d, "Set", (PyObject*)Set_type) < 0) return NULL; if (PyDict_SetItemString(d, "ListComp", (PyObject*)ListComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "SetComp", (PyObject*)SetComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "DictComp", (PyObject*)DictComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "GeneratorExp", (PyObject*)GeneratorExp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Await", (PyObject*)Await_type) < 0) return NULL; if (PyDict_SetItemString(d, "Yield", (PyObject*)Yield_type) < 0) return NULL; if (PyDict_SetItemString(d, "YieldFrom", (PyObject*)YieldFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Compare", (PyObject*)Compare_type) < 0) return NULL; if (PyDict_SetItemString(d, "Call", (PyObject*)Call_type) < 0) return NULL; if (PyDict_SetItemString(d, "Num", (PyObject*)Num_type) < 0) return NULL; if (PyDict_SetItemString(d, "Str", (PyObject*)Str_type) < 0) return NULL; if (PyDict_SetItemString(d, "FormattedValue", (PyObject*)FormattedValue_type) < 0) return NULL; if (PyDict_SetItemString(d, "JoinedStr", (PyObject*)JoinedStr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Bytes", (PyObject*)Bytes_type) < 0) return NULL; if (PyDict_SetItemString(d, "NameConstant", (PyObject*)NameConstant_type) < 0) return NULL; if (PyDict_SetItemString(d, "Ellipsis", (PyObject*)Ellipsis_type) < 0) return NULL; if (PyDict_SetItemString(d, "Constant", (PyObject*)Constant_type) < 0) return NULL; if (PyDict_SetItemString(d, "Attribute", (PyObject*)Attribute_type) < 0) return NULL; if (PyDict_SetItemString(d, "Subscript", (PyObject*)Subscript_type) < 0) return NULL; if (PyDict_SetItemString(d, "Starred", (PyObject*)Starred_type) < 0) return NULL; if (PyDict_SetItemString(d, "Name", (PyObject*)Name_type) < 0) return NULL; if (PyDict_SetItemString(d, "List", (PyObject*)List_type) < 0) return NULL; if (PyDict_SetItemString(d, "Tuple", (PyObject*)Tuple_type) < 
0) return NULL; if (PyDict_SetItemString(d, "expr_context", (PyObject*)expr_context_type) < 0) return NULL; if (PyDict_SetItemString(d, "Load", (PyObject*)Load_type) < 0) return NULL; if (PyDict_SetItemString(d, "Store", (PyObject*)Store_type) < 0) return NULL; if (PyDict_SetItemString(d, "Del", (PyObject*)Del_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugLoad", (PyObject*)AugLoad_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugStore", (PyObject*)AugStore_type) < 0) return NULL; if (PyDict_SetItemString(d, "Param", (PyObject*)Param_type) < 0) return NULL; if (PyDict_SetItemString(d, "slice", (PyObject*)slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Slice", (PyObject*)Slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExtSlice", (PyObject*)ExtSlice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Index", (PyObject*)Index_type) < 0) return NULL; if (PyDict_SetItemString(d, "boolop", (PyObject*)boolop_type) < 0) return NULL; if (PyDict_SetItemString(d, "And", (PyObject*)And_type) < 0) return NULL; if (PyDict_SetItemString(d, "Or", (PyObject*)Or_type) < 0) return NULL; if (PyDict_SetItemString(d, "operator", (PyObject*)operator_type) < 0) return NULL; if (PyDict_SetItemString(d, "Add", (PyObject*)Add_type) < 0) return NULL; if (PyDict_SetItemString(d, "Sub", (PyObject*)Sub_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mult", (PyObject*)Mult_type) < 0) return NULL; if (PyDict_SetItemString(d, "MatMult", (PyObject*)MatMult_type) < 0) return NULL; if (PyDict_SetItemString(d, "Div", (PyObject*)Div_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mod", (PyObject*)Mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pow", (PyObject*)Pow_type) < 0) return NULL; if (PyDict_SetItemString(d, "LShift", (PyObject*)LShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "RShift", (PyObject*)RShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitOr", (PyObject*)BitOr_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "BitXor", (PyObject*)BitXor_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitAnd", (PyObject*)BitAnd_type) < 0) return NULL; if (PyDict_SetItemString(d, "FloorDiv", (PyObject*)FloorDiv_type) < 0) return NULL; if (PyDict_SetItemString(d, "unaryop", (PyObject*)unaryop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Invert", (PyObject*)Invert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Not", (PyObject*)Not_type) < 0) return NULL; if (PyDict_SetItemString(d, "UAdd", (PyObject*)UAdd_type) < 0) return NULL; if (PyDict_SetItemString(d, "USub", (PyObject*)USub_type) < 0) return NULL; if (PyDict_SetItemString(d, "cmpop", (PyObject*)cmpop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Eq", (PyObject*)Eq_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotEq", (PyObject*)NotEq_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lt", (PyObject*)Lt_type) < 0) return NULL; if (PyDict_SetItemString(d, "LtE", (PyObject*)LtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Gt", (PyObject*)Gt_type) < 0) return NULL; if (PyDict_SetItemString(d, "GtE", (PyObject*)GtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Is", (PyObject*)Is_type) < 0) return NULL; if (PyDict_SetItemString(d, "IsNot", (PyObject*)IsNot_type) < 0) return NULL; if (PyDict_SetItemString(d, "In", (PyObject*)In_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotIn", (PyObject*)NotIn_type) < 0) return NULL; if (PyDict_SetItemString(d, "comprehension", (PyObject*)comprehension_type) < 0) return NULL; if (PyDict_SetItemString(d, "excepthandler", (PyObject*)excepthandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExceptHandler", (PyObject*)ExceptHandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "arguments", (PyObject*)arguments_type) < 0) return NULL; if (PyDict_SetItemString(d, "arg", (PyObject*)arg_type) < 0) return NULL; if (PyDict_SetItemString(d, "keyword", (PyObject*)keyword_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "alias", (PyObject*)alias_type) < 0) return NULL; if (PyDict_SetItemString(d, "withitem", (PyObject*)withitem_type) < 0) return NULL; if (PyDict_SetItemString(d, "type_ignore", (PyObject*)type_ignore_type) < 0) return NULL; if (PyDict_SetItemString(d, "TypeIgnore", (PyObject*)TypeIgnore_type) < 0) return NULL; return m; } PyObject* Ta3AST_mod2obj(mod_ty t) { if (!init_types()) return NULL; return ast2obj_mod(t); } /* mode is 0 for "exec", 1 for "eval" and 2 for "single" input */ mod_ty Ta3AST_obj2mod(PyObject* ast, PyArena* arena, int mode) { mod_ty res; PyObject *req_type[3]; char *req_name[] = {"Module", "Expression", "Interactive"}; int isinstance; req_type[0] = (PyObject*)Module_type; req_type[1] = (PyObject*)Expression_type; req_type[2] = (PyObject*)Interactive_type; assert(0 <= mode && mode <= 2); if (!init_types()) return NULL; isinstance = PyObject_IsInstance(ast, req_type[mode]); if (isinstance == -1) return NULL; if (!isinstance) { PyErr_Format(PyExc_TypeError, "expected %s node, got %.400s", req_name[mode], Py_TYPE(ast)->tp_name); return NULL; } if (obj2ast_mod(ast, &res, arena) != 0) return NULL; else return res; } int Ta3AST_Check(PyObject* obj) { if (!init_types()) return -1; return PyObject_IsInstance(obj, (PyObject*)&AST_type); }
/*
 * Create a new AST node type object.
 *
 * Builds a heap type named `type` that inherits from `base`, whose
 * `_fields` attribute is a tuple of the `num_fields` names in `fields`
 * and whose `__module__` is "_ast3".
 *
 * Returns a new reference to the created type, or NULL with an
 * exception set on failure.
 */
static PyTypeObject *
make_type(char *type, PyTypeObject *base, char **fields, int num_fields)
{
    int idx;
    PyObject *field_names;
    PyObject *created;

    field_names = PyTuple_New(num_fields);
    if (field_names == NULL)
        return NULL;

    for (idx = 0; idx < num_fields; idx++) {
        PyObject *name_obj = PyUnicode_FromString(fields[idx]);
        if (name_obj == NULL) {
            Py_DECREF(field_names);
            return NULL;
        }
        /* PyTuple_SET_ITEM steals the reference to name_obj. */
        PyTuple_SET_ITEM(field_names, idx, name_obj);
    }

    /* Call type(name, (base,), {"_fields": field_names,
     * "__module__": "_ast3"}). */
    created = PyObject_CallFunction((PyObject *)&PyType_Type,
                                    "s(O){sOss}",
                                    type, base,
                                    "_fields", field_names,
                                    "__module__", "_ast3");
    Py_DECREF(field_names);
    return (PyTypeObject *)created;
}
/*
 * Create a new AST node type object.
 *
 * Builds a heap type named `type` that inherits from `base`, whose
 * `_fields` attribute is a tuple of the `num_fields` names in `fields`
 * and whose `__module__` is "_ast3".  Attribute keys are interned via
 * _Py_IDENTIFIER so every created type shares the same key objects.
 *
 * Returns a new reference to the created type, or NULL with an
 * exception set on failure.
 */
static PyTypeObject *
make_type(char *type, PyTypeObject *base, char **fields, int num_fields)
{
    _Py_IDENTIFIER(__module__);
    _Py_IDENTIFIER(_ast3);
    PyObject *fnames, *result;
    PyObject *fields_key, *module_key, *module_val;
    int i;

    fnames = PyTuple_New(num_fields);
    if (!fnames)
        return NULL;
    for (i = 0; i < num_fields; i++) {
        PyObject *field = PyUnicode_FromString(fields[i]);
        if (!field) {
            Py_DECREF(fnames);
            return NULL;
        }
        /* PyTuple_SET_ITEM steals the reference to field. */
        PyTuple_SET_ITEM(fnames, i, field);
    }
    /* _PyUnicode_FromId returns a borrowed reference, or NULL if the
     * identifier string could not be created/interned (e.g. out of
     * memory).  Check each result before use: passing NULL for an "O"
     * format unit to PyObject_CallFunction is invalid. */
    fields_key = _PyUnicode_FromId(&PyId__fields);
    module_key = _PyUnicode_FromId(&PyId___module__);
    module_val = _PyUnicode_FromId(&PyId__ast3);
    if (!fields_key || !module_key || !module_val) {
        Py_DECREF(fnames);
        return NULL;
    }
    /* Call type(name, (base,), {_fields: fnames, __module__: "_ast3"}). */
    result = PyObject_CallFunction((PyObject *)&PyType_Type,
                                   "s(O){OOOO}",
                                   type, base,
                                   fields_key, fnames,
                                   module_key, module_val);
    Py_DECREF(fnames);
    return (PyTypeObject *)result;
}
{'added': [(532, '_Py_IDENTIFIER(_fields);'), (533, '_Py_IDENTIFIER(_attributes);'), (534, ''), (543, ' /* bpo-31095: UnTrack is needed before calling any callbacks */'), (544, ' PyObject_GC_UnTrack(self);'), (556, 'static int'), (560, ' return 0;'), (561, '}'), (562, ''), (563, 'static int lookup_attr_id(PyObject *v, _Py_Identifier *name, PyObject **result)'), (564, '{'), (565, ' PyObject *oname = _PyUnicode_FromId(name); /* borrowed */'), (566, ' if (!oname) {'), (567, ' *result = NULL;'), (568, ' return -1;'), (569, ' }'), (570, ' *result = PyObject_GetAttr(v, oname);'), (571, ' if (*result == NULL) {'), (572, ' if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {'), (573, ' return -1;'), (574, ' }'), (575, ' PyErr_Clear();'), (576, ' }'), (577, ' return 0;'), (586, ' if (lookup_attr_id((PyObject*)Py_TYPE(self), &PyId__fields, &fields) < 0) {'), (587, ' goto cleanup;'), (588, ' }'), (594, ''), (596, ' if (numfields < PyTuple_GET_SIZE(args)) {'), (597, ' PyErr_Format(PyExc_TypeError, "%.400s constructor takes at most "'), (598, ' "%zd positional argument%s",'), (599, ' Py_TYPE(self)->tp_name,'), (600, ' numfields, numfields == 1 ? 
"" : "s");'), (601, ' res = -1;'), (602, ' goto cleanup;'), (603, ' }'), (604, ' for (i = 0; i < PyTuple_GET_SIZE(args); i++) {'), (605, ' /* cannot be reached when fields is NULL */'), (606, ' PyObject *name = PySequence_GetItem(fields, i);'), (607, ' if (!name) {'), (611, ' res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i));'), (612, ' Py_DECREF(name);'), (613, ' if (res < 0)'), (614, ' goto cleanup;'), (634, ' PyObject *dict;'), (635, ' if (lookup_attr_id(self, &PyId___dict__, &dict) < 0) {'), (636, ' return NULL;'), (639, ' return Py_BuildValue("O()N", Py_TYPE(self), dict);'), (699, ' _Py_IDENTIFIER(__module__);'), (700, ' _Py_IDENTIFIER(_ast3);'), (713, ' result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){OOOO}",'), (714, ' type, base,'), (715, ' _PyUnicode_FromId(&PyId__fields), fnames,'), (716, ' _PyUnicode_FromId(&PyId___module__),'), (717, ' _PyUnicode_FromId(&PyId__ast3));'), (870, ' _PyDict_SetItemId(d, &PyId__fields, empty_tuple) < 0 ||'), (871, ' _PyDict_SetItemId(d, &PyId__attributes, empty_tuple) < 0) {'), (2672, ' Py_RETURN_NONE;'), (2745, ' Py_RETURN_NONE;'), (3204, ' Py_RETURN_NONE;'), (3671, ' Py_RETURN_NONE;'), (3849, ' Py_RETURN_NONE;'), (3887, ' Py_RETURN_NONE;'), (3934, ' Py_RETURN_NONE;'), (3982, ' Py_RETURN_NONE;'), (4025, ' Py_RETURN_NONE;'), (4053, ' Py_RETURN_NONE;'), (4081, ' Py_RETURN_NONE;'), (4109, ' Py_RETURN_NONE;'), (4150, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (4151, ' return 1;'), (4152, ' }'), (4153, ' if (tmp == NULL) {'), (4154, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Module");'), (4155, ' return 1;'), (4156, ' }'), (4157, ' else {'), (4169, ' stmt_ty val;'), (4170, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (4176, ' asdl_seq_SET(body, i, val);'), (4179, ' }'), (4180, ' if (lookup_attr_id(obj, &PyId_type_ignores, &tmp) < 0) {'), (4181, ' return 1;'), (4182, ' }'), (4183, ' if (tmp == NULL) {'), (4184, ' PyErr_SetString(PyExc_TypeError, 
"required field \\"type_ignores\\" missing from Module");'), (4187, ' else {'), (4199, ' type_ignore_ty val;'), (4200, ' res = obj2ast_type_ignore(PyList_GET_ITEM(tmp, i), &val, arena);'), (4206, ' asdl_seq_SET(type_ignores, i, val);'), (4221, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (4222, ' return 1;'), (4223, ' }'), (4224, ' if (tmp == NULL) {'), (4225, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Interactive");'), (4226, ' return 1;'), (4227, ' }'), (4228, ' else {'), (4240, ' stmt_ty val;'), (4241, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (4247, ' asdl_seq_SET(body, i, val);'), (4262, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (4263, ' return 1;'), (4264, ' }'), (4265, ' if (tmp == NULL) {'), (4266, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Expression");'), (4267, ' return 1;'), (4268, ' }'), (4269, ' else {'), (4287, ' if (lookup_attr_id(obj, &PyId_argtypes, &tmp) < 0) {'), (4288, ' return 1;'), (4289, ' }'), (4290, ' if (tmp == NULL) {'), (4291, ' PyErr_SetString(PyExc_TypeError, "required field \\"argtypes\\" missing from FunctionType");'), (4292, ' return 1;'), (4293, ' }'), (4294, ' else {'), (4306, ' expr_ty val;'), (4307, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4313, ' asdl_seq_SET(argtypes, i, val);'), (4316, ' }'), (4317, ' if (lookup_attr_id(obj, &PyId_returns, &tmp) < 0) {'), (4318, ' return 1;'), (4319, ' }'), (4320, ' if (tmp == NULL) {'), (4321, ' PyErr_SetString(PyExc_TypeError, "required field \\"returns\\" missing from FunctionType");'), (4324, ' else {'), (4341, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (4342, ' return 1;'), (4343, ' }'), (4344, ' if (tmp == NULL) {'), (4345, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Suite");'), (4346, ' return 1;'), (4347, ' }'), (4348, ' else {'), (4360, ' stmt_ty val;'), (4361, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, 
arena);'), (4367, ' asdl_seq_SET(body, i, val);'), (4395, ' if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) {'), (4396, ' return 1;'), (4397, ' }'), (4398, ' if (tmp == NULL) {'), (4399, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from stmt");'), (4400, ' return 1;'), (4401, ' }'), (4402, ' else {'), (4407, ' }'), (4408, ' if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) {'), (4411, ' if (tmp == NULL) {'), (4412, ' PyErr_SetString(PyExc_TypeError, "required field \\"col_offset\\" missing from stmt");'), (4413, ' return 1;'), (4414, ' }'), (4415, ' else {'), (4433, ' if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) {'), (4434, ' return 1;'), (4435, ' }'), (4436, ' if (tmp == NULL) {'), (4437, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from FunctionDef");'), (4438, ' return 1;'), (4439, ' }'), (4440, ' else {'), (4445, ' }'), (4446, ' if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) {'), (4447, ' return 1;'), (4448, ' }'), (4449, ' if (tmp == NULL) {'), (4450, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from FunctionDef");'), (4453, ' else {'), (4458, ' }'), (4459, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (4460, ' return 1;'), (4461, ' }'), (4462, ' if (tmp == NULL) {'), (4463, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from FunctionDef");'), (4466, ' else {'), (4478, ' stmt_ty val;'), (4479, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (4485, ' asdl_seq_SET(body, i, val);'), (4488, ' }'), (4489, ' if (lookup_attr_id(obj, &PyId_decorator_list, &tmp) < 0) {'), (4492, ' if (tmp == NULL) {'), (4493, ' PyErr_SetString(PyExc_TypeError, "required field \\"decorator_list\\" missing from FunctionDef");'), (4494, ' return 1;'), (4495, ' }'), (4496, ' else {'), (4508, ' expr_ty val;'), (4509, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4515, ' asdl_seq_SET(decorator_list, i, val);'), (4518, ' }'), (4519, ' if 
(lookup_attr_id(obj, &PyId_returns, &tmp) < 0) {'), (4522, ' if (tmp == NULL || tmp == Py_None) {'), (4523, ' Py_CLEAR(tmp);'), (4524, ' returns = NULL;'), (4525, ' }'), (4526, ' else {'), (4532, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (4533, ' return 1;'), (4534, ' }'), (4535, ' if (tmp == NULL || tmp == Py_None) {'), (4536, ' Py_CLEAR(tmp);'), (4537, ' type_comment = NULL;'), (4538, ' }'), (4539, ' else {'), (4562, ' if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) {'), (4563, ' return 1;'), (4564, ' }'), (4565, ' if (tmp == NULL) {'), (4566, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from AsyncFunctionDef");'), (4567, ' return 1;'), (4568, ' }'), (4569, ' else {'), (4574, ' }'), (4575, ' if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) {'), (4576, ' return 1;'), (4577, ' }'), (4578, ' if (tmp == NULL) {'), (4579, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from AsyncFunctionDef");'), (4582, ' else {'), (4587, ' }'), (4588, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (4589, ' return 1;'), (4590, ' }'), (4591, ' if (tmp == NULL) {'), (4592, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from AsyncFunctionDef");'), (4595, ' else {'), (4607, ' stmt_ty val;'), (4608, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (4614, ' asdl_seq_SET(body, i, val);'), (4617, ' }'), (4618, ' if (lookup_attr_id(obj, &PyId_decorator_list, &tmp) < 0) {'), (4621, ' if (tmp == NULL) {'), (4622, ' PyErr_SetString(PyExc_TypeError, "required field \\"decorator_list\\" missing from AsyncFunctionDef");'), (4623, ' return 1;'), (4624, ' }'), (4625, ' else {'), (4637, ' expr_ty val;'), (4638, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4644, ' asdl_seq_SET(decorator_list, i, val);'), (4647, ' }'), (4648, ' if (lookup_attr_id(obj, &PyId_returns, &tmp) < 0) {'), (4651, ' if (tmp == NULL || tmp == Py_None) {'), (4652, ' Py_CLEAR(tmp);'), (4653, ' returns = 
NULL;'), (4654, ' }'), (4655, ' else {'), (4661, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (4662, ' return 1;'), (4663, ' }'), (4664, ' if (tmp == NULL || tmp == Py_None) {'), (4665, ' Py_CLEAR(tmp);'), (4666, ' type_comment = NULL;'), (4667, ' }'), (4668, ' else {'), (4690, ' if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) {'), (4691, ' return 1;'), (4692, ' }'), (4693, ' if (tmp == NULL) {'), (4694, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from ClassDef");'), (4695, ' return 1;'), (4696, ' }'), (4697, ' else {'), (4702, ' }'), (4703, ' if (lookup_attr_id(obj, &PyId_bases, &tmp) < 0) {'), (4704, ' return 1;'), (4705, ' }'), (4706, ' if (tmp == NULL) {'), (4707, ' PyErr_SetString(PyExc_TypeError, "required field \\"bases\\" missing from ClassDef");'), (4710, ' else {'), (4722, ' expr_ty val;'), (4723, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4729, ' asdl_seq_SET(bases, i, val);'), (4732, ' }'), (4733, ' if (lookup_attr_id(obj, &PyId_keywords, &tmp) < 0) {'), (4734, ' return 1;'), (4735, ' }'), (4736, ' if (tmp == NULL) {'), (4737, ' PyErr_SetString(PyExc_TypeError, "required field \\"keywords\\" missing from ClassDef");'), (4740, ' else {'), (4752, ' keyword_ty val;'), (4753, ' res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena);'), (4759, ' asdl_seq_SET(keywords, i, val);'), (4762, ' }'), (4763, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (4766, ' if (tmp == NULL) {'), (4767, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from ClassDef");'), (4768, ' return 1;'), (4769, ' }'), (4770, ' else {'), (4782, ' stmt_ty val;'), (4783, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (4789, ' asdl_seq_SET(body, i, val);'), (4792, ' }'), (4793, ' if (lookup_attr_id(obj, &PyId_decorator_list, &tmp) < 0) {'), (4794, ' return 1;'), (4795, ' }'), (4796, ' if (tmp == NULL) {'), (4797, ' PyErr_SetString(PyExc_TypeError, "required field \\"decorator_list\\" 
missing from ClassDef");'), (4800, ' else {'), (4812, ' expr_ty val;'), (4813, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4819, ' asdl_seq_SET(decorator_list, i, val);'), (4835, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (4836, ' return 1;'), (4837, ' }'), (4838, ' if (tmp == NULL || tmp == Py_None) {'), (4839, ' Py_CLEAR(tmp);'), (4840, ' value = NULL;'), (4841, ' }'), (4842, ' else {'), (4859, ' if (lookup_attr_id(obj, &PyId_targets, &tmp) < 0) {'), (4860, ' return 1;'), (4861, ' }'), (4862, ' if (tmp == NULL) {'), (4863, ' PyErr_SetString(PyExc_TypeError, "required field \\"targets\\" missing from Delete");'), (4864, ' return 1;'), (4865, ' }'), (4866, ' else {'), (4878, ' expr_ty val;'), (4879, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4885, ' asdl_seq_SET(targets, i, val);'), (4902, ' if (lookup_attr_id(obj, &PyId_targets, &tmp) < 0) {'), (4903, ' return 1;'), (4904, ' }'), (4905, ' if (tmp == NULL) {'), (4906, ' PyErr_SetString(PyExc_TypeError, "required field \\"targets\\" missing from Assign");'), (4907, ' return 1;'), (4908, ' }'), (4909, ' else {'), (4921, ' expr_ty val;'), (4922, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4928, ' asdl_seq_SET(targets, i, val);'), (4931, ' }'), (4932, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (4935, ' if (tmp == NULL) {'), (4936, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Assign");'), (4937, ' return 1;'), (4938, ' }'), (4939, ' else {'), (4944, ' }'), (4945, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (4948, ' if (tmp == NULL || tmp == Py_None) {'), (4949, ' Py_CLEAR(tmp);'), (4950, ' type_comment = NULL;'), (4951, ' }'), (4952, ' else {'), (4971, ' if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) {'), (4972, ' return 1;'), (4973, ' }'), (4974, ' if (tmp == NULL) {'), (4975, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from AugAssign");'), (4976, ' return 
1;'), (4977, ' }'), (4978, ' else {'), (4983, ' }'), (4984, ' if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) {'), (4985, ' return 1;'), (4986, ' }'), (4987, ' if (tmp == NULL) {'), (4988, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from AugAssign");'), (4991, ' else {'), (4996, ' }'), (4997, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (4998, ' return 1;'), (4999, ' }'), (5000, ' if (tmp == NULL) {'), (5001, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from AugAssign");'), (5004, ' else {'), (5024, ' if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) {'), (5025, ' return 1;'), (5026, ' }'), (5027, ' if (tmp == NULL) {'), (5028, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from AnnAssign");'), (5029, ' return 1;'), (5030, ' }'), (5031, ' else {'), (5036, ' }'), (5037, ' if (lookup_attr_id(obj, &PyId_annotation, &tmp) < 0) {'), (5038, ' return 1;'), (5039, ' }'), (5040, ' if (tmp == NULL) {'), (5041, ' PyErr_SetString(PyExc_TypeError, "required field \\"annotation\\" missing from AnnAssign");'), (5044, ' else {'), (5049, ' }'), (5050, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (5053, ' if (tmp == NULL || tmp == Py_None) {'), (5054, ' Py_CLEAR(tmp);'), (5055, ' value = NULL;'), (5056, ' }'), (5057, ' else {'), (5063, ' if (lookup_attr_id(obj, &PyId_simple, &tmp) < 0) {'), (5064, ' return 1;'), (5065, ' }'), (5066, ' if (tmp == NULL) {'), (5067, ' PyErr_SetString(PyExc_TypeError, "required field \\"simple\\" missing from AnnAssign");'), (5068, ' return 1;'), (5069, ' }'), (5070, ' else {'), (5092, ' if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) {'), (5093, ' return 1;'), (5094, ' }'), (5095, ' if (tmp == NULL) {'), (5096, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from For");'), (5097, ' return 1;'), (5098, ' }'), (5099, ' else {'), (5104, ' }'), (5105, ' if (lookup_attr_id(obj, &PyId_iter, &tmp) < 0) {'), (5108, ' if (tmp == NULL) {'), (5109, 
' PyErr_SetString(PyExc_TypeError, "required field \\"iter\\" missing from For");'), (5110, ' return 1;'), (5111, ' }'), (5112, ' else {'), (5117, ' }'), (5118, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (5119, ' return 1;'), (5120, ' }'), (5121, ' if (tmp == NULL) {'), (5122, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from For");'), (5125, ' else {'), (5137, ' stmt_ty val;'), (5138, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5144, ' asdl_seq_SET(body, i, val);'), (5147, ' }'), (5148, ' if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) {'), (5149, ' return 1;'), (5150, ' }'), (5151, ' if (tmp == NULL) {'), (5152, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from For");'), (5155, ' else {'), (5167, ' stmt_ty val;'), (5168, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5174, ' asdl_seq_SET(orelse, i, val);'), (5177, ' }'), (5178, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (5181, ' if (tmp == NULL || tmp == Py_None) {'), (5182, ' Py_CLEAR(tmp);'), (5183, ' type_comment = NULL;'), (5184, ' }'), (5185, ' else {'), (5207, ' if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) {'), (5208, ' return 1;'), (5209, ' }'), (5210, ' if (tmp == NULL) {'), (5211, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from AsyncFor");'), (5212, ' return 1;'), (5213, ' }'), (5214, ' else {'), (5219, ' }'), (5220, ' if (lookup_attr_id(obj, &PyId_iter, &tmp) < 0) {'), (5223, ' if (tmp == NULL) {'), (5224, ' PyErr_SetString(PyExc_TypeError, "required field \\"iter\\" missing from AsyncFor");'), (5225, ' return 1;'), (5226, ' }'), (5227, ' else {'), (5232, ' }'), (5233, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (5234, ' return 1;'), (5235, ' }'), (5236, ' if (tmp == NULL) {'), (5237, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from AsyncFor");'), (5240, ' else {'), (5252, ' stmt_ty val;'), (5253, ' res = 
obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5259, ' asdl_seq_SET(body, i, val);'), (5262, ' }'), (5263, ' if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) {'), (5264, ' return 1;'), (5265, ' }'), (5266, ' if (tmp == NULL) {'), (5267, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from AsyncFor");'), (5270, ' else {'), (5282, ' stmt_ty val;'), (5283, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5289, ' asdl_seq_SET(orelse, i, val);'), (5292, ' }'), (5293, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (5296, ' if (tmp == NULL || tmp == Py_None) {'), (5297, ' Py_CLEAR(tmp);'), (5298, ' type_comment = NULL;'), (5299, ' }'), (5300, ' else {'), (5320, ' if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) {'), (5321, ' return 1;'), (5322, ' }'), (5323, ' if (tmp == NULL) {'), (5324, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from While");'), (5325, ' return 1;'), (5326, ' }'), (5327, ' else {'), (5332, ' }'), (5333, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (5336, ' if (tmp == NULL) {'), (5337, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from While");'), (5338, ' return 1;'), (5339, ' }'), (5340, ' else {'), (5352, ' stmt_ty val;'), (5353, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5359, ' asdl_seq_SET(body, i, val);'), (5362, ' }'), (5363, ' if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) {'), (5364, ' return 1;'), (5365, ' }'), (5366, ' if (tmp == NULL) {'), (5367, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from While");'), (5370, ' else {'), (5382, ' stmt_ty val;'), (5383, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5389, ' asdl_seq_SET(orelse, i, val);'), (5406, ' if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) {'), (5407, ' return 1;'), (5408, ' }'), (5409, ' if (tmp == NULL) {'), (5410, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from 
If");'), (5411, ' return 1;'), (5412, ' }'), (5413, ' else {'), (5418, ' }'), (5419, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (5420, ' return 1;'), (5421, ' }'), (5422, ' if (tmp == NULL) {'), (5423, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from If");'), (5426, ' else {'), (5438, ' stmt_ty val;'), (5439, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5445, ' asdl_seq_SET(body, i, val);'), (5448, ' }'), (5449, ' if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) {'), (5450, ' return 1;'), (5451, ' }'), (5452, ' if (tmp == NULL) {'), (5453, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from If");'), (5456, ' else {'), (5468, ' stmt_ty val;'), (5469, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5475, ' asdl_seq_SET(orelse, i, val);'), (5492, ' if (lookup_attr_id(obj, &PyId_items, &tmp) < 0) {'), (5493, ' return 1;'), (5494, ' }'), (5495, ' if (tmp == NULL) {'), (5496, ' PyErr_SetString(PyExc_TypeError, "required field \\"items\\" missing from With");'), (5497, ' return 1;'), (5498, ' }'), (5499, ' else {'), (5511, ' withitem_ty val;'), (5512, ' res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena);'), (5518, ' asdl_seq_SET(items, i, val);'), (5521, ' }'), (5522, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (5523, ' return 1;'), (5524, ' }'), (5525, ' if (tmp == NULL) {'), (5526, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from With");'), (5529, ' else {'), (5541, ' stmt_ty val;'), (5542, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5548, ' asdl_seq_SET(body, i, val);'), (5551, ' }'), (5552, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (5555, ' if (tmp == NULL || tmp == Py_None) {'), (5556, ' Py_CLEAR(tmp);'), (5557, ' type_comment = NULL;'), (5558, ' }'), (5559, ' else {'), (5578, ' if (lookup_attr_id(obj, &PyId_items, &tmp) < 0) {'), (5579, ' return 1;'), (5580, ' }'), (5581, ' if (tmp == 
NULL) {'), (5582, ' PyErr_SetString(PyExc_TypeError, "required field \\"items\\" missing from AsyncWith");'), (5583, ' return 1;'), (5584, ' }'), (5585, ' else {'), (5597, ' withitem_ty val;'), (5598, ' res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena);'), (5604, ' asdl_seq_SET(items, i, val);'), (5607, ' }'), (5608, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (5611, ' if (tmp == NULL) {'), (5612, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from AsyncWith");'), (5613, ' return 1;'), (5614, ' }'), (5615, ' else {'), (5627, ' stmt_ty val;'), (5628, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5634, ' asdl_seq_SET(body, i, val);'), (5637, ' }'), (5638, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (5641, ' if (tmp == NULL || tmp == Py_None) {'), (5642, ' Py_CLEAR(tmp);'), (5643, ' type_comment = NULL;'), (5644, ' }'), (5645, ' else {'), (5663, ' if (lookup_attr_id(obj, &PyId_exc, &tmp) < 0) {'), (5664, ' return 1;'), (5665, ' }'), (5666, ' if (tmp == NULL || tmp == Py_None) {'), (5667, ' Py_CLEAR(tmp);'), (5668, ' exc = NULL;'), (5669, ' }'), (5670, ' else {'), (5676, ' if (lookup_attr_id(obj, &PyId_cause, &tmp) < 0) {'), (5677, ' return 1;'), (5678, ' }'), (5679, ' if (tmp == NULL || tmp == Py_None) {'), (5680, ' Py_CLEAR(tmp);'), (5681, ' cause = NULL;'), (5682, ' }'), (5683, ' else {'), (5703, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (5704, ' return 1;'), (5705, ' }'), (5706, ' if (tmp == NULL) {'), (5707, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Try");'), (5708, ' return 1;'), (5709, ' }'), (5710, ' else {'), (5722, ' stmt_ty val;'), (5723, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5729, ' asdl_seq_SET(body, i, val);'), (5732, ' }'), (5733, ' if (lookup_attr_id(obj, &PyId_handlers, &tmp) < 0) {'), (5736, ' if (tmp == NULL) {'), (5737, ' PyErr_SetString(PyExc_TypeError, "required field \\"handlers\\" missing from 
Try");'), (5738, ' return 1;'), (5739, ' }'), (5740, ' else {'), (5741, ' int res;'), (5742, ' Py_ssize_t len;'), (5752, ' excepthandler_ty val;'), (5753, ' res = obj2ast_excepthandler(PyList_GET_ITEM(tmp, i), &val, arena);'), (5759, ' asdl_seq_SET(handlers, i, val);'), (5762, ' }'), (5763, ' if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) {'), (5764, ' return 1;'), (5765, ' }'), (5766, ' if (tmp == NULL) {'), (5767, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from Try");'), (5770, ' else {'), (5782, ' stmt_ty val;'), (5783, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5789, ' asdl_seq_SET(orelse, i, val);'), (5792, ' }'), (5793, ' if (lookup_attr_id(obj, &PyId_finalbody, &tmp) < 0) {'), (5796, ' if (tmp == NULL) {'), (5797, ' PyErr_SetString(PyExc_TypeError, "required field \\"finalbody\\" missing from Try");'), (5798, ' return 1;'), (5799, ' }'), (5800, ' else {'), (5812, ' stmt_ty val;'), (5813, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (5819, ' asdl_seq_SET(finalbody, i, val);'), (5836, ' if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) {'), (5837, ' return 1;'), (5838, ' }'), (5839, ' if (tmp == NULL) {'), (5840, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from Assert");'), (5841, ' return 1;'), (5842, ' }'), (5843, ' else {'), (5848, ' }'), (5849, ' if (lookup_attr_id(obj, &PyId_msg, &tmp) < 0) {'), (5852, ' if (tmp == NULL || tmp == Py_None) {'), (5853, ' Py_CLEAR(tmp);'), (5854, ' msg = NULL;'), (5855, ' }'), (5856, ' else {'), (5873, ' if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) {'), (5874, ' return 1;'), (5875, ' }'), (5876, ' if (tmp == NULL) {'), (5877, ' PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from Import");'), (5878, ' return 1;'), (5879, ' }'), (5880, ' else {'), (5892, ' alias_ty val;'), (5893, ' res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena);'), (5899, ' asdl_seq_SET(names, i, val);'), (5916, ' if 
(lookup_attr_id(obj, &PyId_module, &tmp) < 0) {'), (5917, ' return 1;'), (5918, ' }'), (5919, ' if (tmp == NULL || tmp == Py_None) {'), (5920, ' Py_CLEAR(tmp);'), (5921, ' module = NULL;'), (5922, ' }'), (5923, ' else {'), (5929, ' if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) {'), (5930, ' return 1;'), (5931, ' }'), (5932, ' if (tmp == NULL) {'), (5933, ' PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from ImportFrom");'), (5934, ' return 1;'), (5935, ' }'), (5936, ' else {'), (5948, ' alias_ty val;'), (5949, ' res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena);'), (5955, ' asdl_seq_SET(names, i, val);'), (5958, ' }'), (5959, ' if (lookup_attr_id(obj, &PyId_level, &tmp) < 0) {'), (5962, ' if (tmp == NULL || tmp == Py_None) {'), (5963, ' Py_CLEAR(tmp);'), (5964, ' level = 0;'), (5965, ' }'), (5966, ' else {'), (5983, ' if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) {'), (5984, ' return 1;'), (5985, ' }'), (5986, ' if (tmp == NULL) {'), (5987, ' PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from Global");'), (5988, ' return 1;'), (5989, ' }'), (5990, ' else {'), (6002, ' identifier val;'), (6003, ' res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena);'), (6009, ' asdl_seq_SET(names, i, val);'), (6024, ' if (lookup_attr_id(obj, &PyId_names, &tmp) < 0) {'), (6025, ' return 1;'), (6026, ' }'), (6027, ' if (tmp == NULL) {'), (6028, ' PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from Nonlocal");'), (6029, ' return 1;'), (6030, ' }'), (6031, ' else {'), (6043, ' identifier val;'), (6044, ' res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena);'), (6050, ' asdl_seq_SET(names, i, val);'), (6065, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (6066, ' return 1;'), (6067, ' }'), (6068, ' if (tmp == NULL) {'), (6069, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Expr");'), (6070, ' return 1;'), (6071, ' }'), (6072, ' else {'), (6132, ' if 
(lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) {'), (6133, ' return 1;'), (6134, ' }'), (6135, ' if (tmp == NULL) {'), (6136, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from expr");'), (6137, ' return 1;'), (6138, ' }'), (6139, ' else {'), (6144, ' }'), (6145, ' if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) {'), (6148, ' if (tmp == NULL) {'), (6149, ' PyErr_SetString(PyExc_TypeError, "required field \\"col_offset\\" missing from expr");'), (6150, ' return 1;'), (6151, ' }'), (6152, ' else {'), (6166, ' if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) {'), (6167, ' return 1;'), (6168, ' }'), (6169, ' if (tmp == NULL) {'), (6170, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from BoolOp");'), (6171, ' return 1;'), (6172, ' }'), (6173, ' else {'), (6178, ' }'), (6179, ' if (lookup_attr_id(obj, &PyId_values, &tmp) < 0) {'), (6180, ' return 1;'), (6181, ' }'), (6182, ' if (tmp == NULL) {'), (6183, ' PyErr_SetString(PyExc_TypeError, "required field \\"values\\" missing from BoolOp");'), (6186, ' else {'), (6198, ' expr_ty val;'), (6199, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (6205, ' asdl_seq_SET(values, i, val);'), (6222, ' if (lookup_attr_id(obj, &PyId_left, &tmp) < 0) {'), (6223, ' return 1;'), (6224, ' }'), (6225, ' if (tmp == NULL) {'), (6226, ' PyErr_SetString(PyExc_TypeError, "required field \\"left\\" missing from BinOp");'), (6227, ' return 1;'), (6228, ' }'), (6229, ' else {'), (6234, ' }'), (6235, ' if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) {'), (6236, ' return 1;'), (6237, ' }'), (6238, ' if (tmp == NULL) {'), (6239, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from BinOp");'), (6242, ' else {'), (6247, ' }'), (6248, ' if (lookup_attr_id(obj, &PyId_right, &tmp) < 0) {'), (6249, ' return 1;'), (6250, ' }'), (6251, ' if (tmp == NULL) {'), (6252, ' PyErr_SetString(PyExc_TypeError, "required field \\"right\\" missing from BinOp");'), (6255, ' else {'), (6273, ' 
if (lookup_attr_id(obj, &PyId_op, &tmp) < 0) {'), (6274, ' return 1;'), (6275, ' }'), (6276, ' if (tmp == NULL) {'), (6277, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from UnaryOp");'), (6278, ' return 1;'), (6279, ' }'), (6280, ' else {'), (6285, ' }'), (6286, ' if (lookup_attr_id(obj, &PyId_operand, &tmp) < 0) {'), (6287, ' return 1;'), (6288, ' }'), (6289, ' if (tmp == NULL) {'), (6290, ' PyErr_SetString(PyExc_TypeError, "required field \\"operand\\" missing from UnaryOp");'), (6293, ' else {'), (6311, ' if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) {'), (6312, ' return 1;'), (6313, ' }'), (6314, ' if (tmp == NULL) {'), (6315, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from Lambda");'), (6316, ' return 1;'), (6317, ' }'), (6318, ' else {'), (6323, ' }'), (6324, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (6325, ' return 1;'), (6326, ' }'), (6327, ' if (tmp == NULL) {'), (6328, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Lambda");'), (6331, ' else {'), (6350, ' if (lookup_attr_id(obj, &PyId_test, &tmp) < 0) {'), (6351, ' return 1;'), (6352, ' }'), (6353, ' if (tmp == NULL) {'), (6354, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from IfExp");'), (6355, ' return 1;'), (6356, ' }'), (6357, ' else {'), (6362, ' }'), (6363, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (6364, ' return 1;'), (6365, ' }'), (6366, ' if (tmp == NULL) {'), (6367, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from IfExp");'), (6370, ' else {'), (6375, ' }'), (6376, ' if (lookup_attr_id(obj, &PyId_orelse, &tmp) < 0) {'), (6379, ' if (tmp == NULL) {'), (6380, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from IfExp");'), (6381, ' return 1;'), (6382, ' }'), (6383, ' else {'), (6401, ' if (lookup_attr_id(obj, &PyId_keys, &tmp) < 0) {'), (6402, ' return 1;'), (6403, ' }'), (6404, ' if (tmp == NULL) {'), (6405, ' 
PyErr_SetString(PyExc_TypeError, "required field \\"keys\\" missing from Dict");'), (6406, ' return 1;'), (6407, ' }'), (6408, ' else {'), (6420, ' expr_ty val;'), (6421, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (6427, ' asdl_seq_SET(keys, i, val);'), (6430, ' }'), (6431, ' if (lookup_attr_id(obj, &PyId_values, &tmp) < 0) {'), (6434, ' if (tmp == NULL) {'), (6435, ' PyErr_SetString(PyExc_TypeError, "required field \\"values\\" missing from Dict");'), (6436, ' return 1;'), (6437, ' }'), (6438, ' else {'), (6450, ' expr_ty val;'), (6451, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (6457, ' asdl_seq_SET(values, i, val);'), (6472, ' if (lookup_attr_id(obj, &PyId_elts, &tmp) < 0) {'), (6473, ' return 1;'), (6474, ' }'), (6475, ' if (tmp == NULL) {'), (6476, ' PyErr_SetString(PyExc_TypeError, "required field \\"elts\\" missing from Set");'), (6477, ' return 1;'), (6478, ' }'), (6479, ' else {'), (6491, ' expr_ty val;'), (6492, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (6498, ' asdl_seq_SET(elts, i, val);'), (6514, ' if (lookup_attr_id(obj, &PyId_elt, &tmp) < 0) {'), (6515, ' return 1;'), (6516, ' }'), (6517, ' if (tmp == NULL) {'), (6518, ' PyErr_SetString(PyExc_TypeError, "required field \\"elt\\" missing from ListComp");'), (6519, ' return 1;'), (6520, ' }'), (6521, ' else {'), (6526, ' }'), (6527, ' if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) {'), (6528, ' return 1;'), (6529, ' }'), (6530, ' if (tmp == NULL) {'), (6531, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from ListComp");'), (6534, ' else {'), (6546, ' comprehension_ty val;'), (6547, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena);'), (6553, ' asdl_seq_SET(generators, i, val);'), (6569, ' if (lookup_attr_id(obj, &PyId_elt, &tmp) < 0) {'), (6570, ' return 1;'), (6571, ' }'), (6572, ' if (tmp == NULL) {'), (6573, ' PyErr_SetString(PyExc_TypeError, "required field \\"elt\\" missing from 
SetComp");'), (6574, ' return 1;'), (6575, ' }'), (6576, ' else {'), (6581, ' }'), (6582, ' if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) {'), (6583, ' return 1;'), (6584, ' }'), (6585, ' if (tmp == NULL) {'), (6586, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from SetComp");'), (6589, ' else {'), (6601, ' comprehension_ty val;'), (6602, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena);'), (6608, ' asdl_seq_SET(generators, i, val);'), (6625, ' if (lookup_attr_id(obj, &PyId_key, &tmp) < 0) {'), (6626, ' return 1;'), (6627, ' }'), (6628, ' if (tmp == NULL) {'), (6629, ' PyErr_SetString(PyExc_TypeError, "required field \\"key\\" missing from DictComp");'), (6630, ' return 1;'), (6631, ' }'), (6632, ' else {'), (6637, ' }'), (6638, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (6639, ' return 1;'), (6640, ' }'), (6641, ' if (tmp == NULL) {'), (6642, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from DictComp");'), (6645, ' else {'), (6650, ' }'), (6651, ' if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) {'), (6652, ' return 1;'), (6653, ' }'), (6654, ' if (tmp == NULL) {'), (6655, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from DictComp");'), (6658, ' else {'), (6670, ' comprehension_ty val;'), (6671, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena);'), (6677, ' asdl_seq_SET(generators, i, val);'), (6693, ' if (lookup_attr_id(obj, &PyId_elt, &tmp) < 0) {'), (6694, ' return 1;'), (6695, ' }'), (6696, ' if (tmp == NULL) {'), (6697, ' PyErr_SetString(PyExc_TypeError, "required field \\"elt\\" missing from GeneratorExp");'), (6698, ' return 1;'), (6699, ' }'), (6700, ' else {'), (6705, ' }'), (6706, ' if (lookup_attr_id(obj, &PyId_generators, &tmp) < 0) {'), (6707, ' return 1;'), (6708, ' }'), (6709, ' if (tmp == NULL) {'), (6710, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from 
GeneratorExp");'), (6713, ' else {'), (6725, ' comprehension_ty val;'), (6726, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena);'), (6732, ' asdl_seq_SET(generators, i, val);'), (6747, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (6748, ' return 1;'), (6749, ' }'), (6750, ' if (tmp == NULL) {'), (6751, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Await");'), (6752, ' return 1;'), (6753, ' }'), (6754, ' else {'), (6771, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (6772, ' return 1;'), (6773, ' }'), (6774, ' if (tmp == NULL || tmp == Py_None) {'), (6775, ' Py_CLEAR(tmp);'), (6776, ' value = NULL;'), (6777, ' }'), (6778, ' else {'), (6795, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (6796, ' return 1;'), (6797, ' }'), (6798, ' if (tmp == NULL) {'), (6799, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from YieldFrom");'), (6800, ' return 1;'), (6801, ' }'), (6802, ' else {'), (6821, ' if (lookup_attr_id(obj, &PyId_left, &tmp) < 0) {'), (6822, ' return 1;'), (6823, ' }'), (6824, ' if (tmp == NULL) {'), (6825, ' PyErr_SetString(PyExc_TypeError, "required field \\"left\\" missing from Compare");'), (6826, ' return 1;'), (6827, ' }'), (6828, ' else {'), (6833, ' }'), (6834, ' if (lookup_attr_id(obj, &PyId_ops, &tmp) < 0) {'), (6835, ' return 1;'), (6836, ' }'), (6837, ' if (tmp == NULL) {'), (6838, ' PyErr_SetString(PyExc_TypeError, "required field \\"ops\\" missing from Compare");'), (6841, ' else {'), (6853, ' cmpop_ty val;'), (6854, ' res = obj2ast_cmpop(PyList_GET_ITEM(tmp, i), &val, arena);'), (6860, ' asdl_seq_SET(ops, i, val);'), (6863, ' }'), (6864, ' if (lookup_attr_id(obj, &PyId_comparators, &tmp) < 0) {'), (6865, ' return 1;'), (6866, ' }'), (6867, ' if (tmp == NULL) {'), (6868, ' PyErr_SetString(PyExc_TypeError, "required field \\"comparators\\" missing from Compare");'), (6871, ' else {'), (6883, ' expr_ty val;'), (6884, ' res = 
obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (6890, ' asdl_seq_SET(comparators, i, val);'), (6907, ' if (lookup_attr_id(obj, &PyId_func, &tmp) < 0) {'), (6908, ' return 1;'), (6909, ' }'), (6910, ' if (tmp == NULL) {'), (6911, ' PyErr_SetString(PyExc_TypeError, "required field \\"func\\" missing from Call");'), (6912, ' return 1;'), (6913, ' }'), (6914, ' else {'), (6919, ' }'), (6920, ' if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) {'), (6921, ' return 1;'), (6922, ' }'), (6923, ' if (tmp == NULL) {'), (6924, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from Call");'), (6927, ' else {'), (6939, ' expr_ty val;'), (6940, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (6946, ' asdl_seq_SET(args, i, val);'), (6949, ' }'), (6950, ' if (lookup_attr_id(obj, &PyId_keywords, &tmp) < 0) {'), (6951, ' return 1;'), (6952, ' }'), (6953, ' if (tmp == NULL) {'), (6954, ' PyErr_SetString(PyExc_TypeError, "required field \\"keywords\\" missing from Call");'), (6957, ' else {'), (6969, ' keyword_ty val;'), (6970, ' res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena);'), (6976, ' asdl_seq_SET(keywords, i, val);'), (6991, ' if (lookup_attr_id(obj, &PyId_n, &tmp) < 0) {'), (6992, ' return 1;'), (6993, ' }'), (6994, ' if (tmp == NULL) {'), (6995, ' PyErr_SetString(PyExc_TypeError, "required field \\"n\\" missing from Num");'), (6996, ' return 1;'), (6997, ' }'), (6998, ' else {'), (7016, ' if (lookup_attr_id(obj, &PyId_s, &tmp) < 0) {'), (7017, ' return 1;'), (7018, ' }'), (7019, ' if (tmp == NULL) {'), (7020, ' PyErr_SetString(PyExc_TypeError, "required field \\"s\\" missing from Str");'), (7021, ' return 1;'), (7022, ' }'), (7023, ' else {'), (7053, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (7054, ' return 1;'), (7055, ' }'), (7056, ' if (tmp == NULL) {'), (7057, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from FormattedValue");'), (7058, ' return 1;'), (7059, ' }'), (7060, ' else {'), 
(7065, ' }'), (7066, ' if (lookup_attr_id(obj, &PyId_conversion, &tmp) < 0) {'), (7069, ' if (tmp == NULL || tmp == Py_None) {'), (7070, ' Py_CLEAR(tmp);'), (7071, ' conversion = 0;'), (7072, ' }'), (7073, ' else {'), (7079, ' if (lookup_attr_id(obj, &PyId_format_spec, &tmp) < 0) {'), (7080, ' return 1;'), (7081, ' }'), (7082, ' if (tmp == NULL || tmp == Py_None) {'), (7083, ' Py_CLEAR(tmp);'), (7084, ' format_spec = NULL;'), (7085, ' }'), (7086, ' else {'), (7104, ' if (lookup_attr_id(obj, &PyId_values, &tmp) < 0) {'), (7105, ' return 1;'), (7106, ' }'), (7107, ' if (tmp == NULL) {'), (7108, ' PyErr_SetString(PyExc_TypeError, "required field \\"values\\" missing from JoinedStr");'), (7109, ' return 1;'), (7110, ' }'), (7111, ' else {'), (7123, ' expr_ty val;'), (7124, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (7130, ' asdl_seq_SET(values, i, val);'), (7145, ' if (lookup_attr_id(obj, &PyId_s, &tmp) < 0) {'), (7146, ' return 1;'), (7147, ' }'), (7148, ' if (tmp == NULL) {'), (7149, ' PyErr_SetString(PyExc_TypeError, "required field \\"s\\" missing from Bytes");'), (7150, ' return 1;'), (7151, ' }'), (7152, ' else {'), (7169, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (7170, ' return 1;'), (7171, ' }'), (7172, ' if (tmp == NULL) {'), (7173, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from NameConstant");'), (7174, ' return 1;'), (7175, ' }'), (7176, ' else {'), (7203, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (7204, ' return 1;'), (7205, ' }'), (7206, ' if (tmp == NULL) {'), (7207, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Constant");'), (7208, ' return 1;'), (7209, ' }'), (7210, ' else {'), (7229, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (7230, ' return 1;'), (7231, ' }'), (7232, ' if (tmp == NULL) {'), (7233, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Attribute");'), (7234, ' return 1;'), (7235, ' }'), (7236, ' 
else {'), (7241, ' }'), (7242, ' if (lookup_attr_id(obj, &PyId_attr, &tmp) < 0) {'), (7243, ' return 1;'), (7244, ' }'), (7245, ' if (tmp == NULL) {'), (7246, ' PyErr_SetString(PyExc_TypeError, "required field \\"attr\\" missing from Attribute");'), (7249, ' else {'), (7254, ' }'), (7255, ' if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) {'), (7256, ' return 1;'), (7257, ' }'), (7258, ' if (tmp == NULL) {'), (7259, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Attribute");'), (7262, ' else {'), (7281, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (7282, ' return 1;'), (7283, ' }'), (7284, ' if (tmp == NULL) {'), (7285, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Subscript");'), (7286, ' return 1;'), (7287, ' }'), (7288, ' else {'), (7293, ' }'), (7294, ' if (lookup_attr_id(obj, &PyId_slice, &tmp) < 0) {'), (7295, ' return 1;'), (7296, ' }'), (7297, ' if (tmp == NULL) {'), (7298, ' PyErr_SetString(PyExc_TypeError, "required field \\"slice\\" missing from Subscript");'), (7301, ' else {'), (7306, ' }'), (7307, ' if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) {'), (7310, ' if (tmp == NULL) {'), (7311, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Subscript");'), (7312, ' return 1;'), (7313, ' }'), (7314, ' else {'), (7332, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (7333, ' return 1;'), (7334, ' }'), (7335, ' if (tmp == NULL) {'), (7336, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Starred");'), (7337, ' return 1;'), (7338, ' }'), (7339, ' else {'), (7344, ' }'), (7345, ' if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) {'), (7348, ' if (tmp == NULL) {'), (7349, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Starred");'), (7350, ' return 1;'), (7351, ' }'), (7352, ' else {'), (7370, ' if (lookup_attr_id(obj, &PyId_id, &tmp) < 0) {'), (7371, ' return 1;'), (7372, ' }'), (7373, ' if (tmp == NULL) {'), (7374, ' 
PyErr_SetString(PyExc_TypeError, "required field \\"id\\" missing from Name");'), (7375, ' return 1;'), (7376, ' }'), (7377, ' else {'), (7382, ' }'), (7383, ' if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) {'), (7384, ' return 1;'), (7385, ' }'), (7386, ' if (tmp == NULL) {'), (7387, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Name");'), (7390, ' else {'), (7408, ' if (lookup_attr_id(obj, &PyId_elts, &tmp) < 0) {'), (7409, ' return 1;'), (7410, ' }'), (7411, ' if (tmp == NULL) {'), (7412, ' PyErr_SetString(PyExc_TypeError, "required field \\"elts\\" missing from List");'), (7413, ' return 1;'), (7414, ' }'), (7415, ' else {'), (7427, ' expr_ty val;'), (7428, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (7434, ' asdl_seq_SET(elts, i, val);'), (7437, ' }'), (7438, ' if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) {'), (7439, ' return 1;'), (7440, ' }'), (7441, ' if (tmp == NULL) {'), (7442, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from List");'), (7445, ' else {'), (7463, ' if (lookup_attr_id(obj, &PyId_elts, &tmp) < 0) {'), (7464, ' return 1;'), (7465, ' }'), (7466, ' if (tmp == NULL) {'), (7467, ' PyErr_SetString(PyExc_TypeError, "required field \\"elts\\" missing from Tuple");'), (7468, ' return 1;'), (7469, ' }'), (7470, ' else {'), (7482, ' expr_ty val;'), (7483, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (7489, ' asdl_seq_SET(elts, i, val);'), (7492, ' }'), (7493, ' if (lookup_attr_id(obj, &PyId_ctx, &tmp) < 0) {'), (7494, ' return 1;'), (7495, ' }'), (7496, ' if (tmp == NULL) {'), (7497, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Tuple");'), (7500, ' else {'), (7595, ' if (lookup_attr_id(obj, &PyId_lower, &tmp) < 0) {'), (7596, ' return 1;'), (7597, ' }'), (7598, ' if (tmp == NULL || tmp == Py_None) {'), (7599, ' Py_CLEAR(tmp);'), (7600, ' lower = NULL;'), (7601, ' }'), (7602, ' else {'), (7608, ' if (lookup_attr_id(obj, &PyId_upper, 
&tmp) < 0) {'), (7609, ' return 1;'), (7610, ' }'), (7611, ' if (tmp == NULL || tmp == Py_None) {'), (7612, ' Py_CLEAR(tmp);'), (7613, ' upper = NULL;'), (7614, ' }'), (7615, ' else {'), (7621, ' if (lookup_attr_id(obj, &PyId_step, &tmp) < 0) {'), (7622, ' return 1;'), (7623, ' }'), (7624, ' if (tmp == NULL || tmp == Py_None) {'), (7625, ' Py_CLEAR(tmp);'), (7626, ' step = NULL;'), (7627, ' }'), (7628, ' else {'), (7645, ' if (lookup_attr_id(obj, &PyId_dims, &tmp) < 0) {'), (7646, ' return 1;'), (7647, ' }'), (7648, ' if (tmp == NULL) {'), (7649, ' PyErr_SetString(PyExc_TypeError, "required field \\"dims\\" missing from ExtSlice");'), (7650, ' return 1;'), (7651, ' }'), (7652, ' else {'), (7664, ' slice_ty val;'), (7665, ' res = obj2ast_slice(PyList_GET_ITEM(tmp, i), &val, arena);'), (7671, ' asdl_seq_SET(dims, i, val);'), (7686, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (7687, ' return 1;'), (7688, ' }'), (7689, ' if (tmp == NULL) {'), (7690, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Index");'), (7691, ' return 1;'), (7692, ' }'), (7693, ' else {'), (7991, ' if (lookup_attr_id(obj, &PyId_target, &tmp) < 0) {'), (7992, ' return 1;'), (7993, ' }'), (7994, ' if (tmp == NULL) {'), (7995, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from comprehension");'), (7996, ' return 1;'), (7997, ' }'), (7998, ' else {'), (8003, ' }'), (8004, ' if (lookup_attr_id(obj, &PyId_iter, &tmp) < 0) {'), (8005, ' return 1;'), (8006, ' }'), (8007, ' if (tmp == NULL) {'), (8008, ' PyErr_SetString(PyExc_TypeError, "required field \\"iter\\" missing from comprehension");'), (8011, ' else {'), (8016, ' }'), (8017, ' if (lookup_attr_id(obj, &PyId_ifs, &tmp) < 0) {'), (8018, ' return 1;'), (8019, ' }'), (8020, ' if (tmp == NULL) {'), (8021, ' PyErr_SetString(PyExc_TypeError, "required field \\"ifs\\" missing from comprehension");'), (8024, ' else {'), (8036, ' expr_ty val;'), (8037, ' res = 
obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (8043, ' asdl_seq_SET(ifs, i, val);'), (8046, ' }'), (8047, ' if (lookup_attr_id(obj, &PyId_is_async, &tmp) < 0) {'), (8048, ' return 1;'), (8049, ' }'), (8050, ' if (tmp == NULL) {'), (8051, ' PyErr_SetString(PyExc_TypeError, "required field \\"is_async\\" missing from comprehension");'), (8054, ' else {'), (8080, ' if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) {'), (8081, ' return 1;'), (8082, ' }'), (8083, ' if (tmp == NULL) {'), (8084, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from excepthandler");'), (8085, ' return 1;'), (8086, ' }'), (8087, ' else {'), (8092, ' }'), (8093, ' if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) {'), (8094, ' return 1;'), (8095, ' }'), (8096, ' if (tmp == NULL) {'), (8097, ' PyErr_SetString(PyExc_TypeError, "required field \\"col_offset\\" missing from excepthandler");'), (8100, ' else {'), (8115, ' if (lookup_attr_id(obj, &PyId_type, &tmp) < 0) {'), (8116, ' return 1;'), (8117, ' }'), (8118, ' if (tmp == NULL || tmp == Py_None) {'), (8119, ' Py_CLEAR(tmp);'), (8120, ' type = NULL;'), (8121, ' }'), (8122, ' else {'), (8128, ' if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) {'), (8129, ' return 1;'), (8130, ' }'), (8131, ' if (tmp == NULL || tmp == Py_None) {'), (8132, ' Py_CLEAR(tmp);'), (8133, ' name = NULL;'), (8134, ' }'), (8135, ' else {'), (8141, ' if (lookup_attr_id(obj, &PyId_body, &tmp) < 0) {'), (8142, ' return 1;'), (8143, ' }'), (8144, ' if (tmp == NULL) {'), (8145, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from ExceptHandler");'), (8146, ' return 1;'), (8147, ' }'), (8148, ' else {'), (8160, ' stmt_ty val;'), (8161, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);'), (8167, ' asdl_seq_SET(body, i, val);'), (8193, ' if (lookup_attr_id(obj, &PyId_args, &tmp) < 0) {'), (8194, ' return 1;'), (8195, ' }'), (8196, ' if (tmp == NULL) {'), (8197, ' PyErr_SetString(PyExc_TypeError, "required field 
\\"args\\" missing from arguments");'), (8198, ' return 1;'), (8199, ' }'), (8200, ' else {'), (8212, ' arg_ty val;'), (8213, ' res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena);'), (8219, ' asdl_seq_SET(args, i, val);'), (8222, ' }'), (8223, ' if (lookup_attr_id(obj, &PyId_vararg, &tmp) < 0) {'), (8226, ' if (tmp == NULL || tmp == Py_None) {'), (8227, ' Py_CLEAR(tmp);'), (8228, ' vararg = NULL;'), (8229, ' }'), (8230, ' else {'), (8236, ' if (lookup_attr_id(obj, &PyId_kwonlyargs, &tmp) < 0) {'), (8237, ' return 1;'), (8238, ' }'), (8239, ' if (tmp == NULL) {'), (8240, ' PyErr_SetString(PyExc_TypeError, "required field \\"kwonlyargs\\" missing from arguments");'), (8241, ' return 1;'), (8242, ' }'), (8243, ' else {'), (8255, ' arg_ty val;'), (8256, ' res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena);'), (8262, ' asdl_seq_SET(kwonlyargs, i, val);'), (8265, ' }'), (8266, ' if (lookup_attr_id(obj, &PyId_kw_defaults, &tmp) < 0) {'), (8267, ' return 1;'), (8268, ' }'), (8269, ' if (tmp == NULL) {'), (8270, ' PyErr_SetString(PyExc_TypeError, "required field \\"kw_defaults\\" missing from arguments");'), (8273, ' else {'), (8285, ' expr_ty val;'), (8286, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (8292, ' asdl_seq_SET(kw_defaults, i, val);'), (8295, ' }'), (8296, ' if (lookup_attr_id(obj, &PyId_kwarg, &tmp) < 0) {'), (8299, ' if (tmp == NULL || tmp == Py_None) {'), (8300, ' Py_CLEAR(tmp);'), (8301, ' kwarg = NULL;'), (8302, ' }'), (8303, ' else {'), (8309, ' if (lookup_attr_id(obj, &PyId_defaults, &tmp) < 0) {'), (8310, ' return 1;'), (8311, ' }'), (8312, ' if (tmp == NULL) {'), (8313, ' PyErr_SetString(PyExc_TypeError, "required field \\"defaults\\" missing from arguments");'), (8314, ' return 1;'), (8315, ' }'), (8316, ' else {'), (8328, ' expr_ty val;'), (8329, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (8335, ' asdl_seq_SET(defaults, i, val);'), (8357, ' if (lookup_attr_id(obj, &PyId_arg, &tmp) < 0) {'), (8358, ' 
return 1;'), (8359, ' }'), (8360, ' if (tmp == NULL) {'), (8361, ' PyErr_SetString(PyExc_TypeError, "required field \\"arg\\" missing from arg");'), (8362, ' return 1;'), (8363, ' }'), (8364, ' else {'), (8369, ' }'), (8370, ' if (lookup_attr_id(obj, &PyId_annotation, &tmp) < 0) {'), (8373, ' if (tmp == NULL || tmp == Py_None) {'), (8374, ' Py_CLEAR(tmp);'), (8375, ' annotation = NULL;'), (8376, ' }'), (8377, ' else {'), (8383, ' if (lookup_attr_id(obj, &PyId_type_comment, &tmp) < 0) {'), (8384, ' return 1;'), (8385, ' }'), (8386, ' if (tmp == NULL || tmp == Py_None) {'), (8387, ' Py_CLEAR(tmp);'), (8388, ' type_comment = NULL;'), (8389, ' }'), (8390, ' else {'), (8396, ' if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) {'), (8397, ' return 1;'), (8398, ' }'), (8399, ' if (tmp == NULL) {'), (8400, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from arg");'), (8401, ' return 1;'), (8402, ' }'), (8403, ' else {'), (8408, ' }'), (8409, ' if (lookup_attr_id(obj, &PyId_col_offset, &tmp) < 0) {'), (8410, ' return 1;'), (8411, ' }'), (8412, ' if (tmp == NULL) {'), (8413, ' PyErr_SetString(PyExc_TypeError, "required field \\"col_offset\\" missing from arg");'), (8416, ' else {'), (8436, ' if (lookup_attr_id(obj, &PyId_arg, &tmp) < 0) {'), (8437, ' return 1;'), (8438, ' }'), (8439, ' if (tmp == NULL || tmp == Py_None) {'), (8440, ' Py_CLEAR(tmp);'), (8441, ' arg = NULL;'), (8442, ' }'), (8443, ' else {'), (8449, ' if (lookup_attr_id(obj, &PyId_value, &tmp) < 0) {'), (8450, ' return 1;'), (8451, ' }'), (8452, ' if (tmp == NULL) {'), (8453, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from keyword");'), (8454, ' return 1;'), (8455, ' }'), (8456, ' else {'), (8476, ' if (lookup_attr_id(obj, &PyId_name, &tmp) < 0) {'), (8477, ' return 1;'), (8478, ' }'), (8479, ' if (tmp == NULL) {'), (8480, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from alias");'), (8481, ' return 1;'), (8482, ' }'), (8483, ' else 
{'), (8488, ' }'), (8489, ' if (lookup_attr_id(obj, &PyId_asname, &tmp) < 0) {'), (8492, ' if (tmp == NULL || tmp == Py_None) {'), (8493, ' Py_CLEAR(tmp);'), (8494, ' asname = NULL;'), (8495, ' }'), (8496, ' else {'), (8516, ' if (lookup_attr_id(obj, &PyId_context_expr, &tmp) < 0) {'), (8517, ' return 1;'), (8518, ' }'), (8519, ' if (tmp == NULL) {'), (8520, ' PyErr_SetString(PyExc_TypeError, "required field \\"context_expr\\" missing from withitem");'), (8521, ' return 1;'), (8522, ' }'), (8523, ' else {'), (8528, ' }'), (8529, ' if (lookup_attr_id(obj, &PyId_optional_vars, &tmp) < 0) {'), (8532, ' if (tmp == NULL || tmp == Py_None) {'), (8533, ' Py_CLEAR(tmp);'), (8534, ' optional_vars = NULL;'), (8535, ' }'), (8536, ' else {'), (8567, ' if (lookup_attr_id(obj, &PyId_lineno, &tmp) < 0) {'), (8568, ' return 1;'), (8569, ' }'), (8570, ' if (tmp == NULL) {'), (8571, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from TypeIgnore");'), (8572, ' return 1;'), (8573, ' }'), (8574, ' else {'), (8597, 'static struct PyModuleDef _astmodule = {'), (8598, ' PyModuleDef_HEAD_INIT, "_ast3", NULL, 0, ast3_methods'), (8605, ' m = PyModule_Create(&_astmodule);')], 'deleted': [(551, 'static void'), (560, ' _Py_IDENTIFIER(_fields);'), (564, ' fields = _PyObject_GetAttrId((PyObject*)Py_TYPE(self), &PyId__fields);'), (565, ' if (!fields)'), (566, ' PyErr_Clear();'), (573, ' if (PyTuple_GET_SIZE(args) > 0) {'), (574, ' if (numfields != PyTuple_GET_SIZE(args)) {'), (575, ' PyErr_Format(PyExc_TypeError, "%.400s constructor takes %s"'), (576, ' "%zd positional argument%s",'), (577, ' Py_TYPE(self)->tp_name,'), (578, ' numfields == 0 ? "" : "either 0 or ",'), (579, ' numfields, numfields == 1 ? 
"" : "s");'), (583, ' for (i = 0; i < PyTuple_GET_SIZE(args); i++) {'), (584, ' /* cannot be reached when fields is NULL */'), (585, ' PyObject *name = PySequence_GetItem(fields, i);'), (586, ' if (!name) {'), (587, ' res = -1;'), (588, ' goto cleanup;'), (589, ' }'), (590, ' res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i));'), (591, ' Py_DECREF(name);'), (592, ' if (res < 0)'), (593, ' goto cleanup;'), (594, ' }'), (613, ' PyObject *res;'), (615, ' PyObject *dict = _PyObject_GetAttrId(self, &PyId___dict__);'), (616, ' if (dict == NULL) {'), (617, ' if (PyErr_ExceptionMatches(PyExc_AttributeError))'), (618, ' PyErr_Clear();'), (619, ' else'), (620, ' return NULL;'), (623, ' res = Py_BuildValue("O()O", Py_TYPE(self), dict);'), (624, ' Py_DECREF(dict);'), (625, ' return res;'), (697, ' result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){sOss}",'), (698, ' type, base, "_fields", fnames, "__module__", "_ast3");'), (706, ' _Py_IDENTIFIER(_attributes);'), (852, ' PyDict_SetItemString(d, "_fields", empty_tuple) < 0 ||'), (853, ' PyDict_SetItemString(d, "_attributes", empty_tuple) < 0) {'), (861, 'static int exists_not_none(PyObject *obj, _Py_Identifier *id)'), (862, '{'), (863, ' int isnone;'), (864, ' PyObject *attr = _PyObject_GetAttrId(obj, id);'), (865, ' if (!attr) {'), (866, ' PyErr_Clear();'), (867, ' return 0;'), (868, ' }'), (869, ' isnone = attr == Py_None;'), (870, ' Py_DECREF(attr);'), (871, ' return !isnone;'), (872, '}'), (873, ''), (2667, ' Py_INCREF(Py_None);'), (2668, ' return Py_None;'), (2741, ' Py_INCREF(Py_None);'), (2742, ' return Py_None;'), (3201, ' Py_INCREF(Py_None);'), (3202, ' return Py_None;'), (3669, ' Py_INCREF(Py_None);'), (3670, ' return Py_None;'), (3848, ' Py_INCREF(Py_None);'), (3849, ' return Py_None;'), (3887, ' Py_INCREF(Py_None);'), (3888, ' return Py_None;'), (3935, ' Py_INCREF(Py_None);'), (3936, ' return Py_None;'), (3984, ' Py_INCREF(Py_None);'), (3985, ' return Py_None;'), (4028, ' Py_INCREF(Py_None);'), 
(4029, ' return Py_None;'), (4057, ' Py_INCREF(Py_None);'), (4058, ' return Py_None;'), (4086, ' Py_INCREF(Py_None);'), (4087, ' return Py_None;'), (4115, ' Py_INCREF(Py_None);'), (4116, ' return Py_None;'), (4157, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (4161, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (4162, ' if (tmp == NULL) goto failed;'), (4171, ' stmt_ty value;'), (4172, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (4178, ' asdl_seq_SET(body, i, value);'), (4181, ' } else {'), (4182, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Module");'), (4185, ' if (_PyObject_HasAttrId(obj, &PyId_type_ignores)) {'), (4189, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_ignores);'), (4190, ' if (tmp == NULL) goto failed;'), (4199, ' type_ignore_ty value;'), (4200, ' res = obj2ast_type_ignore(PyList_GET_ITEM(tmp, i), &value, arena);'), (4206, ' asdl_seq_SET(type_ignores, i, value);'), (4209, ' } else {'), (4210, ' PyErr_SetString(PyExc_TypeError, "required field \\"type_ignores\\" missing from Module");'), (4211, ' return 1;'), (4224, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (4228, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (4229, ' if (tmp == NULL) goto failed;'), (4238, ' stmt_ty value;'), (4239, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (4245, ' asdl_seq_SET(body, i, value);'), (4248, ' } else {'), (4249, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Interactive");'), (4250, ' return 1;'), (4263, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (4265, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (4266, ' if (tmp == NULL) goto failed;'), (4270, ' } else {'), (4271, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Expression");'), (4272, ' return 1;'), (4286, ' if (_PyObject_HasAttrId(obj, &PyId_argtypes)) {'), (4290, ' tmp = _PyObject_GetAttrId(obj, &PyId_argtypes);'), (4291, ' if (tmp == NULL) goto failed;'), 
(4300, ' expr_ty value;'), (4301, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (4307, ' asdl_seq_SET(argtypes, i, value);'), (4310, ' } else {'), (4311, ' PyErr_SetString(PyExc_TypeError, "required field \\"argtypes\\" missing from FunctionType");'), (4314, ' if (_PyObject_HasAttrId(obj, &PyId_returns)) {'), (4316, ' tmp = _PyObject_GetAttrId(obj, &PyId_returns);'), (4317, ' if (tmp == NULL) goto failed;'), (4321, ' } else {'), (4322, ' PyErr_SetString(PyExc_TypeError, "required field \\"returns\\" missing from FunctionType");'), (4323, ' return 1;'), (4336, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (4340, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (4341, ' if (tmp == NULL) goto failed;'), (4350, ' stmt_ty value;'), (4351, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (4357, ' asdl_seq_SET(body, i, value);'), (4360, ' } else {'), (4361, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Suite");'), (4362, ' return 1;'), (4388, ' if (_PyObject_HasAttrId(obj, &PyId_lineno)) {'), (4390, ' tmp = _PyObject_GetAttrId(obj, &PyId_lineno);'), (4391, ' if (tmp == NULL) goto failed;'), (4395, ' } else {'), (4396, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from stmt");'), (4399, ' if (_PyObject_HasAttrId(obj, &PyId_col_offset)) {'), (4401, ' tmp = _PyObject_GetAttrId(obj, &PyId_col_offset);'), (4402, ' if (tmp == NULL) goto failed;'), (4406, ' } else {'), (4407, ' PyErr_SetString(PyExc_TypeError, "required field \\"col_offset\\" missing from stmt");'), (4408, ' return 1;'), (4422, ' if (_PyObject_HasAttrId(obj, &PyId_name)) {'), (4424, ' tmp = _PyObject_GetAttrId(obj, &PyId_name);'), (4425, ' if (tmp == NULL) goto failed;'), (4429, ' } else {'), (4430, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from FunctionDef");'), (4433, ' if (_PyObject_HasAttrId(obj, &PyId_args)) {'), (4435, ' tmp = _PyObject_GetAttrId(obj, &PyId_args);'), (4436, ' if (tmp == 
NULL) goto failed;'), (4440, ' } else {'), (4441, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from FunctionDef");'), (4444, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (4448, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (4449, ' if (tmp == NULL) goto failed;'), (4458, ' stmt_ty value;'), (4459, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (4465, ' asdl_seq_SET(body, i, value);'), (4468, ' } else {'), (4469, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from FunctionDef");'), (4472, ' if (_PyObject_HasAttrId(obj, &PyId_decorator_list)) {'), (4476, ' tmp = _PyObject_GetAttrId(obj, &PyId_decorator_list);'), (4477, ' if (tmp == NULL) goto failed;'), (4486, ' expr_ty value;'), (4487, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (4493, ' asdl_seq_SET(decorator_list, i, value);'), (4496, ' } else {'), (4497, ' PyErr_SetString(PyExc_TypeError, "required field \\"decorator_list\\" missing from FunctionDef");'), (4500, ' if (exists_not_none(obj, &PyId_returns)) {'), (4502, ' tmp = _PyObject_GetAttrId(obj, &PyId_returns);'), (4503, ' if (tmp == NULL) goto failed;'), (4507, ' } else {'), (4508, ' returns = NULL;'), (4510, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (4512, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (4513, ' if (tmp == NULL) goto failed;'), (4517, ' } else {'), (4518, ' type_comment = NULL;'), (4537, ' if (_PyObject_HasAttrId(obj, &PyId_name)) {'), (4539, ' tmp = _PyObject_GetAttrId(obj, &PyId_name);'), (4540, ' if (tmp == NULL) goto failed;'), (4544, ' } else {'), (4545, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from AsyncFunctionDef");'), (4548, ' if (_PyObject_HasAttrId(obj, &PyId_args)) {'), (4550, ' tmp = _PyObject_GetAttrId(obj, &PyId_args);'), (4551, ' if (tmp == NULL) goto failed;'), (4555, ' } else {'), (4556, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from AsyncFunctionDef");'), 
(4559, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (4563, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (4564, ' if (tmp == NULL) goto failed;'), (4573, ' stmt_ty value;'), (4574, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (4580, ' asdl_seq_SET(body, i, value);'), (4583, ' } else {'), (4584, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from AsyncFunctionDef");'), (4587, ' if (_PyObject_HasAttrId(obj, &PyId_decorator_list)) {'), (4591, ' tmp = _PyObject_GetAttrId(obj, &PyId_decorator_list);'), (4592, ' if (tmp == NULL) goto failed;'), (4601, ' expr_ty value;'), (4602, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (4608, ' asdl_seq_SET(decorator_list, i, value);'), (4611, ' } else {'), (4612, ' PyErr_SetString(PyExc_TypeError, "required field \\"decorator_list\\" missing from AsyncFunctionDef");'), (4615, ' if (exists_not_none(obj, &PyId_returns)) {'), (4617, ' tmp = _PyObject_GetAttrId(obj, &PyId_returns);'), (4618, ' if (tmp == NULL) goto failed;'), (4622, ' } else {'), (4623, ' returns = NULL;'), (4625, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (4627, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (4628, ' if (tmp == NULL) goto failed;'), (4632, ' } else {'), (4633, ' type_comment = NULL;'), (4651, ' if (_PyObject_HasAttrId(obj, &PyId_name)) {'), (4653, ' tmp = _PyObject_GetAttrId(obj, &PyId_name);'), (4654, ' if (tmp == NULL) goto failed;'), (4658, ' } else {'), (4659, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from ClassDef");'), (4662, ' if (_PyObject_HasAttrId(obj, &PyId_bases)) {'), (4666, ' tmp = _PyObject_GetAttrId(obj, &PyId_bases);'), (4667, ' if (tmp == NULL) goto failed;'), (4676, ' expr_ty value;'), (4677, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (4683, ' asdl_seq_SET(bases, i, value);'), (4686, ' } else {'), (4687, ' PyErr_SetString(PyExc_TypeError, "required field \\"bases\\" missing from ClassDef");'), 
(4690, ' if (_PyObject_HasAttrId(obj, &PyId_keywords)) {'), (4694, ' tmp = _PyObject_GetAttrId(obj, &PyId_keywords);'), (4695, ' if (tmp == NULL) goto failed;'), (4704, ' keyword_ty value;'), (4705, ' res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &value, arena);'), (4711, ' asdl_seq_SET(keywords, i, value);'), (4714, ' } else {'), (4715, ' PyErr_SetString(PyExc_TypeError, "required field \\"keywords\\" missing from ClassDef");'), (4718, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (4722, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (4723, ' if (tmp == NULL) goto failed;'), (4732, ' stmt_ty value;'), (4733, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (4739, ' asdl_seq_SET(body, i, value);'), (4742, ' } else {'), (4743, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from ClassDef");'), (4746, ' if (_PyObject_HasAttrId(obj, &PyId_decorator_list)) {'), (4750, ' tmp = _PyObject_GetAttrId(obj, &PyId_decorator_list);'), (4751, ' if (tmp == NULL) goto failed;'), (4760, ' expr_ty value;'), (4761, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (4767, ' asdl_seq_SET(decorator_list, i, value);'), (4770, ' } else {'), (4771, ' PyErr_SetString(PyExc_TypeError, "required field \\"decorator_list\\" missing from ClassDef");'), (4772, ' return 1;'), (4786, ' if (exists_not_none(obj, &PyId_value)) {'), (4788, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (4789, ' if (tmp == NULL) goto failed;'), (4793, ' } else {'), (4794, ' value = NULL;'), (4807, ' if (_PyObject_HasAttrId(obj, &PyId_targets)) {'), (4811, ' tmp = _PyObject_GetAttrId(obj, &PyId_targets);'), (4812, ' if (tmp == NULL) goto failed;'), (4821, ' expr_ty value;'), (4822, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (4828, ' asdl_seq_SET(targets, i, value);'), (4831, ' } else {'), (4832, ' PyErr_SetString(PyExc_TypeError, "required field \\"targets\\" missing from Delete");'), (4833, ' return 1;'), (4848, ' if 
(_PyObject_HasAttrId(obj, &PyId_targets)) {'), (4852, ' tmp = _PyObject_GetAttrId(obj, &PyId_targets);'), (4853, ' if (tmp == NULL) goto failed;'), (4862, ' expr_ty value;'), (4863, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (4869, ' asdl_seq_SET(targets, i, value);'), (4872, ' } else {'), (4873, ' PyErr_SetString(PyExc_TypeError, "required field \\"targets\\" missing from Assign");'), (4876, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (4878, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (4879, ' if (tmp == NULL) goto failed;'), (4883, ' } else {'), (4884, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Assign");'), (4887, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (4889, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (4890, ' if (tmp == NULL) goto failed;'), (4894, ' } else {'), (4895, ' type_comment = NULL;'), (4910, ' if (_PyObject_HasAttrId(obj, &PyId_target)) {'), (4912, ' tmp = _PyObject_GetAttrId(obj, &PyId_target);'), (4913, ' if (tmp == NULL) goto failed;'), (4917, ' } else {'), (4918, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from AugAssign");'), (4921, ' if (_PyObject_HasAttrId(obj, &PyId_op)) {'), (4923, ' tmp = _PyObject_GetAttrId(obj, &PyId_op);'), (4924, ' if (tmp == NULL) goto failed;'), (4928, ' } else {'), (4929, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from AugAssign");'), (4932, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (4934, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (4935, ' if (tmp == NULL) goto failed;'), (4939, ' } else {'), (4940, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from AugAssign");'), (4941, ' return 1;'), (4957, ' if (_PyObject_HasAttrId(obj, &PyId_target)) {'), (4959, ' tmp = _PyObject_GetAttrId(obj, &PyId_target);'), (4960, ' if (tmp == NULL) goto failed;'), (4964, ' } else {'), (4965, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" 
missing from AnnAssign");'), (4968, ' if (_PyObject_HasAttrId(obj, &PyId_annotation)) {'), (4970, ' tmp = _PyObject_GetAttrId(obj, &PyId_annotation);'), (4971, ' if (tmp == NULL) goto failed;'), (4975, ' } else {'), (4976, ' PyErr_SetString(PyExc_TypeError, "required field \\"annotation\\" missing from AnnAssign");'), (4979, ' if (exists_not_none(obj, &PyId_value)) {'), (4981, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (4982, ' if (tmp == NULL) goto failed;'), (4986, ' } else {'), (4987, ' value = NULL;'), (4989, ' if (_PyObject_HasAttrId(obj, &PyId_simple)) {'), (4991, ' tmp = _PyObject_GetAttrId(obj, &PyId_simple);'), (4992, ' if (tmp == NULL) goto failed;'), (4996, ' } else {'), (4997, ' PyErr_SetString(PyExc_TypeError, "required field \\"simple\\" missing from AnnAssign");'), (4998, ' return 1;'), (5016, ' if (_PyObject_HasAttrId(obj, &PyId_target)) {'), (5018, ' tmp = _PyObject_GetAttrId(obj, &PyId_target);'), (5019, ' if (tmp == NULL) goto failed;'), (5023, ' } else {'), (5024, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from For");'), (5027, ' if (_PyObject_HasAttrId(obj, &PyId_iter)) {'), (5029, ' tmp = _PyObject_GetAttrId(obj, &PyId_iter);'), (5030, ' if (tmp == NULL) goto failed;'), (5034, ' } else {'), (5035, ' PyErr_SetString(PyExc_TypeError, "required field \\"iter\\" missing from For");'), (5038, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (5042, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (5043, ' if (tmp == NULL) goto failed;'), (5052, ' stmt_ty value;'), (5053, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5059, ' asdl_seq_SET(body, i, value);'), (5062, ' } else {'), (5063, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from For");'), (5066, ' if (_PyObject_HasAttrId(obj, &PyId_orelse)) {'), (5070, ' tmp = _PyObject_GetAttrId(obj, &PyId_orelse);'), (5071, ' if (tmp == NULL) goto failed;'), (5080, ' stmt_ty value;'), (5081, ' res = 
obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5087, ' asdl_seq_SET(orelse, i, value);'), (5090, ' } else {'), (5091, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from For");'), (5094, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (5096, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (5097, ' if (tmp == NULL) goto failed;'), (5101, ' } else {'), (5102, ' type_comment = NULL;'), (5120, ' if (_PyObject_HasAttrId(obj, &PyId_target)) {'), (5122, ' tmp = _PyObject_GetAttrId(obj, &PyId_target);'), (5123, ' if (tmp == NULL) goto failed;'), (5127, ' } else {'), (5128, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from AsyncFor");'), (5131, ' if (_PyObject_HasAttrId(obj, &PyId_iter)) {'), (5133, ' tmp = _PyObject_GetAttrId(obj, &PyId_iter);'), (5134, ' if (tmp == NULL) goto failed;'), (5138, ' } else {'), (5139, ' PyErr_SetString(PyExc_TypeError, "required field \\"iter\\" missing from AsyncFor");'), (5142, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (5146, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (5147, ' if (tmp == NULL) goto failed;'), (5156, ' stmt_ty value;'), (5157, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5163, ' asdl_seq_SET(body, i, value);'), (5166, ' } else {'), (5167, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from AsyncFor");'), (5170, ' if (_PyObject_HasAttrId(obj, &PyId_orelse)) {'), (5174, ' tmp = _PyObject_GetAttrId(obj, &PyId_orelse);'), (5175, ' if (tmp == NULL) goto failed;'), (5184, ' stmt_ty value;'), (5185, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5191, ' asdl_seq_SET(orelse, i, value);'), (5194, ' } else {'), (5195, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from AsyncFor");'), (5198, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (5200, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (5201, ' if (tmp == NULL) goto failed;'), (5205, ' } 
else {'), (5206, ' type_comment = NULL;'), (5222, ' if (_PyObject_HasAttrId(obj, &PyId_test)) {'), (5224, ' tmp = _PyObject_GetAttrId(obj, &PyId_test);'), (5225, ' if (tmp == NULL) goto failed;'), (5229, ' } else {'), (5230, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from While");'), (5233, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (5237, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (5238, ' if (tmp == NULL) goto failed;'), (5247, ' stmt_ty value;'), (5248, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5254, ' asdl_seq_SET(body, i, value);'), (5257, ' } else {'), (5258, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from While");'), (5261, ' if (_PyObject_HasAttrId(obj, &PyId_orelse)) {'), (5265, ' tmp = _PyObject_GetAttrId(obj, &PyId_orelse);'), (5266, ' if (tmp == NULL) goto failed;'), (5275, ' stmt_ty value;'), (5276, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5282, ' asdl_seq_SET(orelse, i, value);'), (5285, ' } else {'), (5286, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from While");'), (5287, ' return 1;'), (5302, ' if (_PyObject_HasAttrId(obj, &PyId_test)) {'), (5304, ' tmp = _PyObject_GetAttrId(obj, &PyId_test);'), (5305, ' if (tmp == NULL) goto failed;'), (5309, ' } else {'), (5310, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from If");'), (5313, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (5317, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (5318, ' if (tmp == NULL) goto failed;'), (5327, ' stmt_ty value;'), (5328, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5334, ' asdl_seq_SET(body, i, value);'), (5337, ' } else {'), (5338, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from If");'), (5341, ' if (_PyObject_HasAttrId(obj, &PyId_orelse)) {'), (5345, ' tmp = _PyObject_GetAttrId(obj, &PyId_orelse);'), (5346, ' if (tmp == NULL) goto failed;'), (5355, 
' stmt_ty value;'), (5356, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5362, ' asdl_seq_SET(orelse, i, value);'), (5365, ' } else {'), (5366, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from If");'), (5367, ' return 1;'), (5382, ' if (_PyObject_HasAttrId(obj, &PyId_items)) {'), (5386, ' tmp = _PyObject_GetAttrId(obj, &PyId_items);'), (5387, ' if (tmp == NULL) goto failed;'), (5396, ' withitem_ty value;'), (5397, ' res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &value, arena);'), (5403, ' asdl_seq_SET(items, i, value);'), (5406, ' } else {'), (5407, ' PyErr_SetString(PyExc_TypeError, "required field \\"items\\" missing from With");'), (5410, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (5414, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (5415, ' if (tmp == NULL) goto failed;'), (5424, ' stmt_ty value;'), (5425, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5431, ' asdl_seq_SET(body, i, value);'), (5434, ' } else {'), (5435, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from With");'), (5438, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (5440, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (5441, ' if (tmp == NULL) goto failed;'), (5445, ' } else {'), (5446, ' type_comment = NULL;'), (5461, ' if (_PyObject_HasAttrId(obj, &PyId_items)) {'), (5465, ' tmp = _PyObject_GetAttrId(obj, &PyId_items);'), (5466, ' if (tmp == NULL) goto failed;'), (5475, ' withitem_ty value;'), (5476, ' res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &value, arena);'), (5482, ' asdl_seq_SET(items, i, value);'), (5485, ' } else {'), (5486, ' PyErr_SetString(PyExc_TypeError, "required field \\"items\\" missing from AsyncWith");'), (5489, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (5493, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (5494, ' if (tmp == NULL) goto failed;'), (5503, ' stmt_ty value;'), (5504, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, 
arena);'), (5510, ' asdl_seq_SET(body, i, value);'), (5513, ' } else {'), (5514, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from AsyncWith");'), (5517, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (5519, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (5520, ' if (tmp == NULL) goto failed;'), (5524, ' } else {'), (5525, ' type_comment = NULL;'), (5539, ' if (exists_not_none(obj, &PyId_exc)) {'), (5541, ' tmp = _PyObject_GetAttrId(obj, &PyId_exc);'), (5542, ' if (tmp == NULL) goto failed;'), (5546, ' } else {'), (5547, ' exc = NULL;'), (5549, ' if (exists_not_none(obj, &PyId_cause)) {'), (5551, ' tmp = _PyObject_GetAttrId(obj, &PyId_cause);'), (5552, ' if (tmp == NULL) goto failed;'), (5556, ' } else {'), (5557, ' cause = NULL;'), (5573, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (5577, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (5578, ' if (tmp == NULL) goto failed;'), (5587, ' stmt_ty value;'), (5588, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5594, ' asdl_seq_SET(body, i, value);'), (5597, ' } else {'), (5598, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Try");'), (5601, ' if (_PyObject_HasAttrId(obj, &PyId_handlers)) {'), (5602, ' int res;'), (5603, ' Py_ssize_t len;'), (5605, ' tmp = _PyObject_GetAttrId(obj, &PyId_handlers);'), (5606, ' if (tmp == NULL) goto failed;'), (5615, ' excepthandler_ty value;'), (5616, ' res = obj2ast_excepthandler(PyList_GET_ITEM(tmp, i), &value, arena);'), (5622, ' asdl_seq_SET(handlers, i, value);'), (5625, ' } else {'), (5626, ' PyErr_SetString(PyExc_TypeError, "required field \\"handlers\\" missing from Try");'), (5629, ' if (_PyObject_HasAttrId(obj, &PyId_orelse)) {'), (5633, ' tmp = _PyObject_GetAttrId(obj, &PyId_orelse);'), (5634, ' if (tmp == NULL) goto failed;'), (5643, ' stmt_ty value;'), (5644, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5650, ' asdl_seq_SET(orelse, i, value);'), (5653, ' } else 
{'), (5654, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from Try");'), (5657, ' if (_PyObject_HasAttrId(obj, &PyId_finalbody)) {'), (5661, ' tmp = _PyObject_GetAttrId(obj, &PyId_finalbody);'), (5662, ' if (tmp == NULL) goto failed;'), (5671, ' stmt_ty value;'), (5672, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (5678, ' asdl_seq_SET(finalbody, i, value);'), (5681, ' } else {'), (5682, ' PyErr_SetString(PyExc_TypeError, "required field \\"finalbody\\" missing from Try");'), (5683, ' return 1;'), (5698, ' if (_PyObject_HasAttrId(obj, &PyId_test)) {'), (5700, ' tmp = _PyObject_GetAttrId(obj, &PyId_test);'), (5701, ' if (tmp == NULL) goto failed;'), (5705, ' } else {'), (5706, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from Assert");'), (5709, ' if (exists_not_none(obj, &PyId_msg)) {'), (5711, ' tmp = _PyObject_GetAttrId(obj, &PyId_msg);'), (5712, ' if (tmp == NULL) goto failed;'), (5716, ' } else {'), (5717, ' msg = NULL;'), (5730, ' if (_PyObject_HasAttrId(obj, &PyId_names)) {'), (5734, ' tmp = _PyObject_GetAttrId(obj, &PyId_names);'), (5735, ' if (tmp == NULL) goto failed;'), (5744, ' alias_ty value;'), (5745, ' res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &value, arena);'), (5751, ' asdl_seq_SET(names, i, value);'), (5754, ' } else {'), (5755, ' PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from Import");'), (5756, ' return 1;'), (5771, ' if (exists_not_none(obj, &PyId_module)) {'), (5773, ' tmp = _PyObject_GetAttrId(obj, &PyId_module);'), (5774, ' if (tmp == NULL) goto failed;'), (5778, ' } else {'), (5779, ' module = NULL;'), (5781, ' if (_PyObject_HasAttrId(obj, &PyId_names)) {'), (5785, ' tmp = _PyObject_GetAttrId(obj, &PyId_names);'), (5786, ' if (tmp == NULL) goto failed;'), (5795, ' alias_ty value;'), (5796, ' res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &value, arena);'), (5802, ' asdl_seq_SET(names, i, value);'), (5805, ' } else {'), (5806, ' 
PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from ImportFrom");'), (5809, ' if (exists_not_none(obj, &PyId_level)) {'), (5811, ' tmp = _PyObject_GetAttrId(obj, &PyId_level);'), (5812, ' if (tmp == NULL) goto failed;'), (5816, ' } else {'), (5817, ' level = 0;'), (5830, ' if (_PyObject_HasAttrId(obj, &PyId_names)) {'), (5834, ' tmp = _PyObject_GetAttrId(obj, &PyId_names);'), (5835, ' if (tmp == NULL) goto failed;'), (5844, ' identifier value;'), (5845, ' res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &value, arena);'), (5851, ' asdl_seq_SET(names, i, value);'), (5854, ' } else {'), (5855, ' PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from Global");'), (5856, ' return 1;'), (5869, ' if (_PyObject_HasAttrId(obj, &PyId_names)) {'), (5873, ' tmp = _PyObject_GetAttrId(obj, &PyId_names);'), (5874, ' if (tmp == NULL) goto failed;'), (5883, ' identifier value;'), (5884, ' res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &value, arena);'), (5890, ' asdl_seq_SET(names, i, value);'), (5893, ' } else {'), (5894, ' PyErr_SetString(PyExc_TypeError, "required field \\"names\\" missing from Nonlocal");'), (5895, ' return 1;'), (5908, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (5910, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (5911, ' if (tmp == NULL) goto failed;'), (5915, ' } else {'), (5916, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Expr");'), (5917, ' return 1;'), (5973, ' if (_PyObject_HasAttrId(obj, &PyId_lineno)) {'), (5975, ' tmp = _PyObject_GetAttrId(obj, &PyId_lineno);'), (5976, ' if (tmp == NULL) goto failed;'), (5980, ' } else {'), (5981, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from expr");'), (5984, ' if (_PyObject_HasAttrId(obj, &PyId_col_offset)) {'), (5986, ' tmp = _PyObject_GetAttrId(obj, &PyId_col_offset);'), (5987, ' if (tmp == NULL) goto failed;'), (5991, ' } else {'), (5992, ' PyErr_SetString(PyExc_TypeError, "required field 
\\"col_offset\\" missing from expr");'), (5993, ' return 1;'), (6003, ' if (_PyObject_HasAttrId(obj, &PyId_op)) {'), (6005, ' tmp = _PyObject_GetAttrId(obj, &PyId_op);'), (6006, ' if (tmp == NULL) goto failed;'), (6010, ' } else {'), (6011, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from BoolOp");'), (6014, ' if (_PyObject_HasAttrId(obj, &PyId_values)) {'), (6018, ' tmp = _PyObject_GetAttrId(obj, &PyId_values);'), (6019, ' if (tmp == NULL) goto failed;'), (6028, ' expr_ty value;'), (6029, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (6035, ' asdl_seq_SET(values, i, value);'), (6038, ' } else {'), (6039, ' PyErr_SetString(PyExc_TypeError, "required field \\"values\\" missing from BoolOp");'), (6040, ' return 1;'), (6055, ' if (_PyObject_HasAttrId(obj, &PyId_left)) {'), (6057, ' tmp = _PyObject_GetAttrId(obj, &PyId_left);'), (6058, ' if (tmp == NULL) goto failed;'), (6062, ' } else {'), (6063, ' PyErr_SetString(PyExc_TypeError, "required field \\"left\\" missing from BinOp");'), (6066, ' if (_PyObject_HasAttrId(obj, &PyId_op)) {'), (6068, ' tmp = _PyObject_GetAttrId(obj, &PyId_op);'), (6069, ' if (tmp == NULL) goto failed;'), (6073, ' } else {'), (6074, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from BinOp");'), (6077, ' if (_PyObject_HasAttrId(obj, &PyId_right)) {'), (6079, ' tmp = _PyObject_GetAttrId(obj, &PyId_right);'), (6080, ' if (tmp == NULL) goto failed;'), (6084, ' } else {'), (6085, ' PyErr_SetString(PyExc_TypeError, "required field \\"right\\" missing from BinOp");'), (6086, ' return 1;'), (6100, ' if (_PyObject_HasAttrId(obj, &PyId_op)) {'), (6102, ' tmp = _PyObject_GetAttrId(obj, &PyId_op);'), (6103, ' if (tmp == NULL) goto failed;'), (6107, ' } else {'), (6108, ' PyErr_SetString(PyExc_TypeError, "required field \\"op\\" missing from UnaryOp");'), (6111, ' if (_PyObject_HasAttrId(obj, &PyId_operand)) {'), (6113, ' tmp = _PyObject_GetAttrId(obj, &PyId_operand);'), (6114, ' if (tmp == 
NULL) goto failed;'), (6118, ' } else {'), (6119, ' PyErr_SetString(PyExc_TypeError, "required field \\"operand\\" missing from UnaryOp");'), (6120, ' return 1;'), (6134, ' if (_PyObject_HasAttrId(obj, &PyId_args)) {'), (6136, ' tmp = _PyObject_GetAttrId(obj, &PyId_args);'), (6137, ' if (tmp == NULL) goto failed;'), (6141, ' } else {'), (6142, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from Lambda");'), (6145, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (6147, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (6148, ' if (tmp == NULL) goto failed;'), (6152, ' } else {'), (6153, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from Lambda");'), (6154, ' return 1;'), (6169, ' if (_PyObject_HasAttrId(obj, &PyId_test)) {'), (6171, ' tmp = _PyObject_GetAttrId(obj, &PyId_test);'), (6172, ' if (tmp == NULL) goto failed;'), (6176, ' } else {'), (6177, ' PyErr_SetString(PyExc_TypeError, "required field \\"test\\" missing from IfExp");'), (6180, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (6182, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (6183, ' if (tmp == NULL) goto failed;'), (6187, ' } else {'), (6188, ' PyErr_SetString(PyExc_TypeError, "required field \\"body\\" missing from IfExp");'), (6191, ' if (_PyObject_HasAttrId(obj, &PyId_orelse)) {'), (6193, ' tmp = _PyObject_GetAttrId(obj, &PyId_orelse);'), (6194, ' if (tmp == NULL) goto failed;'), (6198, ' } else {'), (6199, ' PyErr_SetString(PyExc_TypeError, "required field \\"orelse\\" missing from IfExp");'), (6200, ' return 1;'), (6214, ' if (_PyObject_HasAttrId(obj, &PyId_keys)) {'), (6218, ' tmp = _PyObject_GetAttrId(obj, &PyId_keys);'), (6219, ' if (tmp == NULL) goto failed;'), (6228, ' expr_ty value;'), (6229, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (6235, ' asdl_seq_SET(keys, i, value);'), (6238, ' } else {'), (6239, ' PyErr_SetString(PyExc_TypeError, "required field \\"keys\\" missing from Dict");'), (6242, ' if 
(_PyObject_HasAttrId(obj, &PyId_values)) {'), (6246, ' tmp = _PyObject_GetAttrId(obj, &PyId_values);'), (6247, ' if (tmp == NULL) goto failed;'), (6256, ' expr_ty value;'), (6257, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (6263, ' asdl_seq_SET(values, i, value);'), (6266, ' } else {'), (6267, ' PyErr_SetString(PyExc_TypeError, "required field \\"values\\" missing from Dict");'), (6268, ' return 1;'), (6281, ' if (_PyObject_HasAttrId(obj, &PyId_elts)) {'), (6285, ' tmp = _PyObject_GetAttrId(obj, &PyId_elts);'), (6286, ' if (tmp == NULL) goto failed;'), (6295, ' expr_ty value;'), (6296, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (6302, ' asdl_seq_SET(elts, i, value);'), (6305, ' } else {'), (6306, ' PyErr_SetString(PyExc_TypeError, "required field \\"elts\\" missing from Set");'), (6307, ' return 1;'), (6321, ' if (_PyObject_HasAttrId(obj, &PyId_elt)) {'), (6323, ' tmp = _PyObject_GetAttrId(obj, &PyId_elt);'), (6324, ' if (tmp == NULL) goto failed;'), (6328, ' } else {'), (6329, ' PyErr_SetString(PyExc_TypeError, "required field \\"elt\\" missing from ListComp");'), (6332, ' if (_PyObject_HasAttrId(obj, &PyId_generators)) {'), (6336, ' tmp = _PyObject_GetAttrId(obj, &PyId_generators);'), (6337, ' if (tmp == NULL) goto failed;'), (6346, ' comprehension_ty value;'), (6347, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena);'), (6353, ' asdl_seq_SET(generators, i, value);'), (6356, ' } else {'), (6357, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from ListComp");'), (6358, ' return 1;'), (6372, ' if (_PyObject_HasAttrId(obj, &PyId_elt)) {'), (6374, ' tmp = _PyObject_GetAttrId(obj, &PyId_elt);'), (6375, ' if (tmp == NULL) goto failed;'), (6379, ' } else {'), (6380, ' PyErr_SetString(PyExc_TypeError, "required field \\"elt\\" missing from SetComp");'), (6383, ' if (_PyObject_HasAttrId(obj, &PyId_generators)) {'), (6387, ' tmp = _PyObject_GetAttrId(obj, &PyId_generators);'), 
(6388, ' if (tmp == NULL) goto failed;'), (6397, ' comprehension_ty value;'), (6398, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena);'), (6404, ' asdl_seq_SET(generators, i, value);'), (6407, ' } else {'), (6408, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from SetComp");'), (6409, ' return 1;'), (6424, ' if (_PyObject_HasAttrId(obj, &PyId_key)) {'), (6426, ' tmp = _PyObject_GetAttrId(obj, &PyId_key);'), (6427, ' if (tmp == NULL) goto failed;'), (6431, ' } else {'), (6432, ' PyErr_SetString(PyExc_TypeError, "required field \\"key\\" missing from DictComp");'), (6435, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (6437, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6438, ' if (tmp == NULL) goto failed;'), (6442, ' } else {'), (6443, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from DictComp");'), (6446, ' if (_PyObject_HasAttrId(obj, &PyId_generators)) {'), (6450, ' tmp = _PyObject_GetAttrId(obj, &PyId_generators);'), (6451, ' if (tmp == NULL) goto failed;'), (6460, ' comprehension_ty value;'), (6461, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena);'), (6467, ' asdl_seq_SET(generators, i, value);'), (6470, ' } else {'), (6471, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from DictComp");'), (6472, ' return 1;'), (6486, ' if (_PyObject_HasAttrId(obj, &PyId_elt)) {'), (6488, ' tmp = _PyObject_GetAttrId(obj, &PyId_elt);'), (6489, ' if (tmp == NULL) goto failed;'), (6493, ' } else {'), (6494, ' PyErr_SetString(PyExc_TypeError, "required field \\"elt\\" missing from GeneratorExp");'), (6497, ' if (_PyObject_HasAttrId(obj, &PyId_generators)) {'), (6501, ' tmp = _PyObject_GetAttrId(obj, &PyId_generators);'), (6502, ' if (tmp == NULL) goto failed;'), (6511, ' comprehension_ty value;'), (6512, ' res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &value, arena);'), (6518, ' asdl_seq_SET(generators, i, value);'), (6521, ' } else {'), 
(6522, ' PyErr_SetString(PyExc_TypeError, "required field \\"generators\\" missing from GeneratorExp");'), (6523, ' return 1;'), (6536, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (6538, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6539, ' if (tmp == NULL) goto failed;'), (6543, ' } else {'), (6544, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Await");'), (6545, ' return 1;'), (6558, ' if (exists_not_none(obj, &PyId_value)) {'), (6560, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6561, ' if (tmp == NULL) goto failed;'), (6565, ' } else {'), (6566, ' value = NULL;'), (6579, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (6581, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6582, ' if (tmp == NULL) goto failed;'), (6586, ' } else {'), (6587, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from YieldFrom");'), (6588, ' return 1;'), (6603, ' if (_PyObject_HasAttrId(obj, &PyId_left)) {'), (6605, ' tmp = _PyObject_GetAttrId(obj, &PyId_left);'), (6606, ' if (tmp == NULL) goto failed;'), (6610, ' } else {'), (6611, ' PyErr_SetString(PyExc_TypeError, "required field \\"left\\" missing from Compare");'), (6614, ' if (_PyObject_HasAttrId(obj, &PyId_ops)) {'), (6618, ' tmp = _PyObject_GetAttrId(obj, &PyId_ops);'), (6619, ' if (tmp == NULL) goto failed;'), (6628, ' cmpop_ty value;'), (6629, ' res = obj2ast_cmpop(PyList_GET_ITEM(tmp, i), &value, arena);'), (6635, ' asdl_seq_SET(ops, i, value);'), (6638, ' } else {'), (6639, ' PyErr_SetString(PyExc_TypeError, "required field \\"ops\\" missing from Compare");'), (6642, ' if (_PyObject_HasAttrId(obj, &PyId_comparators)) {'), (6646, ' tmp = _PyObject_GetAttrId(obj, &PyId_comparators);'), (6647, ' if (tmp == NULL) goto failed;'), (6656, ' expr_ty value;'), (6657, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (6663, ' asdl_seq_SET(comparators, i, value);'), (6666, ' } else {'), (6667, ' PyErr_SetString(PyExc_TypeError, "required field 
\\"comparators\\" missing from Compare");'), (6668, ' return 1;'), (6683, ' if (_PyObject_HasAttrId(obj, &PyId_func)) {'), (6685, ' tmp = _PyObject_GetAttrId(obj, &PyId_func);'), (6686, ' if (tmp == NULL) goto failed;'), (6690, ' } else {'), (6691, ' PyErr_SetString(PyExc_TypeError, "required field \\"func\\" missing from Call");'), (6694, ' if (_PyObject_HasAttrId(obj, &PyId_args)) {'), (6698, ' tmp = _PyObject_GetAttrId(obj, &PyId_args);'), (6699, ' if (tmp == NULL) goto failed;'), (6708, ' expr_ty value;'), (6709, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (6715, ' asdl_seq_SET(args, i, value);'), (6718, ' } else {'), (6719, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from Call");'), (6722, ' if (_PyObject_HasAttrId(obj, &PyId_keywords)) {'), (6726, ' tmp = _PyObject_GetAttrId(obj, &PyId_keywords);'), (6727, ' if (tmp == NULL) goto failed;'), (6736, ' keyword_ty value;'), (6737, ' res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &value, arena);'), (6743, ' asdl_seq_SET(keywords, i, value);'), (6746, ' } else {'), (6747, ' PyErr_SetString(PyExc_TypeError, "required field \\"keywords\\" missing from Call");'), (6748, ' return 1;'), (6761, ' if (_PyObject_HasAttrId(obj, &PyId_n)) {'), (6763, ' tmp = _PyObject_GetAttrId(obj, &PyId_n);'), (6764, ' if (tmp == NULL) goto failed;'), (6768, ' } else {'), (6769, ' PyErr_SetString(PyExc_TypeError, "required field \\"n\\" missing from Num");'), (6770, ' return 1;'), (6784, ' if (_PyObject_HasAttrId(obj, &PyId_s)) {'), (6786, ' tmp = _PyObject_GetAttrId(obj, &PyId_s);'), (6787, ' if (tmp == NULL) goto failed;'), (6791, ' } else {'), (6792, ' PyErr_SetString(PyExc_TypeError, "required field \\"s\\" missing from Str");'), (6793, ' return 1;'), (6819, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (6821, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6822, ' if (tmp == NULL) goto failed;'), (6826, ' } else {'), (6827, ' PyErr_SetString(PyExc_TypeError, "required field 
\\"value\\" missing from FormattedValue");'), (6830, ' if (exists_not_none(obj, &PyId_conversion)) {'), (6832, ' tmp = _PyObject_GetAttrId(obj, &PyId_conversion);'), (6833, ' if (tmp == NULL) goto failed;'), (6837, ' } else {'), (6838, ' conversion = 0;'), (6840, ' if (exists_not_none(obj, &PyId_format_spec)) {'), (6842, ' tmp = _PyObject_GetAttrId(obj, &PyId_format_spec);'), (6843, ' if (tmp == NULL) goto failed;'), (6847, ' } else {'), (6848, ' format_spec = NULL;'), (6862, ' if (_PyObject_HasAttrId(obj, &PyId_values)) {'), (6866, ' tmp = _PyObject_GetAttrId(obj, &PyId_values);'), (6867, ' if (tmp == NULL) goto failed;'), (6876, ' expr_ty value;'), (6877, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (6883, ' asdl_seq_SET(values, i, value);'), (6886, ' } else {'), (6887, ' PyErr_SetString(PyExc_TypeError, "required field \\"values\\" missing from JoinedStr");'), (6888, ' return 1;'), (6901, ' if (_PyObject_HasAttrId(obj, &PyId_s)) {'), (6903, ' tmp = _PyObject_GetAttrId(obj, &PyId_s);'), (6904, ' if (tmp == NULL) goto failed;'), (6908, ' } else {'), (6909, ' PyErr_SetString(PyExc_TypeError, "required field \\"s\\" missing from Bytes");'), (6910, ' return 1;'), (6923, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (6925, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6926, ' if (tmp == NULL) goto failed;'), (6930, ' } else {'), (6931, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from NameConstant");'), (6932, ' return 1;'), (6955, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (6957, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6958, ' if (tmp == NULL) goto failed;'), (6962, ' } else {'), (6963, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Constant");'), (6964, ' return 1;'), (6979, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (6981, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (6982, ' if (tmp == NULL) goto failed;'), (6986, ' } else {'), (6987, ' 
PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Attribute");'), (6990, ' if (_PyObject_HasAttrId(obj, &PyId_attr)) {'), (6992, ' tmp = _PyObject_GetAttrId(obj, &PyId_attr);'), (6993, ' if (tmp == NULL) goto failed;'), (6997, ' } else {'), (6998, ' PyErr_SetString(PyExc_TypeError, "required field \\"attr\\" missing from Attribute");'), (7001, ' if (_PyObject_HasAttrId(obj, &PyId_ctx)) {'), (7003, ' tmp = _PyObject_GetAttrId(obj, &PyId_ctx);'), (7004, ' if (tmp == NULL) goto failed;'), (7008, ' } else {'), (7009, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Attribute");'), (7010, ' return 1;'), (7025, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (7027, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (7028, ' if (tmp == NULL) goto failed;'), (7032, ' } else {'), (7033, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Subscript");'), (7036, ' if (_PyObject_HasAttrId(obj, &PyId_slice)) {'), (7038, ' tmp = _PyObject_GetAttrId(obj, &PyId_slice);'), (7039, ' if (tmp == NULL) goto failed;'), (7043, ' } else {'), (7044, ' PyErr_SetString(PyExc_TypeError, "required field \\"slice\\" missing from Subscript");'), (7047, ' if (_PyObject_HasAttrId(obj, &PyId_ctx)) {'), (7049, ' tmp = _PyObject_GetAttrId(obj, &PyId_ctx);'), (7050, ' if (tmp == NULL) goto failed;'), (7054, ' } else {'), (7055, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Subscript");'), (7056, ' return 1;'), (7070, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (7072, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (7073, ' if (tmp == NULL) goto failed;'), (7077, ' } else {'), (7078, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Starred");'), (7081, ' if (_PyObject_HasAttrId(obj, &PyId_ctx)) {'), (7083, ' tmp = _PyObject_GetAttrId(obj, &PyId_ctx);'), (7084, ' if (tmp == NULL) goto failed;'), (7088, ' } else {'), (7089, ' PyErr_SetString(PyExc_TypeError, "required 
field \\"ctx\\" missing from Starred");'), (7090, ' return 1;'), (7104, ' if (_PyObject_HasAttrId(obj, &PyId_id)) {'), (7106, ' tmp = _PyObject_GetAttrId(obj, &PyId_id);'), (7107, ' if (tmp == NULL) goto failed;'), (7111, ' } else {'), (7112, ' PyErr_SetString(PyExc_TypeError, "required field \\"id\\" missing from Name");'), (7115, ' if (_PyObject_HasAttrId(obj, &PyId_ctx)) {'), (7117, ' tmp = _PyObject_GetAttrId(obj, &PyId_ctx);'), (7118, ' if (tmp == NULL) goto failed;'), (7122, ' } else {'), (7123, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Name");'), (7124, ' return 1;'), (7138, ' if (_PyObject_HasAttrId(obj, &PyId_elts)) {'), (7142, ' tmp = _PyObject_GetAttrId(obj, &PyId_elts);'), (7143, ' if (tmp == NULL) goto failed;'), (7152, ' expr_ty value;'), (7153, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (7159, ' asdl_seq_SET(elts, i, value);'), (7162, ' } else {'), (7163, ' PyErr_SetString(PyExc_TypeError, "required field \\"elts\\" missing from List");'), (7166, ' if (_PyObject_HasAttrId(obj, &PyId_ctx)) {'), (7168, ' tmp = _PyObject_GetAttrId(obj, &PyId_ctx);'), (7169, ' if (tmp == NULL) goto failed;'), (7173, ' } else {'), (7174, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from List");'), (7175, ' return 1;'), (7189, ' if (_PyObject_HasAttrId(obj, &PyId_elts)) {'), (7193, ' tmp = _PyObject_GetAttrId(obj, &PyId_elts);'), (7194, ' if (tmp == NULL) goto failed;'), (7203, ' expr_ty value;'), (7204, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (7210, ' asdl_seq_SET(elts, i, value);'), (7213, ' } else {'), (7214, ' PyErr_SetString(PyExc_TypeError, "required field \\"elts\\" missing from Tuple");'), (7217, ' if (_PyObject_HasAttrId(obj, &PyId_ctx)) {'), (7219, ' tmp = _PyObject_GetAttrId(obj, &PyId_ctx);'), (7220, ' if (tmp == NULL) goto failed;'), (7224, ' } else {'), (7225, ' PyErr_SetString(PyExc_TypeError, "required field \\"ctx\\" missing from Tuple");'), (7226, ' 
return 1;'), (7317, ' if (exists_not_none(obj, &PyId_lower)) {'), (7319, ' tmp = _PyObject_GetAttrId(obj, &PyId_lower);'), (7320, ' if (tmp == NULL) goto failed;'), (7324, ' } else {'), (7325, ' lower = NULL;'), (7327, ' if (exists_not_none(obj, &PyId_upper)) {'), (7329, ' tmp = _PyObject_GetAttrId(obj, &PyId_upper);'), (7330, ' if (tmp == NULL) goto failed;'), (7334, ' } else {'), (7335, ' upper = NULL;'), (7337, ' if (exists_not_none(obj, &PyId_step)) {'), (7339, ' tmp = _PyObject_GetAttrId(obj, &PyId_step);'), (7340, ' if (tmp == NULL) goto failed;'), (7344, ' } else {'), (7345, ' step = NULL;'), (7358, ' if (_PyObject_HasAttrId(obj, &PyId_dims)) {'), (7362, ' tmp = _PyObject_GetAttrId(obj, &PyId_dims);'), (7363, ' if (tmp == NULL) goto failed;'), (7372, ' slice_ty value;'), (7373, ' res = obj2ast_slice(PyList_GET_ITEM(tmp, i), &value, arena);'), (7379, ' asdl_seq_SET(dims, i, value);'), (7382, ' } else {'), (7383, ' PyErr_SetString(PyExc_TypeError, "required field \\"dims\\" missing from ExtSlice");'), (7384, ' return 1;'), (7397, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (7399, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (7400, ' if (tmp == NULL) goto failed;'), (7404, ' } else {'), (7405, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from Index");'), (7406, ' return 1;'), (7700, ' if (_PyObject_HasAttrId(obj, &PyId_target)) {'), (7702, ' tmp = _PyObject_GetAttrId(obj, &PyId_target);'), (7703, ' if (tmp == NULL) goto failed;'), (7707, ' } else {'), (7708, ' PyErr_SetString(PyExc_TypeError, "required field \\"target\\" missing from comprehension");'), (7711, ' if (_PyObject_HasAttrId(obj, &PyId_iter)) {'), (7713, ' tmp = _PyObject_GetAttrId(obj, &PyId_iter);'), (7714, ' if (tmp == NULL) goto failed;'), (7718, ' } else {'), (7719, ' PyErr_SetString(PyExc_TypeError, "required field \\"iter\\" missing from comprehension");'), (7722, ' if (_PyObject_HasAttrId(obj, &PyId_ifs)) {'), (7726, ' tmp = _PyObject_GetAttrId(obj, 
&PyId_ifs);'), (7727, ' if (tmp == NULL) goto failed;'), (7736, ' expr_ty value;'), (7737, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (7743, ' asdl_seq_SET(ifs, i, value);'), (7746, ' } else {'), (7747, ' PyErr_SetString(PyExc_TypeError, "required field \\"ifs\\" missing from comprehension");'), (7750, ' if (_PyObject_HasAttrId(obj, &PyId_is_async)) {'), (7752, ' tmp = _PyObject_GetAttrId(obj, &PyId_is_async);'), (7753, ' if (tmp == NULL) goto failed;'), (7757, ' } else {'), (7758, ' PyErr_SetString(PyExc_TypeError, "required field \\"is_async\\" missing from comprehension");'), (7759, ' return 1;'), (7781, ' if (_PyObject_HasAttrId(obj, &PyId_lineno)) {'), (7783, ' tmp = _PyObject_GetAttrId(obj, &PyId_lineno);'), (7784, ' if (tmp == NULL) goto failed;'), (7788, ' } else {'), (7789, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from excepthandler");'), (7792, ' if (_PyObject_HasAttrId(obj, &PyId_col_offset)) {'), (7794, ' tmp = _PyObject_GetAttrId(obj, &PyId_col_offset);'), (7795, ' if (tmp == NULL) goto failed;'), (7799, ' } else {'), (7800, ' PyErr_SetString(PyExc_TypeError, "required field \\"col_offset\\" missing from excepthandler");'), (7801, ' return 1;'), (7812, ' if (exists_not_none(obj, &PyId_type)) {'), (7814, ' tmp = _PyObject_GetAttrId(obj, &PyId_type);'), (7815, ' if (tmp == NULL) goto failed;'), (7819, ' } else {'), (7820, ' type = NULL;'), (7822, ' if (exists_not_none(obj, &PyId_name)) {'), (7824, ' tmp = _PyObject_GetAttrId(obj, &PyId_name);'), (7825, ' if (tmp == NULL) goto failed;'), (7829, ' } else {'), (7830, ' name = NULL;'), (7832, ' if (_PyObject_HasAttrId(obj, &PyId_body)) {'), (7836, ' tmp = _PyObject_GetAttrId(obj, &PyId_body);'), (7837, ' if (tmp == NULL) goto failed;'), (7846, ' stmt_ty value;'), (7847, ' res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &value, arena);'), (7853, ' asdl_seq_SET(body, i, value);'), (7856, ' } else {'), (7857, ' PyErr_SetString(PyExc_TypeError, "required field 
\\"body\\" missing from ExceptHandler");'), (7858, ' return 1;'), (7882, ' if (_PyObject_HasAttrId(obj, &PyId_args)) {'), (7886, ' tmp = _PyObject_GetAttrId(obj, &PyId_args);'), (7887, ' if (tmp == NULL) goto failed;'), (7896, ' arg_ty value;'), (7897, ' res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &value, arena);'), (7903, ' asdl_seq_SET(args, i, value);'), (7906, ' } else {'), (7907, ' PyErr_SetString(PyExc_TypeError, "required field \\"args\\" missing from arguments");'), (7910, ' if (exists_not_none(obj, &PyId_vararg)) {'), (7912, ' tmp = _PyObject_GetAttrId(obj, &PyId_vararg);'), (7913, ' if (tmp == NULL) goto failed;'), (7917, ' } else {'), (7918, ' vararg = NULL;'), (7920, ' if (_PyObject_HasAttrId(obj, &PyId_kwonlyargs)) {'), (7924, ' tmp = _PyObject_GetAttrId(obj, &PyId_kwonlyargs);'), (7925, ' if (tmp == NULL) goto failed;'), (7934, ' arg_ty value;'), (7935, ' res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &value, arena);'), (7941, ' asdl_seq_SET(kwonlyargs, i, value);'), (7944, ' } else {'), (7945, ' PyErr_SetString(PyExc_TypeError, "required field \\"kwonlyargs\\" missing from arguments");'), (7948, ' if (_PyObject_HasAttrId(obj, &PyId_kw_defaults)) {'), (7952, ' tmp = _PyObject_GetAttrId(obj, &PyId_kw_defaults);'), (7953, ' if (tmp == NULL) goto failed;'), (7962, ' expr_ty value;'), (7963, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &value, arena);'), (7969, ' asdl_seq_SET(kw_defaults, i, value);'), (7972, ' } else {'), (7973, ' PyErr_SetString(PyExc_TypeError, "required field \\"kw_defaults\\" missing from arguments");'), (7976, ' if (exists_not_none(obj, &PyId_kwarg)) {'), (7978, ' tmp = _PyObject_GetAttrId(obj, &PyId_kwarg);'), (7979, ' if (tmp == NULL) goto failed;'), (7983, ' } else {'), (7984, ' kwarg = NULL;'), (7986, ' if (_PyObject_HasAttrId(obj, &PyId_defaults)) {'), (7990, ' tmp = _PyObject_GetAttrId(obj, &PyId_defaults);'), (7991, ' if (tmp == NULL) goto failed;'), (8000, ' expr_ty value;'), (8001, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), 
&value, arena);'), (8007, ' asdl_seq_SET(defaults, i, value);'), (8010, ' } else {'), (8011, ' PyErr_SetString(PyExc_TypeError, "required field \\"defaults\\" missing from arguments");'), (8012, ' return 1;'), (8032, ' if (_PyObject_HasAttrId(obj, &PyId_arg)) {'), (8034, ' tmp = _PyObject_GetAttrId(obj, &PyId_arg);'), (8035, ' if (tmp == NULL) goto failed;'), (8039, ' } else {'), (8040, ' PyErr_SetString(PyExc_TypeError, "required field \\"arg\\" missing from arg");'), (8043, ' if (exists_not_none(obj, &PyId_annotation)) {'), (8045, ' tmp = _PyObject_GetAttrId(obj, &PyId_annotation);'), (8046, ' if (tmp == NULL) goto failed;'), (8050, ' } else {'), (8051, ' annotation = NULL;'), (8053, ' if (exists_not_none(obj, &PyId_type_comment)) {'), (8055, ' tmp = _PyObject_GetAttrId(obj, &PyId_type_comment);'), (8056, ' if (tmp == NULL) goto failed;'), (8060, ' } else {'), (8061, ' type_comment = NULL;'), (8063, ' if (_PyObject_HasAttrId(obj, &PyId_lineno)) {'), (8065, ' tmp = _PyObject_GetAttrId(obj, &PyId_lineno);'), (8066, ' if (tmp == NULL) goto failed;'), (8070, ' } else {'), (8071, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from arg");'), (8074, ' if (_PyObject_HasAttrId(obj, &PyId_col_offset)) {'), (8076, ' tmp = _PyObject_GetAttrId(obj, &PyId_col_offset);'), (8077, ' if (tmp == NULL) goto failed;'), (8081, ' } else {'), (8082, ' PyErr_SetString(PyExc_TypeError, "required field \\"col_offset\\" missing from arg");'), (8083, ' return 1;'), (8099, ' if (exists_not_none(obj, &PyId_arg)) {'), (8101, ' tmp = _PyObject_GetAttrId(obj, &PyId_arg);'), (8102, ' if (tmp == NULL) goto failed;'), (8106, ' } else {'), (8107, ' arg = NULL;'), (8109, ' if (_PyObject_HasAttrId(obj, &PyId_value)) {'), (8111, ' tmp = _PyObject_GetAttrId(obj, &PyId_value);'), (8112, ' if (tmp == NULL) goto failed;'), (8116, ' } else {'), (8117, ' PyErr_SetString(PyExc_TypeError, "required field \\"value\\" missing from keyword");'), (8118, ' return 1;'), (8134, ' if 
(_PyObject_HasAttrId(obj, &PyId_name)) {'), (8136, ' tmp = _PyObject_GetAttrId(obj, &PyId_name);'), (8137, ' if (tmp == NULL) goto failed;'), (8141, ' } else {'), (8142, ' PyErr_SetString(PyExc_TypeError, "required field \\"name\\" missing from alias");'), (8145, ' if (exists_not_none(obj, &PyId_asname)) {'), (8147, ' tmp = _PyObject_GetAttrId(obj, &PyId_asname);'), (8148, ' if (tmp == NULL) goto failed;'), (8152, ' } else {'), (8153, ' asname = NULL;'), (8169, ' if (_PyObject_HasAttrId(obj, &PyId_context_expr)) {'), (8171, ' tmp = _PyObject_GetAttrId(obj, &PyId_context_expr);'), (8172, ' if (tmp == NULL) goto failed;'), (8176, ' } else {'), (8177, ' PyErr_SetString(PyExc_TypeError, "required field \\"context_expr\\" missing from withitem");'), (8180, ' if (exists_not_none(obj, &PyId_optional_vars)) {'), (8182, ' tmp = _PyObject_GetAttrId(obj, &PyId_optional_vars);'), (8183, ' if (tmp == NULL) goto failed;'), (8187, ' } else {'), (8188, ' optional_vars = NULL;'), (8215, ' if (_PyObject_HasAttrId(obj, &PyId_lineno)) {'), (8217, ' tmp = _PyObject_GetAttrId(obj, &PyId_lineno);'), (8218, ' if (tmp == NULL) goto failed;'), (8222, ' } else {'), (8223, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from TypeIgnore");'), (8224, ' return 1;'), (8243, 'static struct PyModuleDef _astmodule3 = {'), (8244, ' PyModuleDef_HEAD_INIT, "_ast3", NULL, 0, ast3_methods'), (8251, ' m = PyModule_Create(&_astmodule3);')]}
1,485
1,131
8,579
52,530
19
133
4
https://github.com/python/typed_ast
CVE-2019-19274
CWE-125
1,635
wasm.cc
C++
Envoy::Extensions::Common::Wasm::Context::onDownstreamData
#include "extensions/common/wasm/wasm.h" #include <stdio.h> #include <limits> #include <memory> #include <string> #include "envoy/common/exception.h" #include "envoy/config/wasm/v2/wasm.pb.validate.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/local_info/local_info.h" #include "envoy/server/wasm.h" #include "envoy/thread_local/thread_local.h" #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" #include "common/common/base64.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/logger.h" #include "common/config/datasource.h" #include "common/http/header_map_impl.h" #include "common/http/message_impl.h" #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/common/wasm/wasm_state.h" #include "extensions/common/wasm/well_known_names.h" #include "extensions/filters/common/expr/context.h" #include "absl/base/casts.h" #include "absl/container/flat_hash_map.h" #include "absl/container/node_hash_map.h" #include "absl/synchronization/mutex.h" #include "eval/eval/field_access.h" #include "eval/eval/field_backed_list_impl.h" #include "eval/eval/field_backed_map_impl.h" #include "eval/public/cel_value.h" #include "openssl/bytestring.h" #include "openssl/hmac.h" #include "openssl/sha.h" namespace Envoy { namespace Extensions { namespace Common { namespace Wasm { // Any currently executing Wasm call context. #define WASM_CONTEXT(_c) \ (ContextOrEffectiveContext(static_cast<Context*>((void)_c, current_context_))) // The id of the context which should be used for calls out of the VM in place of current_context_ // above. namespace { // TODO: move to utils during upstreaming. 
std::string base64Sha256(absl::string_view data) { std::vector<uint8_t> digest(SHA256_DIGEST_LENGTH); EVP_MD_CTX* ctx(EVP_MD_CTX_new()); auto rc = EVP_DigestInit(ctx, EVP_sha256()); RELEASE_ASSERT(rc == 1, "Failed to init digest context"); rc = EVP_DigestUpdate(ctx, data.data(), data.size()); RELEASE_ASSERT(rc == 1, "Failed to update digest"); rc = EVP_DigestFinal(ctx, digest.data(), nullptr); RELEASE_ASSERT(rc == 1, "Failed to finalize digest"); EVP_MD_CTX_free(ctx); return Base64::encode(reinterpret_cast<const char*>(&digest[0]), digest.size()); } inline Word wasmResultToWord(WasmResult r) { return Word(static_cast<uint64_t>(r)); } inline uint32_t convertWordToUint32(Word w) { return static_cast<uint32_t>(w.u64_); } // Convert a function of the form Word(Word...) to one of the form uint32_t(uint32_t...). template <typename F, F* fn> struct ConvertFunctionWordToUint32 { static void convertFunctionWordToUint32() {} }; template <typename R, typename... Args, auto (*F)(Args...)->R> struct ConvertFunctionWordToUint32<R(Args...), F> { static auto convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... args) { return convertWordToUint32(F(std::forward<Args>(args)...)); } }; template <typename... Args, auto (*F)(Args...)->void> struct ConvertFunctionWordToUint32<void(Args...), F> { static void convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... 
args) { F(std::forward<Args>(args)...); } }; class SharedData { public: WasmResult get(absl::string_view vm_id, const absl::string_view key, std::pair<std::string, uint32_t>* result) { absl::ReaderMutexLock l(&mutex); auto map = data.find(vm_id); if (map == data.end()) { return WasmResult::NotFound; } auto it = map->second.find(key); if (it != map->second.end()) { *result = it->second; return WasmResult::Ok; } return WasmResult::NotFound; } WasmResult set(absl::string_view vm_id, absl::string_view key, absl::string_view value, uint32_t cas) { absl::WriterMutexLock l(&mutex); absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>* map; auto map_it = data.find(vm_id); if (map_it == data.end()) { map = &data[vm_id]; } else { map = &map_it->second; } auto it = map->find(key); if (it != map->end()) { if (cas && cas != it->second.second) { return WasmResult::CasMismatch; } it->second = std::make_pair(std::string(value), nextCas()); } else { map->emplace(key, std::make_pair(std::string(value), nextCas())); } return WasmResult::Ok; } uint32_t registerQueue(absl::string_view vm_id, absl::string_view queue_name, uint32_t context_id, Event::Dispatcher& dispatcher) { absl::WriterMutexLock l(&mutex); auto key = std::make_pair(std::string(vm_id), std::string(queue_name)); auto it = queue_tokens.insert(std::make_pair(key, static_cast<uint32_t>(0))); if (it.second) { it.first->second = nextQueueToken(); queue_token_set.insert(it.first->second); } uint32_t token = it.first->second; auto& q = queues[token]; q.vm_id = std::string(vm_id); q.context_id = context_id; q.dispatcher = &dispatcher; // Preserve any existing data. return token; } uint32_t resolveQueue(absl::string_view vm_id, absl::string_view queue_name) { absl::WriterMutexLock l(&mutex); auto key = std::make_pair(std::string(vm_id), std::string(queue_name)); auto it = queue_tokens.find(key); if (it != queue_tokens.end()) { return it->second; } return 0; // N.B. zero indicates that the queue was not found. 
} WasmResult dequeue(uint32_t token, std::string* data) { absl::ReaderMutexLock l(&mutex); auto it = queues.find(token); if (it == queues.end()) { return WasmResult::NotFound; } if (it->second.queue.empty()) { return WasmResult::Empty; } *data = it->second.queue.front(); it->second.queue.pop_front(); return WasmResult::Ok; } WasmResult enqueue(uint32_t token, absl::string_view value) { absl::WriterMutexLock l(&mutex); auto it = queues.find(token); if (it == queues.end()) { return WasmResult::NotFound; } it->second.queue.push_back(std::string(value)); auto vm_id = it->second.vm_id; auto context_id = it->second.context_id; it->second.dispatcher->post([vm_id, context_id, token] { auto wasm = getThreadLocalWasmPtr(vm_id); if (wasm) { wasm->queueReady(context_id, token); } }); return WasmResult::Ok; } uint32_t nextCas() { auto result = cas; cas++; if (!cas) { // 0 is not a valid CAS value. cas++; } return result; } private: uint32_t nextQueueToken() { while (true) { uint32_t token = next_queue_token++; if (token == 0) { continue; // 0 is an illegal token. } if (queue_token_set.find(token) == queue_token_set.end()) { return token; } } } struct Queue { std::string vm_id; uint32_t context_id; Event::Dispatcher* dispatcher; std::deque<std::string> queue; }; absl::Mutex mutex; uint32_t cas = 1; uint32_t next_queue_token = 1; absl::node_hash_map<std::string, absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>> data; absl::node_hash_map<uint32_t, Queue> queues; struct pair_hash { template <class T1, class T2> std::size_t operator()(const std::pair<T1, T2>& pair) const { return std::hash<T1>()(pair.first) ^ std::hash<T2>()(pair.second); } }; absl::flat_hash_map<std::pair<std::string, std::string>, uint32_t, pair_hash> queue_tokens; absl::flat_hash_set<uint32_t> queue_token_set; }; SharedData global_shared_data; // Map from Wasm ID to the local Wasm instance. 
thread_local absl::flat_hash_map<std::string, std::weak_ptr<Wasm>> local_wasms; const std::string INLINE_STRING = "<inline>"; template <typename Pairs> size_t pairsSize(const Pairs& result) { size_t size = 4; // number of headers for (auto& p : result) { size += 8; // size of key, size of value size += p.first.size() + 1; // null terminated key size += p.second.size() + 1; // null terminated value } return size; } template <typename Pairs> void marshalPairs(const Pairs& result, char* buffer) { char* b = buffer; *reinterpret_cast<uint32_t*>(b) = result.size(); b += sizeof(uint32_t); for (auto& p : result) { *reinterpret_cast<uint32_t*>(b) = p.first.size(); b += sizeof(uint32_t); *reinterpret_cast<uint32_t*>(b) = p.second.size(); b += sizeof(uint32_t); } for (auto& p : result) { memcpy(b, p.first.data(), p.first.size()); b += p.first.size(); *b++ = 0; memcpy(b, p.second.data(), p.second.size()); b += p.second.size(); *b++ = 0; } } Pairs toPairs(absl::string_view buffer) { Pairs result; const char* b = buffer.data(); if (buffer.size() < sizeof(uint32_t)) { return {}; } auto size = *reinterpret_cast<const uint32_t*>(b); b += sizeof(uint32_t); if (sizeof(uint32_t) + size * 2 * sizeof(uint32_t) > buffer.size()) { return {}; } result.resize(size); for (uint32_t i = 0; i < size; i++) { result[i].first = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); result[i].second = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); } for (auto& p : result) { p.first = absl::string_view(b, p.first.size()); b += p.first.size() + 1; p.second = absl::string_view(b, p.second.size()); b += p.second.size() + 1; } return result; } template <typename Pairs> bool getPairs(Context* context, const Pairs& result, uint64_t ptr_ptr, uint64_t size_ptr) { if (result.empty()) { return context->wasm()->copyToPointerSize("", ptr_ptr, size_ptr); } uint64_t size = pairsSize(result); uint64_t ptr; char* buffer = 
static_cast<char*>(context->wasm()->allocMemory(size, &ptr)); marshalPairs(result, buffer); if (!context->wasmVm()->setWord(ptr_ptr, Word(ptr))) { return false; } if (!context->wasmVm()->setWord(size_ptr, Word(size))) { return false; } return true; } void exportPairs(Context* context, const Pairs& pairs, uint64_t* ptr_ptr, uint64_t* size_ptr) { if (pairs.empty()) { *ptr_ptr = 0; *size_ptr = 0; return; } uint64_t size = pairsSize(pairs); char* buffer = static_cast<char*>(context->wasm()->allocMemory(size, ptr_ptr)); marshalPairs(pairs, buffer); *size_ptr = size; } Http::HeaderMapPtr buildHeaderMapFromPairs(const Pairs& pairs) { auto map = std::make_unique<Http::HeaderMapImpl>(); for (auto& p : pairs) { // Note: because of the lack of a string_view interface for addCopy and // the lack of an interface to add an entry with an empty value and return // the entry, there is no efficient way to prevent either a double copy // of the valueor a double lookup of the entry. map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); } return map; } const uint8_t* decodeVarint(const uint8_t* pos, const uint8_t* end, uint32_t* out) { uint32_t ret = 0; int shift = 0; while (pos < end && (*pos & 0x80)) { ret |= (*pos & 0x7f) << shift; shift += 7; pos++; } if (pos < end) { ret |= *pos << shift; pos++; } *out = ret; return pos; } Context* ContextOrEffectiveContext(Context* context) { if (effective_context_id_ == 0) { return context; } auto effective_context = context->wasm()->getContext(effective_context_id_); if (effective_context) { return effective_context; } // The effective_context_id_ no longer exists, revert to the true context. return context; } } // namespace // Test support. 
uint32_t resolveQueueForTest(absl::string_view vm_id, absl::string_view queue_name) { return global_shared_data.resolveQueue(vm_id, queue_name); } // // HTTP Handlers // Word setPropertyHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->setProperty(key.value(), value.value())); } // Generic selector Word getPropertyHandler(void* raw_context, Word path_ptr, Word path_size, Word value_ptr_ptr, Word value_size_ptr) { auto context = WASM_CONTEXT(raw_context); auto path = context->wasmVm()->getMemory(path_ptr.u64_, path_size.u64_); if (!path.has_value()) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } std::string value; auto result = context->getProperty(path.value(), &value); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(value, value_ptr_ptr.u64_, value_size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } // Continue/Reply/Route Word continueRequestHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->continueRequest(); return wasmResultToWord(WasmResult::Ok); } Word continueResponseHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->continueResponse(); return wasmResultToWord(WasmResult::Ok); } Word sendLocalResponseHandler(void* raw_context, Word response_code, Word response_code_details_ptr, Word response_code_details_size, Word body_ptr, Word body_size, Word additional_response_header_pairs_ptr, Word additional_response_header_pairs_size, Word grpc_code) { auto context = WASM_CONTEXT(raw_context); auto details = 
context->wasmVm()->getMemory(response_code_details_ptr.u64_, response_code_details_size.u64_); auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_); auto additional_response_header_pairs = context->wasmVm()->getMemory( additional_response_header_pairs_ptr.u64_, additional_response_header_pairs_size.u64_); if (!details || !body || !additional_response_header_pairs) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto additional_headers = toPairs(additional_response_header_pairs.value()); auto modify_headers = [additional_headers](Http::HeaderMap& headers) { for (auto& p : additional_headers) { const Http::LowerCaseString lower_key(std::move(std::string(p.first))); headers.addCopy(lower_key, std::string(p.second)); } }; auto grpc_status = static_cast<Grpc::Status::GrpcStatus>(grpc_code.u64_); auto grpc_status_opt = (grpc_status != Grpc::Status::GrpcStatus::InvalidCode) ? absl::optional<Grpc::Status::GrpcStatus>(grpc_status) : absl::optional<Grpc::Status::GrpcStatus>(); context->sendLocalResponse(static_cast<Envoy::Http::Code>(response_code.u64_), body.value(), modify_headers, grpc_status_opt, details.value()); return wasmResultToWord(WasmResult::Ok); } Word setEffectiveContextHandler(void* raw_context, Word context_id) { auto context = WASM_CONTEXT(raw_context); uint32_t cid = static_cast<uint32_t>(context_id.u64_); auto c = context->wasm()->getContext(cid); if (!c) { return wasmResultToWord(WasmResult::BadArgument); } effective_context_id_ = cid; return wasmResultToWord(WasmResult::Ok); } Word clearRouteCacheHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->clearRouteCache(); return wasmResultToWord(WasmResult::Ok); } // SharedData Word getSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr_ptr, Word value_size_ptr, Word cas_ptr) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } std::pair<std::string, uint32_t> data; WasmResult result = context->getSharedData(key.value(), &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(data.first, value_ptr_ptr.u64_, value_size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } if (!context->wasmVm()->setMemory(cas_ptr.u64_, sizeof(uint32_t), &data.second)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word setSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, Word value_size, Word cas) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->setSharedData(key.value(), value.value(), cas.u64_)); } Word registerSharedQueueHandler(void* raw_context, Word queue_name_ptr, Word queue_name_size, Word token_ptr) { auto context = WASM_CONTEXT(raw_context); auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_); if (!queue_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t token = context->registerSharedQueue(queue_name.value()); if (!context->wasm()->setDatatype(token_ptr.u64_, token)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word dequeueSharedQueueHandler(void* raw_context, Word token, Word data_ptr_ptr, Word data_size_ptr) { auto context = WASM_CONTEXT(raw_context); std::string data; WasmResult result = context->dequeueSharedQueue(token.u32(), &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(data, data_ptr_ptr.u64_, data_size_ptr.u64_)) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word resolveSharedQueueHandler(void* raw_context, Word vm_id_ptr, Word vm_id_size, Word queue_name_ptr, Word queue_name_size, Word token_ptr) { auto context = WASM_CONTEXT(raw_context); auto vm_id = context->wasmVm()->getMemory(vm_id_ptr.u64_, vm_id_size.u64_); auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_); if (!vm_id || !queue_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t token = 0; auto result = context->resolveSharedQueue(vm_id.value(), queue_name.value(), &token); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(token_ptr.u64_, token)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word enqueueSharedQueueHandler(void* raw_context, Word token, Word data_ptr, Word data_size) { auto context = WASM_CONTEXT(raw_context); auto data = context->wasmVm()->getMemory(data_ptr.u64_, data_size.u64_); if (!data) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->enqueueSharedQueue(token.u32(), data.value())); } // Network Word getDownstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); absl::string_view data; auto result = context->getDownstreamDataBufferBytes(start.u64_, length.u64_, &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word getUpstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); absl::string_view data; auto result = context->getUpstreamDataBufferBytes(start.u64_, length.u64_, &data); if (result != WasmResult::Ok) { 
return wasmResultToWord(result); } context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } // Header/Trailer/Metadata Maps Word addHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->addHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value()); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr_ptr, Word value_size_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto result = context->getHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value()); context->wasm()->copyToPointerSize(result, value_ptr_ptr.u64_, value_size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word replaceHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } 
context->replaceHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value()); return wasmResultToWord(WasmResult::Ok); } Word removeHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->removeHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value()); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto result = context->getHeaderMapPairs(static_cast<HeaderMapType>(type.u64_)); if (!getPairs(context, result, ptr_ptr.u64_, size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word setHeaderMapPairsHandler(void* raw_context, Word type, Word ptr, Word size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto data = context->wasmVm()->getMemory(ptr.u64_, size.u64_); if (!data) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->setHeaderMapPairs(static_cast<HeaderMapType>(type.u64_), toPairs(data.value())); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapSizeHandler(void* raw_context, Word type, Word result_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); size_t result = context->getHeaderMapSize(static_cast<HeaderMapType>(type.u64_)); if 
(!context->wasmVm()->setWord(result_ptr.u64_, Word(result))) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } // Body Buffer Word getRequestBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getRequestBodyBufferBytes(start.u64_, length.u64_); context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word getResponseBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getResponseBodyBufferBytes(start.u64_, length.u64_); context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word httpCallHandler(void* raw_context, Word uri_ptr, Word uri_size, Word header_pairs_ptr, Word header_pairs_size, Word body_ptr, Word body_size, Word trailer_pairs_ptr, Word trailer_pairs_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto uri = context->wasmVm()->getMemory(uri_ptr.u64_, uri_size.u64_); auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_); auto header_pairs = context->wasmVm()->getMemory(header_pairs_ptr.u64_, header_pairs_size.u64_); auto trailer_pairs = context->wasmVm()->getMemory(trailer_pairs_ptr.u64_, trailer_pairs_size.u64_); if (!uri || !body || !header_pairs || !trailer_pairs) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto headers = toPairs(header_pairs.value()); auto trailers = toPairs(trailer_pairs.value()); return context->httpCall(uri.value(), headers, body.value(), trailers, timeout_milliseconds.u64_); } Word defineMetricHandler(void* raw_context, Word metric_type, Word name_ptr, Word name_size, Word metric_id_ptr) { if (metric_type.u64_ > static_cast<uint64_t>(Context::MetricType::Max)) { 
return 0; } auto context = WASM_CONTEXT(raw_context); auto name = context->wasmVm()->getMemory(name_ptr.u64_, name_size.u64_); if (!name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t metric_id = 0; auto result = context->defineMetric(static_cast<Context::MetricType>(metric_type.u64_), name.value(), &metric_id); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(metric_id_ptr.u64_, metric_id)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word incrementMetricHandler(void* raw_context, Word metric_id, int64_t offset) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->incrementMetric(metric_id.u64_, offset)); } Word recordMetricHandler(void* raw_context, Word metric_id, uint64_t value) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->recordMetric(metric_id.u64_, value)); } Word getMetricHandler(void* raw_context, Word metric_id, Word result_uint64_ptr) { auto context = WASM_CONTEXT(raw_context); uint64_t value = 0; auto result = context->getMetric(metric_id.u64_, &value); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, value)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word grpcCallHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size, Word request_ptr, Word request_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); auto request = 
context->wasmVm()->getMemory(request_ptr.u64_, request_size.u64_); if (!service || !service_name || !method_name || !request) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcCall(service_proto, service_name.value(), method_name.value(), request.value(), std::chrono::milliseconds(timeout_milliseconds.u64_)); } Word grpcStreamHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); if (!service || !service_name || !method_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcStream(service_proto, service_name.value(), method_name.value()); } Word grpcCancelHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcCancel(token.u64_)); } Word grpcCloseHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcClose(token.u64_)); } Word grpcSendHandler(void* raw_context, Word token, Word message_ptr, Word message_size, Word end_stream) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto message = context->wasmVm()->getMemory(message_ptr.u64_, message_size.u64_); if (!message) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->grpcSend(token.u64_, message.value(), end_stream.u64_)); } // Implementation of writev-like() syscall that redirects stdout/stderr to Envoy logs. Word writevImpl(void* raw_context, Word fd, Word iovs, Word iovs_len, Word* nwritten_ptr) { auto context = WASM_CONTEXT(raw_context); // Read syscall args. spdlog::level::level_enum log_level; switch (fd.u64_) { case 1 /* stdout */: log_level = spdlog::level::info; break; case 2 /* stderr */: log_level = spdlog::level::err; break; default: return 8; // __WASI_EBADF } std::string s; for (size_t i = 0; i < iovs_len.u64_; i++) { auto memslice = context->wasmVm()->getMemory(iovs.u64_ + i * 2 * sizeof(uint32_t), 2 * sizeof(uint32_t)); if (!memslice) { return 21; // __WASI_EFAULT } const uint32_t* iovec = reinterpret_cast<const uint32_t*>(memslice.value().data()); if (iovec[1] /* buf_len */) { memslice = context->wasmVm()->getMemory(iovec[0] /* buf */, iovec[1] /* buf_len */); if (!memslice) { return 21; // __WASI_EFAULT } s.append(memslice.value().data(), memslice.value().size()); } } size_t written = s.size(); if (written) { // Remove trailing newline from the logs, if any. 
if (s[written - 1] == '\n') { s.erase(written - 1); } context->scriptLog(log_level, s); } *nwritten_ptr = Word(written); return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_fd_write(_wasi_fd_t fd, const _wasi_ciovec_t *iov, size_t iovs_len, size_t* // nwritten); Word wasi_unstable_fd_writeHandler(void* raw_context, Word fd, Word iovs, Word iovs_len, Word nwritten_ptr) { auto context = WASM_CONTEXT(raw_context); Word nwritten(0); auto result = writevImpl(raw_context, fd, iovs, iovs_len, &nwritten); if (result.u64_ != 0) { // __WASI_ESUCCESS return result; } if (!context->wasmVm()->setWord(nwritten_ptr.u64_, Word(nwritten))) { return 21; // __WASI_EFAULT } return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_fd_seek(__wasi_fd_t fd, __wasi_filedelta_t offset, __wasi_whence_t // whence,__wasi_filesize_t *newoffset); Word wasi_unstable_fd_seekHandler(void*, Word, int64_t, Word, Word) { throw WasmException("wasi_unstable fd_seek"); } // __wasi_errno_t __wasi_fd_close(__wasi_fd_t fd); Word wasi_unstable_fd_closeHandler(void*, Word) { throw WasmException("wasi_unstable fd_close"); } // __wasi_errno_t __wasi_environ_get(char **environ, char *environ_buf); Word wasi_unstable_environ_getHandler(void*, Word, Word) { return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_environ_sizes_get(size_t *environ_count, size_t *environ_buf_size); Word wasi_unstable_environ_sizes_getHandler(void* raw_context, Word count_ptr, Word buf_size_ptr) { auto context = WASM_CONTEXT(raw_context); if (!context->wasmVm()->setWord(count_ptr.u64_, Word(0))) { return 21; // __WASI_EFAULT } if (!context->wasmVm()->setWord(buf_size_ptr.u64_, Word(0))) { return 21; // __WASI_EFAULT } return 0; // __WASI_ESUCCESS } // void __wasi_proc_exit(__wasi_exitcode_t rval); void wasi_unstable_proc_exitHandler(void*, Word) { throw WasmException("wasi_unstable proc_exit"); } Word pthread_equalHandler(void*, Word left, Word right) { return left.u64_ == right.u64_; } Word setTickPeriodMillisecondsHandler(void* 
raw_context, Word tick_period_milliseconds) { return wasmResultToWord( WASM_CONTEXT(raw_context) ->setTickPeriod(std::chrono::milliseconds(tick_period_milliseconds.u64_))); } Word getCurrentTimeNanosecondsHandler(void* raw_context, Word result_uint64_ptr) { auto context = WASM_CONTEXT(raw_context); uint64_t result = context->getCurrentTimeNanoseconds(); if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, result)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word logHandler(void* raw_context, Word level, Word address, Word size) { auto context = WASM_CONTEXT(raw_context); auto message = context->wasmVm()->getMemory(address.u64_, size.u64_); if (!message) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->scriptLog(static_cast<spdlog::level::level_enum>(level.u64_), message.value()); return wasmResultToWord(WasmResult::Ok); } WasmResult Context::setTickPeriod(std::chrono::milliseconds tick_period) { wasm_->setTickPeriod(root_context_id_ ? 
root_context_id_ : id_, tick_period);
  return WasmResult::Ok;
}

uint64_t Context::getCurrentTimeNanoseconds() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             wasm_->time_source_.systemTime().time_since_epoch())
      .count();
}

// Recursively converts a CEL value into a ProtobufWkt::Value.
// Returns false for value kinds that cannot be represented.
// TODO(https://github.com/google/cel-cpp/issues/38)
bool exportValue(const Filters::Common::Expr::CelValue& value, ProtobufWkt::Value* out) {
  using Filters::Common::Expr::CelValue;
  switch (value.type()) {
  case CelValue::Type::kBool:
    out->set_bool_value(value.BoolOrDie());
    return true;
  case CelValue::Type::kInt64:
    out->set_number_value(static_cast<double>(value.Int64OrDie()));
    return true;
  case CelValue::Type::kUint64:
    out->set_number_value(static_cast<double>(value.Uint64OrDie()));
    return true;
  case CelValue::Type::kDouble:
    out->set_number_value(value.DoubleOrDie());
    return true;
  case CelValue::Type::kString:
    *out->mutable_string_value() = std::string(value.StringOrDie().value());
    return true;
  case CelValue::Type::kBytes:
    *out->mutable_string_value() = std::string(value.BytesOrDie().value());
    return true;
  case CelValue::Type::kMessage: {
    if (value.IsNull()) {
      out->set_null_value(ProtobufWkt::NullValue::NULL_VALUE);
    } else {
      auto msg = value.MessageOrDie();
      out->mutable_struct_value()->MergeFrom(*msg);
    }
    return true;
  }
  case CelValue::Type::kDuration:
    *out->mutable_string_value() = absl::FormatDuration(value.DurationOrDie());
    return true;
  case CelValue::Type::kTimestamp:
    *out->mutable_string_value() = absl::FormatTime(value.TimestampOrDie());
    return true;
  case CelValue::Type::kList: {
    auto list = value.ListOrDie();
    auto values = out->mutable_list_value();
    for (int i = 0; i < list->size(); i++) {
      if (!exportValue((*list)[i], values->add_values())) {
        return false;
      }
    }
    return true;
  }
  case CelValue::Type::kMap: {
    auto map = value.MapOrDie();
    auto list = map->ListKeys();
    auto struct_obj = out->mutable_struct_value();
    for (int i = 0; i < list->size(); i++) {
      ProtobufWkt::Value field_key;
      if (!exportValue((*list)[i], &field_key)) {
        return false;
      }
      ProtobufWkt::Value field_value;
      // NOTE(review): (*map)[key].value() assumes every key returned by
      // ListKeys() is present in the map — confirm the CelMap implementations
      // used here guarantee that.
      if (!exportValue((*map)[(*list)[i]].value(), &field_value)) {
        return false;
      }
      (*struct_obj->mutable_fields())[field_key.string_value()] = field_value;
    }
    return true;
  }
  default:
    // do nothing for special values
    return false;
  }
  return false;
}

// Serializes a CEL value into |result| using a type-dependent format: raw bytes
// for scalars/strings, proto serialization for messages, maps and lists.
WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result) {
  using Filters::Common::Expr::CelValue;
  switch (value.type()) {
  case CelValue::Type::kMessage:
    if (value.MessageOrDie() != nullptr && value.MessageOrDie()->SerializeToString(result)) {
      return WasmResult::Ok;
    }
    return WasmResult::SerializationFailure;
  case CelValue::Type::kString:
    result->assign(value.StringOrDie().value().data(), value.StringOrDie().value().size());
    return WasmResult::Ok;
  case CelValue::Type::kBytes:
    result->assign(value.BytesOrDie().value().data(), value.BytesOrDie().value().size());
    return WasmResult::Ok;
  case CelValue::Type::kInt64: {
    auto out = value.Int64OrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(int64_t));
    return WasmResult::Ok;
  }
  case CelValue::Type::kUint64: {
    auto out = value.Uint64OrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(uint64_t));
    return WasmResult::Ok;
  }
  case CelValue::Type::kDouble: {
    auto out = value.DoubleOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(double));
    return WasmResult::Ok;
  }
  case CelValue::Type::kBool: {
    auto out = value.BoolOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(bool));
    return WasmResult::Ok;
  }
  case CelValue::Type::kDuration: {
    // NOTE(review): copies the in-memory absl::Duration representation; this is
    // not a portable wire format — confirm host and guest agree on it.
    auto out = value.DurationOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Duration));
    return WasmResult::Ok;
  }
  case CelValue::Type::kTimestamp: {
    auto out = value.TimestampOrDie();
    result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Time));
    return WasmResult::Ok;
  }
  case CelValue::Type::kMap: {
    ProtobufWkt::Value out;
    if (!exportValue(value, &out)) {
      return WasmResult::SerializationFailure;
    }
    if (!out.struct_value().SerializeToString(result)) {
      return WasmResult::SerializationFailure;
    }
    return WasmResult::Ok;
  }
  case CelValue::Type::kList: {
    ProtobufWkt::Value out;
    if (!exportValue(value, &out)) {
      return WasmResult::SerializationFailure;
    }
    if (!out.list_value().SerializeToString(result)) {
      return WasmResult::SerializationFailure;
    }
    return WasmResult::Ok;
  }
  default:
    return WasmResult::SerializationFailure;
  }
  return WasmResult::SerializationFailure;
}

// An expression wrapper for the WASM state
// Exposes StreamInfo::FilterState to CEL as a read-only map keyed by state name.
class WasmStateWrapper : public google::api::expr::runtime::CelMap {
public:
  WasmStateWrapper(const StreamInfo::FilterState& filter_state) : filter_state_(filter_state) {}
  absl::optional<google::api::expr::runtime::CelValue>
  operator[](google::api::expr::runtime::CelValue key) const override {
    if (!key.IsString()) {
      return {};
    }
    auto value = key.StringOrDie().value();
    try {
      const WasmState& result = filter_state_.getDataReadOnly<WasmState>(value);
      return google::api::expr::runtime::CelValue::CreateBytes(&result.value());
    } catch (const EnvoyException& e) {
      // Missing (or differently-typed) state is reported as "not found".
      return {};
    }
  }
  int size() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }
  bool empty() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }
  const google::api::expr::runtime::CelList* ListKeys() const override {
    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
  }

private:
  const StreamInfo::FilterState& filter_state_;
};

// Evaluates a NUL-separated attribute path (e.g. "request\0...\0") against the
// stream's CEL context and serializes the resulting value into |result|.
WasmResult Context::getProperty(absl::string_view path, std::string* result) {
  using google::api::expr::runtime::CelValue;
  using google::api::expr::runtime::FieldBackedListImpl;
  using google::api::expr::runtime::FieldBackedMapImpl;

  bool first = true;
  CelValue value;
  Protobuf::Arena arena;
  const StreamInfo::StreamInfo* info = getConstRequestStreamInfo();
  // Prefer the live maps; fall back to the access-log copies after stream end.
  const auto request_headers = request_headers_ ? request_headers_ : access_log_request_headers_;
  const auto response_headers =
      response_headers_ ? response_headers_ : access_log_response_headers_;
  const auto response_trailers = response_trailers_ ?
response_trailers_ : access_log_response_trailers_; size_t start = 0; while (true) { if (start >= path.size()) { break; } size_t end = path.find('\0', start); if (end == absl::string_view::npos) { // this should not happen unless the input string is not null-terminated in the view return WasmResult::ParseFailure; } auto part = path.substr(start, end - start); start = end + 1; // top-level ident if (first) { first = false; if (part == "metadata") { value = CelValue::CreateMessage(&info->dynamicMetadata(), &arena); } else if (part == "filter_state") { value = CelValue::CreateMap( Protobuf::Arena::Create<WasmStateWrapper>(&arena, info->filterState())); } else if (part == "request") { value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::RequestWrapper>( &arena, request_headers, *info)); } else if (part == "response") { value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::ResponseWrapper>( &arena, response_headers, response_trailers, *info)); } else if (part == "connection") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::ConnectionWrapper>(&arena, *info)); } else if (part == "upstream") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::UpstreamWrapper>(&arena, *info)); } else if (part == "node") { value = CelValue::CreateMessage(&plugin_->local_info_.node(), &arena); } else if (part == "source") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, false)); } else if (part == "destination") { value = CelValue::CreateMap( Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, true)); } else if (part == "request_protocol") { // TODO(kyessenov) move this upstream to CEL context if (info->protocol().has_value()) { value = CelValue::CreateString(&Http::Utility::getProtocolString(info->protocol().value())); } else { return WasmResult::NotFound; } // Reflective accessors } else if (part == 
"listener_direction") { value = CelValue::CreateInt64(plugin_->direction_); } else if (part == "listener_metadata") { value = CelValue::CreateMessage(plugin_->listener_metadata_, &arena); } else if (part == "cluster_name" && info->upstreamHost() != nullptr) { value = CelValue::CreateString(&info->upstreamHost()->cluster().name()); } else if (part == "cluster_metadata" && info->upstreamHost() != nullptr) { value = CelValue::CreateMessage(&info->upstreamHost()->cluster().metadata(), &arena); } else if (part == "route_name") { value = CelValue::CreateString(&info->getRouteName()); } else if (part == "route_metadata" && info->routeEntry() != nullptr) { value = CelValue::CreateMessage(&info->routeEntry()->metadata(), &arena); } else { return WasmResult::NotFound; } continue; } if (value.IsMap()) { auto& map = *value.MapOrDie(); auto field = map[CelValue::CreateString(part)]; if (field.has_value()) { value = field.value(); } else { return {}; } } else if (value.IsMessage()) { auto msg = value.MessageOrDie(); if (msg == nullptr) { return {}; } const Protobuf::Descriptor* desc = msg->GetDescriptor(); const Protobuf::FieldDescriptor* field_desc = desc->FindFieldByName(std::string(part)); if (field_desc == nullptr) { return {}; } else if (field_desc->is_map()) { value = CelValue::CreateMap( Protobuf::Arena::Create<FieldBackedMapImpl>(&arena, msg, field_desc, &arena)); } else if (field_desc->is_repeated()) { value = CelValue::CreateList( Protobuf::Arena::Create<FieldBackedListImpl>(&arena, msg, field_desc, &arena)); } else { auto status = google::api::expr::runtime::CreateValueFromSingleField(msg, field_desc, &arena, &value); if (!status.ok()) { return {}; } } } else { return {}; } } return serializeValue(value, result); } // Shared Data WasmResult Context::getSharedData(absl::string_view key, std::pair<std::string, uint32_t>* data) { return global_shared_data.get(wasm_->vm_id(), key, data); } WasmResult Context::setSharedData(absl::string_view key, absl::string_view value, 
uint32_t cas) { return global_shared_data.set(wasm_->vm_id(), key, value, cas); } // Shared Queue uint32_t Context::registerSharedQueue(absl::string_view queue_name) { // Get the id of the root context if this is a stream context because onQueueReady is on the root. return global_shared_data.registerQueue( wasm_->vm_id(), queue_name, isRootContext() ? id_ : root_context_id_, wasm_->dispatcher_); } WasmResult Context::resolveSharedQueue(absl::string_view vm_id, absl::string_view queue_name, uint32_t* token_ptr) { uint32_t token = global_shared_data.resolveQueue(vm_id, queue_name); if (!token) { return WasmResult::NotFound; } *token_ptr = token; return WasmResult::Ok; } WasmResult Context::dequeueSharedQueue(uint32_t token, std::string* data) { return global_shared_data.dequeue(token, data); } WasmResult Context::enqueueSharedQueue(uint32_t token, absl::string_view value) { return global_shared_data.enqueue(token, value); } // Network bytes. WasmResult Context::getDownstreamDataBufferBytes(uint32_t start, uint32_t length, absl::string_view* data) { if (!network_downstream_data_buffer_) return WasmResult::NotFound; if (network_downstream_data_buffer_->length() < static_cast<uint64_t>(start + length)) return WasmResult::InvalidMemoryAccess; *data = absl::string_view( static_cast<char*>(network_downstream_data_buffer_->linearize(start + length)) + start, length); return WasmResult::Ok; } WasmResult Context::getUpstreamDataBufferBytes(uint32_t start, uint32_t length, absl::string_view* data) { if (!network_upstream_data_buffer_) return WasmResult::NotFound; if (network_upstream_data_buffer_->length() < static_cast<uint64_t>(start + length)) return WasmResult::InvalidMemoryAccess; *data = absl::string_view( static_cast<char*>(network_upstream_data_buffer_->linearize(start + length)) + start, length); return WasmResult::Ok; } // Header/Trailer/Metadata Maps. 
Http::HeaderMap* Context::getMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: return request_headers_; case HeaderMapType::RequestTrailers: return request_trailers_; case HeaderMapType::ResponseHeaders: return response_headers_; case HeaderMapType::ResponseTrailers: return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; default: return nullptr; } } const Http::HeaderMap* Context::getConstMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: if (access_log_request_headers_) { return access_log_request_headers_; } return request_headers_; case HeaderMapType::RequestTrailers: if (access_log_request_trailers_) { return access_log_request_trailers_; } return request_trailers_; case HeaderMapType::ResponseHeaders: if (access_log_response_headers_) { return access_log_response_headers_; } return response_headers_; case HeaderMapType::ResponseTrailers: if (access_log_response_trailers_) { return access_log_response_trailers_; } return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; case HeaderMapType::GrpcReceiveInitialMetadata: return grpc_receive_initial_metadata_.get(); case HeaderMapType::GrpcReceiveTrailingMetadata: return grpc_receive_trailing_metadata_.get(); } return nullptr; } void Context::addHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->addCopy(lower_key, std::string(value)); } absl::string_view Context::getHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getConstMap(type); if (!map) { return ""; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (!entry) { return ""; } return entry->value().getStringView(); } Pairs headerMapToPairs(const Http::HeaderMap* map) { if 
(!map) { return {}; } Pairs pairs; pairs.reserve(map->size()); map->iterate( [](const Http::HeaderEntry& header, void* pairs) -> Http::HeaderMap::Iterate { (static_cast<Pairs*>(pairs)) ->push_back( std::make_pair(header.key().getStringView(), header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &pairs); return pairs; } Pairs Context::getHeaderMapPairs(HeaderMapType type) { return headerMapToPairs(getConstMap(type)); } void Context::setHeaderMapPairs(HeaderMapType type, const Pairs& pairs) { auto map = getMap(type); if (!map) { return; } std::vector<std::string> keys; map->iterate( [](const Http::HeaderEntry& header, void* keys) -> Http::HeaderMap::Iterate { (static_cast<std::vector<std::string>*>(keys)) ->push_back(std::string(header.key().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &keys); for (auto& k : keys) { const Http::LowerCaseString lower_key(std::move(k)); map->remove(lower_key); } for (auto& p : pairs) { const Http::LowerCaseString lower_key(std::move(std::string(p.first))); map->addCopy(lower_key, std::move(std::string(p.second))); } } void Context::removeHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->remove(lower_key); } void Context::replaceHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (entry != nullptr) { entry->value(value.data(), value.size()); } else { map->addCopy(lower_key, std::string(value)); } } uint32_t Context::getHeaderMapSize(HeaderMapType type) { auto map = getMap(type); if (!map) { return 0; } return map->refreshByteSize(); } // Body Buffer absl::string_view Context::getRequestBodyBufferBytes(uint32_t start, uint32_t length) { if (!requestBodyBuffer_) { return ""; } if 
(requestBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(requestBodyBuffer_->linearize(start + length)) + start, length); } absl::string_view Context::getResponseBodyBufferBytes(uint32_t start, uint32_t length) { if (!responseBodyBuffer_) { return ""; } if (responseBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(responseBodyBuffer_->linearize(start + length)) + start, length); } // Async call via HTTP uint32_t Context::httpCall(absl::string_view cluster, const Pairs& request_headers, absl::string_view request_body, const Pairs& request_trailers, int timeout_milliseconds) { if (timeout_milliseconds < 0) { return 0; } auto cluster_string = std::string(cluster); if (clusterManager().get(cluster_string) == nullptr) { return 0; } Http::MessagePtr message(new Http::RequestMessageImpl(buildHeaderMapFromPairs(request_headers))); // Check that we were provided certain headers. if (message->headers().Path() == nullptr || message->headers().Method() == nullptr || message->headers().Host() == nullptr) { return 0; } if (!request_body.empty()) { message->body().reset(new Buffer::OwnedImpl(request_body.data(), request_body.size())); message->headers().insertContentLength().value(request_body.size()); } if (request_trailers.size() > 0) { message->trailers(buildHeaderMapFromPairs(request_trailers)); } absl::optional<std::chrono::milliseconds> timeout; if (timeout_milliseconds > 0) { timeout = std::chrono::milliseconds(timeout_milliseconds); } auto token = next_http_call_token_++; // Handle rollover. 
for (;;) {
    // Token 0 is reserved; also skip tokens still in use.
    if (token == 0) {
      token = next_http_call_token_++;
    }
    if (!http_request_.count(token)) {
      break;
    }
    token = next_http_call_token_++;
  }
  auto& handler = http_request_[token];

  // set default hash policy to be based on :authority to enable consistent hash
  Http::AsyncClient::RequestOptions options;
  options.setTimeout(timeout);
  Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy;
  hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get());
  options.setHashPolicy(hash_policy);

  auto http_request = clusterManager()
                          .httpAsyncClientForCluster(cluster_string)
                          .send(std::move(message), handler, options);
  if (!http_request) {
    http_request_.erase(token);
    return 0;
  }
  handler.context = this;
  handler.token = token;
  handler.request = http_request;
  return token;
}

// Issues a unary gRPC call; returns a nonzero token (parity-disambiguated from
// stream tokens), or 0 on failure.
uint32_t Context::grpcCall(const envoy::api::v2::core::GrpcService& grpc_service,
                           absl::string_view service_name, absl::string_view method_name,
                           absl::string_view request,
                           const absl::optional<std::chrono::milliseconds>& timeout) {
  auto token = next_grpc_token_++;
  if (IsGrpcStreamToken(token)) {
    token = next_grpc_token_++;
  }
  // Handle rollover.
  for (;;) {
    if (token == 0) {
      token = next_grpc_token_ += 2;
    }
    if (!grpc_call_request_.count(token)) {
      break;
    }
    token = next_grpc_token_ += 2;
  }
  auto& handler = grpc_call_request_[token];
  handler.context = this;
  handler.token = token;
  auto grpc_client =
      clusterManager()
          .grpcAsyncClientManager()
          .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */)
          ->create();

  // set default hash policy to be based on :authority to enable consistent hash
  Http::AsyncClient::RequestOptions options;
  options.setTimeout(timeout);
  Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy;
  hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get());
  options.setHashPolicy(hash_policy);

  // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call
  // returns. Consequently the grpc_request is not available. Attempting to close or reset from that
  // callback will fail.
  auto grpc_request =
      grpc_client->sendRaw(service_name, method_name, std::make_unique<Buffer::OwnedImpl>(request),
                           handler, Tracing::NullSpan::instance(), options);
  if (!grpc_request) {
    grpc_call_request_.erase(token);
    return 0;
  }
  handler.client = std::move(grpc_client);
  handler.request = grpc_request;
  return token;
}

// Opens a bidirectional gRPC stream; returns a nonzero token, or 0 on failure.
uint32_t Context::grpcStream(const envoy::api::v2::core::GrpcService& grpc_service,
                             absl::string_view service_name, absl::string_view method_name) {
  auto token = next_grpc_token_++;
  if (IsGrpcCallToken(token)) {
    token = next_grpc_token_++;
  }
  // Handle rollover.
  for (;;) {
    if (token == 0) {
      token = next_grpc_token_ += 2;
    }
    if (!grpc_stream_.count(token)) {
      break;
    }
    token = next_grpc_token_ += 2;
  }
  auto& handler = grpc_stream_[token];
  handler.context = this;
  handler.token = token;
  auto grpc_client =
      clusterManager()
          .grpcAsyncClientManager()
          .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */)
          ->create();

  // set default hash policy to be based on :authority to enable consistent hash
  Http::AsyncClient::StreamOptions options;
  Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy;
  hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get());
  options.setHashPolicy(hash_policy);

  // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call
  // returns. Consequently the grpc_stream is not available. Attempting to close or reset from that
  // callback will fail.
  auto grpc_stream = grpc_client->startRaw(service_name, method_name, handler, options);
  if (!grpc_stream) {
    grpc_stream_.erase(token);
    return 0;
  }
  handler.client = std::move(grpc_client);
  handler.stream = grpc_stream;
  return token;
}

// Placeholder: inline HTTP responses are not implemented; arguments are ignored.
void Context::httpRespond(const Pairs& response_headers, absl::string_view body,
                          const Pairs& response_trailers) {
  (void)response_headers;
  (void)body;
  (void)response_trailers;
}

// StreamInfo
const StreamInfo::StreamInfo* Context::getConstRequestStreamInfo() const {
  if (encoder_callbacks_) {
    return &encoder_callbacks_->streamInfo();
  } else if (decoder_callbacks_) {
    return &decoder_callbacks_->streamInfo();
  } else if (access_log_stream_info_) {
    return access_log_stream_info_;
  }
  return nullptr;
}

StreamInfo::StreamInfo* Context::getRequestStreamInfo() const {
  if (encoder_callbacks_) {
    return &encoder_callbacks_->streamInfo();
  } else if (decoder_callbacks_) {
    return &decoder_callbacks_->streamInfo();
  }
  return nullptr;
}

WasmResult Context::setProperty(absl::string_view key, absl::string_view serialized_value) {
  auto* stream_info = getRequestStreamInfo();
  if (!stream_info) {
    return WasmResult::NotFound;
  }
  stream_info->filterState().setData(key, std::make_unique<WasmState>(serialized_value),
                                     StreamInfo::FilterState::StateType::Mutable);
  return WasmResult::Ok;
}

// Routes a guest log message to the Envoy logger at the matching level.
void Context::scriptLog(spdlog::level::level_enum level, absl::string_view message) {
  switch (level) {
  case spdlog::level::trace:
    ENVOY_LOG(trace, "wasm log{}: {}", log_prefix(), message);
    return;
  case spdlog::level::debug:
    ENVOY_LOG(debug, "wasm log{}: {}", log_prefix(), message);
    return;
  case spdlog::level::info:
    ENVOY_LOG(info, "wasm log{}: {}", log_prefix(), message);
    return;
  case spdlog::level::warn:
    ENVOY_LOG(warn, "wasm log{}: {}", log_prefix(), message);
    return;
  case spdlog::level::err:
    ENVOY_LOG(error, "wasm log{}: {}", log_prefix(), message);
    return;
  case spdlog::level::critical:
    ENVOY_LOG(critical, "wasm log{}: {}", log_prefix(), message);
    return;
  case spdlog::level::off:
    NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
  }
}

// Connection
// NOTE(review): dereferences decoder_callbacks_ and connection() unconditionally;
// confirm callers guarantee a downstream connection exists.
bool Context::isSsl() { return decoder_callbacks_->connection()->ssl() != nullptr; }

//
// Calls into the WASM code.
//
// Invokes the guest's on_start export (if present), passing the root id and the
// VM configuration copied into guest memory.
void Context::onStart(absl::string_view root_id, absl::string_view vm_configuration) {
  if (wasm_->onStart_) {
    auto root_id_addr = wasm_->copyString(root_id);
    auto config_addr = wasm_->copyString(vm_configuration);
    wasm_->onStart_(this, id_, root_id_addr, root_id.size(), config_addr, vm_configuration.size());
  }
}

// A missing export means "accept any configuration".
bool Context::validateConfiguration(absl::string_view configuration) {
  if (!wasm_->validateConfiguration_) {
    return true;
  }
  auto address = wasm_->copyString(configuration);
  return wasm_->validateConfiguration_(this, id_, address, configuration.size()).u64_ != 0;
}

bool Context::onConfigure(absl::string_view configuration) {
  if (!wasm_->onConfigure_) {
    return true;
  }
  auto address = wasm_->copyString(configuration);
  return wasm_->onConfigure_(this, id_, address, configuration.size()).u64_ != 0;
}

void Context::onCreate(uint32_t root_context_id) {
  if (wasm_->onCreate_) {
    wasm_->onCreate_(this, id_, root_context_id);
  }
}

Network::FilterStatus Context::onNetworkNewConnection() {
  onCreate(root_context_id_);
  if (!wasm_->onNewConnection_) {
    return Network::FilterStatus::Continue;
  }
  if (wasm_->onNewConnection_(this, id_).u64_ == 0) {
    return Network::FilterStatus::Continue;
  }
  return Network::FilterStatus::StopIteration;
}

Network::FilterStatus Context::onDownstreamData(int data_length, bool end_of_stream) {
  if (!wasm_->onDownstreamData_) {
    return Network::FilterStatus::Continue;
  }
  auto result = wasm_->onDownstreamData_(this, id_, static_cast<uint32_t>(data_length),
                                         static_cast<uint32_t>(end_of_stream));
  // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
  return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
}

Network::FilterStatus Context::onUpstreamData(int data_length, bool end_of_stream) {
  if (!wasm_->onUpstreamData_) {
    return Network::FilterStatus::Continue;
  }
  auto result = wasm_->onUpstreamData_(this, id_, static_cast<uint32_t>(data_length),
                                       static_cast<uint32_t>(end_of_stream));
  // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
  return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
}

void Context::onDownstreamConnectionClose(PeerType peer_type) {
  if (wasm_->onDownstreamConnectionClose_) {
    wasm_->onDownstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type));
  }
}

void Context::onUpstreamConnectionClose(PeerType peer_type) {
  if (wasm_->onUpstreamConnectionClose_) {
    wasm_->onUpstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type));
  }
}

Http::FilterHeadersStatus Context::onRequestHeaders() {
  onCreate(root_context_id_);
  in_vm_context_created_ = true;
  // Store the stream id so that we can use it in log().
  auto& stream_info = decoder_callbacks_->streamInfo();
  auto& metadata = (*stream_info.dynamicMetadata()
                         .mutable_filter_metadata())[HttpFilters::HttpFilterNames::get().Wasm];
  (*metadata.mutable_fields())[std::string("_stream_id_" + std::string(root_id()))]
      .set_number_value(id_);
  if (!wasm_->onRequestHeaders_) {
    return Http::FilterHeadersStatus::Continue;
  }
  if (wasm_->onRequestHeaders_(this, id_).u64_ == 0) {
    return Http::FilterHeadersStatus::Continue;
  }
  return Http::FilterHeadersStatus::StopIteration;
}

// Maps the guest's numeric return code onto Envoy's FilterDataStatus.
Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) {
  if (!wasm_->onRequestBody_) {
    return Http::FilterDataStatus::Continue;
  }
  switch (wasm_
              ->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
                               static_cast<uint32_t>(end_of_stream))
              .u64_) {
  case 0:
    return Http::FilterDataStatus::Continue;
  case 1:
    return Http::FilterDataStatus::StopIterationAndBuffer;
  case 2:
    return Http::FilterDataStatus::StopIterationAndWatermark;
  default:
    return Http::FilterDataStatus::StopIterationNoBuffer;
  }
}

Http::FilterTrailersStatus Context::onRequestTrailers() {
  if (!wasm_->onRequestTrailers_) {
    return Http::FilterTrailersStatus::Continue;
  }
  if (wasm_->onRequestTrailers_(this, id_).u64_ == 0) {
    return Http::FilterTrailersStatus::Continue;
  }
  return Http::FilterTrailersStatus::StopIteration;
}

Http::FilterMetadataStatus Context::onRequestMetadata() {
  if (!wasm_->onRequestMetadata_) {
    return Http::FilterMetadataStatus::Continue;
  }
  if (wasm_->onRequestMetadata_(this, id_).u64_ == 0) {
    return Http::FilterMetadataStatus::Continue;
  }
  return Http::FilterMetadataStatus::Continue; // This is currently the only return code.
}

Http::FilterHeadersStatus Context::onResponseHeaders() {
  if (!in_vm_context_created_) {
    // If the request is invalid then onRequestHeaders() will not be called and neither will
    // onCreate() then sendLocalReply be called which will call this function. In this case we
    // need to call onCreate() so that the Context inside the VM is created before the
    // onResponseHeaders() call.
    onCreate(root_context_id_);
    in_vm_context_created_ = true;
  }
  if (!wasm_->onResponseHeaders_) {
    return Http::FilterHeadersStatus::Continue;
  }
  if (wasm_->onResponseHeaders_(this, id_).u64_ == 0) {
    return Http::FilterHeadersStatus::Continue;
  }
  return Http::FilterHeadersStatus::StopIteration;
}

Http::FilterDataStatus Context::onResponseBody(int body_buffer_length, bool end_of_stream) {
  if (!wasm_->onResponseBody_) {
    return Http::FilterDataStatus::Continue;
  }
  switch (wasm_
              ->onResponseBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
                                static_cast<uint32_t>(end_of_stream))
              .u64_) {
  case 0:
    return Http::FilterDataStatus::Continue;
  case 1:
    return Http::FilterDataStatus::StopIterationAndBuffer;
  case 2:
    return Http::FilterDataStatus::StopIterationAndWatermark;
  default:
    return Http::FilterDataStatus::StopIterationNoBuffer;
  }
}

Http::FilterTrailersStatus Context::onResponseTrailers() {
  if (!wasm_->onResponseTrailers_) {
    return Http::FilterTrailersStatus::Continue;
  }
  if (wasm_->onResponseTrailers_(this, id_).u64_ == 0) {
    return Http::FilterTrailersStatus::Continue;
  }
  return Http::FilterTrailersStatus::StopIteration;
}

Http::FilterMetadataStatus Context::onResponseMetadata() {
  if (!wasm_->onResponseMetadata_) {
    return Http::FilterMetadataStatus::Continue;
  }
  if (wasm_->onResponseMetadata_(this, id_).u64_ == 0) {
    return Http::FilterMetadataStatus::Continue;
  }
  return Http::FilterMetadataStatus::Continue; // This is currently the only return code.
} void Context::onHttpCallResponse(uint32_t token, const Pairs& response_headers, absl::string_view response_body, const Pairs& response_trailers) { if (!wasm_->onHttpCallResponse_) { return; } uint64_t headers_ptr, headers_size, trailers_ptr, trailers_size; exportPairs(this, response_headers, &headers_ptr, &headers_size); exportPairs(this, response_trailers, &trailers_ptr, &trailers_size); auto body_ptr = wasm_->copyString(response_body); auto body_size = response_body.size(); wasm_->onHttpCallResponse_(this, id_, token, headers_ptr, headers_size, body_ptr, body_size, trailers_ptr, trailers_size); } void Context::onQueueReady(uint32_t token) { if (wasm_->onQueueReady_) { wasm_->onQueueReady_(this, id_, token); } } void Context::onGrpcCreateInitialMetadata(uint32_t token, Http::HeaderMap& metadata) { if (!wasm_->onGrpcCreateInitialMetadata_) { return; } grpc_create_initial_metadata_ = &metadata; wasm_->onGrpcCreateInitialMetadata_(this, id_, token); grpc_create_initial_metadata_ = nullptr; } void Context::onGrpcReceiveInitialMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) { if (!wasm_->onGrpcReceiveInitialMetadata_) { return; } grpc_receive_initial_metadata_ = std::move(metadata); wasm_->onGrpcReceiveInitialMetadata_(this, id_, token); grpc_receive_initial_metadata_ = nullptr; } void Context::onGrpcReceiveTrailingMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) { if (!wasm_->onGrpcReceiveTrailingMetadata_) { return; } grpc_receive_trailing_metadata_ = std::move(metadata); wasm_->onGrpcReceiveTrailingMetadata_(this, id_, token); grpc_receive_trailing_metadata_ = nullptr; } WasmResult Context::defineMetric(MetricType type, absl::string_view name, uint32_t* metric_id_ptr) { auto stat_name = wasm_->stat_name_set_->getDynamic(name); if (type == MetricType::Counter) { auto id = wasm_->nextCounterMetricId(); auto c = &wasm_->scope_->counterFromStatName(stat_name); wasm_->counters_.emplace(id, c); *metric_id_ptr = id; return WasmResult::Ok; } else if (type 
== MetricType::Gauge) { auto id = wasm_->nextGaugeMetricId(); auto g = &wasm_->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate); wasm_->gauges_.emplace(id, g); *metric_id_ptr = id; return WasmResult::Ok; } else if (type == MetricType::Histogram) { auto id = wasm_->nextHistogramMetricId(); auto h = &wasm_->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified); wasm_->histograms_.emplace(id, h); *metric_id_ptr = id; return WasmResult::Ok; } return WasmResult::BadArgument; } WasmResult Context::incrementMetric(uint32_t metric_id, int64_t offset) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { if (offset > 0) { it->second->add(offset); return WasmResult::Ok; } else { return WasmResult::BadArgument; } return WasmResult::NotFound; } } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { if (offset > 0) { it->second->add(offset); return WasmResult::Ok; } else { it->second->sub(-offset); return WasmResult::Ok; } } return WasmResult::NotFound; } return WasmResult::BadArgument; } WasmResult Context::recordMetric(uint32_t metric_id, uint64_t value) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { it->second->add(value); return WasmResult::Ok; } } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { it->second->set(value); return WasmResult::Ok; } } else if (type == MetricType::Histogram) { auto it = wasm_->histograms_.find(metric_id); if (it != wasm_->histograms_.end()) { it->second->recordValue(value); return WasmResult::Ok; } } return WasmResult::NotFound; } WasmResult Context::getMetric(uint32_t metric_id, uint64_t* 
result_uint64_ptr) { auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask); if (type == MetricType::Counter) { auto it = wasm_->counters_.find(metric_id); if (it != wasm_->counters_.end()) { *result_uint64_ptr = it->second->value(); return WasmResult::Ok; } return WasmResult::NotFound; } else if (type == MetricType::Gauge) { auto it = wasm_->gauges_.find(metric_id); if (it != wasm_->gauges_.end()) { *result_uint64_ptr = it->second->value(); return WasmResult::Ok; } return WasmResult::NotFound; } return WasmResult::BadArgument; } Wasm::Wasm(absl::string_view vm, absl::string_view vm_id, absl::string_view vm_configuration, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher) : vm_id_(std::string(vm_id)), wasm_vm_(Common::Wasm::createWasmVm(vm)), plugin_(plugin), scope_(scope), cluster_manager_(cluster_manager), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), vm_configuration_(vm_configuration), stat_name_set_(scope_->symbolTable().makeSet("Wasm").release()) {} std::string Plugin::makeLogPrefix() const { std::string prefix; if (!name_.empty()) { prefix = prefix + " " + name_; } if (!root_id_.empty()) { prefix = prefix + " " + std::string(root_id_); } if (vm_id_.empty()) { prefix = prefix + " " + std::string(vm_id_); } return prefix; } Context::~Context() { // Cancel any outstanding requests. for (auto& p : http_request_) { p.second.request->cancel(); } for (auto& p : grpc_call_request_) { p.second.request->cancel(); } for (auto& p : grpc_stream_) { p.second.stream->resetStream(); } // Do not remove vm or root contexts which have the same lifetime as wasm_. 
if (root_context_id_) { wasm_->contexts_.erase(id_); } } void Wasm::registerCallbacks() { #define _REGISTER(_fn) \ wasm_vm_->registerCallback( \ "env", #_fn, &_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(_fn##Handler), \ _fn##Handler>::convertFunctionWordToUint32) if (is_emscripten_) { _REGISTER(pthread_equal); } #undef _REGISTER #define _REGISTER_WASI(_fn) \ wasm_vm_->registerCallback( \ "wasi_unstable", #_fn, &wasi_unstable_##_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(wasi_unstable_##_fn##Handler), \ wasi_unstable_##_fn##Handler>::convertFunctionWordToUint32) if (is_emscripten_) { _REGISTER_WASI(fd_write); _REGISTER_WASI(fd_seek); _REGISTER_WASI(fd_close); _REGISTER_WASI(environ_get); _REGISTER_WASI(environ_sizes_get); _REGISTER_WASI(proc_exit); } #undef _REGISTER_WASI // Calls with the "proxy_" prefix. #define _REGISTER_PROXY(_fn) \ wasm_vm_->registerCallback( \ "env", "proxy_" #_fn, &_fn##Handler, \ &ConvertFunctionWordToUint32<decltype(_fn##Handler), \ _fn##Handler>::convertFunctionWordToUint32); _REGISTER_PROXY(log); _REGISTER_PROXY(setProperty); _REGISTER_PROXY(getProperty); _REGISTER_PROXY(continueRequest); _REGISTER_PROXY(continueResponse); _REGISTER_PROXY(sendLocalResponse); _REGISTER_PROXY(clearRouteCache); _REGISTER_PROXY(getSharedData); _REGISTER_PROXY(setSharedData); _REGISTER_PROXY(registerSharedQueue); _REGISTER_PROXY(resolveSharedQueue); _REGISTER_PROXY(dequeueSharedQueue); _REGISTER_PROXY(enqueueSharedQueue); _REGISTER_PROXY(getDownstreamDataBufferBytes); _REGISTER_PROXY(getUpstreamDataBufferBytes); _REGISTER_PROXY(getHeaderMapValue); _REGISTER_PROXY(addHeaderMapValue); _REGISTER_PROXY(replaceHeaderMapValue); _REGISTER_PROXY(removeHeaderMapValue); _REGISTER_PROXY(getHeaderMapPairs); _REGISTER_PROXY(setHeaderMapPairs); _REGISTER_PROXY(getHeaderMapSize); _REGISTER_PROXY(getRequestBodyBufferBytes); _REGISTER_PROXY(getResponseBodyBufferBytes); _REGISTER_PROXY(httpCall); _REGISTER_PROXY(grpcCall); _REGISTER_PROXY(grpcStream); 
_REGISTER_PROXY(grpcClose); _REGISTER_PROXY(grpcCancel); _REGISTER_PROXY(grpcSend); _REGISTER_PROXY(setTickPeriodMilliseconds); _REGISTER_PROXY(getCurrentTimeNanoseconds); _REGISTER_PROXY(defineMetric); _REGISTER_PROXY(incrementMetric); _REGISTER_PROXY(recordMetric); _REGISTER_PROXY(getMetric); _REGISTER_PROXY(setEffectiveContext); #undef _REGISTER_PROXY } void Wasm::getFunctions() { #define _GET(_fn) wasm_vm_->getFunction(#_fn, &_fn##_); _GET(_start); _GET(__wasm_call_ctors); _GET(malloc); _GET(free); #undef _GET #define _GET_PROXY(_fn) wasm_vm_->getFunction("proxy_" #_fn, &_fn##_); _GET_PROXY(validateConfiguration); _GET_PROXY(onStart); _GET_PROXY(onConfigure); _GET_PROXY(onTick); _GET_PROXY(onCreate); _GET_PROXY(onNewConnection); _GET_PROXY(onDownstreamData); _GET_PROXY(onUpstreamData); _GET_PROXY(onDownstreamConnectionClose); _GET_PROXY(onUpstreamConnectionClose); _GET_PROXY(onRequestHeaders); _GET_PROXY(onRequestBody); _GET_PROXY(onRequestTrailers); _GET_PROXY(onRequestMetadata); _GET_PROXY(onResponseHeaders); _GET_PROXY(onResponseBody); _GET_PROXY(onResponseTrailers); _GET_PROXY(onResponseMetadata); _GET_PROXY(onHttpCallResponse); _GET_PROXY(onGrpcReceive); _GET_PROXY(onGrpcClose); _GET_PROXY(onGrpcCreateInitialMetadata); _GET_PROXY(onGrpcReceiveInitialMetadata); _GET_PROXY(onGrpcReceiveTrailingMetadata); _GET_PROXY(onQueueReady); _GET_PROXY(onDone); _GET_PROXY(onLog); _GET_PROXY(onDelete); #undef _GET_PROXY if (!malloc_ || !free_) { throw WasmException("WASM missing malloc/free"); } } Wasm::Wasm(const Wasm& wasm, Event::Dispatcher& dispatcher) : std::enable_shared_from_this<Wasm>(wasm), vm_id_(wasm.vm_id_), plugin_(wasm.plugin_), scope_(wasm.scope_), cluster_manager_(wasm.cluster_manager_), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), stat_name_set_(wasm.stat_name_set_) { if (wasm.wasmVm()->cloneable()) { wasm_vm_ = wasm.wasmVm()->clone(); vm_context_ = std::make_shared<Context>(this); getFunctions(); } else { wasm_vm_ = 
Common::Wasm::createWasmVm(wasm.wasmVm()->runtime()); if (!initialize(wasm.code(), wasm.allow_precompiled())) { throw WasmException("Failed to initialize WASM code"); } } } bool Wasm::initialize(const std::string& code, bool allow_precompiled) { if (!wasm_vm_) { return false; } // If the configured_vm_id is empty, then hash the code to create a unique vm_id. if (vm_id_.empty()) { vm_id_ = base64Sha256(code); } auto ok = wasm_vm_->load(code, allow_precompiled); if (!ok) { return false; } auto metadata = wasm_vm_->getCustomSection("emscripten_metadata"); if (!metadata.empty()) { // See https://github.com/emscripten-core/emscripten/blob/incoming/tools/shared.py#L3059 is_emscripten_ = true; auto start = reinterpret_cast<const uint8_t*>(metadata.data()); auto end = reinterpret_cast<const uint8_t*>(metadata.data() + metadata.size()); start = decodeVarint(start, end, &emscripten_metadata_major_version_); start = decodeVarint(start, end, &emscripten_metadata_minor_version_); start = decodeVarint(start, end, &emscripten_abi_major_version_); start = decodeVarint(start, end, &emscripten_abi_minor_version_); uint32_t temp; if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 1) { // metadata 0.2 - added: wasm_backend. start = decodeVarint(start, end, &temp); } start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 0) { // metadata 0.1 - added: global_base, dynamic_base, dynamictop_ptr and tempdouble_ptr. start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); start = decodeVarint(start, end, &temp); decodeVarint(start, end, &temp); if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 2) { // metadata 0.3 - added: standalone_wasm. 
start = decodeVarint(start, end, &emscripten_standalone_wasm_); } } } registerCallbacks(); wasm_vm_->link(vm_id_); vm_context_ = std::make_shared<Context>(this); getFunctions(); startVm(vm_context_.get()); code_ = code; allow_precompiled_ = allow_precompiled; return true; } void Wasm::startVm(Context* root_context) { /* Call "_start" function, and fallback to "__wasm_call_ctors" if the former is not available. */ if (_start_) { _start_(root_context); } else if (__wasm_call_ctors_) { __wasm_call_ctors_(root_context); } } bool Wasm::configure(Context* root_context, absl::string_view configuration) { if (!onConfigure_) { return true; } auto address = copyString(configuration); return onConfigure_(root_context, root_context->id(), address, configuration.size()).u64_ != 0; } Context* Wasm::start() { auto root_id = plugin_->root_id_; auto it = root_contexts_.find(root_id); if (it != root_contexts_.end()) { it->second->onStart(root_id, vm_configuration()); return it->second.get(); } auto context = std::make_unique<Context>(this, root_id, plugin_); auto context_ptr = context.get(); root_contexts_[root_id] = std::move(context); context_ptr->onStart(root_id, vm_configuration()); return context_ptr; }; void Wasm::startForTesting(std::unique_ptr<Context> context) { auto context_ptr = context.get(); if (!context->wasm_) { // Initialization was delayed till the Wasm object was created. 
context->wasm_ = this; context->plugin_ = plugin_; context->id_ = allocContextId(); contexts_[context->id_] = context.get(); } root_contexts_[""] = std::move(context); context_ptr->onStart("", ""); } void Wasm::setTickPeriod(uint32_t context_id, std::chrono::milliseconds new_tick_period) { auto& tick_period = tick_period_[context_id]; auto& timer = timer_[context_id]; bool was_running = timer && tick_period.count() > 0; tick_period = new_tick_period; if (tick_period.count() > 0 && !was_running) { timer = dispatcher_.createTimer([weak = std::weak_ptr<Wasm>(shared_from_this()), context_id]() { auto shared = weak.lock(); if (shared) { shared->tickHandler(context_id); } }); timer->enableTimer(tick_period); } } void Wasm::tickHandler(uint32_t root_context_id) { auto& tick_period = tick_period_[root_context_id]; auto& timer = timer_[root_context_id]; if (onTick_) { onTick_(getContext(root_context_id), root_context_id); if (timer && tick_period.count() > 0) { timer->enableTimer(tick_period); } } } uint32_t Wasm::allocContextId() { while (true) { auto id = next_context_id_++; // Prevent reuse. 
if (contexts_.find(id) == contexts_.end()) { return id; } } } void Wasm::queueReady(uint32_t root_context_id, uint32_t token) { auto it = contexts_.find(root_context_id); if (it == contexts_.end() || !it->second->isRootContext()) { return; } it->second->onQueueReady(token); } Network::FilterStatus Context::onNewConnection() { return onNetworkNewConnection(); }; Network::FilterStatus Context::onData(Buffer::Instance& data, bool end_stream) { network_downstream_data_buffer_ = &data; auto result = onDownstreamData(data.length(), end_stream); network_downstream_data_buffer_ = nullptr; return result; } Network::FilterStatus Context::onWrite(Buffer::Instance& data, bool end_stream) { network_upstream_data_buffer_ = &data; auto result = onUpstreamData(data.length(), end_stream); network_upstream_data_buffer_ = nullptr; if (end_stream) { // This is called when seeing end_stream=true and not on an upstream connection event, // because registering for latter requires replicating the whole TCP proxy extension. onUpstreamConnectionClose(PeerType::Unknown); } return result; } void Context::onEvent(Network::ConnectionEvent event) { switch (event) { case Network::ConnectionEvent::LocalClose: onDownstreamConnectionClose(PeerType::Local); break; case Network::ConnectionEvent::RemoteClose: onDownstreamConnectionClose(PeerType::Remote); break; default: break; } } void Context::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { network_read_filter_callbacks_ = &callbacks; network_read_filter_callbacks_->connection().addConnectionCallbacks(*this); } void Context::initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) { network_write_filter_callbacks_ = &callbacks; } void Wasm::log(absl::string_view root_id, const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { // Check dynamic metadata for the id_ of the stream for this root_id. 
Context* context = nullptr; auto metadata_it = stream_info.dynamicMetadata().filter_metadata().find( HttpFilters::HttpFilterNames::get().Wasm); if (metadata_it != stream_info.dynamicMetadata().filter_metadata().end()) { auto find_id = metadata_it->second.fields().find(std::string("_stream_id_" + std::string(root_id))); if (find_id != metadata_it->second.fields().end()) { context = getContext(static_cast<uint32_t>(find_id->second.number_value())); } } if (!context) { context = getRootContext(root_id); } context->log(request_headers, response_headers, response_trailers, stream_info); } void Context::log(const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { access_log_request_headers_ = request_headers; // ? request_trailers ? access_log_response_headers_ = response_headers; access_log_response_trailers_ = response_trailers; access_log_stream_info_ = &stream_info; onLog(); access_log_request_headers_ = nullptr; // ? request_trailers ? 
access_log_response_headers_ = nullptr; access_log_response_trailers_ = nullptr; access_log_stream_info_ = nullptr; onDelete(); } void Context::onDestroy() { if (destroyed_) { return; } destroyed_ = true; onDone(); } void Context::onDone() { if (wasm_->onDone_) { wasm_->onDone_(this, id_); } } void Context::onLog() { if (wasm_->onLog_) { wasm_->onLog_(this, id_); } } void Context::onDelete() { if (wasm_->onDelete_) { wasm_->onDelete_(this, id_); } } Http::FilterHeadersStatus Context::decodeHeaders(Http::HeaderMap& headers, bool end_stream) { request_headers_ = &headers; request_end_of_stream_ = end_stream; auto result = onRequestHeaders(); request_headers_ = nullptr; return result; } Http::FilterDataStatus Context::decodeData(Buffer::Instance& data, bool end_stream) { requestBodyBuffer_ = &data; auto result = onRequestBody(data.length(), end_stream); requestBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::decodeTrailers(Http::HeaderMap& trailers) { request_trailers_ = &trailers; auto result = onRequestTrailers(); request_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::decodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onRequestMetadata(); response_metadata_ = nullptr; return result; } void Context::setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) { decoder_callbacks_ = &callbacks; } Http::FilterHeadersStatus Context::encode100ContinueHeaders(Http::HeaderMap&) { return Http::FilterHeadersStatus::Continue; } Http::FilterHeadersStatus Context::encodeHeaders(Http::HeaderMap& headers, bool end_stream) { response_headers_ = &headers; response_end_of_stream_ = end_stream; auto result = onResponseHeaders(); response_headers_ = nullptr; return result; } Http::FilterDataStatus Context::encodeData(Buffer::Instance& data, bool end_stream) { responseBodyBuffer_ = &data; auto result = onResponseBody(data.length(), end_stream); 
responseBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::encodeTrailers(Http::HeaderMap& trailers) { response_trailers_ = &trailers; auto result = onResponseTrailers(); response_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::encodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onResponseMetadata(); response_metadata_ = nullptr; return result; } // Http::FilterMetadataStatus::Continue; void Context::setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) { encoder_callbacks_ = &callbacks; } void Context::onHttpCallSuccess(uint32_t token, Envoy::Http::MessagePtr& response) { auto body = absl::string_view(static_cast<char*>(response->body()->linearize(response->body()->length())), response->body()->length()); onHttpCallResponse(token, headerMapToPairs(&response->headers()), body, headerMapToPairs(response->trailers())); http_request_.erase(token); } void Context::onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason /* reason */) { onHttpCallResponse(token, {}, "", {}); http_request_.erase(token); } void AsyncClientHandler::onSuccess(Envoy::Http::MessagePtr&& response) { context->onHttpCallSuccess(token, response); } void AsyncClientHandler::onFailure(Http::AsyncClient::FailureReason reason) { context->onHttpCallFailure(token, reason); } void GrpcCallClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onReceiveInitialMetadata(Http::HeaderMapPtr&& metadata) { context->onGrpcReceiveInitialMetadata(token, std::move(metadata)); } void GrpcStreamClientHandler::onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) { context->onGrpcReceiveTrailingMetadata(token, std::move(metadata)); } 
void Context::onGrpcReceive(uint32_t token, Buffer::InstancePtr response) { if (wasm_->onGrpcReceive_) { auto response_size = response->length(); auto response_ptr = wasm_->copyBuffer(*response); wasm_->onGrpcReceive_(this, id_, token, response_ptr, response_size); } if (IsGrpcCallToken(token)) { grpc_call_request_.erase(token); } } void Context::onGrpcClose(uint32_t token, const Grpc::Status::GrpcStatus& status, const absl::string_view message) { if (wasm_->onGrpcClose_) { auto message_ptr = wasm_->copyString(message); wasm_->onGrpcClose_(this, id_, token, static_cast<uint64_t>(status), message_ptr, message.size()); } if (IsGrpcCallToken(token)) { grpc_call_request_.erase(token); } else { grpc_stream_.erase(token); } } WasmResult Context::grpcSend(uint32_t token, absl::string_view message, bool end_stream) { if (IsGrpcCallToken(token)) { return WasmResult::BadArgument; } auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->sendMessageRaw( Buffer::InstancePtr(new Buffer::OwnedImpl(message.data(), message.size())), end_stream); } return WasmResult::Ok; } WasmResult Context::grpcClose(uint32_t token) { if (IsGrpcCallToken(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return WasmResult::NotFound; } if (it != grpc_call_request_.end() && it->second.request) { it->second.request->cancel(); } grpc_call_request_.erase(token); } else { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->closeStream(); } grpc_stream_.erase(token); } return WasmResult::Ok; } WasmResult Context::grpcCancel(uint32_t token) { if (IsGrpcCallToken(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return WasmResult::NotFound; } if (it != grpc_call_request_.end() && 
it->second.request) { it->second.request->cancel(); } grpc_call_request_.erase(token); } else { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->resetStream(); } grpc_stream_.erase(token); } return WasmResult::Ok; } void GrpcCallClientHandler::onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span&) { context->onGrpcReceive(token, std::move(response)); } void GrpcCallClientHandler::onFailure(Grpc::Status::GrpcStatus status, const std::string& message, Tracing::Span&) { context->onGrpcClose(token, status, message); } bool GrpcStreamClientHandler::onReceiveMessageRaw(Buffer::InstancePtr&& response) { context->onGrpcReceive(token, std::move(response)); return true; } void GrpcStreamClientHandler::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { context->onGrpcClose(token, status, message); } static std::shared_ptr<Wasm> createWasmInternal(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { auto wasm = std::make_shared<Wasm>(vm_config.runtime(), vm_config.vm_id(), vm_config.configuration(), plugin, scope, cluster_manager, dispatcher); const auto& code = Config::DataSource::read(vm_config.code(), true, api); const auto& path = Config::DataSource::getPath(vm_config.code()) .value_or(code.empty() ? 
EMPTY_STRING : INLINE_STRING); if (code.empty()) { throw WasmException(fmt::format("Failed to load WASM code from {}", path)); } if (!wasm->initialize(code, vm_config.allow_precompiled())) { throw WasmException(fmt::format("Failed to initialize WASM code from {}", path)); } if (!root_context_for_testing) { wasm->start(); } else { wasm->startForTesting(std::move(root_context_for_testing)); } return wasm; } std::shared_ptr<Wasm> createWasm(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, nullptr /* root_context_for_testing */); } // namespace Wasm std::shared_ptr<Wasm> createWasmForTesting(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, std::move(root_context_for_testing)); } std::shared_ptr<Wasm> createThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto wasm = std::make_shared<Wasm>(base_wasm, dispatcher); Context* root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } if (!wasm->vm_id().empty()) { local_wasms[wasm->vm_id()] = wasm; } return wasm; } std::shared_ptr<Wasm> getThreadLocalWasmPtr(absl::string_view vm_id) { auto it = local_wasms.find(vm_id); if (it == local_wasms.end()) { return nullptr; } auto wasm = it->second.lock(); if (!wasm) { local_wasms.erase(vm_id); } return wasm; } std::shared_ptr<Wasm> getOrCreateThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto 
wasm = getThreadLocalWasmPtr(base_wasm.vm_id()); if (wasm) { auto root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } return wasm; } return createThreadLocalWasm(base_wasm, configuration, dispatcher); } } // namespace Wasm } // namespace Common } // namespace Extensions } // namespace Envoy
#include "extensions/common/wasm/wasm.h" #include <stdio.h> #include <limits> #include <memory> #include <string> #include "envoy/common/exception.h" #include "envoy/config/wasm/v2/wasm.pb.validate.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/local_info/local_info.h" #include "envoy/server/wasm.h" #include "envoy/thread_local/thread_local.h" #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" #include "common/common/base64.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/logger.h" #include "common/config/datasource.h" #include "common/http/header_map_impl.h" #include "common/http/message_impl.h" #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/common/wasm/wasm_state.h" #include "extensions/common/wasm/well_known_names.h" #include "extensions/filters/common/expr/context.h" #include "absl/base/casts.h" #include "absl/container/flat_hash_map.h" #include "absl/container/node_hash_map.h" #include "absl/synchronization/mutex.h" #include "eval/eval/field_access.h" #include "eval/eval/field_backed_list_impl.h" #include "eval/eval/field_backed_map_impl.h" #include "eval/public/cel_value.h" #include "openssl/bytestring.h" #include "openssl/hmac.h" #include "openssl/sha.h" namespace Envoy { namespace Extensions { namespace Common { namespace Wasm { // Any currently executing Wasm call context. #define WASM_CONTEXT(_c) \ (ContextOrEffectiveContext(static_cast<Context*>((void)_c, current_context_))) // The id of the context which should be used for calls out of the VM in place of current_context_ // above. namespace { // TODO: move to utils during upstreaming. 
std::string base64Sha256(absl::string_view data) { std::vector<uint8_t> digest(SHA256_DIGEST_LENGTH); EVP_MD_CTX* ctx(EVP_MD_CTX_new()); auto rc = EVP_DigestInit(ctx, EVP_sha256()); RELEASE_ASSERT(rc == 1, "Failed to init digest context"); rc = EVP_DigestUpdate(ctx, data.data(), data.size()); RELEASE_ASSERT(rc == 1, "Failed to update digest"); rc = EVP_DigestFinal(ctx, digest.data(), nullptr); RELEASE_ASSERT(rc == 1, "Failed to finalize digest"); EVP_MD_CTX_free(ctx); return Base64::encode(reinterpret_cast<const char*>(&digest[0]), digest.size()); } inline Word wasmResultToWord(WasmResult r) { return Word(static_cast<uint64_t>(r)); } inline uint32_t convertWordToUint32(Word w) { return static_cast<uint32_t>(w.u64_); } // Convert a function of the form Word(Word...) to one of the form uint32_t(uint32_t...). template <typename F, F* fn> struct ConvertFunctionWordToUint32 { static void convertFunctionWordToUint32() {} }; template <typename R, typename... Args, auto (*F)(Args...)->R> struct ConvertFunctionWordToUint32<R(Args...), F> { static auto convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... args) { return convertWordToUint32(F(std::forward<Args>(args)...)); } }; template <typename... Args, auto (*F)(Args...)->void> struct ConvertFunctionWordToUint32<void(Args...), F> { static void convertFunctionWordToUint32(typename ConvertWordTypeToUint32<Args>::type... 
args) { F(std::forward<Args>(args)...); } }; class SharedData { public: WasmResult get(absl::string_view vm_id, const absl::string_view key, std::pair<std::string, uint32_t>* result) { absl::ReaderMutexLock l(&mutex); auto map = data.find(vm_id); if (map == data.end()) { return WasmResult::NotFound; } auto it = map->second.find(key); if (it != map->second.end()) { *result = it->second; return WasmResult::Ok; } return WasmResult::NotFound; } WasmResult set(absl::string_view vm_id, absl::string_view key, absl::string_view value, uint32_t cas) { absl::WriterMutexLock l(&mutex); absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>* map; auto map_it = data.find(vm_id); if (map_it == data.end()) { map = &data[vm_id]; } else { map = &map_it->second; } auto it = map->find(key); if (it != map->end()) { if (cas && cas != it->second.second) { return WasmResult::CasMismatch; } it->second = std::make_pair(std::string(value), nextCas()); } else { map->emplace(key, std::make_pair(std::string(value), nextCas())); } return WasmResult::Ok; } uint32_t registerQueue(absl::string_view vm_id, absl::string_view queue_name, uint32_t context_id, Event::Dispatcher& dispatcher) { absl::WriterMutexLock l(&mutex); auto key = std::make_pair(std::string(vm_id), std::string(queue_name)); auto it = queue_tokens.insert(std::make_pair(key, static_cast<uint32_t>(0))); if (it.second) { it.first->second = nextQueueToken(); queue_token_set.insert(it.first->second); } uint32_t token = it.first->second; auto& q = queues[token]; q.vm_id = std::string(vm_id); q.context_id = context_id; q.dispatcher = &dispatcher; // Preserve any existing data. return token; } uint32_t resolveQueue(absl::string_view vm_id, absl::string_view queue_name) { absl::WriterMutexLock l(&mutex); auto key = std::make_pair(std::string(vm_id), std::string(queue_name)); auto it = queue_tokens.find(key); if (it != queue_tokens.end()) { return it->second; } return 0; // N.B. zero indicates that the queue was not found. 
} WasmResult dequeue(uint32_t token, std::string* data) { absl::ReaderMutexLock l(&mutex); auto it = queues.find(token); if (it == queues.end()) { return WasmResult::NotFound; } if (it->second.queue.empty()) { return WasmResult::Empty; } *data = it->second.queue.front(); it->second.queue.pop_front(); return WasmResult::Ok; } WasmResult enqueue(uint32_t token, absl::string_view value) { absl::WriterMutexLock l(&mutex); auto it = queues.find(token); if (it == queues.end()) { return WasmResult::NotFound; } it->second.queue.push_back(std::string(value)); auto vm_id = it->second.vm_id; auto context_id = it->second.context_id; it->second.dispatcher->post([vm_id, context_id, token] { auto wasm = getThreadLocalWasmPtr(vm_id); if (wasm) { wasm->queueReady(context_id, token); } }); return WasmResult::Ok; } uint32_t nextCas() { auto result = cas; cas++; if (!cas) { // 0 is not a valid CAS value. cas++; } return result; } private: uint32_t nextQueueToken() { while (true) { uint32_t token = next_queue_token++; if (token == 0) { continue; // 0 is an illegal token. } if (queue_token_set.find(token) == queue_token_set.end()) { return token; } } } struct Queue { std::string vm_id; uint32_t context_id; Event::Dispatcher* dispatcher; std::deque<std::string> queue; }; absl::Mutex mutex; uint32_t cas = 1; uint32_t next_queue_token = 1; absl::node_hash_map<std::string, absl::flat_hash_map<std::string, std::pair<std::string, uint32_t>>> data; absl::node_hash_map<uint32_t, Queue> queues; struct pair_hash { template <class T1, class T2> std::size_t operator()(const std::pair<T1, T2>& pair) const { return std::hash<T1>()(pair.first) ^ std::hash<T2>()(pair.second); } }; absl::flat_hash_map<std::pair<std::string, std::string>, uint32_t, pair_hash> queue_tokens; absl::flat_hash_set<uint32_t> queue_token_set; }; SharedData global_shared_data; // Map from Wasm ID to the local Wasm instance. 
thread_local absl::flat_hash_map<std::string, std::weak_ptr<Wasm>> local_wasms; const std::string INLINE_STRING = "<inline>"; template <typename Pairs> size_t pairsSize(const Pairs& result) { size_t size = 4; // number of headers for (auto& p : result) { size += 8; // size of key, size of value size += p.first.size() + 1; // null terminated key size += p.second.size() + 1; // null terminated value } return size; } template <typename Pairs> void marshalPairs(const Pairs& result, char* buffer) { char* b = buffer; *reinterpret_cast<uint32_t*>(b) = result.size(); b += sizeof(uint32_t); for (auto& p : result) { *reinterpret_cast<uint32_t*>(b) = p.first.size(); b += sizeof(uint32_t); *reinterpret_cast<uint32_t*>(b) = p.second.size(); b += sizeof(uint32_t); } for (auto& p : result) { memcpy(b, p.first.data(), p.first.size()); b += p.first.size(); *b++ = 0; memcpy(b, p.second.data(), p.second.size()); b += p.second.size(); *b++ = 0; } } Pairs toPairs(absl::string_view buffer) { Pairs result; const char* b = buffer.data(); if (buffer.size() < sizeof(uint32_t)) { return {}; } auto size = *reinterpret_cast<const uint32_t*>(b); b += sizeof(uint32_t); if (sizeof(uint32_t) + size * 2 * sizeof(uint32_t) > buffer.size()) { return {}; } result.resize(size); for (uint32_t i = 0; i < size; i++) { result[i].first = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); result[i].second = absl::string_view(nullptr, *reinterpret_cast<const uint32_t*>(b)); b += sizeof(uint32_t); } for (auto& p : result) { p.first = absl::string_view(b, p.first.size()); b += p.first.size() + 1; p.second = absl::string_view(b, p.second.size()); b += p.second.size() + 1; } return result; } template <typename Pairs> bool getPairs(Context* context, const Pairs& result, uint64_t ptr_ptr, uint64_t size_ptr) { if (result.empty()) { return context->wasm()->copyToPointerSize("", ptr_ptr, size_ptr); } uint64_t size = pairsSize(result); uint64_t ptr; char* buffer = 
static_cast<char*>(context->wasm()->allocMemory(size, &ptr)); marshalPairs(result, buffer); if (!context->wasmVm()->setWord(ptr_ptr, Word(ptr))) { return false; } if (!context->wasmVm()->setWord(size_ptr, Word(size))) { return false; } return true; } void exportPairs(Context* context, const Pairs& pairs, uint64_t* ptr_ptr, uint64_t* size_ptr) { if (pairs.empty()) { *ptr_ptr = 0; *size_ptr = 0; return; } uint64_t size = pairsSize(pairs); char* buffer = static_cast<char*>(context->wasm()->allocMemory(size, ptr_ptr)); marshalPairs(pairs, buffer); *size_ptr = size; } Http::HeaderMapPtr buildHeaderMapFromPairs(const Pairs& pairs) { auto map = std::make_unique<Http::HeaderMapImpl>(); for (auto& p : pairs) { // Note: because of the lack of a string_view interface for addCopy and // the lack of an interface to add an entry with an empty value and return // the entry, there is no efficient way to prevent either a double copy // of the valueor a double lookup of the entry. map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); } return map; } const uint8_t* decodeVarint(const uint8_t* pos, const uint8_t* end, uint32_t* out) { uint32_t ret = 0; int shift = 0; while (pos < end && (*pos & 0x80)) { ret |= (*pos & 0x7f) << shift; shift += 7; pos++; } if (pos < end) { ret |= *pos << shift; pos++; } *out = ret; return pos; } Context* ContextOrEffectiveContext(Context* context) { if (effective_context_id_ == 0) { return context; } auto effective_context = context->wasm()->getContext(effective_context_id_); if (effective_context) { return effective_context; } // The effective_context_id_ no longer exists, revert to the true context. return context; } } // namespace // Test support. 
uint32_t resolveQueueForTest(absl::string_view vm_id, absl::string_view queue_name) { return global_shared_data.resolveQueue(vm_id, queue_name); } // // HTTP Handlers // Word setPropertyHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->setProperty(key.value(), value.value())); } // Generic selector Word getPropertyHandler(void* raw_context, Word path_ptr, Word path_size, Word value_ptr_ptr, Word value_size_ptr) { auto context = WASM_CONTEXT(raw_context); auto path = context->wasmVm()->getMemory(path_ptr.u64_, path_size.u64_); if (!path.has_value()) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } std::string value; auto result = context->getProperty(path.value(), &value); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(value, value_ptr_ptr.u64_, value_size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } // Continue/Reply/Route Word continueRequestHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->continueRequest(); return wasmResultToWord(WasmResult::Ok); } Word continueResponseHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->continueResponse(); return wasmResultToWord(WasmResult::Ok); } Word sendLocalResponseHandler(void* raw_context, Word response_code, Word response_code_details_ptr, Word response_code_details_size, Word body_ptr, Word body_size, Word additional_response_header_pairs_ptr, Word additional_response_header_pairs_size, Word grpc_code) { auto context = WASM_CONTEXT(raw_context); auto details = 
context->wasmVm()->getMemory(response_code_details_ptr.u64_, response_code_details_size.u64_); auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_); auto additional_response_header_pairs = context->wasmVm()->getMemory( additional_response_header_pairs_ptr.u64_, additional_response_header_pairs_size.u64_); if (!details || !body || !additional_response_header_pairs) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto additional_headers = toPairs(additional_response_header_pairs.value()); auto modify_headers = [additional_headers](Http::HeaderMap& headers) { for (auto& p : additional_headers) { const Http::LowerCaseString lower_key(std::move(std::string(p.first))); headers.addCopy(lower_key, std::string(p.second)); } }; auto grpc_status = static_cast<Grpc::Status::GrpcStatus>(grpc_code.u64_); auto grpc_status_opt = (grpc_status != Grpc::Status::GrpcStatus::InvalidCode) ? absl::optional<Grpc::Status::GrpcStatus>(grpc_status) : absl::optional<Grpc::Status::GrpcStatus>(); context->sendLocalResponse(static_cast<Envoy::Http::Code>(response_code.u64_), body.value(), modify_headers, grpc_status_opt, details.value()); return wasmResultToWord(WasmResult::Ok); } Word setEffectiveContextHandler(void* raw_context, Word context_id) { auto context = WASM_CONTEXT(raw_context); uint32_t cid = static_cast<uint32_t>(context_id.u64_); auto c = context->wasm()->getContext(cid); if (!c) { return wasmResultToWord(WasmResult::BadArgument); } effective_context_id_ = cid; return wasmResultToWord(WasmResult::Ok); } Word clearRouteCacheHandler(void* raw_context) { auto context = WASM_CONTEXT(raw_context); context->clearRouteCache(); return wasmResultToWord(WasmResult::Ok); } // SharedData Word getSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr_ptr, Word value_size_ptr, Word cas_ptr) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } std::pair<std::string, uint32_t> data; WasmResult result = context->getSharedData(key.value(), &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(data.first, value_ptr_ptr.u64_, value_size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } if (!context->wasmVm()->setMemory(cas_ptr.u64_, sizeof(uint32_t), &data.second)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word setSharedDataHandler(void* raw_context, Word key_ptr, Word key_size, Word value_ptr, Word value_size, Word cas) { auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->setSharedData(key.value(), value.value(), cas.u64_)); } Word registerSharedQueueHandler(void* raw_context, Word queue_name_ptr, Word queue_name_size, Word token_ptr) { auto context = WASM_CONTEXT(raw_context); auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_); if (!queue_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t token = context->registerSharedQueue(queue_name.value()); if (!context->wasm()->setDatatype(token_ptr.u64_, token)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word dequeueSharedQueueHandler(void* raw_context, Word token, Word data_ptr_ptr, Word data_size_ptr) { auto context = WASM_CONTEXT(raw_context); std::string data; WasmResult result = context->dequeueSharedQueue(token.u32(), &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->copyToPointerSize(data, data_ptr_ptr.u64_, data_size_ptr.u64_)) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word resolveSharedQueueHandler(void* raw_context, Word vm_id_ptr, Word vm_id_size, Word queue_name_ptr, Word queue_name_size, Word token_ptr) { auto context = WASM_CONTEXT(raw_context); auto vm_id = context->wasmVm()->getMemory(vm_id_ptr.u64_, vm_id_size.u64_); auto queue_name = context->wasmVm()->getMemory(queue_name_ptr.u64_, queue_name_size.u64_); if (!vm_id || !queue_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t token = 0; auto result = context->resolveSharedQueue(vm_id.value(), queue_name.value(), &token); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(token_ptr.u64_, token)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word enqueueSharedQueueHandler(void* raw_context, Word token, Word data_ptr, Word data_size) { auto context = WASM_CONTEXT(raw_context); auto data = context->wasmVm()->getMemory(data_ptr.u64_, data_size.u64_); if (!data) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->enqueueSharedQueue(token.u32(), data.value())); } // Network Word getDownstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); absl::string_view data; auto result = context->getDownstreamDataBufferBytes(start.u64_, length.u64_, &data); if (result != WasmResult::Ok) { return wasmResultToWord(result); } context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word getUpstreamDataBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); absl::string_view data; auto result = context->getUpstreamDataBufferBytes(start.u64_, length.u64_, &data); if (result != WasmResult::Ok) { 
return wasmResultToWord(result); } context->wasm()->copyToPointerSize(data, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } // Header/Trailer/Metadata Maps Word addHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->addHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value()); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr_ptr, Word value_size_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto result = context->getHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value()); context->wasm()->copyToPointerSize(result, value_ptr_ptr.u64_, value_size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word replaceHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size, Word value_ptr, Word value_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); auto value = context->wasmVm()->getMemory(value_ptr.u64_, value_size.u64_); if (!key || !value) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } 
context->replaceHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value(), value.value()); return wasmResultToWord(WasmResult::Ok); } Word removeHeaderMapValueHandler(void* raw_context, Word type, Word key_ptr, Word key_size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto key = context->wasmVm()->getMemory(key_ptr.u64_, key_size.u64_); if (!key) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->removeHeaderMapValue(static_cast<HeaderMapType>(type.u64_), key.value()); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapPairsHandler(void* raw_context, Word type, Word ptr_ptr, Word size_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto result = context->getHeaderMapPairs(static_cast<HeaderMapType>(type.u64_)); if (!getPairs(context, result, ptr_ptr.u64_, size_ptr.u64_)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word setHeaderMapPairsHandler(void* raw_context, Word type, Word ptr, Word size) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); auto data = context->wasmVm()->getMemory(ptr.u64_, size.u64_); if (!data) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->setHeaderMapPairs(static_cast<HeaderMapType>(type.u64_), toPairs(data.value())); return wasmResultToWord(WasmResult::Ok); } Word getHeaderMapSizeHandler(void* raw_context, Word type, Word result_ptr) { if (type.u64_ > static_cast<uint64_t>(HeaderMapType::MAX)) { return wasmResultToWord(WasmResult::BadArgument); } auto context = WASM_CONTEXT(raw_context); size_t result = context->getHeaderMapSize(static_cast<HeaderMapType>(type.u64_)); if 
(!context->wasmVm()->setWord(result_ptr.u64_, Word(result))) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } // Body Buffer Word getRequestBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getRequestBodyBufferBytes(start.u64_, length.u64_); context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word getResponseBodyBufferBytesHandler(void* raw_context, Word start, Word length, Word ptr_ptr, Word size_ptr) { auto context = WASM_CONTEXT(raw_context); auto result = context->getResponseBodyBufferBytes(start.u64_, length.u64_); context->wasm()->copyToPointerSize(result, ptr_ptr.u64_, size_ptr.u64_); return wasmResultToWord(WasmResult::Ok); } Word httpCallHandler(void* raw_context, Word uri_ptr, Word uri_size, Word header_pairs_ptr, Word header_pairs_size, Word body_ptr, Word body_size, Word trailer_pairs_ptr, Word trailer_pairs_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto uri = context->wasmVm()->getMemory(uri_ptr.u64_, uri_size.u64_); auto body = context->wasmVm()->getMemory(body_ptr.u64_, body_size.u64_); auto header_pairs = context->wasmVm()->getMemory(header_pairs_ptr.u64_, header_pairs_size.u64_); auto trailer_pairs = context->wasmVm()->getMemory(trailer_pairs_ptr.u64_, trailer_pairs_size.u64_); if (!uri || !body || !header_pairs || !trailer_pairs) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } auto headers = toPairs(header_pairs.value()); auto trailers = toPairs(trailer_pairs.value()); return context->httpCall(uri.value(), headers, body.value(), trailers, timeout_milliseconds.u64_); } Word defineMetricHandler(void* raw_context, Word metric_type, Word name_ptr, Word name_size, Word metric_id_ptr) { if (metric_type.u64_ > static_cast<uint64_t>(Context::MetricType::Max)) { 
return 0; } auto context = WASM_CONTEXT(raw_context); auto name = context->wasmVm()->getMemory(name_ptr.u64_, name_size.u64_); if (!name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } uint32_t metric_id = 0; auto result = context->defineMetric(static_cast<Context::MetricType>(metric_type.u64_), name.value(), &metric_id); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(metric_id_ptr.u64_, metric_id)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word incrementMetricHandler(void* raw_context, Word metric_id, int64_t offset) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->incrementMetric(metric_id.u64_, offset)); } Word recordMetricHandler(void* raw_context, Word metric_id, uint64_t value) { auto context = WASM_CONTEXT(raw_context); return wasmResultToWord(context->recordMetric(metric_id.u64_, value)); } Word getMetricHandler(void* raw_context, Word metric_id, Word result_uint64_ptr) { auto context = WASM_CONTEXT(raw_context); uint64_t value = 0; auto result = context->getMetric(metric_id.u64_, &value); if (result != WasmResult::Ok) { return wasmResultToWord(result); } if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, value)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word grpcCallHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size, Word request_ptr, Word request_size, Word timeout_milliseconds) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); auto request = 
context->wasmVm()->getMemory(request_ptr.u64_, request_size.u64_); if (!service || !service_name || !method_name || !request) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcCall(service_proto, service_name.value(), method_name.value(), request.value(), std::chrono::milliseconds(timeout_milliseconds.u64_)); } Word grpcStreamHandler(void* raw_context, Word service_ptr, Word service_size, Word service_name_ptr, Word service_name_size, Word method_name_ptr, Word method_name_size) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto service = context->wasmVm()->getMemory(service_ptr.u64_, service_size.u64_); auto service_name = context->wasmVm()->getMemory(service_name_ptr.u64_, service_name_size.u64_); auto method_name = context->wasmVm()->getMemory(method_name_ptr.u64_, method_name_size.u64_); if (!service || !service_name || !method_name) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } envoy::api::v2::core::GrpcService service_proto; if (!service_proto.ParseFromArray(service.value().data(), service.value().size())) { return false; } return context->grpcStream(service_proto, service_name.value(), method_name.value()); } Word grpcCancelHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcCancel(token.u64_)); } Word grpcCloseHandler(void* raw_context, Word token) { auto context = WASM_CONTEXT(raw_context)->root_context(); return wasmResultToWord(context->grpcClose(token.u64_)); } Word grpcSendHandler(void* raw_context, Word token, Word message_ptr, Word message_size, Word end_stream) { auto context = WASM_CONTEXT(raw_context)->root_context(); auto message = context->wasmVm()->getMemory(message_ptr.u64_, message_size.u64_); if (!message) { return 
wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->grpcSend(token.u64_, message.value(), end_stream.u64_)); } // Implementation of writev-like() syscall that redirects stdout/stderr to Envoy logs. Word writevImpl(void* raw_context, Word fd, Word iovs, Word iovs_len, Word* nwritten_ptr) { auto context = WASM_CONTEXT(raw_context); // Read syscall args. spdlog::level::level_enum log_level; switch (fd.u64_) { case 1 /* stdout */: log_level = spdlog::level::info; break; case 2 /* stderr */: log_level = spdlog::level::err; break; default: return 8; // __WASI_EBADF } std::string s; for (size_t i = 0; i < iovs_len.u64_; i++) { auto memslice = context->wasmVm()->getMemory(iovs.u64_ + i * 2 * sizeof(uint32_t), 2 * sizeof(uint32_t)); if (!memslice) { return 21; // __WASI_EFAULT } const uint32_t* iovec = reinterpret_cast<const uint32_t*>(memslice.value().data()); if (iovec[1] /* buf_len */) { memslice = context->wasmVm()->getMemory(iovec[0] /* buf */, iovec[1] /* buf_len */); if (!memslice) { return 21; // __WASI_EFAULT } s.append(memslice.value().data(), memslice.value().size()); } } size_t written = s.size(); if (written) { // Remove trailing newline from the logs, if any. 
if (s[written - 1] == '\n') { s.erase(written - 1); } context->scriptLog(log_level, s); } *nwritten_ptr = Word(written); return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_fd_write(_wasi_fd_t fd, const _wasi_ciovec_t *iov, size_t iovs_len, size_t* // nwritten); Word wasi_unstable_fd_writeHandler(void* raw_context, Word fd, Word iovs, Word iovs_len, Word nwritten_ptr) { auto context = WASM_CONTEXT(raw_context); Word nwritten(0); auto result = writevImpl(raw_context, fd, iovs, iovs_len, &nwritten); if (result.u64_ != 0) { // __WASI_ESUCCESS return result; } if (!context->wasmVm()->setWord(nwritten_ptr.u64_, Word(nwritten))) { return 21; // __WASI_EFAULT } return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_fd_seek(__wasi_fd_t fd, __wasi_filedelta_t offset, __wasi_whence_t // whence,__wasi_filesize_t *newoffset); Word wasi_unstable_fd_seekHandler(void*, Word, int64_t, Word, Word) { throw WasmException("wasi_unstable fd_seek"); } // __wasi_errno_t __wasi_fd_close(__wasi_fd_t fd); Word wasi_unstable_fd_closeHandler(void*, Word) { throw WasmException("wasi_unstable fd_close"); } // __wasi_errno_t __wasi_environ_get(char **environ, char *environ_buf); Word wasi_unstable_environ_getHandler(void*, Word, Word) { return 0; // __WASI_ESUCCESS } // __wasi_errno_t __wasi_environ_sizes_get(size_t *environ_count, size_t *environ_buf_size); Word wasi_unstable_environ_sizes_getHandler(void* raw_context, Word count_ptr, Word buf_size_ptr) { auto context = WASM_CONTEXT(raw_context); if (!context->wasmVm()->setWord(count_ptr.u64_, Word(0))) { return 21; // __WASI_EFAULT } if (!context->wasmVm()->setWord(buf_size_ptr.u64_, Word(0))) { return 21; // __WASI_EFAULT } return 0; // __WASI_ESUCCESS } // void __wasi_proc_exit(__wasi_exitcode_t rval); void wasi_unstable_proc_exitHandler(void*, Word) { throw WasmException("wasi_unstable proc_exit"); } Word pthread_equalHandler(void*, Word left, Word right) { return left.u64_ == right.u64_; } Word setTickPeriodMillisecondsHandler(void* 
raw_context, Word tick_period_milliseconds) { return wasmResultToWord( WASM_CONTEXT(raw_context) ->setTickPeriod(std::chrono::milliseconds(tick_period_milliseconds.u64_))); } Word getCurrentTimeNanosecondsHandler(void* raw_context, Word result_uint64_ptr) { auto context = WASM_CONTEXT(raw_context); uint64_t result = context->getCurrentTimeNanoseconds(); if (!context->wasm()->setDatatype(result_uint64_ptr.u64_, result)) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(WasmResult::Ok); } Word logHandler(void* raw_context, Word level, Word address, Word size) { auto context = WASM_CONTEXT(raw_context); auto message = context->wasmVm()->getMemory(address.u64_, size.u64_); if (!message) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } context->scriptLog(static_cast<spdlog::level::level_enum>(level.u64_), message.value()); return wasmResultToWord(WasmResult::Ok); } WasmResult Context::setTickPeriod(std::chrono::milliseconds tick_period) { wasm_->setTickPeriod(root_context_id_ ? 
root_context_id_ : id_, tick_period); return WasmResult::Ok; } uint64_t Context::getCurrentTimeNanoseconds() { return std::chrono::duration_cast<std::chrono::nanoseconds>( wasm_->time_source_.systemTime().time_since_epoch()) .count(); } // TODO(https://github.com/google/cel-cpp/issues/38) bool exportValue(const Filters::Common::Expr::CelValue& value, ProtobufWkt::Value* out) { using Filters::Common::Expr::CelValue; switch (value.type()) { case CelValue::Type::kBool: out->set_bool_value(value.BoolOrDie()); return true; case CelValue::Type::kInt64: out->set_number_value(static_cast<double>(value.Int64OrDie())); return true; case CelValue::Type::kUint64: out->set_number_value(static_cast<double>(value.Uint64OrDie())); return true; case CelValue::Type::kDouble: out->set_number_value(value.DoubleOrDie()); return true; case CelValue::Type::kString: *out->mutable_string_value() = std::string(value.StringOrDie().value()); return true; case CelValue::Type::kBytes: *out->mutable_string_value() = std::string(value.BytesOrDie().value()); return true; case CelValue::Type::kMessage: { if (value.IsNull()) { out->set_null_value(ProtobufWkt::NullValue::NULL_VALUE); } else { auto msg = value.MessageOrDie(); out->mutable_struct_value()->MergeFrom(*msg); } return true; } case CelValue::Type::kDuration: *out->mutable_string_value() = absl::FormatDuration(value.DurationOrDie()); return true; case CelValue::Type::kTimestamp: *out->mutable_string_value() = absl::FormatTime(value.TimestampOrDie()); return true; case CelValue::Type::kList: { auto list = value.ListOrDie(); auto values = out->mutable_list_value(); for (int i = 0; i < list->size(); i++) { if (!exportValue((*list)[i], values->add_values())) { return false; } } return true; } case CelValue::Type::kMap: { auto map = value.MapOrDie(); auto list = map->ListKeys(); auto struct_obj = out->mutable_struct_value(); for (int i = 0; i < list->size(); i++) { ProtobufWkt::Value field_key; if (!exportValue((*list)[i], &field_key)) { return 
false; } ProtobufWkt::Value field_value; if (!exportValue((*map)[(*list)[i]].value(), &field_value)) { return false; } (*struct_obj->mutable_fields())[field_key.string_value()] = field_value; } return true; } default: // do nothing for special values return false; } return false; } WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* result) { using Filters::Common::Expr::CelValue; switch (value.type()) { case CelValue::Type::kMessage: if (value.MessageOrDie() != nullptr && value.MessageOrDie()->SerializeToString(result)) { return WasmResult::Ok; } return WasmResult::SerializationFailure; case CelValue::Type::kString: result->assign(value.StringOrDie().value().data(), value.StringOrDie().value().size()); return WasmResult::Ok; case CelValue::Type::kBytes: result->assign(value.BytesOrDie().value().data(), value.BytesOrDie().value().size()); return WasmResult::Ok; case CelValue::Type::kInt64: { auto out = value.Int64OrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(int64_t)); return WasmResult::Ok; } case CelValue::Type::kUint64: { auto out = value.Uint64OrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(uint64_t)); return WasmResult::Ok; } case CelValue::Type::kDouble: { auto out = value.DoubleOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(double)); return WasmResult::Ok; } case CelValue::Type::kBool: { auto out = value.BoolOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(bool)); return WasmResult::Ok; } case CelValue::Type::kDuration: { auto out = value.DurationOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Duration)); return WasmResult::Ok; } case CelValue::Type::kTimestamp: { auto out = value.TimestampOrDie(); result->assign(reinterpret_cast<const char*>(&out), sizeof(absl::Time)); return WasmResult::Ok; } case CelValue::Type::kMap: { ProtobufWkt::Value out; if (!exportValue(value, &out)) { return WasmResult::SerializationFailure; } if 
(!out.struct_value().SerializeToString(result)) { return WasmResult::SerializationFailure; } return WasmResult::Ok; } case CelValue::Type::kList: { ProtobufWkt::Value out; if (!exportValue(value, &out)) { return WasmResult::SerializationFailure; } if (!out.list_value().SerializeToString(result)) { return WasmResult::SerializationFailure; } return WasmResult::Ok; } default: return WasmResult::SerializationFailure; } return WasmResult::SerializationFailure; } // An expression wrapper for the WASM state class WasmStateWrapper : public google::api::expr::runtime::CelMap { public: WasmStateWrapper(const StreamInfo::FilterState& filter_state) : filter_state_(filter_state) {} absl::optional<google::api::expr::runtime::CelValue> operator[](google::api::expr::runtime::CelValue key) const override { if (!key.IsString()) { return {}; } auto value = key.StringOrDie().value(); try { const WasmState& result = filter_state_.getDataReadOnly<WasmState>(value); return google::api::expr::runtime::CelValue::CreateBytes(&result.value()); } catch (const EnvoyException& e) { return {}; } } int size() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } bool empty() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } const google::api::expr::runtime::CelList* ListKeys() const override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } private: const StreamInfo::FilterState& filter_state_; }; WasmResult Context::getProperty(absl::string_view path, std::string* result) { using google::api::expr::runtime::CelValue; using google::api::expr::runtime::FieldBackedListImpl; using google::api::expr::runtime::FieldBackedMapImpl; bool first = true; CelValue value; Protobuf::Arena arena; const StreamInfo::StreamInfo* info = getConstRequestStreamInfo(); const auto request_headers = request_headers_ ? request_headers_ : access_log_request_headers_; const auto response_headers = response_headers_ ? response_headers_ : access_log_response_headers_; const auto response_trailers = response_trailers_ ? 
// (continuation of Context::getProperty — the start of this function is above this chunk)
response_trailers_ : access_log_response_trailers_;
  size_t start = 0;
  // Walk the NUL-separated path, resolving one component per iteration.
  while (true) {
    if (start >= path.size()) {
      break;
    }
    size_t end = path.find('\0', start);
    if (end == absl::string_view::npos) {
      // this should not happen unless the input string is not null-terminated in the view
      return WasmResult::ParseFailure;
    }
    auto part = path.substr(start, end - start);
    start = end + 1;

    // top-level ident
    if (first) {
      first = false;
      if (part == "metadata") {
        value = CelValue::CreateMessage(&info->dynamicMetadata(), &arena);
      } else if (part == "filter_state") {
        value = CelValue::CreateMap(
            Protobuf::Arena::Create<WasmStateWrapper>(&arena, info->filterState()));
      } else if (part == "request") {
        value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::RequestWrapper>(
            &arena, request_headers, *info));
      } else if (part == "response") {
        value = CelValue::CreateMap(Protobuf::Arena::Create<Filters::Common::Expr::ResponseWrapper>(
            &arena, response_headers, response_trailers, *info));
      } else if (part == "connection") {
        value = CelValue::CreateMap(
            Protobuf::Arena::Create<Filters::Common::Expr::ConnectionWrapper>(&arena, *info));
      } else if (part == "upstream") {
        value = CelValue::CreateMap(
            Protobuf::Arena::Create<Filters::Common::Expr::UpstreamWrapper>(&arena, *info));
      } else if (part == "node") {
        value = CelValue::CreateMessage(&plugin_->local_info_.node(), &arena);
      } else if (part == "source") {
        value = CelValue::CreateMap(
            Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, false));
      } else if (part == "destination") {
        value = CelValue::CreateMap(
            Protobuf::Arena::Create<Filters::Common::Expr::PeerWrapper>(&arena, *info, true));
      } else if (part == "request_protocol") {
        // TODO(kyessenov) move this upstream to CEL context
        if (info->protocol().has_value()) {
          value =
              CelValue::CreateString(&Http::Utility::getProtocolString(info->protocol().value()));
        } else {
          return WasmResult::NotFound;
        }
        // Reflective accessors
      } else if (part == "listener_direction") {
        value = CelValue::CreateInt64(plugin_->direction_);
      } else if (part == "listener_metadata") {
        value = CelValue::CreateMessage(plugin_->listener_metadata_, &arena);
      } else if (part == "cluster_name" && info->upstreamHost() != nullptr) {
        value = CelValue::CreateString(&info->upstreamHost()->cluster().name());
      } else if (part == "cluster_metadata" && info->upstreamHost() != nullptr) {
        value = CelValue::CreateMessage(&info->upstreamHost()->cluster().metadata(), &arena);
      } else if (part == "route_name") {
        value = CelValue::CreateString(&info->getRouteName());
      } else if (part == "route_metadata" && info->routeEntry() != nullptr) {
        value = CelValue::CreateMessage(&info->routeEntry()->metadata(), &arena);
      } else {
        return WasmResult::NotFound;
      }
      continue;
    }

    // Non-first components: descend into the current value by key (maps) or
    // by reflection (protobuf messages).
    if (value.IsMap()) {
      auto& map = *value.MapOrDie();
      auto field = map[CelValue::CreateString(part)];
      if (field.has_value()) {
        value = field.value();
      } else {
        return {};
      }
    } else if (value.IsMessage()) {
      auto msg = value.MessageOrDie();
      if (msg == nullptr) {
        return {};
      }
      const Protobuf::Descriptor* desc = msg->GetDescriptor();
      const Protobuf::FieldDescriptor* field_desc = desc->FindFieldByName(std::string(part));
      if (field_desc == nullptr) {
        return {};
      } else if (field_desc->is_map()) {
        value = CelValue::CreateMap(
            Protobuf::Arena::Create<FieldBackedMapImpl>(&arena, msg, field_desc, &arena));
      } else if (field_desc->is_repeated()) {
        value = CelValue::CreateList(
            Protobuf::Arena::Create<FieldBackedListImpl>(&arena, msg, field_desc, &arena));
      } else {
        auto status =
            google::api::expr::runtime::CreateValueFromSingleField(msg, field_desc, &arena, &value);
        if (!status.ok()) {
          return {};
        }
      }
    } else {
      return {};
    }
  }
  return serializeValue(value, result);
}

// Shared Data

// Reads the (value, cas) pair stored under |key| for this VM id.
WasmResult Context::getSharedData(absl::string_view key, std::pair<std::string, uint32_t>* data) {
  return global_shared_data.get(wasm_->vm_id(), key, data);
}

// Stores |value| under |key|; |cas| of 0 means unconditional write, otherwise
// compare-and-swap against the stored cas.
WasmResult Context::setSharedData(absl::string_view key, absl::string_view value, uint32_t cas) {
  return global_shared_data.set(wasm_->vm_id(), key, value, cas);
}

// Shared Queue

// Registers a queue and returns its token.
uint32_t Context::registerSharedQueue(absl::string_view queue_name) {
  // Get the id of the root context if this is a stream context because onQueueReady is on the root.
  return global_shared_data.registerQueue(
      wasm_->vm_id(), queue_name, isRootContext() ? id_ : root_context_id_, wasm_->dispatcher_);
}

// Looks up the token of a queue registered by (vm_id, queue_name).
WasmResult Context::resolveSharedQueue(absl::string_view vm_id, absl::string_view queue_name,
                                       uint32_t* token_ptr) {
  uint32_t token = global_shared_data.resolveQueue(vm_id, queue_name);
  if (!token) {
    return WasmResult::NotFound;
  }
  *token_ptr = token;
  return WasmResult::Ok;
}

WasmResult Context::dequeueSharedQueue(uint32_t token, std::string* data) {
  return global_shared_data.dequeue(token, data);
}

WasmResult Context::enqueueSharedQueue(uint32_t token, absl::string_view value) {
  return global_shared_data.enqueue(token, value);
}

// Network bytes.

// Exposes [start, start+length) of the buffered downstream data.
// NOTE(review): `start + length` is evaluated in 32-bit arithmetic *before*
// the cast to uint64_t, so the sum can wrap and bypass this bounds check,
// letting linearize() be called with a size smaller than `start` — verify and
// fix by widening before adding (same issue below in the upstream variant).
WasmResult Context::getDownstreamDataBufferBytes(uint32_t start, uint32_t length,
                                                 absl::string_view* data) {
  if (!network_downstream_data_buffer_)
    return WasmResult::NotFound;
  if (network_downstream_data_buffer_->length() < static_cast<uint64_t>(start + length))
    return WasmResult::InvalidMemoryAccess;
  *data = absl::string_view(
      static_cast<char*>(network_downstream_data_buffer_->linearize(start + length)) + start,
      length);
  return WasmResult::Ok;
}

// Exposes [start, start+length) of the buffered upstream data.
// NOTE(review): same 32-bit wrap hazard as getDownstreamDataBufferBytes.
WasmResult Context::getUpstreamDataBufferBytes(uint32_t start, uint32_t length,
                                               absl::string_view* data) {
  if (!network_upstream_data_buffer_)
    return WasmResult::NotFound;
  if (network_upstream_data_buffer_->length() < static_cast<uint64_t>(start + length))
    return WasmResult::InvalidMemoryAccess;
  *data = absl::string_view(
      static_cast<char*>(network_upstream_data_buffer_->linearize(start + length)) + start,
      length);
  return WasmResult::Ok;
}

// Header/Trailer/Metadata Maps.
Http::HeaderMap* Context::getMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: return request_headers_; case HeaderMapType::RequestTrailers: return request_trailers_; case HeaderMapType::ResponseHeaders: return response_headers_; case HeaderMapType::ResponseTrailers: return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; default: return nullptr; } } const Http::HeaderMap* Context::getConstMap(HeaderMapType type) { switch (type) { case HeaderMapType::RequestHeaders: if (access_log_request_headers_) { return access_log_request_headers_; } return request_headers_; case HeaderMapType::RequestTrailers: if (access_log_request_trailers_) { return access_log_request_trailers_; } return request_trailers_; case HeaderMapType::ResponseHeaders: if (access_log_response_headers_) { return access_log_response_headers_; } return response_headers_; case HeaderMapType::ResponseTrailers: if (access_log_response_trailers_) { return access_log_response_trailers_; } return response_trailers_; case HeaderMapType::GrpcCreateInitialMetadata: return grpc_create_initial_metadata_; case HeaderMapType::GrpcReceiveInitialMetadata: return grpc_receive_initial_metadata_.get(); case HeaderMapType::GrpcReceiveTrailingMetadata: return grpc_receive_trailing_metadata_.get(); } return nullptr; } void Context::addHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->addCopy(lower_key, std::string(value)); } absl::string_view Context::getHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getConstMap(type); if (!map) { return ""; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (!entry) { return ""; } return entry->value().getStringView(); } Pairs headerMapToPairs(const Http::HeaderMap* map) { if 
(!map) { return {}; } Pairs pairs; pairs.reserve(map->size()); map->iterate( [](const Http::HeaderEntry& header, void* pairs) -> Http::HeaderMap::Iterate { (static_cast<Pairs*>(pairs)) ->push_back( std::make_pair(header.key().getStringView(), header.value().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &pairs); return pairs; } Pairs Context::getHeaderMapPairs(HeaderMapType type) { return headerMapToPairs(getConstMap(type)); } void Context::setHeaderMapPairs(HeaderMapType type, const Pairs& pairs) { auto map = getMap(type); if (!map) { return; } std::vector<std::string> keys; map->iterate( [](const Http::HeaderEntry& header, void* keys) -> Http::HeaderMap::Iterate { (static_cast<std::vector<std::string>*>(keys)) ->push_back(std::string(header.key().getStringView())); return Http::HeaderMap::Iterate::Continue; }, &keys); for (auto& k : keys) { const Http::LowerCaseString lower_key(std::move(k)); map->remove(lower_key); } for (auto& p : pairs) { const Http::LowerCaseString lower_key(std::move(std::string(p.first))); map->addCopy(lower_key, std::move(std::string(p.second))); } } void Context::removeHeaderMapValue(HeaderMapType type, absl::string_view key) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); map->remove(lower_key); } void Context::replaceHeaderMapValue(HeaderMapType type, absl::string_view key, absl::string_view value) { auto map = getMap(type); if (!map) { return; } const Http::LowerCaseString lower_key(std::move(std::string(key))); auto entry = map->get(lower_key); if (entry != nullptr) { entry->value(value.data(), value.size()); } else { map->addCopy(lower_key, std::string(value)); } } uint32_t Context::getHeaderMapSize(HeaderMapType type) { auto map = getMap(type); if (!map) { return 0; } return map->refreshByteSize(); } // Body Buffer absl::string_view Context::getRequestBodyBufferBytes(uint32_t start, uint32_t length) { if (!requestBodyBuffer_) { return ""; } if 
(requestBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(requestBodyBuffer_->linearize(start + length)) + start, length); } absl::string_view Context::getResponseBodyBufferBytes(uint32_t start, uint32_t length) { if (!responseBodyBuffer_) { return ""; } if (responseBodyBuffer_->length() < static_cast<uint64_t>((start + length))) { return ""; } return absl::string_view( static_cast<char*>(responseBodyBuffer_->linearize(start + length)) + start, length); } // Async call via HTTP uint32_t Context::httpCall(absl::string_view cluster, const Pairs& request_headers, absl::string_view request_body, const Pairs& request_trailers, int timeout_milliseconds) { if (timeout_milliseconds < 0) { return 0; } auto cluster_string = std::string(cluster); if (clusterManager().get(cluster_string) == nullptr) { return 0; } Http::MessagePtr message(new Http::RequestMessageImpl(buildHeaderMapFromPairs(request_headers))); // Check that we were provided certain headers. if (message->headers().Path() == nullptr || message->headers().Method() == nullptr || message->headers().Host() == nullptr) { return 0; } if (!request_body.empty()) { message->body().reset(new Buffer::OwnedImpl(request_body.data(), request_body.size())); message->headers().insertContentLength().value(request_body.size()); } if (request_trailers.size() > 0) { message->trailers(buildHeaderMapFromPairs(request_trailers)); } absl::optional<std::chrono::milliseconds> timeout; if (timeout_milliseconds > 0) { timeout = std::chrono::milliseconds(timeout_milliseconds); } auto token = next_http_call_token_++; // Handle rollover. 
for (;;) { if (token == 0) { token = next_http_call_token_++; } if (!http_request_.count(token)) { break; } token = next_http_call_token_++; } auto& handler = http_request_[token]; // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::RequestOptions options; options.setTimeout(timeout); Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); auto http_request = clusterManager() .httpAsyncClientForCluster(cluster_string) .send(std::move(message), handler, options); if (!http_request) { http_request_.erase(token); return 0; } handler.context = this; handler.token = token; handler.request = http_request; return token; } uint32_t Context::grpcCall(const envoy::api::v2::core::GrpcService& grpc_service, absl::string_view service_name, absl::string_view method_name, absl::string_view request, const absl::optional<std::chrono::milliseconds>& timeout) { auto token = next_grpc_token_++; if (IsGrpcStreamToken(token)) { token = next_grpc_token_++; } // Handle rollover. 
for (;;) { if (token == 0) { token = next_grpc_token_ += 2; } if (!grpc_call_request_.count(token)) { break; } token = next_grpc_token_ += 2; } auto& handler = grpc_call_request_[token]; handler.context = this; handler.token = token; auto grpc_client = clusterManager() .grpcAsyncClientManager() .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */) ->create(); // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::RequestOptions options; options.setTimeout(timeout); Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call // returns. Consequently the grpc_request is not available. Attempting to close or reset from that // callback will fail. auto grpc_request = grpc_client->sendRaw(service_name, method_name, std::make_unique<Buffer::OwnedImpl>(request), handler, Tracing::NullSpan::instance(), options); if (!grpc_request) { grpc_call_request_.erase(token); return 0; } handler.client = std::move(grpc_client); handler.request = grpc_request; return token; } uint32_t Context::grpcStream(const envoy::api::v2::core::GrpcService& grpc_service, absl::string_view service_name, absl::string_view method_name) { auto token = next_grpc_token_++; if (IsGrpcCallToken(token)) { token = next_grpc_token_++; } // Handle rollover. 
for (;;) { if (token == 0) { token = next_grpc_token_ += 2; } if (!grpc_stream_.count(token)) { break; } token = next_grpc_token_ += 2; } auto& handler = grpc_stream_[token]; handler.context = this; handler.token = token; auto grpc_client = clusterManager() .grpcAsyncClientManager() .factoryForGrpcService(grpc_service, *wasm()->scope_, true /* skip_cluster_check */) ->create(); // set default hash policy to be based on :authority to enable consistent hash Http::AsyncClient::StreamOptions options; Protobuf::RepeatedPtrField<envoy::api::v2::route::RouteAction::HashPolicy> hash_policy; hash_policy.Add()->mutable_header()->set_header_name(Http::Headers::get().Host.get()); options.setHashPolicy(hash_policy); // NB: this call causes the onCreateInitialMetadata callback to occur inline *before* this call // returns. Consequently the grpc_stream is not available. Attempting to close or reset from that // callback will fail. auto grpc_stream = grpc_client->startRaw(service_name, method_name, handler, options); if (!grpc_stream) { grpc_stream_.erase(token); return 0; } handler.client = std::move(grpc_client); handler.stream = grpc_stream; return token; } void Context::httpRespond(const Pairs& response_headers, absl::string_view body, const Pairs& response_trailers) { (void)response_headers; (void)body; (void)response_trailers; } // StreamInfo const StreamInfo::StreamInfo* Context::getConstRequestStreamInfo() const { if (encoder_callbacks_) { return &encoder_callbacks_->streamInfo(); } else if (decoder_callbacks_) { return &decoder_callbacks_->streamInfo(); } else if (access_log_stream_info_) { return access_log_stream_info_; } return nullptr; } StreamInfo::StreamInfo* Context::getRequestStreamInfo() const { if (encoder_callbacks_) { return &encoder_callbacks_->streamInfo(); } else if (decoder_callbacks_) { return &decoder_callbacks_->streamInfo(); } return nullptr; } WasmResult Context::setProperty(absl::string_view key, absl::string_view serialized_value) { auto* 
stream_info = getRequestStreamInfo(); if (!stream_info) { return WasmResult::NotFound; } stream_info->filterState().setData(key, std::make_unique<WasmState>(serialized_value), StreamInfo::FilterState::StateType::Mutable); return WasmResult::Ok; } void Context::scriptLog(spdlog::level::level_enum level, absl::string_view message) { switch (level) { case spdlog::level::trace: ENVOY_LOG(trace, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::debug: ENVOY_LOG(debug, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::info: ENVOY_LOG(info, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::warn: ENVOY_LOG(warn, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::err: ENVOY_LOG(error, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::critical: ENVOY_LOG(critical, "wasm log{}: {}", log_prefix(), message); return; case spdlog::level::off: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } } // Connection bool Context::isSsl() { return decoder_callbacks_->connection()->ssl() != nullptr; } // // Calls into the WASM code. 
//

// Invokes the VM's onStart export with the root id and VM configuration.
void Context::onStart(absl::string_view root_id, absl::string_view vm_configuration) {
  if (wasm_->onStart_) {
    auto root_id_addr = wasm_->copyString(root_id);
    auto config_addr = wasm_->copyString(vm_configuration);
    wasm_->onStart_(this, id_, root_id_addr, root_id.size(), config_addr, vm_configuration.size());
  }
  in_vm_context_created_ = true;
}

// Asks the VM to validate a configuration blob; absent export means "valid".
bool Context::validateConfiguration(absl::string_view configuration) {
  if (!wasm_->validateConfiguration_) {
    return true;
  }
  auto address = wasm_->copyString(configuration);
  return wasm_->validateConfiguration_(this, id_, address, configuration.size()).u64_ != 0;
}

// Delivers a configuration blob to the VM; absent export means "accepted".
bool Context::onConfigure(absl::string_view configuration) {
  if (!wasm_->onConfigure_) {
    return true;
  }
  auto address = wasm_->copyString(configuration);
  return wasm_->onConfigure_(this, id_, address, configuration.size()).u64_ != 0;
}

// Creates the in-VM context object for this context id.
void Context::onCreate(uint32_t root_context_id) {
  if (wasm_->onCreate_) {
    wasm_->onCreate_(this, id_, root_context_id);
  }
}

// Network filter entry: create the in-VM context, then dispatch onNewConnection.
Network::FilterStatus Context::onNetworkNewConnection() {
  onCreate(root_context_id_);
  in_vm_context_created_ = true;
  if (!wasm_->onNewConnection_) {
    return Network::FilterStatus::Continue;
  }
  if (wasm_->onNewConnection_(this, id_).u64_ == 0) {
    return Network::FilterStatus::Continue;
  }
  return Network::FilterStatus::StopIteration;
}

// Dispatches buffered downstream data to the VM; 0 result means Continue.
Network::FilterStatus Context::onDownstreamData(int data_length, bool end_of_stream) {
  if (!in_vm_context_created_ || !wasm_->onDownstreamData_) {
    return Network::FilterStatus::Continue;
  }
  auto result = wasm_->onDownstreamData_(this, id_, static_cast<uint32_t>(data_length),
                                         static_cast<uint32_t>(end_of_stream));
  // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
  return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
}

// Dispatches buffered upstream data to the VM; 0 result means Continue.
Network::FilterStatus Context::onUpstreamData(int data_length, bool end_of_stream) {
  if (!in_vm_context_created_ || !wasm_->onUpstreamData_) {
    return Network::FilterStatus::Continue;
  }
  auto result = wasm_->onUpstreamData_(this, id_, static_cast<uint32_t>(data_length),
                                       static_cast<uint32_t>(end_of_stream));
  // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values.
  return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration;
}

void Context::onDownstreamConnectionClose(PeerType peer_type) {
  if (in_vm_context_created_ && wasm_->onDownstreamConnectionClose_) {
    wasm_->onDownstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type));
  }
}

void Context::onUpstreamConnectionClose(PeerType peer_type) {
  if (in_vm_context_created_ && wasm_->onUpstreamConnectionClose_) {
    wasm_->onUpstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type));
  }
}

// HTTP filter entry: create the in-VM context, record the stream id in
// dynamic metadata, then dispatch the request headers.
Http::FilterHeadersStatus Context::onRequestHeaders() {
  onCreate(root_context_id_);
  in_vm_context_created_ = true;
  // Store the stream id so that we can use it in log().
  auto& stream_info = decoder_callbacks_->streamInfo();
  auto& metadata = (*stream_info.dynamicMetadata()
                         .mutable_filter_metadata())[HttpFilters::HttpFilterNames::get().Wasm];
  (*metadata.mutable_fields())[std::string("_stream_id_" + std::string(root_id()))]
      .set_number_value(id_);
  if (!wasm_->onRequestHeaders_) {
    return Http::FilterHeadersStatus::Continue;
  }
  if (wasm_->onRequestHeaders_(this, id_).u64_ == 0) {
    return Http::FilterHeadersStatus::Continue;
  }
  return Http::FilterHeadersStatus::StopIteration;
}

// Maps the VM's integer result onto Envoy's FilterDataStatus.
Http::FilterDataStatus Context::onRequestBody(int body_buffer_length, bool end_of_stream) {
  if (!in_vm_context_created_ || !wasm_->onRequestBody_) {
    return Http::FilterDataStatus::Continue;
  }
  switch (wasm_
              ->onRequestBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
                               static_cast<uint32_t>(end_of_stream))
              .u64_) {
  case 0:
    return Http::FilterDataStatus::Continue;
  case 1:
    return Http::FilterDataStatus::StopIterationAndBuffer;
  case 2:
    return Http::FilterDataStatus::StopIterationAndWatermark;
  default:
    return Http::FilterDataStatus::StopIterationNoBuffer;
  }
}

Http::FilterTrailersStatus Context::onRequestTrailers() {
  if (!in_vm_context_created_ || !wasm_->onRequestTrailers_) {
    return Http::FilterTrailersStatus::Continue;
  }
  if (wasm_->onRequestTrailers_(this, id_).u64_ == 0) {
    return Http::FilterTrailersStatus::Continue;
  }
  return Http::FilterTrailersStatus::StopIteration;
}

Http::FilterMetadataStatus Context::onRequestMetadata() {
  if (!in_vm_context_created_ || !wasm_->onRequestMetadata_) {
    return Http::FilterMetadataStatus::Continue;
  }
  if (wasm_->onRequestMetadata_(this, id_).u64_ == 0) {
    return Http::FilterMetadataStatus::Continue;
  }
  return Http::FilterMetadataStatus::Continue; // This is currently the only return code.
}

Http::FilterHeadersStatus Context::onResponseHeaders() {
  if (!in_vm_context_created_) {
    // If the request is invalid then onRequestHeaders() will not be called and neither will
    // onCreate() then sendLocalReply be called which will call this function. In this case we
    // need to call onCreate() so that the Context inside the VM is created before the
    // onResponseHeaders() call.
    onCreate(root_context_id_);
    in_vm_context_created_ = true;
  }
  if (!wasm_->onResponseHeaders_) {
    return Http::FilterHeadersStatus::Continue;
  }
  if (wasm_->onResponseHeaders_(this, id_).u64_ == 0) {
    return Http::FilterHeadersStatus::Continue;
  }
  return Http::FilterHeadersStatus::StopIteration;
}

// Maps the VM's integer result onto Envoy's FilterDataStatus.
Http::FilterDataStatus Context::onResponseBody(int body_buffer_length, bool end_of_stream) {
  if (!in_vm_context_created_ || !wasm_->onResponseBody_) {
    return Http::FilterDataStatus::Continue;
  }
  switch (wasm_
              ->onResponseBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
                                static_cast<uint32_t>(end_of_stream))
              .u64_) {
  case 0:
    return Http::FilterDataStatus::Continue;
  case 1:
    return Http::FilterDataStatus::StopIterationAndBuffer;
  case 2:
    return Http::FilterDataStatus::StopIterationAndWatermark;
  default:
    return Http::FilterDataStatus::StopIterationNoBuffer;
  }
}

Http::FilterTrailersStatus Context::onResponseTrailers() {
  if (!in_vm_context_created_ || !wasm_->onResponseTrailers_) {
    return Http::FilterTrailersStatus::Continue;
  }
  if (wasm_->onResponseTrailers_(this, id_).u64_ == 0) {
    return Http::FilterTrailersStatus::Continue;
  }
  return Http::FilterTrailersStatus::StopIteration;
}

Http::FilterMetadataStatus Context::onResponseMetadata() {
  if (!in_vm_context_created_ || !wasm_->onResponseMetadata_) {
    return Http::FilterMetadataStatus::Continue;
  }
  if (wasm_->onResponseMetadata_(this, id_).u64_ == 0) {
    return Http::FilterMetadataStatus::Continue;
  }
  return Http::FilterMetadataStatus::Continue; // This is currently the only return code.
}

// Copies an HTTP call response (headers, body, trailers) into VM memory and
// invokes the VM's onHttpCallResponse export with the correlation token.
void Context::onHttpCallResponse(uint32_t token, const Pairs& response_headers,
                                 absl::string_view response_body, const Pairs& response_trailers) {
  if (!wasm_->onHttpCallResponse_) {
    return;
  }
  uint64_t headers_ptr, headers_size, trailers_ptr, trailers_size;
  exportPairs(this, response_headers, &headers_ptr, &headers_size);
  exportPairs(this, response_trailers, &trailers_ptr, &trailers_size);
  auto body_ptr = wasm_->copyString(response_body);
  auto body_size = response_body.size();
  wasm_->onHttpCallResponse_(this, id_, token, headers_ptr, headers_size, body_ptr, body_size,
                             trailers_ptr, trailers_size);
}

void Context::onQueueReady(uint32_t token) {
  if (wasm_->onQueueReady_) {
    wasm_->onQueueReady_(this, id_, token);
  }
}

// Exposes |metadata| to the VM only for the duration of the callback.
void Context::onGrpcCreateInitialMetadata(uint32_t token, Http::HeaderMap& metadata) {
  if (!wasm_->onGrpcCreateInitialMetadata_) {
    return;
  }
  grpc_create_initial_metadata_ = &metadata;
  wasm_->onGrpcCreateInitialMetadata_(this, id_, token);
  grpc_create_initial_metadata_ = nullptr;
}

// Holds received initial metadata only for the duration of the callback.
void Context::onGrpcReceiveInitialMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) {
  if (!wasm_->onGrpcReceiveInitialMetadata_) {
    return;
  }
  grpc_receive_initial_metadata_ = std::move(metadata);
  wasm_->onGrpcReceiveInitialMetadata_(this, id_, token);
  grpc_receive_initial_metadata_ = nullptr;
}

// Holds received trailing metadata only for the duration of the callback.
void Context::onGrpcReceiveTrailingMetadata(uint32_t token, Http::HeaderMapPtr&& metadata) {
  if (!wasm_->onGrpcReceiveTrailingMetadata_) {
    return;
  }
  grpc_receive_trailing_metadata_ = std::move(metadata);
  wasm_->onGrpcReceiveTrailingMetadata_(this, id_, token);
  grpc_receive_trailing_metadata_ = nullptr;
}

// Allocates a metric of the given type under |name| and returns its id.
WasmResult Context::defineMetric(MetricType type, absl::string_view name,
                                 uint32_t* metric_id_ptr) {
  auto stat_name = wasm_->stat_name_set_->getDynamic(name);
  if (type == MetricType::Counter) {
    auto id = wasm_->nextCounterMetricId();
    auto c = &wasm_->scope_->counterFromStatName(stat_name);
    wasm_->counters_.emplace(id, c);
    *metric_id_ptr = id;
    return WasmResult::Ok;
  } else if (type == MetricType::Gauge) {
    auto id = wasm_->nextGaugeMetricId();
    auto g = &wasm_->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate);
    wasm_->gauges_.emplace(id, g);
    *metric_id_ptr = id;
    return WasmResult::Ok;
  } else if (type == MetricType::Histogram) {
    auto id = wasm_->nextHistogramMetricId();
    auto h = &wasm_->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified);
    wasm_->histograms_.emplace(id, h);
    *metric_id_ptr = id;
    return WasmResult::Ok;
  }
  return WasmResult::BadArgument;
}

// Adds |offset| to a counter (positive only) or gauge (either sign).
// NOTE(review): in the Counter branch the trailing `return NotFound` is
// unreachable (both arms of the preceding if/else return), and a missing
// counter id falls through to BadArgument rather than NotFound — verify
// whether that is intended.
WasmResult Context::incrementMetric(uint32_t metric_id, int64_t offset) {
  auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask);
  if (type == MetricType::Counter) {
    auto it = wasm_->counters_.find(metric_id);
    if (it != wasm_->counters_.end()) {
      if (offset > 0) {
        it->second->add(offset);
        return WasmResult::Ok;
      } else {
        return WasmResult::BadArgument;
      }
      return WasmResult::NotFound;
    }
  } else if (type == MetricType::Gauge) {
    auto it = wasm_->gauges_.find(metric_id);
    if (it != wasm_->gauges_.end()) {
      if (offset > 0) {
        it->second->add(offset);
        return WasmResult::Ok;
      } else {
        it->second->sub(-offset);
        return WasmResult::Ok;
      }
    }
    return WasmResult::NotFound;
  }
  return WasmResult::BadArgument;
}

// Sets/records an absolute value on the metric (add for counters, set for
// gauges, recordValue for histograms).
WasmResult Context::recordMetric(uint32_t metric_id, uint64_t value) {
  auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask);
  if (type == MetricType::Counter) {
    auto it = wasm_->counters_.find(metric_id);
    if (it != wasm_->counters_.end()) {
      it->second->add(value);
      return WasmResult::Ok;
    }
  } else if (type == MetricType::Gauge) {
    auto it = wasm_->gauges_.find(metric_id);
    if (it != wasm_->gauges_.end()) {
      it->second->set(value);
      return WasmResult::Ok;
    }
  } else if (type == MetricType::Histogram) {
    auto it = wasm_->histograms_.find(metric_id);
    if (it != wasm_->histograms_.end()) {
      it->second->recordValue(value);
      return WasmResult::Ok;
    }
  }
  return WasmResult::NotFound;
}

// Reads the current value of a counter or gauge (histograms unsupported).
WasmResult Context::getMetric(uint32_t metric_id, uint64_t* result_uint64_ptr) {
  auto type = static_cast<MetricType>(metric_id & Wasm::kMetricTypeMask);
  if (type == MetricType::Counter) {
    auto it = wasm_->counters_.find(metric_id);
    if (it != wasm_->counters_.end()) {
      *result_uint64_ptr = it->second->value();
      return WasmResult::Ok;
    }
    return WasmResult::NotFound;
  } else if (type == MetricType::Gauge) {
    auto it = wasm_->gauges_.find(metric_id);
    if (it != wasm_->gauges_.end()) {
      *result_uint64_ptr = it->second->value();
      return WasmResult::Ok;
    }
    return WasmResult::NotFound;
  }
  return WasmResult::BadArgument;
}

// Primary Wasm constructor: creates the VM for runtime |vm| but does not yet
// load code (see initialize()).
Wasm::Wasm(absl::string_view vm, absl::string_view vm_id, absl::string_view vm_configuration,
           PluginSharedPtr plugin, Stats::ScopeSharedPtr scope,
           Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher)
    : vm_id_(std::string(vm_id)), wasm_vm_(Common::Wasm::createWasmVm(vm)), plugin_(plugin),
      scope_(scope), cluster_manager_(cluster_manager), dispatcher_(dispatcher),
      time_source_(dispatcher.timeSource()), vm_configuration_(vm_configuration),
      stat_name_set_(scope_->symbolTable().makeSet("Wasm").release()) {}

// Builds the log prefix from plugin name, root id and vm id.
// NOTE(review): the last condition is `vm_id_.empty()` while the first two
// test `!empty()` — this appends the vm id only when it is empty, which looks
// inverted; confirm against the intended prefix format.
std::string Plugin::makeLogPrefix() const {
  std::string prefix;
  if (!name_.empty()) {
    prefix = prefix + " " + name_;
  }
  if (!root_id_.empty()) {
    prefix = prefix + " " + std::string(root_id_);
  }
  if (vm_id_.empty()) {
    prefix = prefix + " " + std::string(vm_id_);
  }
  return prefix;
}

// Cancels all outstanding async operations owned by this context before it
// is destroyed.
Context::~Context() {
  // Cancel any outstanding requests.
  for (auto& p : http_request_) {
    p.second.request->cancel();
  }
  for (auto& p : grpc_call_request_) {
    p.second.request->cancel();
  }
  for (auto& p : grpc_stream_) {
    p.second.stream->resetStream();
  }
  // Do not remove vm or root contexts which have the same lifetime as wasm_.
  if (root_context_id_) {
    wasm_->contexts_.erase(id_);
  }
}

// Registers the host callbacks (emscripten shims, WASI stubs, and the
// "proxy_"-prefixed ABI) with the VM.
void Wasm::registerCallbacks() {
#define _REGISTER(_fn)                                                                             \
  wasm_vm_->registerCallback(                                                                      \
      "env", #_fn, &_fn##Handler,                                                                  \
      &ConvertFunctionWordToUint32<decltype(_fn##Handler),                                         \
                                   _fn##Handler>::convertFunctionWordToUint32)
  if (is_emscripten_) {
    _REGISTER(pthread_equal);
  }
#undef _REGISTER

#define _REGISTER_WASI(_fn)                                                                        \
  wasm_vm_->registerCallback(                                                                      \
      "wasi_unstable", #_fn, &wasi_unstable_##_fn##Handler,                                        \
      &ConvertFunctionWordToUint32<decltype(wasi_unstable_##_fn##Handler),                         \
                                   wasi_unstable_##_fn##Handler>::convertFunctionWordToUint32)
  if (is_emscripten_) {
    _REGISTER_WASI(fd_write);
    _REGISTER_WASI(fd_seek);
    _REGISTER_WASI(fd_close);
    _REGISTER_WASI(environ_get);
    _REGISTER_WASI(environ_sizes_get);
    _REGISTER_WASI(proc_exit);
  }
#undef _REGISTER_WASI

  // Calls with the "proxy_" prefix.
#define _REGISTER_PROXY(_fn)                                                                       \
  wasm_vm_->registerCallback(                                                                      \
      "env", "proxy_" #_fn, &_fn##Handler,                                                         \
      &ConvertFunctionWordToUint32<decltype(_fn##Handler),                                         \
                                   _fn##Handler>::convertFunctionWordToUint32);
  _REGISTER_PROXY(log);
  _REGISTER_PROXY(setProperty);
  _REGISTER_PROXY(getProperty);
  _REGISTER_PROXY(continueRequest);
  _REGISTER_PROXY(continueResponse);
  _REGISTER_PROXY(sendLocalResponse);
  _REGISTER_PROXY(clearRouteCache);
  _REGISTER_PROXY(getSharedData);
  _REGISTER_PROXY(setSharedData);
  _REGISTER_PROXY(registerSharedQueue);
  _REGISTER_PROXY(resolveSharedQueue);
  _REGISTER_PROXY(dequeueSharedQueue);
  _REGISTER_PROXY(enqueueSharedQueue);
  _REGISTER_PROXY(getDownstreamDataBufferBytes);
  _REGISTER_PROXY(getUpstreamDataBufferBytes);
  _REGISTER_PROXY(getHeaderMapValue);
  _REGISTER_PROXY(addHeaderMapValue);
  _REGISTER_PROXY(replaceHeaderMapValue);
  _REGISTER_PROXY(removeHeaderMapValue);
  _REGISTER_PROXY(getHeaderMapPairs);
  _REGISTER_PROXY(setHeaderMapPairs);
  _REGISTER_PROXY(getHeaderMapSize);
  _REGISTER_PROXY(getRequestBodyBufferBytes);
  _REGISTER_PROXY(getResponseBodyBufferBytes);
  _REGISTER_PROXY(httpCall);
  _REGISTER_PROXY(grpcCall);
  _REGISTER_PROXY(grpcStream);
  _REGISTER_PROXY(grpcClose);
  _REGISTER_PROXY(grpcCancel);
  _REGISTER_PROXY(grpcSend);
  _REGISTER_PROXY(setTickPeriodMilliseconds);
  _REGISTER_PROXY(getCurrentTimeNanoseconds);
  _REGISTER_PROXY(defineMetric);
  _REGISTER_PROXY(incrementMetric);
  _REGISTER_PROXY(recordMetric);
  _REGISTER_PROXY(getMetric);
  _REGISTER_PROXY(setEffectiveContext);
#undef _REGISTER_PROXY
}

// Resolves the module's exported functions into typed function pointers;
// malloc/free are mandatory for host<->VM data exchange.
void Wasm::getFunctions() {
#define _GET(_fn) wasm_vm_->getFunction(#_fn, &_fn##_);
  _GET(_start);
  _GET(__wasm_call_ctors);
  _GET(malloc);
  _GET(free);
#undef _GET
#define _GET_PROXY(_fn) wasm_vm_->getFunction("proxy_" #_fn, &_fn##_);
  _GET_PROXY(validateConfiguration);
  _GET_PROXY(onStart);
  _GET_PROXY(onConfigure);
  _GET_PROXY(onTick);
  _GET_PROXY(onCreate);
  _GET_PROXY(onNewConnection);
  _GET_PROXY(onDownstreamData);
  _GET_PROXY(onUpstreamData);
  _GET_PROXY(onDownstreamConnectionClose);
  _GET_PROXY(onUpstreamConnectionClose);
  _GET_PROXY(onRequestHeaders);
  _GET_PROXY(onRequestBody);
  _GET_PROXY(onRequestTrailers);
  _GET_PROXY(onRequestMetadata);
  _GET_PROXY(onResponseHeaders);
  _GET_PROXY(onResponseBody);
  _GET_PROXY(onResponseTrailers);
  _GET_PROXY(onResponseMetadata);
  _GET_PROXY(onHttpCallResponse);
  _GET_PROXY(onGrpcReceive);
  _GET_PROXY(onGrpcClose);
  _GET_PROXY(onGrpcCreateInitialMetadata);
  _GET_PROXY(onGrpcReceiveInitialMetadata);
  _GET_PROXY(onGrpcReceiveTrailingMetadata);
  _GET_PROXY(onQueueReady);
  _GET_PROXY(onDone);
  _GET_PROXY(onLog);
  _GET_PROXY(onDelete);
#undef _GET_PROXY
  if (!malloc_ || !free_) {
    throw WasmException("WASM missing malloc/free");
  }
}

// Thread-local clone constructor: clones the VM when the runtime supports it,
// otherwise re-initializes from the original's code.
Wasm::Wasm(const Wasm& wasm, Event::Dispatcher& dispatcher)
    : std::enable_shared_from_this<Wasm>(wasm), vm_id_(wasm.vm_id_), plugin_(wasm.plugin_),
      scope_(wasm.scope_), cluster_manager_(wasm.cluster_manager_), dispatcher_(dispatcher),
      time_source_(dispatcher.timeSource()), stat_name_set_(wasm.stat_name_set_) {
  if (wasm.wasmVm()->cloneable()) {
    wasm_vm_ = wasm.wasmVm()->clone();
    vm_context_ = std::make_shared<Context>(this);
    getFunctions();
  } else {
    wasm_vm_ = Common::Wasm::createWasmVm(wasm.wasmVm()->runtime());
    if (!initialize(wasm.code(), wasm.allow_precompiled())) {
      throw WasmException("Failed to initialize WASM code");
    }
  }
}

// Loads |code| into the VM, parses the optional emscripten_metadata custom
// section (varint-encoded version fields), links the ABI, and starts the VM.
bool Wasm::initialize(const std::string& code, bool allow_precompiled) {
  if (!wasm_vm_) {
    return false;
  }

  // If the configured_vm_id is empty, then hash the code to create a unique vm_id.
  if (vm_id_.empty()) {
    vm_id_ = base64Sha256(code);
  }

  auto ok = wasm_vm_->load(code, allow_precompiled);
  if (!ok) {
    return false;
  }
  auto metadata = wasm_vm_->getCustomSection("emscripten_metadata");
  if (!metadata.empty()) {
    // See https://github.com/emscripten-core/emscripten/blob/incoming/tools/shared.py#L3059
    is_emscripten_ = true;
    auto start = reinterpret_cast<const uint8_t*>(metadata.data());
    auto end = reinterpret_cast<const uint8_t*>(metadata.data() + metadata.size());
    start = decodeVarint(start, end, &emscripten_metadata_major_version_);
    start = decodeVarint(start, end, &emscripten_metadata_minor_version_);
    start = decodeVarint(start, end, &emscripten_abi_major_version_);
    start = decodeVarint(start, end, &emscripten_abi_minor_version_);
    uint32_t temp;
    if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 1) {
      // metadata 0.2 - added: wasm_backend.
      start = decodeVarint(start, end, &temp);
    }
    start = decodeVarint(start, end, &temp);
    start = decodeVarint(start, end, &temp);
    if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 0) {
      // metadata 0.1 - added: global_base, dynamic_base, dynamictop_ptr and tempdouble_ptr.
      start = decodeVarint(start, end, &temp);
      start = decodeVarint(start, end, &temp);
      start = decodeVarint(start, end, &temp);
      decodeVarint(start, end, &temp);
      if (emscripten_metadata_major_version_ > 0 || emscripten_metadata_minor_version_ > 2) {
        // metadata 0.3 - added: standalone_wasm.
        start = decodeVarint(start, end, &emscripten_standalone_wasm_);
      }
    }
  }
  registerCallbacks();
  wasm_vm_->link(vm_id_);
  vm_context_ = std::make_shared<Context>(this);
  getFunctions();
  startVm(vm_context_.get());
  code_ = code;
  allow_precompiled_ = allow_precompiled;
  return true;
}

void Wasm::startVm(Context* root_context) {
  /* Call "_start" function, and fallback to "__wasm_call_ctors" if the former is not available. */
  if (_start_) {
    _start_(root_context);
  } else if (__wasm_call_ctors_) {
    __wasm_call_ctors_(root_context);
  }
}

// Delivers the plugin configuration to an already-started root context.
bool Wasm::configure(Context* root_context, absl::string_view configuration) {
  if (!onConfigure_) {
    return true;
  }
  auto address = copyString(configuration);
  return onConfigure_(root_context, root_context->id(), address, configuration.size()).u64_ != 0;
}

// Returns the root context for the plugin's root id, creating (and starting)
// it on first use; re-invokes onStart if it already exists.
Context* Wasm::start() {
  auto root_id = plugin_->root_id_;
  auto it = root_contexts_.find(root_id);
  if (it != root_contexts_.end()) {
    it->second->onStart(root_id, vm_configuration());
    return it->second.get();
  }
  auto context = std::make_unique<Context>(this, root_id, plugin_);
  auto context_ptr = context.get();
  root_contexts_[root_id] = std::move(context);
  context_ptr->onStart(root_id, vm_configuration());
  return context_ptr;
};

// Test hook: installs a pre-built context as the "" root context.
void Wasm::startForTesting(std::unique_ptr<Context> context) {
  auto context_ptr = context.get();
  if (!context->wasm_) {
    // Initialization was delayed till the Wasm object was created.
    context->wasm_ = this;
    context->plugin_ = plugin_;
    context->id_ = allocContextId();
    contexts_[context->id_] = context.get();
  }
  root_contexts_[""] = std::move(context);
  context_ptr->onStart("", "");
}

// Sets the tick period for a root context, creating and arming its timer when
// transitioning from stopped to running.
void Wasm::setTickPeriod(uint32_t context_id, std::chrono::milliseconds new_tick_period) {
  auto& tick_period = tick_period_[context_id];
  auto& timer = timer_[context_id];
  bool was_running = timer && tick_period.count() > 0;
  tick_period = new_tick_period;
  if (tick_period.count() > 0 && !was_running) {
    // Weak capture so a fired timer cannot outlive the Wasm object.
    timer = dispatcher_.createTimer([weak = std::weak_ptr<Wasm>(shared_from_this()), context_id]() {
      auto shared = weak.lock();
      if (shared) {
        shared->tickHandler(context_id);
      }
    });
    timer->enableTimer(tick_period);
  }
}

// Fires onTick into the VM and re-arms the timer if still enabled.
void Wasm::tickHandler(uint32_t root_context_id) {
  auto& tick_period = tick_period_[root_context_id];
  auto& timer = timer_[root_context_id];
  if (onTick_) {
    onTick_(getContext(root_context_id), root_context_id);
    if (timer && tick_period.count() > 0) {
      timer->enableTimer(tick_period);
    }
  }
}

// Returns the next unused context id.
uint32_t Wasm::allocContextId() {
  while (true) {
    auto id = next_context_id_++;
    // Prevent reuse.
    if (contexts_.find(id) == contexts_.end()) {
      return id;
    }
  }
}

// Delivers a queue-ready notification to the owning root context.
void Wasm::queueReady(uint32_t root_context_id, uint32_t token) {
  auto it = contexts_.find(root_context_id);
  if (it == contexts_.end() || !it->second->isRootContext()) {
    return;
  }
  it->second->onQueueReady(token);
}

Network::FilterStatus Context::onNewConnection() { return onNetworkNewConnection(); };

// Exposes the downstream buffer to the VM only for the duration of the call.
Network::FilterStatus Context::onData(Buffer::Instance& data, bool end_stream) {
  network_downstream_data_buffer_ = &data;
  auto result = onDownstreamData(data.length(), end_stream);
  network_downstream_data_buffer_ = nullptr;
  return result;
}

// Exposes the upstream buffer to the VM only for the duration of the call.
Network::FilterStatus Context::onWrite(Buffer::Instance& data, bool end_stream) {
  network_upstream_data_buffer_ = &data;
  auto result = onUpstreamData(data.length(), end_stream);
  network_upstream_data_buffer_ = nullptr;
  if (end_stream) {
    // This is called when seeing end_stream=true and not on an upstream connection event,
    // because registering for latter requires replicating the whole TCP proxy extension.
    onUpstreamConnectionClose(PeerType::Unknown);
  }
  return result;
}

// Translates downstream connection events into VM close callbacks.
void Context::onEvent(Network::ConnectionEvent event) {
  switch (event) {
  case Network::ConnectionEvent::LocalClose:
    onDownstreamConnectionClose(PeerType::Local);
    break;
  case Network::ConnectionEvent::RemoteClose:
    onDownstreamConnectionClose(PeerType::Remote);
    break;
  default:
    break;
  }
}

void Context::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) {
  network_read_filter_callbacks_ = &callbacks;
  network_read_filter_callbacks_->connection().addConnectionCallbacks(*this);
}

void Context::initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) {
  network_write_filter_callbacks_ = &callbacks;
}

// (continues past this chunk)
void Wasm::log(absl::string_view root_id, const Http::HeaderMap* request_headers,
               const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers,
               const StreamInfo::StreamInfo& stream_info) {
  // Check dynamic metadata for the id_ of the stream for this root_id.
Context* context = nullptr; auto metadata_it = stream_info.dynamicMetadata().filter_metadata().find( HttpFilters::HttpFilterNames::get().Wasm); if (metadata_it != stream_info.dynamicMetadata().filter_metadata().end()) { auto find_id = metadata_it->second.fields().find(std::string("_stream_id_" + std::string(root_id))); if (find_id != metadata_it->second.fields().end()) { context = getContext(static_cast<uint32_t>(find_id->second.number_value())); } } if (!context) { context = getRootContext(root_id); } context->log(request_headers, response_headers, response_trailers, stream_info); } void Context::log(const Http::HeaderMap* request_headers, const Http::HeaderMap* response_headers, const Http::HeaderMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { access_log_request_headers_ = request_headers; // ? request_trailers ? access_log_response_headers_ = response_headers; access_log_response_trailers_ = response_trailers; access_log_stream_info_ = &stream_info; onLog(); access_log_request_headers_ = nullptr; // ? request_trailers ? 
access_log_response_headers_ = nullptr; access_log_response_trailers_ = nullptr; access_log_stream_info_ = nullptr; onDelete(); } void Context::onDestroy() { if (destroyed_) { return; } destroyed_ = true; onDone(); } void Context::onDone() { if (in_vm_context_created_ && wasm_->onDone_) { wasm_->onDone_(this, id_); } } void Context::onLog() { if (in_vm_context_created_ && wasm_->onLog_) { wasm_->onLog_(this, id_); } } void Context::onDelete() { if (in_vm_context_created_ && wasm_->onDelete_) { wasm_->onDelete_(this, id_); } } Http::FilterHeadersStatus Context::decodeHeaders(Http::HeaderMap& headers, bool end_stream) { request_headers_ = &headers; request_end_of_stream_ = end_stream; auto result = onRequestHeaders(); request_headers_ = nullptr; return result; } Http::FilterDataStatus Context::decodeData(Buffer::Instance& data, bool end_stream) { requestBodyBuffer_ = &data; auto result = onRequestBody(data.length(), end_stream); requestBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::decodeTrailers(Http::HeaderMap& trailers) { request_trailers_ = &trailers; auto result = onRequestTrailers(); request_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::decodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onRequestMetadata(); response_metadata_ = nullptr; return result; } void Context::setDecoderFilterCallbacks(Envoy::Http::StreamDecoderFilterCallbacks& callbacks) { decoder_callbacks_ = &callbacks; } Http::FilterHeadersStatus Context::encode100ContinueHeaders(Http::HeaderMap&) { return Http::FilterHeadersStatus::Continue; } Http::FilterHeadersStatus Context::encodeHeaders(Http::HeaderMap& headers, bool end_stream) { response_headers_ = &headers; response_end_of_stream_ = end_stream; auto result = onResponseHeaders(); response_headers_ = nullptr; return result; } Http::FilterDataStatus Context::encodeData(Buffer::Instance& data, bool end_stream) { responseBodyBuffer_ 
= &data; auto result = onResponseBody(data.length(), end_stream); responseBodyBuffer_ = nullptr; return result; } Http::FilterTrailersStatus Context::encodeTrailers(Http::HeaderMap& trailers) { response_trailers_ = &trailers; auto result = onResponseTrailers(); response_trailers_ = nullptr; return result; } Http::FilterMetadataStatus Context::encodeMetadata(Http::MetadataMap& response_metadata) { response_metadata_ = &response_metadata; auto result = onResponseMetadata(); response_metadata_ = nullptr; return result; } // Http::FilterMetadataStatus::Continue; void Context::setEncoderFilterCallbacks(Envoy::Http::StreamEncoderFilterCallbacks& callbacks) { encoder_callbacks_ = &callbacks; } void Context::onHttpCallSuccess(uint32_t token, Envoy::Http::MessagePtr& response) { auto body = absl::string_view(static_cast<char*>(response->body()->linearize(response->body()->length())), response->body()->length()); onHttpCallResponse(token, headerMapToPairs(&response->headers()), body, headerMapToPairs(response->trailers())); http_request_.erase(token); } void Context::onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason /* reason */) { onHttpCallResponse(token, {}, "", {}); http_request_.erase(token); } void AsyncClientHandler::onSuccess(Envoy::Http::MessagePtr&& response) { context->onHttpCallSuccess(token, response); } void AsyncClientHandler::onFailure(Http::AsyncClient::FailureReason reason) { context->onHttpCallFailure(token, reason); } void GrpcCallClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onCreateInitialMetadata(Http::HeaderMap& metadata) { context->onGrpcCreateInitialMetadata(token, metadata); } void GrpcStreamClientHandler::onReceiveInitialMetadata(Http::HeaderMapPtr&& metadata) { context->onGrpcReceiveInitialMetadata(token, std::move(metadata)); } void GrpcStreamClientHandler::onReceiveTrailingMetadata(Http::HeaderMapPtr&& metadata) { 
context->onGrpcReceiveTrailingMetadata(token, std::move(metadata)); } void Context::onGrpcReceive(uint32_t token, Buffer::InstancePtr response) { if (wasm_->onGrpcReceive_) { auto response_size = response->length(); auto response_ptr = wasm_->copyBuffer(*response); wasm_->onGrpcReceive_(this, id_, token, response_ptr, response_size); } if (IsGrpcCallToken(token)) { grpc_call_request_.erase(token); } } void Context::onGrpcClose(uint32_t token, const Grpc::Status::GrpcStatus& status, const absl::string_view message) { if (wasm_->onGrpcClose_) { auto message_ptr = wasm_->copyString(message); wasm_->onGrpcClose_(this, id_, token, static_cast<uint64_t>(status), message_ptr, message.size()); } if (IsGrpcCallToken(token)) { grpc_call_request_.erase(token); } else { grpc_stream_.erase(token); } } WasmResult Context::grpcSend(uint32_t token, absl::string_view message, bool end_stream) { if (IsGrpcCallToken(token)) { return WasmResult::BadArgument; } auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->sendMessageRaw( Buffer::InstancePtr(new Buffer::OwnedImpl(message.data(), message.size())), end_stream); } return WasmResult::Ok; } WasmResult Context::grpcClose(uint32_t token) { if (IsGrpcCallToken(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return WasmResult::NotFound; } if (it != grpc_call_request_.end() && it->second.request) { it->second.request->cancel(); } grpc_call_request_.erase(token); } else { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->closeStream(); } grpc_stream_.erase(token); } return WasmResult::Ok; } WasmResult Context::grpcCancel(uint32_t token) { if (IsGrpcCallToken(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return 
WasmResult::NotFound; } if (it != grpc_call_request_.end() && it->second.request) { it->second.request->cancel(); } grpc_call_request_.erase(token); } else { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; } if (it != grpc_stream_.end() && it->second.stream) { it->second.stream->resetStream(); } grpc_stream_.erase(token); } return WasmResult::Ok; } void GrpcCallClientHandler::onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span&) { context->onGrpcReceive(token, std::move(response)); } void GrpcCallClientHandler::onFailure(Grpc::Status::GrpcStatus status, const std::string& message, Tracing::Span&) { context->onGrpcClose(token, status, message); } bool GrpcStreamClientHandler::onReceiveMessageRaw(Buffer::InstancePtr&& response) { context->onGrpcReceive(token, std::move(response)); return true; } void GrpcStreamClientHandler::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { context->onGrpcClose(token, status, message); } static std::shared_ptr<Wasm> createWasmInternal(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { auto wasm = std::make_shared<Wasm>(vm_config.runtime(), vm_config.vm_id(), vm_config.configuration(), plugin, scope, cluster_manager, dispatcher); const auto& code = Config::DataSource::read(vm_config.code(), true, api); const auto& path = Config::DataSource::getPath(vm_config.code()) .value_or(code.empty() ? 
EMPTY_STRING : INLINE_STRING); if (code.empty()) { throw WasmException(fmt::format("Failed to load WASM code from {}", path)); } if (!wasm->initialize(code, vm_config.allow_precompiled())) { throw WasmException(fmt::format("Failed to initialize WASM code from {}", path)); } if (!root_context_for_testing) { wasm->start(); } else { wasm->startForTesting(std::move(root_context_for_testing)); } return wasm; } std::shared_ptr<Wasm> createWasm(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, nullptr /* root_context_for_testing */); } // namespace Wasm std::shared_ptr<Wasm> createWasmForTesting(const envoy::config::wasm::v2::VmConfig& vm_config, PluginSharedPtr plugin, Stats::ScopeSharedPtr scope, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Api::Api& api, std::unique_ptr<Context> root_context_for_testing) { return createWasmInternal(vm_config, plugin, scope, cluster_manager, dispatcher, api, std::move(root_context_for_testing)); } std::shared_ptr<Wasm> createThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto wasm = std::make_shared<Wasm>(base_wasm, dispatcher); Context* root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } if (!wasm->vm_id().empty()) { local_wasms[wasm->vm_id()] = wasm; } return wasm; } std::shared_ptr<Wasm> getThreadLocalWasmPtr(absl::string_view vm_id) { auto it = local_wasms.find(vm_id); if (it == local_wasms.end()) { return nullptr; } auto wasm = it->second.lock(); if (!wasm) { local_wasms.erase(vm_id); } return wasm; } std::shared_ptr<Wasm> getOrCreateThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration, Event::Dispatcher& dispatcher) { auto 
wasm = getThreadLocalWasmPtr(base_wasm.vm_id()); if (wasm) { auto root_context = wasm->start(); if (!wasm->configure(root_context, configuration)) { throw WasmException("Failed to configure WASM code"); } return wasm; } return createThreadLocalWasm(base_wasm, configuration, dispatcher); } } // namespace Wasm } // namespace Common } // namespace Extensions } // namespace Envoy
Network::FilterStatus Context::onDownstreamData(int data_length, bool end_of_stream) { if (!wasm_->onDownstreamData_) { return Network::FilterStatus::Continue; } auto result = wasm_->onDownstreamData_(this, id_, static_cast<uint32_t>(data_length), static_cast<uint32_t>(end_of_stream)); // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values. return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration; }
Network::FilterStatus Context::onDownstreamData(int data_length, bool end_of_stream) { if (!in_vm_context_created_ || !wasm_->onDownstreamData_) { return Network::FilterStatus::Continue; } auto result = wasm_->onDownstreamData_(this, id_, static_cast<uint32_t>(data_length), static_cast<uint32_t>(end_of_stream)); // TODO(PiotrSikora): pull Proxy-WASM's FilterStatus values. return result.u64_ == 0 ? Network::FilterStatus::Continue : Network::FilterStatus::StopIteration; }
{'added': [(1702, ' in_vm_context_created_ = true;'), (1729, ' in_vm_context_created_ = true;'), (1740, ' if (!in_vm_context_created_ || !wasm_->onDownstreamData_) {'), (1750, ' if (!in_vm_context_created_ || !wasm_->onUpstreamData_) {'), (1760, ' if (in_vm_context_created_ && wasm_->onDownstreamConnectionClose_) {'), (1766, ' if (in_vm_context_created_ && wasm_->onUpstreamConnectionClose_) {'), (1790, ' if (!in_vm_context_created_ || !wasm_->onRequestBody_) {'), (1809, ' if (!in_vm_context_created_ || !wasm_->onRequestTrailers_) {'), (1819, ' if (!in_vm_context_created_ || !wasm_->onRequestMetadata_) {'), (1847, ' if (!in_vm_context_created_ || !wasm_->onResponseBody_) {'), (1866, ' if (!in_vm_context_created_ || !wasm_->onResponseTrailers_) {'), (1876, ' if (!in_vm_context_created_ || !wasm_->onResponseMetadata_) {'), (2450, ' if (in_vm_context_created_ && wasm_->onDone_) {'), (2456, ' if (in_vm_context_created_ && wasm_->onLog_) {'), (2462, ' if (in_vm_context_created_ && wasm_->onDelete_) {')], 'deleted': [(1738, ' if (!wasm_->onDownstreamData_) {'), (1748, ' if (!wasm_->onUpstreamData_) {'), (1758, ' if (wasm_->onDownstreamConnectionClose_) {'), (1764, ' if (wasm_->onUpstreamConnectionClose_) {'), (1788, ' if (!wasm_->onRequestBody_) {'), (1807, ' if (!wasm_->onRequestTrailers_) {'), (1817, ' if (!wasm_->onRequestMetadata_) {'), (1845, ' if (!wasm_->onResponseBody_) {'), (1864, ' if (!wasm_->onResponseTrailers_) {'), (1874, ' if (!wasm_->onResponseMetadata_) {'), (2448, ' if (wasm_->onDone_) {'), (2454, ' if (wasm_->onLog_) {'), (2460, ' if (wasm_->onDelete_) {')]}
15
13
2,395
18,810
8
75
3
https://github.com/istio/envoy
CVE-2020-10739
CWE-476
1,870
posix-timers.c
C
good_sigevent
/* * linux/kernel/posix-timers.c * * * 2002-10-15 Posix Clocks & timers * by George Anzinger george@mvista.com * * Copyright (C) 2002 2003 by MontaVista Software. * * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug. * Copyright (C) 2004 Boris Hu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA */ /* These are all the functions necessary to implement * POSIX clocks & timers */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/sched/task.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/hash.h> #include <linux/posix-clock.h> #include <linux/posix-timers.h> #include <linux/syscalls.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/export.h> #include <linux/hashtable.h> #include <linux/compat.h> #include "timekeeping.h" #include "posix-timers.h" /* * Management arrays for POSIX timers. Timers are now kept in static hash table * with 512 entries. * Timer ids are allocated by local routine, which selects proper hash head by * key, constructed from current->signal address and per signal struct counter. 
* This keeps timer ids unique per process, but now they can intersect between * processes. */ /* * Lets keep our timers in a slab cache :-) */ static struct kmem_cache *posix_timers_cache; static DEFINE_HASHTABLE(posix_timers_hashtable, 9); static DEFINE_SPINLOCK(hash_lock); static const struct k_clock * const posix_clocks[]; static const struct k_clock *clockid_to_kclock(const clockid_t id); static const struct k_clock clock_realtime, clock_monotonic; /* * we assume that the new SIGEV_THREAD_ID shares no bits with the other * SIGEV values. Here we put out an error if this assumption fails. */ #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \ ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD)) #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!" #endif /* * parisc wants ENOTSUP instead of EOPNOTSUPP */ #ifndef ENOTSUP # define ENANOSLEEP_NOTSUP EOPNOTSUPP #else # define ENANOSLEEP_NOTSUP ENOTSUP #endif /* * The timer ID is turned into a timer address by idr_find(). * Verifying a valid ID consists of: * * a) checking that idr_find() returns other than -1. * b) checking that the timer id matches the one in the timer itself. * c) that the timer owner is in the callers thread group. */ /* * CLOCKs: The POSIX standard calls for a couple of clocks and allows us * to implement others. This structure defines the various * clocks. * * RESOLUTION: Clock resolution is used to round up timer and interval * times, NOT to report clock times, which are reported with as * much resolution as the system can muster. In some cases this * resolution may depend on the underlying clock hardware and * may not be quantifiable until run time, and only then is the * necessary code is written. The standard says we should say * something about this issue in the documentation... * * FUNCTIONS: The CLOCKs structure defines possible functions to * handle various clock functions. * * The standard POSIX timer management code assumes the * following: 1.) 
The k_itimer struct (sched.h) is used for * the timer. 2.) The list, it_lock, it_clock, it_id and * it_pid fields are not modified by timer code. * * Permissions: It is assumed that the clock_settime() function defined * for each clock will take care of permission checks. Some * clocks may be set able by any user (i.e. local process * clocks) others not. Currently the only set able clock we * have is CLOCK_REALTIME and its high res counter part, both of * which we beg off on and pass to do_sys_settimeofday(). */ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags); #define lock_timer(tid, flags) \ ({ struct k_itimer *__timr; \ __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \ __timr; \ }) static int hash(struct signal_struct *sig, unsigned int nr) { return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable)); } static struct k_itimer *__posix_timers_find(struct hlist_head *head, struct signal_struct *sig, timer_t id) { struct k_itimer *timer; hlist_for_each_entry_rcu(timer, head, t_hash) { if ((timer->it_signal == sig) && (timer->it_id == id)) return timer; } return NULL; } static struct k_itimer *posix_timer_by_id(timer_t id) { struct signal_struct *sig = current->signal; struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)]; return __posix_timers_find(head, sig, id); } static int posix_timer_add(struct k_itimer *timer) { struct signal_struct *sig = current->signal; int first_free_id = sig->posix_timer_id; struct hlist_head *head; int ret = -ENOENT; do { spin_lock(&hash_lock); head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)]; if (!__posix_timers_find(head, sig, sig->posix_timer_id)) { hlist_add_head_rcu(&timer->t_hash, head); ret = sig->posix_timer_id; } if (++sig->posix_timer_id < 0) sig->posix_timer_id = 0; if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT)) /* Loop over all possible ids completed */ ret = -EAGAIN; spin_unlock(&hash_lock); } while (ret == -ENOENT); return 
ret; } static inline void unlock_timer(struct k_itimer *timr, unsigned long flags) { spin_unlock_irqrestore(&timr->it_lock, flags); } /* Get clock_realtime */ static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp) { ktime_get_real_ts64(tp); return 0; } /* Set clock_realtime */ static int posix_clock_realtime_set(const clockid_t which_clock, const struct timespec64 *tp) { return do_sys_settimeofday64(tp, NULL); } static int posix_clock_realtime_adj(const clockid_t which_clock, struct timex *t) { return do_adjtimex(t); } /* * Get monotonic time for posix timers */ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp) { ktime_get_ts64(tp); return 0; } /* * Get monotonic-raw time for posix timers */ static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp) { getrawmonotonic64(tp); return 0; } static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp) { *tp = current_kernel_time64(); return 0; } static int posix_get_monotonic_coarse(clockid_t which_clock, struct timespec64 *tp) { *tp = get_monotonic_coarse64(); return 0; } static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp) { *tp = ktime_to_timespec64(KTIME_LOW_RES); return 0; } static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp) { get_monotonic_boottime64(tp); return 0; } static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) { timekeeping_clocktai64(tp); return 0; } static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp) { tp->tv_sec = 0; tp->tv_nsec = hrtimer_resolution; return 0; } /* * Initialize everything, well, just everything in Posix clocks/timers ;) */ static __init int init_posix_timers(void) { posix_timers_cache = kmem_cache_create("posix_timers_cache", sizeof (struct k_itimer), 0, SLAB_PANIC, NULL); return 0; } __initcall(init_posix_timers); static void common_hrtimer_rearm(struct k_itimer *timr) { struct hrtimer 
*timer = &timr->it.real.timer; if (!timr->it_interval) return; timr->it_overrun += (unsigned int) hrtimer_forward(timer, timer->base->get_time(), timr->it_interval); hrtimer_restart(timer); } /* * This function is exported for use by the signal deliver code. It is * called just prior to the info block being released and passes that * block to us. It's function is to update the overrun entry AND to * restart the timer. It should only be called if the timer is to be * restarted (i.e. we have flagged this in the sys_private entry of the * info block). * * To protect against the timer going away while the interrupt is queued, * we require that the it_requeue_pending flag be set. */ void posixtimer_rearm(struct siginfo *info) { struct k_itimer *timr; unsigned long flags; timr = lock_timer(info->si_tid, &flags); if (!timr) return; if (timr->it_requeue_pending == info->si_sys_private) { timr->kclock->timer_rearm(timr); timr->it_active = 1; timr->it_overrun_last = timr->it_overrun; timr->it_overrun = -1; ++timr->it_requeue_pending; info->si_overrun += timr->it_overrun_last; } unlock_timer(timr, flags); } int posix_timer_event(struct k_itimer *timr, int si_private) { struct task_struct *task; int shared, ret = -1; /* * FIXME: if ->sigq is queued we can race with * dequeue_signal()->posixtimer_rearm(). * * If dequeue_signal() sees the "right" value of * si_sys_private it calls posixtimer_rearm(). * We re-queue ->sigq and drop ->it_lock(). * posixtimer_rearm() locks the timer * and re-schedules it while ->sigq is pending. * Not really bad, but not that we want. */ timr->sigq->info.si_sys_private = si_private; rcu_read_lock(); task = pid_task(timr->it_pid, PIDTYPE_PID); if (task) { shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); ret = send_sigqueue(timr->sigq, task, shared); } rcu_read_unlock(); /* If we failed to send the signal the timer stops. */ return ret > 0; } /* * This function gets called when a POSIX.1b interval timer expires. 
It * is used as a callback from the kernel internal timer. The * run_timer_list code ALWAYS calls with interrupts on. * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. */ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) { struct k_itimer *timr; unsigned long flags; int si_private = 0; enum hrtimer_restart ret = HRTIMER_NORESTART; timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); timr->it_active = 0; if (timr->it_interval != 0) si_private = ++timr->it_requeue_pending; if (posix_timer_event(timr, si_private)) { /* * signal was not sent because of sig_ignor * we will not get a call back to restart it AND * it should be restarted. */ if (timr->it_interval != 0) { ktime_t now = hrtimer_cb_get_time(timer); /* * FIXME: What we really want, is to stop this * timer completely and restart it in case the * SIG_IGN is removed. This is a non trivial * change which involves sighand locking * (sigh !), which we don't want to do late in * the release cycle. * * For now we just let timers with an interval * less than a jiffie expire every jiffie to * avoid softirq starvation in case of SIG_IGN * and a very small interval, which would put * the timer right back on the softirq pending * list. By moving now ahead of time we trick * hrtimer_forward() to expire the timer * later, while we still maintain the overrun * accuracy, but have some inconsistency in * the timer_gettime() case. This is at least * better than a starved softirq. A more * complex fix which solves also another related * inconsistency is already in the pipeline. 
*/ #ifdef CONFIG_HIGH_RES_TIMERS { ktime_t kj = NSEC_PER_SEC / HZ; if (timr->it_interval < kj) now = ktime_add(now, kj); } #endif timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, timr->it_interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; timr->it_active = 1; } } unlock_timer(timr, flags); return ret; } static struct pid *good_sigevent(sigevent_t * event) { struct task_struct *rtn = current->group_leader; if ((event->sigev_notify & SIGEV_THREAD_ID ) && (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || !same_thread_group(rtn, current) || (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) return NULL; if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) return NULL; return task_pid(rtn); } static struct k_itimer * alloc_posix_timer(void) { struct k_itimer *tmr; tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL); if (!tmr) return tmr; if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { kmem_cache_free(posix_timers_cache, tmr); return NULL; } memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); return tmr; } static void k_itimer_rcu_free(struct rcu_head *head) { struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu); kmem_cache_free(posix_timers_cache, tmr); } #define IT_ID_SET 1 #define IT_ID_NOT_SET 0 static void release_posix_timer(struct k_itimer *tmr, int it_id_set) { if (it_id_set) { unsigned long flags; spin_lock_irqsave(&hash_lock, flags); hlist_del_rcu(&tmr->t_hash); spin_unlock_irqrestore(&hash_lock, flags); } put_pid(tmr->it_pid); sigqueue_free(tmr->sigq); call_rcu(&tmr->it.rcu, k_itimer_rcu_free); } static int common_timer_create(struct k_itimer *new_timer) { hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0); return 0; } /* Create a POSIX.1b interval timer. 
*/ static int do_timer_create(clockid_t which_clock, struct sigevent *event, timer_t __user *created_timer_id) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct k_itimer *new_timer; int error, new_timer_id; int it_id_set = IT_ID_NOT_SET; if (!kc) return -EINVAL; if (!kc->timer_create) return -EOPNOTSUPP; new_timer = alloc_posix_timer(); if (unlikely(!new_timer)) return -EAGAIN; spin_lock_init(&new_timer->it_lock); new_timer_id = posix_timer_add(new_timer); if (new_timer_id < 0) { error = new_timer_id; goto out; } it_id_set = IT_ID_SET; new_timer->it_id = (timer_t) new_timer_id; new_timer->it_clock = which_clock; new_timer->kclock = kc; new_timer->it_overrun = -1; if (event) { rcu_read_lock(); new_timer->it_pid = get_pid(good_sigevent(event)); rcu_read_unlock(); if (!new_timer->it_pid) { error = -EINVAL; goto out; } new_timer->it_sigev_notify = event->sigev_notify; new_timer->sigq->info.si_signo = event->sigev_signo; new_timer->sigq->info.si_value = event->sigev_value; } else { new_timer->it_sigev_notify = SIGEV_SIGNAL; new_timer->sigq->info.si_signo = SIGALRM; memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t)); new_timer->sigq->info.si_value.sival_int = new_timer->it_id; new_timer->it_pid = get_pid(task_tgid(current)); } new_timer->sigq->info.si_tid = new_timer->it_id; new_timer->sigq->info.si_code = SI_TIMER; if (copy_to_user(created_timer_id, &new_timer_id, sizeof (new_timer_id))) { error = -EFAULT; goto out; } error = kc->timer_create(new_timer); if (error) goto out; spin_lock_irq(&current->sighand->siglock); new_timer->it_signal = current->signal; list_add(&new_timer->list, &current->signal->posix_timers); spin_unlock_irq(&current->sighand->siglock); return 0; /* * In the case of the timer belonging to another task, after * the task is unlocked, the timer is owned by the other task * and may cease to exist at any time. Don't use or modify * new_timer after the unlock call. 
*/ out: release_posix_timer(new_timer, it_id_set); return error; } SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, struct sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (copy_from_user(&event, timer_event_spec, sizeof (event))) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock, struct compat_sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (get_compat_sigevent(&event, timer_event_spec)) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #endif /* * Locking issues: We need to protect the result of the id look up until * we get the timer locked down so it is not deleted under us. The * removal is done under the idr spinlock so we use that here to bridge * the find to the timer lock. To avoid a dead lock, the timer id MUST * be release with out holding the timer lock. */ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags) { struct k_itimer *timr; /* * timer_t could be any type >= int and we want to make sure any * @timer_id outside positive int range fails lookup. 
*/ if ((unsigned long long)timer_id > INT_MAX) return NULL; rcu_read_lock(); timr = posix_timer_by_id(timer_id); if (timr) { spin_lock_irqsave(&timr->it_lock, *flags); if (timr->it_signal == current->signal) { rcu_read_unlock(); return timr; } spin_unlock_irqrestore(&timr->it_lock, *flags); } rcu_read_unlock(); return NULL; } static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return __hrtimer_expires_remaining_adjusted(timer, now); } static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return (int)hrtimer_forward(timer, now, timr->it_interval); } /* * Get the time remaining on a POSIX.1b interval timer. This function * is ALWAYS called with spin_lock_irq on the timer, thus it must not * mess with irq. * * We have a couple of messes to clean up here. First there is the case * of a timer that has a requeue pending. These timers should appear to * be in the timer list with an expiry as if we were to requeue them * now. * * The second issue is the SIGEV_NONE timer which may be active but is * not really ever put in the timer list (to save system resources). * This timer may be expired, and if so, we will do it here. Otherwise * it is the same as a requeue pending timer WRT to what we should * report. */ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) { const struct k_clock *kc = timr->kclock; ktime_t now, remaining, iv; struct timespec64 ts64; bool sig_none; sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE; iv = timr->it_interval; /* interval timer ? */ if (iv) { cur_setting->it_interval = ktime_to_timespec64(iv); } else if (!timr->it_active) { /* * SIGEV_NONE oneshot timers are never queued. Check them * below. */ if (!sig_none) return; } /* * The timespec64 based conversion is suboptimal, but it's not * worth to implement yet another callback. 
*/ kc->clock_get(timr->it_clock, &ts64); now = timespec64_to_ktime(ts64); /* * When a requeue is pending or this is a SIGEV_NONE timer move the * expiry time forward by intervals, so expiry is > now. */ if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none)) timr->it_overrun += kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* Return 0 only, when the timer is expired and not pending */ if (remaining <= 0) { /* * A single shot SIGEV_NONE timer must return 0, when * it is expired ! */ if (!sig_none) cur_setting->it_value.tv_nsec = 1; } else { cur_setting->it_value = ktime_to_timespec64(remaining); } } /* Get the time remaining on a POSIX.1b interval timer. */ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting) { struct k_itimer *timr; const struct k_clock *kc; unsigned long flags; int ret = 0; timr = lock_timer(timer_id, &flags); if (!timr) return -EINVAL; memset(setting, 0, sizeof(*setting)); kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_get)) ret = -EINVAL; else kc->timer_get(timr, setting); unlock_timer(timr, flags); return ret; } /* Get the time remaining on a POSIX.1b interval timer. */ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, struct itimerspec __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_itimerspec64(&cur_setting, setting)) ret = -EFAULT; } return ret; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, struct compat_itimerspec __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_compat_itimerspec64(&cur_setting, setting)) ret = -EFAULT; } return ret; } #endif /* * Get the number of overruns of a POSIX.1b interval timer. This is to * be the overrun of the timer last delivered. At the same time we are * accumulating overruns on the next timer. 
The overrun is frozen when * the signal is delivered, either at the notify time (if the info block * is not queued) or at the actual delivery time (as we are informed by * the call back to posixtimer_rearm(). So all we need to do is * to pick up the frozen overrun. */ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) { struct k_itimer *timr; int overrun; unsigned long flags; timr = lock_timer(timer_id, &flags); if (!timr) return -EINVAL; overrun = timr->it_overrun_last; unlock_timer(timr, flags); return overrun; } static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires, bool absolute, bool sigev_none) { struct hrtimer *timer = &timr->it.real.timer; enum hrtimer_mode mode; mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL; /* * Posix magic: Relative CLOCK_REALTIME timers are not affected by * clock modifications, so they become CLOCK_MONOTONIC based under the * hood. See hrtimer_init(). Update timr->kclock, so the generic * functions which use timr->kclock->clock_get() work. * * Note: it_clock stays unmodified, because the next timer_set() might * use ABSTIME, so it needs to switch back. */ if (timr->it_clock == CLOCK_REALTIME) timr->kclock = absolute ? &clock_realtime : &clock_monotonic; hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); timr->it.real.timer.function = posix_timer_fn; if (!absolute) expires = ktime_add_safe(expires, timer->base->get_time()); hrtimer_set_expires(timer, expires); if (!sigev_none) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } static int common_hrtimer_try_to_cancel(struct k_itimer *timr) { return hrtimer_try_to_cancel(&timr->it.real.timer); } /* Set a POSIX.1b interval timer. 
*/ int common_timer_set(struct k_itimer *timr, int flags, struct itimerspec64 *new_setting, struct itimerspec64 *old_setting) { const struct k_clock *kc = timr->kclock; bool sigev_none; ktime_t expires; if (old_setting) common_timer_get(timr, old_setting); /* Prevent rearming by clearing the interval */ timr->it_interval = 0; /* * Careful here. On SMP systems the timer expiry function could be * active and spinning on timr->it_lock. */ if (kc->timer_try_to_cancel(timr) < 0) return TIMER_RETRY; timr->it_active = 0; timr->it_requeue_pending = (timr->it_requeue_pending + 2) & ~REQUEUE_PENDING; timr->it_overrun_last = 0; /* Switch off the timer when it_value is zero */ if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) return 0; timr->it_interval = timespec64_to_ktime(new_setting->it_interval); expires = timespec64_to_ktime(new_setting->it_value); sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE; kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none); timr->it_active = !sigev_none; return 0; } static int do_timer_settime(timer_t timer_id, int flags, struct itimerspec64 *new_spec64, struct itimerspec64 *old_spec64) { const struct k_clock *kc; struct k_itimer *timr; unsigned long flag; int error = 0; if (!timespec64_valid(&new_spec64->it_interval) || !timespec64_valid(&new_spec64->it_value)) return -EINVAL; if (old_spec64) memset(old_spec64, 0, sizeof(*old_spec64)); retry: timr = lock_timer(timer_id, &flag); if (!timr) return -EINVAL; kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; else error = kc->timer_set(timr, flags, new_spec64, old_spec64); unlock_timer(timr, flag); if (error == TIMER_RETRY) { old_spec64 = NULL; // We already got the old time... 
goto retry; } return error; } /* Set a POSIX.1b interval timer */ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, const struct itimerspec __user *, new_setting, struct itimerspec __user *, old_setting) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old_setting ? &old_spec : NULL; int error = 0; if (!new_setting) return -EINVAL; if (get_itimerspec64(&new_spec, new_setting)) return -EFAULT; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old_setting) { if (put_itimerspec64(&old_spec, old_setting)) error = -EFAULT; } return error; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, struct compat_itimerspec __user *, new, struct compat_itimerspec __user *, old) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old ? &old_spec : NULL; int error = 0; if (!new) return -EINVAL; if (get_compat_itimerspec64(&new_spec, new)) return -EFAULT; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old) { if (put_compat_itimerspec64(&old_spec, old)) error = -EFAULT; } return error; } #endif int common_timer_del(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; timer->it_interval = 0; if (kc->timer_try_to_cancel(timer) < 0) return TIMER_RETRY; timer->it_active = 0; return 0; } static inline int timer_delete_hook(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_del)) return -EINVAL; return kc->timer_del(timer); } /* Delete a POSIX.1b interval timer. 
*/ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) { struct k_itimer *timer; unsigned long flags; retry_delete: timer = lock_timer(timer_id, &flags); if (!timer) return -EINVAL; if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); goto retry_delete; } spin_lock(&current->sighand->siglock); list_del(&timer->list); spin_unlock(&current->sighand->siglock); /* * This keeps any tasks waiting on the spin lock from thinking * they got something (see the lock code above). */ timer->it_signal = NULL; unlock_timer(timer, flags); release_posix_timer(timer, IT_ID_SET); return 0; } /* * return timer owned by the process, used by exit_itimers */ static void itimer_delete(struct k_itimer *timer) { unsigned long flags; retry_delete: spin_lock_irqsave(&timer->it_lock, flags); if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); goto retry_delete; } list_del(&timer->list); /* * This keeps any tasks waiting on the spin lock from thinking * they got something (see the lock code above). */ timer->it_signal = NULL; unlock_timer(timer, flags); release_posix_timer(timer, IT_ID_SET); } /* * This is called by do_exit or de_thread, only when there are no more * references to the shared signal_struct. 
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	/* Timers remove themselves from the list in itimer_delete() */
	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}

/* Set a clock; permission checks are delegated to the clock's ->clock_set() */
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_timespec64(&new_tp, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &new_tp);
}

/* Read a clock and copy the result back to userspace */
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *,tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && put_timespec64(&kernel_tp, tp))
		error = -EFAULT;

	return error;
}

/* Adjust a clock (adjtimex style); ktx is copied back even on success >= 0 */
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}

/* Query a clock's resolution; tp may be NULL (probe-only call) */
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && put_timespec64(&rtn_tp, tp))
		error = -EFAULT;

	return error;
}

#ifdef CONFIG_COMPAT

/* 32-bit compat variant of clock_settime */
COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
		       struct compat_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (compat_get_timespec64(&ts, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &ts);
}
/* 32-bit compat variant of clock_gettime */
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
		       struct compat_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_get(which_clock, &ts);

	if (!err && compat_put_timespec64(&ts, tp))
		err = -EFAULT;

	return err;
}

/* 32-bit compat variant of clock_adjtime */
COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
		       struct compat_timex __user *, utp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	err = compat_get_timex(&ktx, utp);
	if (err)
		return err;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0)
		err = compat_put_timex(utp, &ktx);

	return err;
}

/* 32-bit compat variant of clock_getres */
COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
		       struct compat_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_getres(which_clock, &ts);
	if (!err && tp && compat_put_timespec64(&ts, tp))
		return -EFAULT;

	return err;
}

#endif

/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 const struct timespec64 *rqtp)
{
	/* TIMER_ABSTIME selects absolute-time sleep mode */
	return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

/* Sleep on a specific clock; rmtp receives the remaining time on interrupt */
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	/* Absolute sleeps never report a remaining time */
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}

#ifdef CONFIG_COMPAT
/* 32-bit compat variant of clock_nanosleep */
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
		       struct compat_timespec __user *, rqtp,
		       struct compat_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (compat_get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
#endif

/* Clock operation tables: one k_clock per supported clockid */

static const struct k_clock clock_realtime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_clock_realtime_get,
	.clock_set		= posix_clock_realtime_set,
	.clock_adj		= posix_clock_realtime_adj,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_monotonic = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_ktime_get_ts,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};

/* Raw/coarse clocks: read-only, no timers or sleeps */
static const struct k_clock clock_monotonic_raw = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_monotonic_raw,
};

static const struct k_clock clock_realtime_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get		= posix_get_realtime_coarse,
};

static const struct k_clock clock_monotonic_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get		= posix_get_monotonic_coarse,
};

static const struct k_clock clock_tai = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_tai,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_boottime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get		= posix_get_boottime,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_arm		= common_hrtimer_arm,
};

/* Static clockid -> k_clock dispatch table; gaps stay NULL */
static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_boottime,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
};

/*
 * Map a clockid to its k_clock ops.  Negative ids encode dynamic posix
 * clocks (CLOCKFD) or per-process/thread CPU clocks; non-negative ids
 * index the static table above.  Returns NULL for unknown ids.
 */
static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id])
		return NULL;

	return posix_clocks[id];
}
/* * linux/kernel/posix-timers.c * * * 2002-10-15 Posix Clocks & timers * by George Anzinger george@mvista.com * * Copyright (C) 2002 2003 by MontaVista Software. * * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug. * Copyright (C) 2004 Boris Hu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA */ /* These are all the functions necessary to implement * POSIX clocks & timers */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/sched/task.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/hash.h> #include <linux/posix-clock.h> #include <linux/posix-timers.h> #include <linux/syscalls.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/export.h> #include <linux/hashtable.h> #include <linux/compat.h> #include "timekeeping.h" #include "posix-timers.h" /* * Management arrays for POSIX timers. Timers are now kept in static hash table * with 512 entries. * Timer ids are allocated by local routine, which selects proper hash head by * key, constructed from current->signal address and per signal struct counter. 
* This keeps timer ids unique per process, but now they can intersect between * processes. */ /* * Lets keep our timers in a slab cache :-) */ static struct kmem_cache *posix_timers_cache; static DEFINE_HASHTABLE(posix_timers_hashtable, 9); static DEFINE_SPINLOCK(hash_lock); static const struct k_clock * const posix_clocks[]; static const struct k_clock *clockid_to_kclock(const clockid_t id); static const struct k_clock clock_realtime, clock_monotonic; /* * we assume that the new SIGEV_THREAD_ID shares no bits with the other * SIGEV values. Here we put out an error if this assumption fails. */ #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \ ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD)) #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!" #endif /* * parisc wants ENOTSUP instead of EOPNOTSUPP */ #ifndef ENOTSUP # define ENANOSLEEP_NOTSUP EOPNOTSUPP #else # define ENANOSLEEP_NOTSUP ENOTSUP #endif /* * The timer ID is turned into a timer address by idr_find(). * Verifying a valid ID consists of: * * a) checking that idr_find() returns other than -1. * b) checking that the timer id matches the one in the timer itself. * c) that the timer owner is in the callers thread group. */ /* * CLOCKs: The POSIX standard calls for a couple of clocks and allows us * to implement others. This structure defines the various * clocks. * * RESOLUTION: Clock resolution is used to round up timer and interval * times, NOT to report clock times, which are reported with as * much resolution as the system can muster. In some cases this * resolution may depend on the underlying clock hardware and * may not be quantifiable until run time, and only then is the * necessary code is written. The standard says we should say * something about this issue in the documentation... * * FUNCTIONS: The CLOCKs structure defines possible functions to * handle various clock functions. * * The standard POSIX timer management code assumes the * following: 1.) 
The k_itimer struct (sched.h) is used for * the timer. 2.) The list, it_lock, it_clock, it_id and * it_pid fields are not modified by timer code. * * Permissions: It is assumed that the clock_settime() function defined * for each clock will take care of permission checks. Some * clocks may be set able by any user (i.e. local process * clocks) others not. Currently the only set able clock we * have is CLOCK_REALTIME and its high res counter part, both of * which we beg off on and pass to do_sys_settimeofday(). */ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags); #define lock_timer(tid, flags) \ ({ struct k_itimer *__timr; \ __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \ __timr; \ }) static int hash(struct signal_struct *sig, unsigned int nr) { return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable)); } static struct k_itimer *__posix_timers_find(struct hlist_head *head, struct signal_struct *sig, timer_t id) { struct k_itimer *timer; hlist_for_each_entry_rcu(timer, head, t_hash) { if ((timer->it_signal == sig) && (timer->it_id == id)) return timer; } return NULL; } static struct k_itimer *posix_timer_by_id(timer_t id) { struct signal_struct *sig = current->signal; struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)]; return __posix_timers_find(head, sig, id); } static int posix_timer_add(struct k_itimer *timer) { struct signal_struct *sig = current->signal; int first_free_id = sig->posix_timer_id; struct hlist_head *head; int ret = -ENOENT; do { spin_lock(&hash_lock); head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)]; if (!__posix_timers_find(head, sig, sig->posix_timer_id)) { hlist_add_head_rcu(&timer->t_hash, head); ret = sig->posix_timer_id; } if (++sig->posix_timer_id < 0) sig->posix_timer_id = 0; if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT)) /* Loop over all possible ids completed */ ret = -EAGAIN; spin_unlock(&hash_lock); } while (ret == -ENOENT); return 
ret; } static inline void unlock_timer(struct k_itimer *timr, unsigned long flags) { spin_unlock_irqrestore(&timr->it_lock, flags); } /* Get clock_realtime */ static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp) { ktime_get_real_ts64(tp); return 0; } /* Set clock_realtime */ static int posix_clock_realtime_set(const clockid_t which_clock, const struct timespec64 *tp) { return do_sys_settimeofday64(tp, NULL); } static int posix_clock_realtime_adj(const clockid_t which_clock, struct timex *t) { return do_adjtimex(t); } /* * Get monotonic time for posix timers */ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp) { ktime_get_ts64(tp); return 0; } /* * Get monotonic-raw time for posix timers */ static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp) { getrawmonotonic64(tp); return 0; } static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp) { *tp = current_kernel_time64(); return 0; } static int posix_get_monotonic_coarse(clockid_t which_clock, struct timespec64 *tp) { *tp = get_monotonic_coarse64(); return 0; } static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp) { *tp = ktime_to_timespec64(KTIME_LOW_RES); return 0; } static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp) { get_monotonic_boottime64(tp); return 0; } static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) { timekeeping_clocktai64(tp); return 0; } static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp) { tp->tv_sec = 0; tp->tv_nsec = hrtimer_resolution; return 0; } /* * Initialize everything, well, just everything in Posix clocks/timers ;) */ static __init int init_posix_timers(void) { posix_timers_cache = kmem_cache_create("posix_timers_cache", sizeof (struct k_itimer), 0, SLAB_PANIC, NULL); return 0; } __initcall(init_posix_timers); static void common_hrtimer_rearm(struct k_itimer *timr) { struct hrtimer 
*timer = &timr->it.real.timer; if (!timr->it_interval) return; timr->it_overrun += (unsigned int) hrtimer_forward(timer, timer->base->get_time(), timr->it_interval); hrtimer_restart(timer); } /* * This function is exported for use by the signal deliver code. It is * called just prior to the info block being released and passes that * block to us. It's function is to update the overrun entry AND to * restart the timer. It should only be called if the timer is to be * restarted (i.e. we have flagged this in the sys_private entry of the * info block). * * To protect against the timer going away while the interrupt is queued, * we require that the it_requeue_pending flag be set. */ void posixtimer_rearm(struct siginfo *info) { struct k_itimer *timr; unsigned long flags; timr = lock_timer(info->si_tid, &flags); if (!timr) return; if (timr->it_requeue_pending == info->si_sys_private) { timr->kclock->timer_rearm(timr); timr->it_active = 1; timr->it_overrun_last = timr->it_overrun; timr->it_overrun = -1; ++timr->it_requeue_pending; info->si_overrun += timr->it_overrun_last; } unlock_timer(timr, flags); } int posix_timer_event(struct k_itimer *timr, int si_private) { struct task_struct *task; int shared, ret = -1; /* * FIXME: if ->sigq is queued we can race with * dequeue_signal()->posixtimer_rearm(). * * If dequeue_signal() sees the "right" value of * si_sys_private it calls posixtimer_rearm(). * We re-queue ->sigq and drop ->it_lock(). * posixtimer_rearm() locks the timer * and re-schedules it while ->sigq is pending. * Not really bad, but not that we want. */ timr->sigq->info.si_sys_private = si_private; rcu_read_lock(); task = pid_task(timr->it_pid, PIDTYPE_PID); if (task) { shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); ret = send_sigqueue(timr->sigq, task, shared); } rcu_read_unlock(); /* If we failed to send the signal the timer stops. */ return ret > 0; } /* * This function gets called when a POSIX.1b interval timer expires. 
It * is used as a callback from the kernel internal timer. The * run_timer_list code ALWAYS calls with interrupts on. * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. */ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) { struct k_itimer *timr; unsigned long flags; int si_private = 0; enum hrtimer_restart ret = HRTIMER_NORESTART; timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); timr->it_active = 0; if (timr->it_interval != 0) si_private = ++timr->it_requeue_pending; if (posix_timer_event(timr, si_private)) { /* * signal was not sent because of sig_ignor * we will not get a call back to restart it AND * it should be restarted. */ if (timr->it_interval != 0) { ktime_t now = hrtimer_cb_get_time(timer); /* * FIXME: What we really want, is to stop this * timer completely and restart it in case the * SIG_IGN is removed. This is a non trivial * change which involves sighand locking * (sigh !), which we don't want to do late in * the release cycle. * * For now we just let timers with an interval * less than a jiffie expire every jiffie to * avoid softirq starvation in case of SIG_IGN * and a very small interval, which would put * the timer right back on the softirq pending * list. By moving now ahead of time we trick * hrtimer_forward() to expire the timer * later, while we still maintain the overrun * accuracy, but have some inconsistency in * the timer_gettime() case. This is at least * better than a starved softirq. A more * complex fix which solves also another related * inconsistency is already in the pipeline. 
*/ #ifdef CONFIG_HIGH_RES_TIMERS { ktime_t kj = NSEC_PER_SEC / HZ; if (timr->it_interval < kj) now = ktime_add(now, kj); } #endif timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, timr->it_interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; timr->it_active = 1; } } unlock_timer(timr, flags); return ret; } static struct pid *good_sigevent(sigevent_t * event) { struct task_struct *rtn = current->group_leader; switch (event->sigev_notify) { case SIGEV_SIGNAL | SIGEV_THREAD_ID: rtn = find_task_by_vpid(event->sigev_notify_thread_id); if (!rtn || !same_thread_group(rtn, current)) return NULL; /* FALLTHRU */ case SIGEV_SIGNAL: case SIGEV_THREAD: if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) return NULL; /* FALLTHRU */ case SIGEV_NONE: return task_pid(rtn); default: return NULL; } } static struct k_itimer * alloc_posix_timer(void) { struct k_itimer *tmr; tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL); if (!tmr) return tmr; if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { kmem_cache_free(posix_timers_cache, tmr); return NULL; } memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); return tmr; } static void k_itimer_rcu_free(struct rcu_head *head) { struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu); kmem_cache_free(posix_timers_cache, tmr); } #define IT_ID_SET 1 #define IT_ID_NOT_SET 0 static void release_posix_timer(struct k_itimer *tmr, int it_id_set) { if (it_id_set) { unsigned long flags; spin_lock_irqsave(&hash_lock, flags); hlist_del_rcu(&tmr->t_hash); spin_unlock_irqrestore(&hash_lock, flags); } put_pid(tmr->it_pid); sigqueue_free(tmr->sigq); call_rcu(&tmr->it.rcu, k_itimer_rcu_free); } static int common_timer_create(struct k_itimer *new_timer) { hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0); return 0; } /* Create a POSIX.1b interval timer. 
*/ static int do_timer_create(clockid_t which_clock, struct sigevent *event, timer_t __user *created_timer_id) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct k_itimer *new_timer; int error, new_timer_id; int it_id_set = IT_ID_NOT_SET; if (!kc) return -EINVAL; if (!kc->timer_create) return -EOPNOTSUPP; new_timer = alloc_posix_timer(); if (unlikely(!new_timer)) return -EAGAIN; spin_lock_init(&new_timer->it_lock); new_timer_id = posix_timer_add(new_timer); if (new_timer_id < 0) { error = new_timer_id; goto out; } it_id_set = IT_ID_SET; new_timer->it_id = (timer_t) new_timer_id; new_timer->it_clock = which_clock; new_timer->kclock = kc; new_timer->it_overrun = -1; if (event) { rcu_read_lock(); new_timer->it_pid = get_pid(good_sigevent(event)); rcu_read_unlock(); if (!new_timer->it_pid) { error = -EINVAL; goto out; } new_timer->it_sigev_notify = event->sigev_notify; new_timer->sigq->info.si_signo = event->sigev_signo; new_timer->sigq->info.si_value = event->sigev_value; } else { new_timer->it_sigev_notify = SIGEV_SIGNAL; new_timer->sigq->info.si_signo = SIGALRM; memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t)); new_timer->sigq->info.si_value.sival_int = new_timer->it_id; new_timer->it_pid = get_pid(task_tgid(current)); } new_timer->sigq->info.si_tid = new_timer->it_id; new_timer->sigq->info.si_code = SI_TIMER; if (copy_to_user(created_timer_id, &new_timer_id, sizeof (new_timer_id))) { error = -EFAULT; goto out; } error = kc->timer_create(new_timer); if (error) goto out; spin_lock_irq(&current->sighand->siglock); new_timer->it_signal = current->signal; list_add(&new_timer->list, &current->signal->posix_timers); spin_unlock_irq(&current->sighand->siglock); return 0; /* * In the case of the timer belonging to another task, after * the task is unlocked, the timer is owned by the other task * and may cease to exist at any time. Don't use or modify * new_timer after the unlock call. 
*/ out: release_posix_timer(new_timer, it_id_set); return error; } SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, struct sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (copy_from_user(&event, timer_event_spec, sizeof (event))) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock, struct compat_sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (get_compat_sigevent(&event, timer_event_spec)) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #endif /* * Locking issues: We need to protect the result of the id look up until * we get the timer locked down so it is not deleted under us. The * removal is done under the idr spinlock so we use that here to bridge * the find to the timer lock. To avoid a dead lock, the timer id MUST * be release with out holding the timer lock. */ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags) { struct k_itimer *timr; /* * timer_t could be any type >= int and we want to make sure any * @timer_id outside positive int range fails lookup. 
*/ if ((unsigned long long)timer_id > INT_MAX) return NULL; rcu_read_lock(); timr = posix_timer_by_id(timer_id); if (timr) { spin_lock_irqsave(&timr->it_lock, *flags); if (timr->it_signal == current->signal) { rcu_read_unlock(); return timr; } spin_unlock_irqrestore(&timr->it_lock, *flags); } rcu_read_unlock(); return NULL; } static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return __hrtimer_expires_remaining_adjusted(timer, now); } static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return (int)hrtimer_forward(timer, now, timr->it_interval); } /* * Get the time remaining on a POSIX.1b interval timer. This function * is ALWAYS called with spin_lock_irq on the timer, thus it must not * mess with irq. * * We have a couple of messes to clean up here. First there is the case * of a timer that has a requeue pending. These timers should appear to * be in the timer list with an expiry as if we were to requeue them * now. * * The second issue is the SIGEV_NONE timer which may be active but is * not really ever put in the timer list (to save system resources). * This timer may be expired, and if so, we will do it here. Otherwise * it is the same as a requeue pending timer WRT to what we should * report. */ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) { const struct k_clock *kc = timr->kclock; ktime_t now, remaining, iv; struct timespec64 ts64; bool sig_none; sig_none = timr->it_sigev_notify == SIGEV_NONE; iv = timr->it_interval; /* interval timer ? */ if (iv) { cur_setting->it_interval = ktime_to_timespec64(iv); } else if (!timr->it_active) { /* * SIGEV_NONE oneshot timers are never queued. Check them * below. */ if (!sig_none) return; } /* * The timespec64 based conversion is suboptimal, but it's not * worth to implement yet another callback. 
*/ kc->clock_get(timr->it_clock, &ts64); now = timespec64_to_ktime(ts64); /* * When a requeue is pending or this is a SIGEV_NONE timer move the * expiry time forward by intervals, so expiry is > now. */ if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none)) timr->it_overrun += kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* Return 0 only, when the timer is expired and not pending */ if (remaining <= 0) { /* * A single shot SIGEV_NONE timer must return 0, when * it is expired ! */ if (!sig_none) cur_setting->it_value.tv_nsec = 1; } else { cur_setting->it_value = ktime_to_timespec64(remaining); } } /* Get the time remaining on a POSIX.1b interval timer. */ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting) { struct k_itimer *timr; const struct k_clock *kc; unsigned long flags; int ret = 0; timr = lock_timer(timer_id, &flags); if (!timr) return -EINVAL; memset(setting, 0, sizeof(*setting)); kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_get)) ret = -EINVAL; else kc->timer_get(timr, setting); unlock_timer(timr, flags); return ret; } /* Get the time remaining on a POSIX.1b interval timer. */ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, struct itimerspec __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_itimerspec64(&cur_setting, setting)) ret = -EFAULT; } return ret; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, struct compat_itimerspec __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_compat_itimerspec64(&cur_setting, setting)) ret = -EFAULT; } return ret; } #endif /* * Get the number of overruns of a POSIX.1b interval timer. This is to * be the overrun of the timer last delivered. At the same time we are * accumulating overruns on the next timer. 
The overrun is frozen when * the signal is delivered, either at the notify time (if the info block * is not queued) or at the actual delivery time (as we are informed by * the call back to posixtimer_rearm(). So all we need to do is * to pick up the frozen overrun. */ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) { struct k_itimer *timr; int overrun; unsigned long flags; timr = lock_timer(timer_id, &flags); if (!timr) return -EINVAL; overrun = timr->it_overrun_last; unlock_timer(timr, flags); return overrun; } static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires, bool absolute, bool sigev_none) { struct hrtimer *timer = &timr->it.real.timer; enum hrtimer_mode mode; mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL; /* * Posix magic: Relative CLOCK_REALTIME timers are not affected by * clock modifications, so they become CLOCK_MONOTONIC based under the * hood. See hrtimer_init(). Update timr->kclock, so the generic * functions which use timr->kclock->clock_get() work. * * Note: it_clock stays unmodified, because the next timer_set() might * use ABSTIME, so it needs to switch back. */ if (timr->it_clock == CLOCK_REALTIME) timr->kclock = absolute ? &clock_realtime : &clock_monotonic; hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); timr->it.real.timer.function = posix_timer_fn; if (!absolute) expires = ktime_add_safe(expires, timer->base->get_time()); hrtimer_set_expires(timer, expires); if (!sigev_none) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } static int common_hrtimer_try_to_cancel(struct k_itimer *timr) { return hrtimer_try_to_cancel(&timr->it.real.timer); } /* Set a POSIX.1b interval timer. 
*/ int common_timer_set(struct k_itimer *timr, int flags, struct itimerspec64 *new_setting, struct itimerspec64 *old_setting) { const struct k_clock *kc = timr->kclock; bool sigev_none; ktime_t expires; if (old_setting) common_timer_get(timr, old_setting); /* Prevent rearming by clearing the interval */ timr->it_interval = 0; /* * Careful here. On SMP systems the timer expiry function could be * active and spinning on timr->it_lock. */ if (kc->timer_try_to_cancel(timr) < 0) return TIMER_RETRY; timr->it_active = 0; timr->it_requeue_pending = (timr->it_requeue_pending + 2) & ~REQUEUE_PENDING; timr->it_overrun_last = 0; /* Switch off the timer when it_value is zero */ if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) return 0; timr->it_interval = timespec64_to_ktime(new_setting->it_interval); expires = timespec64_to_ktime(new_setting->it_value); sigev_none = timr->it_sigev_notify == SIGEV_NONE; kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none); timr->it_active = !sigev_none; return 0; } static int do_timer_settime(timer_t timer_id, int flags, struct itimerspec64 *new_spec64, struct itimerspec64 *old_spec64) { const struct k_clock *kc; struct k_itimer *timr; unsigned long flag; int error = 0; if (!timespec64_valid(&new_spec64->it_interval) || !timespec64_valid(&new_spec64->it_value)) return -EINVAL; if (old_spec64) memset(old_spec64, 0, sizeof(*old_spec64)); retry: timr = lock_timer(timer_id, &flag); if (!timr) return -EINVAL; kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; else error = kc->timer_set(timr, flags, new_spec64, old_spec64); unlock_timer(timr, flag); if (error == TIMER_RETRY) { old_spec64 = NULL; // We already got the old time... 
goto retry; } return error; } /* Set a POSIX.1b interval timer */ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, const struct itimerspec __user *, new_setting, struct itimerspec __user *, old_setting) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old_setting ? &old_spec : NULL; int error = 0; if (!new_setting) return -EINVAL; if (get_itimerspec64(&new_spec, new_setting)) return -EFAULT; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old_setting) { if (put_itimerspec64(&old_spec, old_setting)) error = -EFAULT; } return error; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, struct compat_itimerspec __user *, new, struct compat_itimerspec __user *, old) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old ? &old_spec : NULL; int error = 0; if (!new) return -EINVAL; if (get_compat_itimerspec64(&new_spec, new)) return -EFAULT; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old) { if (put_compat_itimerspec64(&old_spec, old)) error = -EFAULT; } return error; } #endif int common_timer_del(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; timer->it_interval = 0; if (kc->timer_try_to_cancel(timer) < 0) return TIMER_RETRY; timer->it_active = 0; return 0; } static inline int timer_delete_hook(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_del)) return -EINVAL; return kc->timer_del(timer); } /* Delete a POSIX.1b interval timer. 
*/ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) { struct k_itimer *timer; unsigned long flags; retry_delete: timer = lock_timer(timer_id, &flags); if (!timer) return -EINVAL; if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); goto retry_delete; } spin_lock(&current->sighand->siglock); list_del(&timer->list); spin_unlock(&current->sighand->siglock); /* * This keeps any tasks waiting on the spin lock from thinking * they got something (see the lock code above). */ timer->it_signal = NULL; unlock_timer(timer, flags); release_posix_timer(timer, IT_ID_SET); return 0; } /* * return timer owned by the process, used by exit_itimers */ static void itimer_delete(struct k_itimer *timer) { unsigned long flags; retry_delete: spin_lock_irqsave(&timer->it_lock, flags); if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); goto retry_delete; } list_del(&timer->list); /* * This keeps any tasks waiting on the spin lock from thinking * they got something (see the lock code above). */ timer->it_signal = NULL; unlock_timer(timer, flags); release_posix_timer(timer, IT_ID_SET); } /* * This is called by do_exit or de_thread, only when there are no more * references to the shared signal_struct. 
*/ void exit_itimers(struct signal_struct *sig) { struct k_itimer *tmr; while (!list_empty(&sig->posix_timers)) { tmr = list_entry(sig->posix_timers.next, struct k_itimer, list); itimer_delete(tmr); } } SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, const struct timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 new_tp; if (!kc || !kc->clock_set) return -EINVAL; if (get_timespec64(&new_tp, tp)) return -EFAULT; return kc->clock_set(which_clock, &new_tp); } SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock, struct timespec __user *,tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 kernel_tp; int error; if (!kc) return -EINVAL; error = kc->clock_get(which_clock, &kernel_tp); if (!error && put_timespec64(&kernel_tp, tp)) error = -EFAULT; return error; } SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock, struct timex __user *, utx) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timex ktx; int err; if (!kc) return -EINVAL; if (!kc->clock_adj) return -EOPNOTSUPP; if (copy_from_user(&ktx, utx, sizeof(ktx))) return -EFAULT; err = kc->clock_adj(which_clock, &ktx); if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx))) return -EFAULT; return err; } SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, struct timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 rtn_tp; int error; if (!kc) return -EINVAL; error = kc->clock_getres(which_clock, &rtn_tp); if (!error && tp && put_timespec64(&rtn_tp, tp)) error = -EFAULT; return error; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock, struct compat_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; if (!kc || !kc->clock_set) return -EINVAL; if (compat_get_timespec64(&ts, tp)) return -EFAULT; return kc->clock_set(which_clock, &ts); } 
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock, struct compat_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; int err; if (!kc) return -EINVAL; err = kc->clock_get(which_clock, &ts); if (!err && compat_put_timespec64(&ts, tp)) err = -EFAULT; return err; } COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock, struct compat_timex __user *, utp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timex ktx; int err; if (!kc) return -EINVAL; if (!kc->clock_adj) return -EOPNOTSUPP; err = compat_get_timex(&ktx, utp); if (err) return err; err = kc->clock_adj(which_clock, &ktx); if (err >= 0) err = compat_put_timex(utp, &ktx); return err; } COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, struct compat_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; int err; if (!kc) return -EINVAL; err = kc->clock_getres(which_clock, &ts); if (!err && tp && compat_put_timespec64(&ts, tp)) return -EFAULT; return err; } #endif /* * nanosleep for monotonic and realtime clocks */ static int common_nsleep(const clockid_t which_clock, int flags, const struct timespec64 *rqtp) { return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL, which_clock); } SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, const struct timespec __user *, rqtp, struct timespec __user *, rmtp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 t; if (!kc) return -EINVAL; if (!kc->nsleep) return -ENANOSLEEP_NOTSUP; if (get_timespec64(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; current->restart_block.nanosleep.type = rmtp ? 
TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; return kc->nsleep(which_clock, flags, &t); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, struct compat_timespec __user *, rqtp, struct compat_timespec __user *, rmtp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 t; if (!kc) return -EINVAL; if (!kc->nsleep) return -ENANOSLEEP_NOTSUP; if (compat_get_timespec64(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current->restart_block.nanosleep.compat_rmtp = rmtp; return kc->nsleep(which_clock, flags, &t); } #endif static const struct k_clock clock_realtime = { .clock_getres = posix_get_hrtimer_res, .clock_get = posix_clock_realtime_get, .clock_set = posix_clock_realtime_set, .clock_adj = posix_clock_realtime_adj, .nsleep = common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_monotonic = { .clock_getres = posix_get_hrtimer_res, .clock_get = posix_ktime_get_ts, .nsleep = common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_monotonic_raw = { .clock_getres = posix_get_hrtimer_res, .clock_get = posix_get_monotonic_raw, }; static const struct k_clock clock_realtime_coarse = { 
.clock_getres = posix_get_coarse_res, .clock_get = posix_get_realtime_coarse, }; static const struct k_clock clock_monotonic_coarse = { .clock_getres = posix_get_coarse_res, .clock_get = posix_get_monotonic_coarse, }; static const struct k_clock clock_tai = { .clock_getres = posix_get_hrtimer_res, .clock_get = posix_get_tai, .nsleep = common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_boottime = { .clock_getres = posix_get_hrtimer_res, .clock_get = posix_get_boottime, .nsleep = common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_arm = common_hrtimer_arm, }; static const struct k_clock * const posix_clocks[] = { [CLOCK_REALTIME] = &clock_realtime, [CLOCK_MONOTONIC] = &clock_monotonic, [CLOCK_PROCESS_CPUTIME_ID] = &clock_process, [CLOCK_THREAD_CPUTIME_ID] = &clock_thread, [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw, [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse, [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse, [CLOCK_BOOTTIME] = &clock_boottime, [CLOCK_REALTIME_ALARM] = &alarm_clock, [CLOCK_BOOTTIME_ALARM] = &alarm_clock, [CLOCK_TAI] = &clock_tai, }; static const struct k_clock *clockid_to_kclock(const clockid_t id) { if (id < 0) return (id & CLOCKFD_MASK) == CLOCKFD ? &clock_posix_dynamic : &clock_posix_cpu; if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id]) return NULL; return posix_clocks[id]; }
/*
 * Validate the sigevent passed to timer_create() and resolve the target pid.
 *
 * Returns the struct pid of the task that should receive the timer signal,
 * or NULL when the sigevent is invalid.
 *
 * sigev_notify must be matched against the exact set of supported values.
 * The previous mask-based test only checked bit patterns, so userspace could
 * smuggle unsupported sigev_notify values past validation; those values are
 * used later to select behavior, yielding an out-of-bounds read
 * (CVE-2017-18344).  An exhaustive switch rejects everything that is not
 * explicitly supported.
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		/*
		 * Signal a specific thread: it must exist and belong to the
		 * caller's thread group.
		 */
		rtn = find_task_by_vpid(event->sigev_notify_thread_id);
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		/* FALLTHRU */
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		/*
		 * A real signal will be queued: the signal number must be in
		 * the valid 1..SIGRTMAX range.
		 */
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		/* FALLTHRU */
	case SIGEV_NONE:
		return task_pid(rtn);
	default:
		return NULL;
	}
}
/*
 * Validate a timer_create() sigevent and resolve the pid that should
 * receive the timer signal; NULL on any invalid sigevent.
 *
 * The switch deliberately matches sigev_notify against the exact set of
 * supported values (not bit masks), so unknown notify values fall into
 * "default" and are rejected.  The two fall-throughs stack the checks:
 * SIGEV_SIGNAL|SIGEV_THREAD_ID additionally requires a live target thread
 * in the caller's thread group, and every signal-delivering mode requires
 * a signal number in 1..SIGRTMAX.
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		/* Target thread must exist and be in the caller's group. */
		rtn = find_task_by_vpid(event->sigev_notify_thread_id);
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		/* FALLTHRU */
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		/* A signal will be queued: number must be valid. */
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		/* FALLTHRU */
	case SIGEV_NONE:
		return task_pid(rtn);
	default:
		return NULL;
	}
}
{'added': [(437, '\tswitch (event->sigev_notify) {'), (438, '\tcase SIGEV_SIGNAL | SIGEV_THREAD_ID:'), (439, '\t\trtn = find_task_by_vpid(event->sigev_notify_thread_id);'), (440, '\t\tif (!rtn || !same_thread_group(rtn, current))'), (441, '\t\t\treturn NULL;'), (442, '\t\t/* FALLTHRU */'), (443, '\tcase SIGEV_SIGNAL:'), (444, '\tcase SIGEV_THREAD:'), (445, '\t\tif (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)'), (446, '\t\t\treturn NULL;'), (447, '\t\t/* FALLTHRU */'), (448, '\tcase SIGEV_NONE:'), (449, '\t\treturn task_pid(rtn);'), (450, '\tdefault:'), (452, '\t}'), (677, '\tsig_none = timr->it_sigev_notify == SIGEV_NONE;'), (864, '\tsigev_none = timr->it_sigev_notify == SIGEV_NONE;')], 'deleted': [(437, '\tif ((event->sigev_notify & SIGEV_THREAD_ID ) &&'), (438, '\t\t(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||'), (439, '\t\t !same_thread_group(rtn, current) ||'), (440, '\t\t (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))'), (442, ''), (443, '\tif (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&'), (444, '\t ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))'), (445, '\t\treturn NULL;'), (446, ''), (447, '\treturn task_pid(rtn);'), (672, '\tsig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;'), (859, '\tsigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;')]}
17
12
871
5,177
13
103
7
https://github.com/torvalds/linux
CVE-2017-18344
CWE-125
9
netbios.c
C
ndpi_netbios_name_interpret
/*
 * netbios.c
 *
 * Copyright (C) 2011-21 - ntop.org
 * Copyright (C) 2009-11 - ipoque GmbH
 *
 * This file is part of nDPI, an open source deep packet inspection
 * library based on the OpenDPI and PACE technology by ipoque GmbH
 *
 * nDPI is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * nDPI is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with nDPI.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "ndpi_protocol_ids.h"

#define NDPI_CURRENT_PROTO NDPI_PROTOCOL_NETBIOS

#include "ndpi_api.h"

/* ****************************************************************** */

struct netbios_header {
  u_int16_t transaction_id, flags, questions, answer_rrs,
    authority_rrs, additional_rrs;
};

/* ****************************************************************** */

/* The function below has been inherited by tcpdump */
/*
 * Decode a NetBIOS "first level encoded" name (RFC 1001 sec. 14.1): each
 * original byte is stored as two nibbles, each biased into 'A'..'P'.
 *
 * in/in_len   - encoded name; the first byte is the encoded length
 * out/out_len - output buffer for the decoded, NUL-terminated name
 *
 * Returns the number of printable characters written to 'out', or -1 on a
 * malformed or over-long name.
 *
 * Fix: the length byte is consumed up front (*in++), so only in_len - 1
 * input bytes remain.  The previous code initialized idx = in_len, which
 * over-counted by one; the "idx < 2" guard and the "2*len > in_len" bound
 * then allowed the decode loop to read one byte past the end of 'in' on
 * short/crafted packets (out-of-bounds read).  It also dereferenced *in
 * without checking in_len >= 1 and decremented out_len without checking
 * it is non-zero (unsigned underflow).
 */
int ndpi_netbios_name_interpret(char *in, size_t in_len, char *out, u_int out_len) {
  u_int ret = 0, len, idx, out_idx = 0;

  /* Need at least the length byte, and room for the NUL terminator. */
  if((in_len < 1) || (out_len < 1))
    return(-1);

  len = (*in++)/2;
  idx = (u_int)in_len - 1; /* input bytes remaining after the length byte */
  out_len--;
  out[out_idx] = 0;

  if((len > out_len) || (len < 1) || ((2*len) > idx))
    return(-1);

  while((len--) && (out_idx < out_len)) {
    /* Stop on truncated input or nibbles outside the 'A'..'P' alphabet. */
    if((idx < 2) || (in[0] < 'A') || (in[0] > 'P') || (in[1] < 'A') || (in[1] > 'P')) {
      out[out_idx] = 0;
      break;
    }

    out[out_idx] = ((in[0] - 'A') << 4) + (in[1] - 'A');
    in += 2, idx -= 2;

    /* Only printable characters are kept (and counted in the result). */
    if(isprint(out[out_idx]))
      out_idx++, ret++;
  }

  /* Trim trailing whitespace from the returned string */
  if(out_idx > 0) {
    out[out_idx] = 0;
    out_idx--;

    while((out_idx > 0) && (out[out_idx] == ' ')) {
      out[out_idx] = 0;
      out_idx--;
    }
  }

  return(ret);
}

/*
****************************************************************** */

/*
 * Classify the current flow as NetBIOS (optionally with a sub-protocol)
 * and, when a NetBIOS name can be decoded from the payload, record it as
 * the flow's host/server name and run DGA checks on it.
 *
 * NOTE(review): payload[12] is read to choose the name offset; callers in
 * ndpi_search_netbios() only invoke this with payload_packet_len >= 14
 * (UDP/138), >= 50 (UDP/137) or == 72 (TCP/139), so the read is in range —
 * verify before adding new call sites.
 */
static void ndpi_int_netbios_add_connection(struct ndpi_detection_module_struct *ndpi_struct,
					    struct ndpi_flow_struct *flow,
					    u_int16_t sub_protocol) {
  char name[64];
  /* 0x20 at offset 12 => the encoded name starts at 12, else skip to 14. */
  u_int off = flow->packet.payload[12] == 0x20 ? 12 : 14;

  if((off < flow->packet.payload_packet_len)
     && ndpi_netbios_name_interpret((char*)&flow->packet.payload[off],
				    flow->packet.payload_packet_len - off, name, sizeof(name)) > 0) {
      snprintf((char*)flow->host_server_name, sizeof(flow->host_server_name)-1, "%s", name);

      ndpi_check_dga_name(ndpi_struct, flow, (char*)flow->host_server_name, 1);
  }

  if(sub_protocol == NDPI_PROTOCOL_UNKNOWN)
    ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_NETBIOS, NDPI_PROTOCOL_UNKNOWN);
  else
    ndpi_set_detected_protocol(ndpi_struct, flow, sub_protocol, NDPI_PROTOCOL_NETBIOS);
}

/* ****************************************************************** */

/*
 * Main NetBIOS dissector: a cascade of header heuristics for
 * UDP/137 (name service), UDP/138 (datagram service, may reveal SMBv1)
 * and TCP/139 (session service).  The first matching heuristic classifies
 * the flow and returns; if nothing matches, NetBIOS is excluded.
 */
void ndpi_search_netbios(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) {
  struct ndpi_packet_struct *packet = &flow->packet;
  u_int16_t dport;

  NDPI_LOG_DBG(ndpi_struct, "search netbios\n");

  if(packet->udp != NULL) {
    dport = ntohs(packet->udp->dest);

    /*check standard NETBIOS over udp to port 137 */
    if((dport == 137 || 0) && packet->payload_packet_len >= 50) {
      struct netbios_header h;

      /* Copy out the 12-byte name-service header and byte-swap each field. */
      memcpy(&h, packet->payload, sizeof(struct netbios_header));
      h.transaction_id = ntohs(h.transaction_id), h.flags = ntohs(h.flags),
	h.questions = ntohs(h.questions), h.answer_rrs = ntohs(h.answer_rrs),
	h.authority_rrs = ntohs(h.authority_rrs), h.additional_rrs = ntohs(h.additional_rrs);

      NDPI_LOG_DBG(ndpi_struct, "found netbios port 137 and payload_packet_len 50\n");

      /* Each block below matches one known flags/RR-count combination. */
      if(h.flags == 0 &&
	 h.questions == 1 &&
	 h.answer_rrs == 0 &&
	 h.authority_rrs == 0 && h.additional_rrs == 0) {

	NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers = 0, authority = 0 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(((h.flags & 0x8710) == 0x10) &&
	 h.questions == 1 &&
	 h.answer_rrs == 0 &&
	 h.authority_rrs == 0) {

	NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers = 0, authority = 0 and broadcast \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(packet->payload[2] == 0x80 &&
	 h.questions == 1 &&
	 h.answer_rrs == 0 &&
	 h.authority_rrs == 0 && h.additional_rrs == 1) {

	NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers, authority, additional = 0 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(h.flags == 0x4000 &&
	 h.questions == 1 &&
	 h.answer_rrs == 0 &&
	 h.authority_rrs == 0 && h.additional_rrs == 1) {

	NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers = 0, authority = 0 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(h.flags == 0x8400 &&
	 h.questions == 0 &&
	 h.answer_rrs == 1 &&
	 h.authority_rrs == 0 && h.additional_rrs == 0) {

	NDPI_LOG_INFO(ndpi_struct,
		 "found netbios with flag 8400 questions = 0 and answers = 1, authority, additional = 0 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(h.flags == 0x8500 &&
	 h.questions == 0 &&
	 h.answer_rrs == 1 &&
	 h.authority_rrs == 0 && h.additional_rrs == 0) {

	NDPI_LOG_INFO(ndpi_struct,
		 "found netbios with flag 8500 questions = 0 and answers = 1, authority, additional = 0 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(((h.flags == 0x2900) || (h.flags == 0x2910)) &&
	 h.questions == 1 &&
	 h.answer_rrs == 0 &&
	 h.authority_rrs == 0 && h.additional_rrs == 1) {

	NDPI_LOG_INFO(ndpi_struct,
		 "found netbios with flag 2910, questions = 1 and answers, authority=0, additional = 1 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(h.flags == 0xAD86 &&
	 h.questions == 0 &&
	 h.answer_rrs == 1 &&
	 h.authority_rrs == 0 && h.additional_rrs == 0) {

	NDPI_LOG_INFO(ndpi_struct,
		 "found netbios with flag ad86 questions = 0 and answers = 1, authority, additional = 0 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      if(h.flags == 0x0110 &&
	 h.questions == 1 &&
	 h.answer_rrs == 0 &&
	 h.authority_rrs == 0 && h.additional_rrs == 0) {

	NDPI_LOG_INFO(ndpi_struct,
		 "found netbios with flag 0110 questions = 1 and answers = 0, authority, additional = 0 \n");

	ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	return;
      }

      /* High 5 flag bits clear => query; 0x8000 set => response. */
      if((h.flags & 0xf800) == 0) {
	NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query request\n");

	if(get_u_int16_t(packet->payload, 4) == htons(1) &&
	   get_u_int16_t(packet->payload, 6) == 0 &&
	   get_u_int16_t(packet->payload, 8) == 0 && get_u_int16_t(packet->payload, 10) == 0) {

	  /* name is encoded as described in rfc883 */
	  u_int8_t name_length = packet->payload[12];

	  NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query request, one question\n");

	  if(packet->payload_packet_len == 12 + 1 + name_length + 1 + 2 + 2) {
	    NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query request, length matches\n");

	    /* null terminated? */
	    if(packet->payload[12 + name_length + 1] == 0 &&
	       get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x0020) &&
	       get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) {
	      NDPI_LOG_INFO(ndpi_struct, "found netbios name query request\n");
	      ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	      return;
	    }
	  }
	}
      } else if((h.flags & 0xf800) == 0x8000) {
	NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response\n");

	/* Positive response: one answer RR, nothing else. */
	if(get_u_int16_t(packet->payload, 4) == 0 &&
	   get_u_int16_t(packet->payload, 6) == htons(1) &&
	   get_u_int16_t(packet->payload, 8) == 0 && get_u_int16_t(packet->payload, 10) == 0) {

	  /* name is encoded as described in rfc883 */
	  u_int8_t name_length = packet->payload[12];

	  NDPI_LOG_DBG2(ndpi_struct, "possible netbios positive name query response, one answer\n");

	  if(packet->payload_packet_len >= 12 + 1 + name_length + 1 + 2 + 2) {
	    NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response, length matches\n");

	    /* null terminated? */
	    if(packet->payload[12 + name_length + 1] == 0 &&
	       get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x0020) &&
	       get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) {
	      NDPI_LOG_INFO(ndpi_struct, "found netbios name query response\n");
	      ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	      return;
	    }
	  }
	} else if(get_u_int16_t(packet->payload, 4) == 0 &&
		  get_u_int16_t(packet->payload, 6) == 0 &&
		  get_u_int16_t(packet->payload, 8) == 0 &&
		  get_u_int16_t(packet->payload, 10) == 0) {

	  /* name is encoded as described in rfc883 */
	  u_int8_t name_length = packet->payload[12];

	  NDPI_LOG_DBG2(ndpi_struct, "possible netbios negative name query response, one answer\n");

	  if(packet->payload_packet_len >= 12 + 1 + name_length + 1 + 2 + 2) {
	    NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response, length matches\n");

	    /* null terminated? */
	    if(packet->payload[12 + name_length + 1] == 0 &&
	       get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x000A) &&
	       get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) {
	      NDPI_LOG_INFO(ndpi_struct, "found netbios name query response\n");
	      ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	      return;
	    }
	  }
	} else if(get_u_int16_t(packet->payload, 4) == 0 &&
		  get_u_int16_t(packet->payload, 6) == 0 &&
		  get_u_int16_t(packet->payload, 8) == htons(1) &&
		  get_u_int16_t(packet->payload, 10) == htons(1)) {

	  /* name is encoded as described in rfc883 */
	  u_int8_t name_length = packet->payload[12];

	  NDPI_LOG_DBG2(ndpi_struct, "possible netbios redirect name query response, one answer\n");

	  if(packet->payload_packet_len >= 12 + 1 + name_length + 1 + 2 + 2) {
	    NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response, length matches\n");

	    /* null terminated? */
	    if(packet->payload[12 + name_length + 1] == 0 &&
	       get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x0002) &&
	       get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) {
	      NDPI_LOG_INFO(ndpi_struct, "found netbios name query response\n");
	      ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	      return;
	    }
	  }
	}
      }

      /* TODO: extend according to rfc1002 */
    }

    /* check standard NETBIOS over udp to port 138 */

    /* netbios header token from http://www.protocolbase.net/protocols/protocol_NBDGM.php */

    if((dport == 138) && (packet->payload_packet_len >= 14)) {
      u_int16_t netbios_len = ntohs(get_u_int16_t(packet->payload, 10));

      if(netbios_len == packet->payload_packet_len - 14) {
	NDPI_LOG_DBG2(ndpi_struct, "found netbios port 138 and payload length >= 112 \n");

	if(packet->payload[0] >= 0x10 && packet->payload[0] <= 0x16) {
	  u_int32_t source_ip = ntohl(get_u_int32_t(packet->payload, 4));

	  NDPI_LOG_DBG2(ndpi_struct, "found netbios with MSG-type 0x10,0x11,0x12,0x13,0x14,0x15 or 0x16\n");

	  /* Datagram must claim the sender's own IP as its source. */
	  if(source_ip == ntohl(packet->iph->saddr)) {
	    /* Anything beyond the 82-byte datagram header suggests SMBv1. */
	    int16_t leftover = netbios_len - 82; /* NetBIOS len */

	    NDPI_LOG_INFO(ndpi_struct, "found netbios with checked ip-address\n");

	    ndpi_int_netbios_add_connection(ndpi_struct, flow,
					    (leftover > 0) ? NDPI_PROTOCOL_SMBV1 : NDPI_PROTOCOL_UNKNOWN);
	    return;
	  }
	}
      }
    }
  }

  if(packet->tcp != NULL) {
    dport = ntohs(packet->tcp->dest);

    /* destination port must be 139 */
    if(dport == 139) {
      NDPI_LOG_DBG2(ndpi_struct, "found netbios with destination port 139\n");

      /* payload_packet_len must be 72 */
      if(packet->payload_packet_len == 72) {
	NDPI_LOG_DBG2(ndpi_struct, "found netbios with payload_packen_len = 72. \n");

	/* 0x81 = session request, flags 0, trailing length 68. */
	if(packet->payload[0] == 0x81 && packet->payload[1] == 0 && ntohs(get_u_int16_t(packet->payload, 2)) == 68) {
	  NDPI_LOG_INFO(ndpi_struct,
	       "found netbios with session request = 81, flags=0 and length od following bytes = 68. \n");

	  ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN);
	  return;
	}
      }
    }
  }

  NDPI_EXCLUDE_PROTO(ndpi_struct, flow);
}

/* ****************************************************************** */

/*
 * Register the NetBIOS dissector with the detection module and advance
 * the dissector id counter.
 */
void init_netbios_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id,
			    NDPI_PROTOCOL_BITMASK *detection_bitmask) {
  ndpi_set_bitmask_protocol_detection("NETBIOS", ndpi_struct, detection_bitmask, *id,
				      NDPI_PROTOCOL_NETBIOS,
				      ndpi_search_netbios,
				      NDPI_SELECTION_BITMASK_PROTOCOL_TCP_OR_UDP_WITH_PAYLOAD_WITHOUT_RETRANSMISSION,
				      SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK);

  *id += 1;
}
/* * netbios.c * * Copyright (C) 2011-21 - ntop.org * Copyright (C) 2009-11 - ipoque GmbH * * This file is part of nDPI, an open source deep packet inspection * library based on the OpenDPI and PACE technology by ipoque GmbH * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include "ndpi_protocol_ids.h" #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_NETBIOS #include "ndpi_api.h" /* ****************************************************************** */ struct netbios_header { u_int16_t transaction_id, flags, questions, answer_rrs, authority_rrs, additional_rrs; }; /* ****************************************************************** */ /* The function below has been inherited by tcpdump */ int ndpi_netbios_name_interpret(char *in, size_t in_len, char *out, u_int out_len) { u_int ret = 0, len, idx = in_len, out_idx = 0; len = (*in++)/2, in_len--; out_len--; out[out_idx] = 0; if((len > out_len) || (len < 1) || ((2*len) > in_len)) return(-1); while((len--) && (out_idx < out_len)) { if((idx < 2) || (in[0] < 'A') || (in[0] > 'P') || (in[1] < 'A') || (in[1] > 'P')) { out[out_idx] = 0; break; } out[out_idx] = ((in[0] - 'A') << 4) + (in[1] - 'A'); in += 2, idx -= 2; if(isprint(out[out_idx])) out_idx++, ret++; } /* Trim trailing whitespace from the returned string */ if(out_idx > 0) { out[out_idx] = 0; out_idx--; while((out_idx > 0) && (out[out_idx] == ' ')) { out[out_idx] = 0; out_idx--; } } return(ret); 
} /* ****************************************************************** */ static void ndpi_int_netbios_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t sub_protocol) { char name[64]; u_int off = flow->packet.payload[12] == 0x20 ? 12 : 14; if((off < flow->packet.payload_packet_len) && ndpi_netbios_name_interpret((char*)&flow->packet.payload[off], flow->packet.payload_packet_len - off, name, sizeof(name)) > 0) { snprintf((char*)flow->host_server_name, sizeof(flow->host_server_name)-1, "%s", name); ndpi_check_dga_name(ndpi_struct, flow, (char*)flow->host_server_name, 1); } if(sub_protocol == NDPI_PROTOCOL_UNKNOWN) ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_NETBIOS, NDPI_PROTOCOL_UNKNOWN); else ndpi_set_detected_protocol(ndpi_struct, flow, sub_protocol, NDPI_PROTOCOL_NETBIOS); } /* ****************************************************************** */ void ndpi_search_netbios(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int16_t dport; NDPI_LOG_DBG(ndpi_struct, "search netbios\n"); if(packet->udp != NULL) { dport = ntohs(packet->udp->dest); /*check standard NETBIOS over udp to port 137 */ if((dport == 137 || 0) && packet->payload_packet_len >= 50) { struct netbios_header h; memcpy(&h, packet->payload, sizeof(struct netbios_header)); h.transaction_id = ntohs(h.transaction_id), h.flags = ntohs(h.flags), h.questions = ntohs(h.questions), h.answer_rrs = ntohs(h.answer_rrs), h.authority_rrs = ntohs(h.authority_rrs), h.additional_rrs = ntohs(h.additional_rrs); NDPI_LOG_DBG(ndpi_struct, "found netbios port 137 and payload_packet_len 50\n"); if(h.flags == 0 && h.questions == 1 && h.answer_rrs == 0 && h.authority_rrs == 0 && h.additional_rrs == 0) { NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers = 0, authority = 0 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, 
NDPI_PROTOCOL_UNKNOWN); return; } if(((h.flags & 0x8710) == 0x10) && h.questions == 1 && h.answer_rrs == 0 && h.authority_rrs == 0) { NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers = 0, authority = 0 and broadcast \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if(packet->payload[2] == 0x80 && h.questions == 1 && h.answer_rrs == 0 && h.authority_rrs == 0 && h.additional_rrs == 1) { NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers, authority, additional = 0 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if(h.flags == 0x4000 && h.questions == 1 && h.answer_rrs == 0 && h.authority_rrs == 0 && h.additional_rrs == 1) { NDPI_LOG_INFO(ndpi_struct, "found netbios with questions = 1 and answers = 0, authority = 0 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if(h.flags == 0x8400 && h.questions == 0 && h.answer_rrs == 1 && h.authority_rrs == 0 && h.additional_rrs == 0) { NDPI_LOG_INFO(ndpi_struct, "found netbios with flag 8400 questions = 0 and answers = 1, authority, additional = 0 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if(h.flags == 0x8500 && h.questions == 0 && h.answer_rrs == 1 && h.authority_rrs == 0 && h.additional_rrs == 0) { NDPI_LOG_INFO(ndpi_struct, "found netbios with flag 8500 questions = 0 and answers = 1, authority, additional = 0 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if(((h.flags == 0x2900) || (h.flags == 0x2910)) && h.questions == 1 && h.answer_rrs == 0 && h.authority_rrs == 0 && h.additional_rrs == 1) { NDPI_LOG_INFO(ndpi_struct, "found netbios with flag 2910, questions = 1 and answers, authority=0, additional = 1 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if(h.flags == 0xAD86 && h.questions == 0 && h.answer_rrs == 1 && 
h.authority_rrs == 0 && h.additional_rrs == 0) { NDPI_LOG_INFO(ndpi_struct, "found netbios with flag ad86 questions = 0 and answers = 1, authority, additional = 0 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if(h.flags == 0x0110 && h.questions == 1 && h.answer_rrs == 0 && h.authority_rrs == 0 && h.additional_rrs == 0) { NDPI_LOG_INFO(ndpi_struct, "found netbios with flag 0110 questions = 1 and answers = 0, authority, additional = 0 \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } if((h.flags & 0xf800) == 0) { NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query request\n"); if(get_u_int16_t(packet->payload, 4) == htons(1) && get_u_int16_t(packet->payload, 6) == 0 && get_u_int16_t(packet->payload, 8) == 0 && get_u_int16_t(packet->payload, 10) == 0) { /* name is encoded as described in rfc883 */ u_int8_t name_length = packet->payload[12]; NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query request, one question\n"); if(packet->payload_packet_len == 12 + 1 + name_length + 1 + 2 + 2) { NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query request, length matches\n"); /* null terminated? 
*/ if(packet->payload[12 + name_length + 1] == 0 && get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x0020) && get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) { NDPI_LOG_INFO(ndpi_struct, "found netbios name query request\n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } } } } else if((h.flags & 0xf800) == 0x8000) { NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response\n"); if(get_u_int16_t(packet->payload, 4) == 0 && get_u_int16_t(packet->payload, 6) == htons(1) && get_u_int16_t(packet->payload, 8) == 0 && get_u_int16_t(packet->payload, 10) == 0) { /* name is encoded as described in rfc883 */ u_int8_t name_length = packet->payload[12]; NDPI_LOG_DBG2(ndpi_struct, "possible netbios positive name query response, one answer\n"); if(packet->payload_packet_len >= 12 + 1 + name_length + 1 + 2 + 2) { NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response, length matches\n"); /* null terminated? */ if(packet->payload[12 + name_length + 1] == 0 && get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x0020) && get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) { NDPI_LOG_INFO(ndpi_struct, "found netbios name query response\n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } } } else if(get_u_int16_t(packet->payload, 4) == 0 && get_u_int16_t(packet->payload, 6) == 0 && get_u_int16_t(packet->payload, 8) == 0 && get_u_int16_t(packet->payload, 10) == 0) { /* name is encoded as described in rfc883 */ u_int8_t name_length = packet->payload[12]; NDPI_LOG_DBG2(ndpi_struct, "possible netbios negative name query response, one answer\n"); if(packet->payload_packet_len >= 12 + 1 + name_length + 1 + 2 + 2) { NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response, length matches\n"); /* null terminated? 
*/ if(packet->payload[12 + name_length + 1] == 0 && get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x000A) && get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) { NDPI_LOG_INFO(ndpi_struct, "found netbios name query response\n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } } } else if(get_u_int16_t(packet->payload, 4) == 0 && get_u_int16_t(packet->payload, 6) == 0 && get_u_int16_t(packet->payload, 8) == htons(1) && get_u_int16_t(packet->payload, 10) == htons(1)) { /* name is encoded as described in rfc883 */ u_int8_t name_length = packet->payload[12]; NDPI_LOG_DBG2(ndpi_struct, "possible netbios redirect name query response, one answer\n"); if(packet->payload_packet_len >= 12 + 1 + name_length + 1 + 2 + 2) { NDPI_LOG_DBG2(ndpi_struct, "possible netbios name query response, length matches\n"); /* null terminated? */ if(packet->payload[12 + name_length + 1] == 0 && get_u_int16_t(packet->payload, 12 + name_length + 2) == htons(0x0002) && get_u_int16_t(packet->payload, 12 + name_length + 4) == htons(0x0001)) { NDPI_LOG_INFO(ndpi_struct, "found netbios name query response\n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } } } } /* TODO: extend according to rfc1002 */ } /* check standard NETBIOS over udp to port 138 */ /* netbios header token from http://www.protocolbase.net/protocols/protocol_NBDGM.php */ if((dport == 138) && (packet->payload_packet_len >= 14)) { u_int16_t netbios_len = ntohs(get_u_int16_t(packet->payload, 10)); if(netbios_len == packet->payload_packet_len - 14) { NDPI_LOG_DBG2(ndpi_struct, "found netbios port 138 and payload length >= 112 \n"); if(packet->payload[0] >= 0x10 && packet->payload[0] <= 0x16) { u_int32_t source_ip = ntohl(get_u_int32_t(packet->payload, 4)); NDPI_LOG_DBG2(ndpi_struct, "found netbios with MSG-type 0x10,0x11,0x12,0x13,0x14,0x15 or 0x16\n"); if(source_ip == ntohl(packet->iph->saddr)) { int16_t leftover = 
netbios_len - 82; /* NetBIOS len */ NDPI_LOG_INFO(ndpi_struct, "found netbios with checked ip-address\n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, (leftover > 0) ? NDPI_PROTOCOL_SMBV1 : NDPI_PROTOCOL_UNKNOWN); return; } } } } } if(packet->tcp != NULL) { dport = ntohs(packet->tcp->dest); /* destination port must be 139 */ if(dport == 139) { NDPI_LOG_DBG2(ndpi_struct, "found netbios with destination port 139\n"); /* payload_packet_len must be 72 */ if(packet->payload_packet_len == 72) { NDPI_LOG_DBG2(ndpi_struct, "found netbios with payload_packen_len = 72. \n"); if(packet->payload[0] == 0x81 && packet->payload[1] == 0 && ntohs(get_u_int16_t(packet->payload, 2)) == 68) { NDPI_LOG_INFO(ndpi_struct, "found netbios with session request = 81, flags=0 and length od following bytes = 68. \n"); ndpi_int_netbios_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_UNKNOWN); return; } } } } NDPI_EXCLUDE_PROTO(ndpi_struct, flow); } /* ****************************************************************** */ void init_netbios_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask) { ndpi_set_bitmask_protocol_detection("NETBIOS", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_NETBIOS, ndpi_search_netbios, NDPI_SELECTION_BITMASK_PROTOCOL_TCP_OR_UDP_WITH_PAYLOAD_WITHOUT_RETRANSMISSION, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; }
int ndpi_netbios_name_interpret(char *in, size_t in_len, char *out, u_int out_len) { u_int ret = 0, len, idx = in_len, out_idx = 0; len = (*in++)/2; out_len--; out[out_idx] = 0; if((len > out_len) || (len < 1) || ((2*len) > in_len)) return(-1); while((len--) && (out_idx < out_len)) { if((idx < 2) || (in[0] < 'A') || (in[0] > 'P') || (in[1] < 'A') || (in[1] > 'P')) { out[out_idx] = 0; break; } out[out_idx] = ((in[0] - 'A') << 4) + (in[1] - 'A'); in += 2, idx -= 2; if(isprint(out[out_idx])) out_idx++, ret++; } /* Trim trailing whitespace from the returned string */ if(out_idx > 0) { out[out_idx] = 0; out_idx--; while((out_idx > 0) && (out[out_idx] == ' ')) { out[out_idx] = 0; out_idx--; } } return(ret); }
int ndpi_netbios_name_interpret(char *in, size_t in_len, char *out, u_int out_len) { u_int ret = 0, len, idx = in_len, out_idx = 0; len = (*in++)/2, in_len--; out_len--; out[out_idx] = 0; if((len > out_len) || (len < 1) || ((2*len) > in_len)) return(-1); while((len--) && (out_idx < out_len)) { if((idx < 2) || (in[0] < 'A') || (in[0] > 'P') || (in[1] < 'A') || (in[1] > 'P')) { out[out_idx] = 0; break; } out[out_idx] = ((in[0] - 'A') << 4) + (in[1] - 'A'); in += 2, idx -= 2; if(isprint(out[out_idx])) out_idx++, ret++; } /* Trim trailing whitespace from the returned string */ if(out_idx > 0) { out[out_idx] = 0; out_idx--; while((out_idx > 0) && (out[out_idx] == ' ')) { out[out_idx] = 0; out_idx--; } } return(ret); }
{'added': [(44, ' len = (*in++)/2, in_len--;')], 'deleted': [(44, ' len = (*in++)/2;')]}
1
1
264
2,106
27
257
15
https://github.com/ntop/nDPI
CVE-2021-36082
CWE-787
2,598
ast.c
C
ast_for_atom
/* * This file includes functions to transform a concrete syntax tree (CST) to * an abstract syntax tree (AST). The main function is Ta3AST_FromNode(). * */ #include "Python.h" #include "Python-ast.h" #include "node.h" #include "ast.h" #include "token.h" #include <assert.h> #if PY_MINOR_VERSION < 4 #define PyErr_ProgramTextObject PyErr_ProgramText #define PyMem_RawMalloc PyMem_Malloc #define PyMem_RawRealloc PyMem_Realloc #define PyMem_RawFree PyMem_Free #endif static int validate_stmts(asdl_seq *); static int validate_exprs(asdl_seq *, expr_context_ty, int); static int validate_nonempty_seq(asdl_seq *, const char *, const char *); static int validate_stmt(stmt_ty); static int validate_expr(expr_ty, expr_context_ty); mod_ty string_object_to_c_ast(const char *s, PyObject *filename, int start, PyCompilerFlags *flags, int feature_version, PyArena *arena); static int validate_comprehension(asdl_seq *gens) { int i; if (!asdl_seq_LEN(gens)) { PyErr_SetString(PyExc_ValueError, "comprehension with no generators"); return 0; } for (i = 0; i < asdl_seq_LEN(gens); i++) { comprehension_ty comp = asdl_seq_GET(gens, i); if (!validate_expr(comp->target, Store) || !validate_expr(comp->iter, Load) || !validate_exprs(comp->ifs, Load, 0)) return 0; } return 1; } static int validate_slice(slice_ty slice) { switch (slice->kind) { case Slice_kind: return (!slice->v.Slice.lower || validate_expr(slice->v.Slice.lower, Load)) && (!slice->v.Slice.upper || validate_expr(slice->v.Slice.upper, Load)) && (!slice->v.Slice.step || validate_expr(slice->v.Slice.step, Load)); case ExtSlice_kind: { int i; if (!validate_nonempty_seq(slice->v.ExtSlice.dims, "dims", "ExtSlice")) return 0; for (i = 0; i < asdl_seq_LEN(slice->v.ExtSlice.dims); i++) if (!validate_slice(asdl_seq_GET(slice->v.ExtSlice.dims, i))) return 0; return 1; } case Index_kind: return validate_expr(slice->v.Index.value, Load); default: PyErr_SetString(PyExc_SystemError, "unknown slice node"); return 0; } } static int 
validate_keywords(asdl_seq *keywords) { int i; for (i = 0; i < asdl_seq_LEN(keywords); i++) if (!validate_expr(((keyword_ty)asdl_seq_GET(keywords, i))->value, Load)) return 0; return 1; } static int validate_args(asdl_seq *args) { int i; for (i = 0; i < asdl_seq_LEN(args); i++) { arg_ty arg = asdl_seq_GET(args, i); if (arg->annotation && !validate_expr(arg->annotation, Load)) return 0; } return 1; } static const char * expr_context_name(expr_context_ty ctx) { switch (ctx) { case Load: return "Load"; case Store: return "Store"; case Del: return "Del"; case AugLoad: return "AugLoad"; case AugStore: return "AugStore"; case Param: return "Param"; default: assert(0); return "(unknown)"; } } static int validate_arguments(arguments_ty args) { if (!validate_args(args->args)) return 0; if (args->vararg && args->vararg->annotation && !validate_expr(args->vararg->annotation, Load)) { return 0; } if (!validate_args(args->kwonlyargs)) return 0; if (args->kwarg && args->kwarg->annotation && !validate_expr(args->kwarg->annotation, Load)) { return 0; } if (asdl_seq_LEN(args->defaults) > asdl_seq_LEN(args->args)) { PyErr_SetString(PyExc_ValueError, "more positional defaults than args on arguments"); return 0; } if (asdl_seq_LEN(args->kw_defaults) != asdl_seq_LEN(args->kwonlyargs)) { PyErr_SetString(PyExc_ValueError, "length of kwonlyargs is not the same as " "kw_defaults on arguments"); return 0; } return validate_exprs(args->defaults, Load, 0) && validate_exprs(args->kw_defaults, Load, 1); } static int validate_constant(PyObject *value) { if (value == Py_None || value == Py_Ellipsis) return 1; if (PyLong_CheckExact(value) || PyFloat_CheckExact(value) || PyComplex_CheckExact(value) || PyBool_Check(value) || PyUnicode_CheckExact(value) || PyBytes_CheckExact(value)) return 1; if (PyTuple_CheckExact(value) || PyFrozenSet_CheckExact(value)) { PyObject *it; it = PyObject_GetIter(value); if (it == NULL) return 0; while (1) { PyObject *item = PyIter_Next(it); if (item == NULL) { if 
(PyErr_Occurred()) { Py_DECREF(it); return 0; } break; } if (!validate_constant(item)) { Py_DECREF(it); Py_DECREF(item); return 0; } Py_DECREF(item); } Py_DECREF(it); return 1; } return 0; } static int validate_expr(expr_ty exp, expr_context_ty ctx) { int check_ctx = 1; expr_context_ty actual_ctx; /* First check expression context. */ switch (exp->kind) { case Attribute_kind: actual_ctx = exp->v.Attribute.ctx; break; case Subscript_kind: actual_ctx = exp->v.Subscript.ctx; break; case Starred_kind: actual_ctx = exp->v.Starred.ctx; break; case Name_kind: actual_ctx = exp->v.Name.ctx; break; case List_kind: actual_ctx = exp->v.List.ctx; break; case Tuple_kind: actual_ctx = exp->v.Tuple.ctx; break; default: if (ctx != Load) { PyErr_Format(PyExc_ValueError, "expression which can't be " "assigned to in %s context", expr_context_name(ctx)); return 0; } check_ctx = 0; /* set actual_ctx to prevent gcc warning */ actual_ctx = 0; } if (check_ctx && actual_ctx != ctx) { PyErr_Format(PyExc_ValueError, "expression must have %s context but has %s instead", expr_context_name(ctx), expr_context_name(actual_ctx)); return 0; } /* Now validate expression. 
*/ switch (exp->kind) { case BoolOp_kind: if (asdl_seq_LEN(exp->v.BoolOp.values) < 2) { PyErr_SetString(PyExc_ValueError, "BoolOp with less than 2 values"); return 0; } return validate_exprs(exp->v.BoolOp.values, Load, 0); case BinOp_kind: return validate_expr(exp->v.BinOp.left, Load) && validate_expr(exp->v.BinOp.right, Load); case UnaryOp_kind: return validate_expr(exp->v.UnaryOp.operand, Load); case Lambda_kind: return validate_arguments(exp->v.Lambda.args) && validate_expr(exp->v.Lambda.body, Load); case IfExp_kind: return validate_expr(exp->v.IfExp.test, Load) && validate_expr(exp->v.IfExp.body, Load) && validate_expr(exp->v.IfExp.orelse, Load); case Dict_kind: if (asdl_seq_LEN(exp->v.Dict.keys) != asdl_seq_LEN(exp->v.Dict.values)) { PyErr_SetString(PyExc_ValueError, "Dict doesn't have the same number of keys as values"); return 0; } /* null_ok=1 for keys expressions to allow dict unpacking to work in dict literals, i.e. ``{**{a:b}}`` */ return validate_exprs(exp->v.Dict.keys, Load, /*null_ok=*/ 1) && validate_exprs(exp->v.Dict.values, Load, /*null_ok=*/ 0); case Set_kind: return validate_exprs(exp->v.Set.elts, Load, 0); #define COMP(NAME) \ case NAME ## _kind: \ return validate_comprehension(exp->v.NAME.generators) && \ validate_expr(exp->v.NAME.elt, Load); COMP(ListComp) COMP(SetComp) COMP(GeneratorExp) #undef COMP case DictComp_kind: return validate_comprehension(exp->v.DictComp.generators) && validate_expr(exp->v.DictComp.key, Load) && validate_expr(exp->v.DictComp.value, Load); case Yield_kind: return !exp->v.Yield.value || validate_expr(exp->v.Yield.value, Load); case YieldFrom_kind: return validate_expr(exp->v.YieldFrom.value, Load); case Await_kind: return validate_expr(exp->v.Await.value, Load); case Compare_kind: if (!asdl_seq_LEN(exp->v.Compare.comparators)) { PyErr_SetString(PyExc_ValueError, "Compare with no comparators"); return 0; } if (asdl_seq_LEN(exp->v.Compare.comparators) != asdl_seq_LEN(exp->v.Compare.ops)) { 
PyErr_SetString(PyExc_ValueError, "Compare has a different number " "of comparators and operands"); return 0; } return validate_exprs(exp->v.Compare.comparators, Load, 0) && validate_expr(exp->v.Compare.left, Load); case Call_kind: return validate_expr(exp->v.Call.func, Load) && validate_exprs(exp->v.Call.args, Load, 0) && validate_keywords(exp->v.Call.keywords); case Constant_kind: if (!validate_constant(exp->v.Constant.value)) { PyErr_Format(PyExc_TypeError, "got an invalid type in Constant: %s", Py_TYPE(exp->v.Constant.value)->tp_name); return 0; } return 1; case Num_kind: { PyObject *n = exp->v.Num.n; if (!PyLong_CheckExact(n) && !PyFloat_CheckExact(n) && !PyComplex_CheckExact(n)) { PyErr_SetString(PyExc_TypeError, "non-numeric type in Num"); return 0; } return 1; } case Str_kind: { PyObject *s = exp->v.Str.s; if (!PyUnicode_CheckExact(s)) { PyErr_SetString(PyExc_TypeError, "non-string type in Str"); return 0; } return 1; } case JoinedStr_kind: return validate_exprs(exp->v.JoinedStr.values, Load, 0); case FormattedValue_kind: if (validate_expr(exp->v.FormattedValue.value, Load) == 0) return 0; if (exp->v.FormattedValue.format_spec) return validate_expr(exp->v.FormattedValue.format_spec, Load); return 1; case Bytes_kind: { PyObject *b = exp->v.Bytes.s; if (!PyBytes_CheckExact(b)) { PyErr_SetString(PyExc_TypeError, "non-bytes type in Bytes"); return 0; } return 1; } case Attribute_kind: return validate_expr(exp->v.Attribute.value, Load); case Subscript_kind: return validate_slice(exp->v.Subscript.slice) && validate_expr(exp->v.Subscript.value, Load); case Starred_kind: return validate_expr(exp->v.Starred.value, ctx); case List_kind: return validate_exprs(exp->v.List.elts, ctx, 0); case Tuple_kind: return validate_exprs(exp->v.Tuple.elts, ctx, 0); /* These last cases don't have any checking. 
*/ case Name_kind: case NameConstant_kind: case Ellipsis_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected expression"); return 0; } } static int validate_nonempty_seq(asdl_seq *seq, const char *what, const char *owner) { if (asdl_seq_LEN(seq)) return 1; PyErr_Format(PyExc_ValueError, "empty %s on %s", what, owner); return 0; } static int validate_assignlist(asdl_seq *targets, expr_context_ty ctx) { return validate_nonempty_seq(targets, "targets", ctx == Del ? "Delete" : "Assign") && validate_exprs(targets, ctx, 0); } static int validate_body(asdl_seq *body, const char *owner) { return validate_nonempty_seq(body, "body", owner) && validate_stmts(body); } static int validate_stmt(stmt_ty stmt) { int i; switch (stmt->kind) { case FunctionDef_kind: return validate_body(stmt->v.FunctionDef.body, "FunctionDef") && validate_arguments(stmt->v.FunctionDef.args) && validate_exprs(stmt->v.FunctionDef.decorator_list, Load, 0) && (!stmt->v.FunctionDef.returns || validate_expr(stmt->v.FunctionDef.returns, Load)); case ClassDef_kind: return validate_body(stmt->v.ClassDef.body, "ClassDef") && validate_exprs(stmt->v.ClassDef.bases, Load, 0) && validate_keywords(stmt->v.ClassDef.keywords) && validate_exprs(stmt->v.ClassDef.decorator_list, Load, 0); case Return_kind: return !stmt->v.Return.value || validate_expr(stmt->v.Return.value, Load); case Delete_kind: return validate_assignlist(stmt->v.Delete.targets, Del); case Assign_kind: return validate_assignlist(stmt->v.Assign.targets, Store) && validate_expr(stmt->v.Assign.value, Load); case AugAssign_kind: return validate_expr(stmt->v.AugAssign.target, Store) && validate_expr(stmt->v.AugAssign.value, Load); case AnnAssign_kind: if (stmt->v.AnnAssign.target->kind != Name_kind && stmt->v.AnnAssign.simple) { PyErr_SetString(PyExc_TypeError, "AnnAssign with simple non-Name target"); return 0; } return validate_expr(stmt->v.AnnAssign.target, Store) && (!stmt->v.AnnAssign.value || validate_expr(stmt->v.AnnAssign.value, 
Load)) && validate_expr(stmt->v.AnnAssign.annotation, Load); case For_kind: return validate_expr(stmt->v.For.target, Store) && validate_expr(stmt->v.For.iter, Load) && validate_body(stmt->v.For.body, "For") && validate_stmts(stmt->v.For.orelse); case AsyncFor_kind: return validate_expr(stmt->v.AsyncFor.target, Store) && validate_expr(stmt->v.AsyncFor.iter, Load) && validate_body(stmt->v.AsyncFor.body, "AsyncFor") && validate_stmts(stmt->v.AsyncFor.orelse); case While_kind: return validate_expr(stmt->v.While.test, Load) && validate_body(stmt->v.While.body, "While") && validate_stmts(stmt->v.While.orelse); case If_kind: return validate_expr(stmt->v.If.test, Load) && validate_body(stmt->v.If.body, "If") && validate_stmts(stmt->v.If.orelse); case With_kind: if (!validate_nonempty_seq(stmt->v.With.items, "items", "With")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.With.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.With.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.With.body, "With"); case AsyncWith_kind: if (!validate_nonempty_seq(stmt->v.AsyncWith.items, "items", "AsyncWith")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.AsyncWith.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.AsyncWith.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.AsyncWith.body, "AsyncWith"); case Raise_kind: if (stmt->v.Raise.exc) { return validate_expr(stmt->v.Raise.exc, Load) && (!stmt->v.Raise.cause || validate_expr(stmt->v.Raise.cause, Load)); } if (stmt->v.Raise.cause) { PyErr_SetString(PyExc_ValueError, "Raise with cause but no exception"); return 0; } return 1; case Try_kind: if (!validate_body(stmt->v.Try.body, "Try")) return 0; if (!asdl_seq_LEN(stmt->v.Try.handlers) && !asdl_seq_LEN(stmt->v.Try.finalbody)) { 
PyErr_SetString(PyExc_ValueError, "Try has neither except handlers nor finalbody"); return 0; } if (!asdl_seq_LEN(stmt->v.Try.handlers) && asdl_seq_LEN(stmt->v.Try.orelse)) { PyErr_SetString(PyExc_ValueError, "Try has orelse but no except handlers"); return 0; } for (i = 0; i < asdl_seq_LEN(stmt->v.Try.handlers); i++) { excepthandler_ty handler = asdl_seq_GET(stmt->v.Try.handlers, i); if ((handler->v.ExceptHandler.type && !validate_expr(handler->v.ExceptHandler.type, Load)) || !validate_body(handler->v.ExceptHandler.body, "ExceptHandler")) return 0; } return (!asdl_seq_LEN(stmt->v.Try.finalbody) || validate_stmts(stmt->v.Try.finalbody)) && (!asdl_seq_LEN(stmt->v.Try.orelse) || validate_stmts(stmt->v.Try.orelse)); case Assert_kind: return validate_expr(stmt->v.Assert.test, Load) && (!stmt->v.Assert.msg || validate_expr(stmt->v.Assert.msg, Load)); case Import_kind: return validate_nonempty_seq(stmt->v.Import.names, "names", "Import"); case ImportFrom_kind: if (stmt->v.ImportFrom.level < 0) { PyErr_SetString(PyExc_ValueError, "Negative ImportFrom level"); return 0; } return validate_nonempty_seq(stmt->v.ImportFrom.names, "names", "ImportFrom"); case Global_kind: return validate_nonempty_seq(stmt->v.Global.names, "names", "Global"); case Nonlocal_kind: return validate_nonempty_seq(stmt->v.Nonlocal.names, "names", "Nonlocal"); case Expr_kind: return validate_expr(stmt->v.Expr.value, Load); case AsyncFunctionDef_kind: return validate_body(stmt->v.AsyncFunctionDef.body, "AsyncFunctionDef") && validate_arguments(stmt->v.AsyncFunctionDef.args) && validate_exprs(stmt->v.AsyncFunctionDef.decorator_list, Load, 0) && (!stmt->v.AsyncFunctionDef.returns || validate_expr(stmt->v.AsyncFunctionDef.returns, Load)); case Pass_kind: case Break_kind: case Continue_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected statement"); return 0; } } static int validate_stmts(asdl_seq *seq) { int i; for (i = 0; i < asdl_seq_LEN(seq); i++) { stmt_ty stmt = asdl_seq_GET(seq, 
i); if (stmt) { if (!validate_stmt(stmt)) return 0; } else { PyErr_SetString(PyExc_ValueError, "None disallowed in statement list"); return 0; } } return 1; } static int validate_exprs(asdl_seq *exprs, expr_context_ty ctx, int null_ok) { int i; for (i = 0; i < asdl_seq_LEN(exprs); i++) { expr_ty expr = asdl_seq_GET(exprs, i); if (expr) { if (!validate_expr(expr, ctx)) return 0; } else if (!null_ok) { PyErr_SetString(PyExc_ValueError, "None disallowed in expression list"); return 0; } } return 1; } int Ta3AST_Validate(mod_ty mod) { int res = 0; switch (mod->kind) { case Module_kind: res = validate_stmts(mod->v.Module.body); break; case Interactive_kind: res = validate_stmts(mod->v.Interactive.body); break; case Expression_kind: res = validate_expr(mod->v.Expression.body, Load); break; case Suite_kind: PyErr_SetString(PyExc_ValueError, "Suite is not valid in the CPython compiler"); break; default: PyErr_SetString(PyExc_SystemError, "impossible module node"); res = 0; break; } return res; } /* This is done here, so defines like "test" don't interfere with AST use above. */ #include "grammar.h" #include "parsetok.h" #include "graminit.h" /* Data structure used internally */ struct compiling { PyArena *c_arena; /* Arena for allocating memory. */ PyObject *c_filename; /* filename */ PyObject *c_normalize; /* Normalization function from unicodedata. */ PyObject *c_normalize_args; /* Normalization argument tuple. 
*/ int c_feature_version; /* Latest minior version of Python for allowed features */ }; static asdl_seq *seq_for_testlist(struct compiling *, const node *); static expr_ty ast_for_expr(struct compiling *, const node *); static stmt_ty ast_for_stmt(struct compiling *, const node *); static asdl_seq *ast_for_suite(struct compiling *, const node *); static asdl_seq *ast_for_exprlist(struct compiling *, const node *, expr_context_ty); static expr_ty ast_for_testlist(struct compiling *, const node *); static stmt_ty ast_for_classdef(struct compiling *, const node *, asdl_seq *); static stmt_ty ast_for_with_stmt(struct compiling *, const node *, int); static stmt_ty ast_for_for_stmt(struct compiling *, const node *, int); /* Note different signature for ast_for_call */ static expr_ty ast_for_call(struct compiling *, const node *, expr_ty); static PyObject *parsenumber(struct compiling *, const char *); static expr_ty parsestrplus(struct compiling *, const node *n); #define COMP_GENEXP 0 #define COMP_LISTCOMP 1 #define COMP_SETCOMP 2 static int init_normalization(struct compiling *c) { PyObject *m = PyImport_ImportModuleNoBlock("unicodedata"); if (!m) return 0; c->c_normalize = PyObject_GetAttrString(m, "normalize"); Py_DECREF(m); if (!c->c_normalize) return 0; c->c_normalize_args = Py_BuildValue("(sN)", "NFKC", Py_None); if (!c->c_normalize_args) { Py_CLEAR(c->c_normalize); return 0; } PyTuple_SET_ITEM(c->c_normalize_args, 1, NULL); return 1; } static identifier new_identifier(const char *n, struct compiling *c) { PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL); if (!id) return NULL; /* PyUnicode_DecodeUTF8 should always return a ready string. */ assert(PyUnicode_IS_READY(id)); /* Check whether there are non-ASCII characters in the identifier; if so, normalize to NFKC. 
*/ if (!PyUnicode_IS_ASCII(id)) { PyObject *id2; if (!c->c_normalize && !init_normalization(c)) { Py_DECREF(id); return NULL; } PyTuple_SET_ITEM(c->c_normalize_args, 1, id); id2 = PyObject_Call(c->c_normalize, c->c_normalize_args, NULL); Py_DECREF(id); if (!id2) return NULL; id = id2; } PyUnicode_InternInPlace(&id); if (PyArena_AddPyObject(c->c_arena, id) < 0) { Py_DECREF(id); return NULL; } return id; } #define NEW_IDENTIFIER(n) new_identifier(STR(n), c) static string new_type_comment(const char *s, struct compiling *c) { return PyUnicode_DecodeUTF8(s, strlen(s), NULL); } #define NEW_TYPE_COMMENT(n) new_type_comment(STR(n), c) static int ast_error(struct compiling *c, const node *n, const char *errmsg) { PyObject *value, *errstr, *loc, *tmp; loc = PyErr_ProgramTextObject(c->c_filename, LINENO(n)); if (!loc) { Py_INCREF(Py_None); loc = Py_None; } tmp = Py_BuildValue("(OiiN)", c->c_filename, LINENO(n), n->n_col_offset, loc); if (!tmp) return 0; errstr = PyUnicode_FromString(errmsg); if (!errstr) { Py_DECREF(tmp); return 0; } value = PyTuple_Pack(2, errstr, tmp); Py_DECREF(errstr); Py_DECREF(tmp); if (value) { PyErr_SetObject(PyExc_SyntaxError, value); Py_DECREF(value); } return 0; } /* num_stmts() returns number of contained statements. Use this routine to determine how big a sequence is needed for the statements in a parse tree. Its raison d'etre is this bit of grammar: stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE A simple_stmt can contain multiple small_stmt elements joined by semicolons. If the arg is a simple_stmt, the number of small_stmt elements is returned. 
*/ static int num_stmts(const node *n) { int i, l; node *ch; switch (TYPE(n)) { case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) return 0; else return num_stmts(CHILD(n, 0)); case file_input: l = 0; for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == stmt) l += num_stmts(ch); } return l; case stmt: return num_stmts(CHILD(n, 0)); case compound_stmt: return 1; case simple_stmt: return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */ case suite: /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ if (NCH(n) == 1) return num_stmts(CHILD(n, 0)); else { i = 2; l = 0; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) l += num_stmts(CHILD(n, i)); return l; } default: { char buf[128]; sprintf(buf, "Non-statement found: %d %d", TYPE(n), NCH(n)); Py_FatalError(buf); } } assert(0); return 0; } /* Transform the CST rooted at node * to the appropriate AST */ mod_ty Ta3AST_FromNodeObject(const node *n, PyCompilerFlags *flags, PyObject *filename, int feature_version, PyArena *arena) { int i, j, k, num; asdl_seq *stmts = NULL; asdl_seq *type_ignores = NULL; stmt_ty s; node *ch; struct compiling c; mod_ty res = NULL; asdl_seq *argtypes = NULL; expr_ty ret, arg; c.c_arena = arena; /* borrowed reference */ c.c_filename = filename; c.c_normalize = NULL; c.c_normalize_args = NULL; c.c_feature_version = feature_version; if (TYPE(n) == encoding_decl) n = CHILD(n, 0); k = 0; switch (TYPE(n)) { case file_input: stmts = _Ta3_asdl_seq_new(num_stmts(n), arena); if (!stmts) goto out; for (i = 0; i < NCH(n) - 1; i++) { ch = CHILD(n, i); if (TYPE(ch) == NEWLINE) continue; REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { s = ast_for_stmt(&c, ch); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } else { ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < num; j++) { s = ast_for_stmt(&c, CHILD(ch, j * 2)); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } } } /* Type ignores are stored under the ENDMARKER in 
file_input. */ ch = CHILD(n, NCH(n) - 1); REQ(ch, ENDMARKER); num = NCH(ch); type_ignores = _Ta3_asdl_seq_new(num, arena); if (!type_ignores) goto out; for (i = 0; i < num; i++) { type_ignore_ty ti = TypeIgnore(LINENO(CHILD(ch, i)), arena); if (!ti) goto out; asdl_seq_SET(type_ignores, i, ti); } res = Module(stmts, type_ignores, arena); break; case eval_input: { expr_ty testlist_ast; /* XXX Why not comp_for here? */ testlist_ast = ast_for_testlist(&c, CHILD(n, 0)); if (!testlist_ast) goto out; res = Expression(testlist_ast, arena); break; } case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) { stmts = _Ta3_asdl_seq_new(1, arena); if (!stmts) goto out; asdl_seq_SET(stmts, 0, Pass(n->n_lineno, n->n_col_offset, arena)); if (!asdl_seq_GET(stmts, 0)) goto out; res = Interactive(stmts, arena); } else { n = CHILD(n, 0); num = num_stmts(n); stmts = _Ta3_asdl_seq_new(num, arena); if (!stmts) goto out; if (num == 1) { s = ast_for_stmt(&c, n); if (!s) goto out; asdl_seq_SET(stmts, 0, s); } else { /* Only a simple_stmt can contain multiple statements. 
*/ REQ(n, simple_stmt); for (i = 0; i < NCH(n); i += 2) { if (TYPE(CHILD(n, i)) == NEWLINE) break; s = ast_for_stmt(&c, CHILD(n, i)); if (!s) goto out; asdl_seq_SET(stmts, i / 2, s); } } res = Interactive(stmts, arena); } break; case func_type_input: n = CHILD(n, 0); REQ(n, func_type); if (TYPE(CHILD(n, 1)) == typelist) { ch = CHILD(n, 1); /* this is overly permissive -- we don't pay any attention to * stars on the args -- just parse them into an ordered list */ num = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) num++; } argtypes = _Ta3_asdl_seq_new(num, arena); j = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) { arg = ast_for_expr(&c, CHILD(ch, i)); if (!arg) goto out; asdl_seq_SET(argtypes, j++, arg); } } } else argtypes = _Ta3_asdl_seq_new(0, arena); ret = ast_for_expr(&c, CHILD(n, NCH(n) - 1)); if (!ret) goto out; res = FunctionType(argtypes, ret, arena); break; default: PyErr_Format(PyExc_SystemError, "invalid node %d for Ta3AST_FromNode", TYPE(n)); goto out; } out: if (c.c_normalize) { Py_DECREF(c.c_normalize); PyTuple_SET_ITEM(c.c_normalize_args, 1, NULL); Py_DECREF(c.c_normalize_args); } return res; } mod_ty Ta3AST_FromNode(const node *n, PyCompilerFlags *flags, const char *filename_str, int feature_version, PyArena *arena) { mod_ty mod; PyObject *filename; filename = PyUnicode_DecodeFSDefault(filename_str); if (filename == NULL) return NULL; mod = Ta3AST_FromNodeObject(n, flags, filename, feature_version, arena); Py_DECREF(filename); return mod; } /* Return the AST repr. of the operator represented as syntax (|, ^, etc.) 
*/ static operator_ty get_operator(struct compiling *c, const node *n) { switch (TYPE(n)) { case VBAR: return BitOr; case CIRCUMFLEX: return BitXor; case AMPER: return BitAnd; case LEFTSHIFT: return LShift; case RIGHTSHIFT: return RShift; case PLUS: return Add; case MINUS: return Sub; case STAR: return Mult; case AT: if (c->c_feature_version < 5) { ast_error(c, n, "The '@' operator is only supported in Python 3.5 and greater"); return (operator_ty)0; } return MatMult; case SLASH: return Div; case DOUBLESLASH: return FloorDiv; case PERCENT: return Mod; default: return (operator_ty)0; } } static const char * const FORBIDDEN[] = { "None", "True", "False", NULL, }; static int forbidden_name(struct compiling *c, identifier name, const node *n, int full_checks) { assert(PyUnicode_Check(name)); if (PyUnicode_CompareWithASCIIString(name, "__debug__") == 0) { ast_error(c, n, "assignment to keyword"); return 1; } if (full_checks) { const char * const *p; for (p = FORBIDDEN; *p; p++) { if (PyUnicode_CompareWithASCIIString(name, *p) == 0) { ast_error(c, n, "assignment to keyword"); return 1; } } } return 0; } /* Set the context ctx for expr_ty e, recursively traversing e. Only sets context for expr kinds that "can appear in assignment context" (according to ../Parser/Python.asdl). For other expr kinds, it sets an appropriate syntax error and returns false. */ static int set_context(struct compiling *c, expr_ty e, expr_context_ty ctx, const node *n) { asdl_seq *s = NULL; /* If a particular expression type can't be used for assign / delete, set expr_name to its name and an error message will be generated. */ const char* expr_name = NULL; /* The ast defines augmented store and load contexts, but the implementation here doesn't actually use them. The code may be a little more complex than necessary as a result. It also means that expressions in an augmented assignment have a Store context. Consider restructuring so that augmented assignment uses set_context(), too. 
*/ assert(ctx != AugStore && ctx != AugLoad); switch (e->kind) { case Attribute_kind: e->v.Attribute.ctx = ctx; if (ctx == Store && forbidden_name(c, e->v.Attribute.attr, n, 1)) return 0; break; case Subscript_kind: e->v.Subscript.ctx = ctx; break; case Starred_kind: e->v.Starred.ctx = ctx; if (!set_context(c, e->v.Starred.value, ctx, n)) return 0; break; case Name_kind: if (ctx == Store) { if (forbidden_name(c, e->v.Name.id, n, 0)) return 0; /* forbidden_name() calls ast_error() */ } e->v.Name.ctx = ctx; break; case List_kind: e->v.List.ctx = ctx; s = e->v.List.elts; break; case Tuple_kind: e->v.Tuple.ctx = ctx; s = e->v.Tuple.elts; break; case Lambda_kind: expr_name = "lambda"; break; case Call_kind: expr_name = "function call"; break; case BoolOp_kind: case BinOp_kind: case UnaryOp_kind: expr_name = "operator"; break; case GeneratorExp_kind: expr_name = "generator expression"; break; case Yield_kind: case YieldFrom_kind: expr_name = "yield expression"; break; case Await_kind: expr_name = "await expression"; break; case ListComp_kind: expr_name = "list comprehension"; break; case SetComp_kind: expr_name = "set comprehension"; break; case DictComp_kind: expr_name = "dict comprehension"; break; case Dict_kind: case Set_kind: case Num_kind: case Str_kind: case Bytes_kind: case JoinedStr_kind: case FormattedValue_kind: expr_name = "literal"; break; case NameConstant_kind: expr_name = "keyword"; break; case Ellipsis_kind: expr_name = "Ellipsis"; break; case Compare_kind: expr_name = "comparison"; break; case IfExp_kind: expr_name = "conditional expression"; break; default: PyErr_Format(PyExc_SystemError, "unexpected expression in assignment %d (line %d)", e->kind, e->lineno); return 0; } /* Check for error string set by switch */ if (expr_name) { char buf[300]; PyOS_snprintf(buf, sizeof(buf), "can't %s %s", ctx == Store ? 
"assign to" : "delete", expr_name); return ast_error(c, n, buf); } /* If the LHS is a list or tuple, we need to set the assignment context for all the contained elements. */ if (s) { int i; for (i = 0; i < asdl_seq_LEN(s); i++) { if (!set_context(c, (expr_ty)asdl_seq_GET(s, i), ctx, n)) return 0; } } return 1; } static operator_ty ast_for_augassign(struct compiling *c, const node *n) { REQ(n, augassign); n = CHILD(n, 0); switch (STR(n)[0]) { case '+': return Add; case '-': return Sub; case '/': if (STR(n)[1] == '/') return FloorDiv; else return Div; case '%': return Mod; case '<': return LShift; case '>': return RShift; case '&': return BitAnd; case '^': return BitXor; case '|': return BitOr; case '*': if (STR(n)[1] == '*') return Pow; else return Mult; case '@': if (c->c_feature_version < 5) { ast_error(c, n, "The '@' operator is only supported in Python 3.5 and greater"); return (operator_ty)0; } return MatMult; default: PyErr_Format(PyExc_SystemError, "invalid augassign: %s", STR(n)); return (operator_ty)0; } } static cmpop_ty ast_for_comp_op(struct compiling *c, const node *n) { /* comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is' |'is' 'not' */ REQ(n, comp_op); if (NCH(n) == 1) { n = CHILD(n, 0); switch (TYPE(n)) { case LESS: return Lt; case GREATER: return Gt; case EQEQUAL: /* == */ return Eq; case LESSEQUAL: return LtE; case GREATEREQUAL: return GtE; case NOTEQUAL: return NotEq; case NAME: if (strcmp(STR(n), "in") == 0) return In; if (strcmp(STR(n), "is") == 0) return Is; default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s", STR(n)); return (cmpop_ty)0; } } else if (NCH(n) == 2) { /* handle "not in" and "is not" */ switch (TYPE(CHILD(n, 0))) { case NAME: if (strcmp(STR(CHILD(n, 1)), "in") == 0) return NotIn; if (strcmp(STR(CHILD(n, 0)), "is") == 0) return IsNot; default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s %s", STR(CHILD(n, 0)), STR(CHILD(n, 1))); return (cmpop_ty)0; } } PyErr_Format(PyExc_SystemError, "invalid comp_op: has 
%d children", NCH(n)); return (cmpop_ty)0; } static asdl_seq * seq_for_testlist(struct compiling *c, const node *n) { /* testlist: test (',' test)* [','] testlist_star_expr: test|star_expr (',' test|star_expr)* [','] */ asdl_seq *seq; expr_ty expression; int i; assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr || TYPE(n) == testlist_comp); seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { const node *ch = CHILD(n, i); assert(TYPE(ch) == test || TYPE(ch) == test_nocond || TYPE(ch) == star_expr); expression = ast_for_expr(c, ch); if (!expression) return NULL; assert(i / 2 < seq->size); asdl_seq_SET(seq, i / 2, expression); } return seq; } static arg_ty ast_for_arg(struct compiling *c, const node *n) { identifier name; expr_ty annotation = NULL; node *ch; arg_ty ret; assert(TYPE(n) == tfpdef || TYPE(n) == vfpdef); ch = CHILD(n, 0); name = NEW_IDENTIFIER(ch); if (!name) return NULL; if (forbidden_name(c, name, ch, 0)) return NULL; if (NCH(n) == 3 && TYPE(CHILD(n, 1)) == COLON) { annotation = ast_for_expr(c, CHILD(n, 2)); if (!annotation) return NULL; } ret = arg(name, annotation, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!ret) return NULL; return ret; } /* returns -1 if failed to handle keyword only arguments returns new position to keep processing if successful (',' tfpdef ['=' test])* ^^^ start pointing here */ static int handle_keywordonly_args(struct compiling *c, const node *n, int start, asdl_seq *kwonlyargs, asdl_seq *kwdefaults) { PyObject *argname; node *ch; expr_ty expression, annotation; arg_ty arg; int i = start; int j = 0; /* index for kwdefaults and kwonlyargs */ if (kwonlyargs == NULL) { ast_error(c, CHILD(n, start), "named arguments must follow bare *"); return -1; } assert(kwdefaults != NULL); while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case vfpdef: case tfpdef: if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expression = ast_for_expr(c, CHILD(n, i + 
2)); if (!expression) goto error; asdl_seq_SET(kwdefaults, j, expression); i += 2; /* '=' and test */ } else { /* setting NULL if no default value exists */ asdl_seq_SET(kwdefaults, j, NULL); } if (NCH(ch) == 3) { /* ch is NAME ':' test */ annotation = ast_for_expr(c, CHILD(ch, 2)); if (!annotation) goto error; } else { annotation = NULL; } ch = CHILD(ch, 0); argname = NEW_IDENTIFIER(ch); if (!argname) goto error; if (forbidden_name(c, argname, ch, 0)) goto error; arg = arg(argname, annotation, NULL, LINENO(ch), ch->n_col_offset, c->c_arena); if (!arg) goto error; asdl_seq_SET(kwonlyargs, j++, arg); i += 1; /* the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; case DOUBLESTAR: return i; default: ast_error(c, ch, "unexpected node"); goto error; } } return i; error: return -1; } /* Create AST for argument list. */ static arguments_ty ast_for_arguments(struct compiling *c, const node *n) { /* This function handles both typedargslist (function definition) and varargslist (lambda definition). 
parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']) tfpdef: NAME [':' test] varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [',']]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [','] ) vfpdef: NAME */ int i, j, k, nposargs = 0, nkwonlyargs = 0; int nposdefaults = 0, found_default = 0; asdl_seq *posargs, *posdefaults, *kwonlyargs, *kwdefaults; arg_ty vararg = NULL, kwarg = NULL; arg_ty arg; node *ch; if (TYPE(n) == parameters) { if (NCH(n) == 2) /* () as argument list */ return arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); n = CHILD(n, 1); } assert(TYPE(n) == typedargslist || TYPE(n) == varargslist); /* First count the number of positional args & defaults. The variable i is the loop index for this for loop and the next. The next loop picks up where the first leaves off. */ for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == STAR) { /* skip star */ i++; if (i < NCH(n) && /* skip argument following star */ (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { i++; } break; } if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == vfpdef || TYPE(ch) == tfpdef) nposargs++; if (TYPE(ch) == EQUAL) nposdefaults++; } /* count the number of keyword only args & defaults for keyword only args */ for ( ; i < NCH(n); ++i) { ch = CHILD(n, i); if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == tfpdef || TYPE(ch) == vfpdef) nkwonlyargs++; } posargs = (nposargs ? _Ta3_asdl_seq_new(nposargs, c->c_arena) : NULL); if (!posargs && nposargs) return NULL; kwonlyargs = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwonlyargs && nkwonlyargs) return NULL; posdefaults = (nposdefaults ? 
_Ta3_asdl_seq_new(nposdefaults, c->c_arena) : NULL); if (!posdefaults && nposdefaults) return NULL; /* The length of kwonlyargs and kwdefaults are same since we set NULL as default for keyword only argument w/o default - we have sequence data structure, but no dictionary */ kwdefaults = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwdefaults && nkwonlyargs) return NULL; if (nposargs + nkwonlyargs > 255) { ast_error(c, n, "more than 255 arguments"); return NULL; } /* tfpdef: NAME [':' test] vfpdef: NAME */ i = 0; j = 0; /* index for defaults */ k = 0; /* index for args */ while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case tfpdef: case vfpdef: /* XXX Need to worry about checking if TYPE(CHILD(n, i+1)) is anything other than EQUAL or a comma? */ /* XXX Should NCH(n) check be made a separate check? */ if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expr_ty expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) return NULL; assert(posdefaults != NULL); asdl_seq_SET(posdefaults, j++, expression); i += 2; found_default = 1; } else if (found_default) { ast_error(c, n, "non-default argument follows default argument"); return NULL; } arg = ast_for_arg(c, ch); if (!arg) return NULL; asdl_seq_SET(posargs, k++, arg); i += 1; /* the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case STAR: if (i+1 >= NCH(n) || (i+2 == NCH(n) && (TYPE(CHILD(n, i+1)) == COMMA || TYPE(CHILD(n, i+1)) == TYPE_COMMENT))) { ast_error(c, CHILD(n, i), "named arguments must follow bare *"); return NULL; } ch = CHILD(n, i+1); /* tfpdef or COMMA */ if (TYPE(ch) == COMMA) { int res = 0; i += 2; /* now follows keyword only arguments */ if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) { ast_error(c, CHILD(n, i), "bare * has associated type comment"); return NULL; } res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ 
} else { vararg = ast_for_arg(c, ch); if (!vararg) return NULL; i += 2; /* the star and the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) { vararg->type_comment = NEW_TYPE_COMMENT(CHILD(n, i)); i += 1; } if (i < NCH(n) && (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { int res = 0; res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } } break; case DOUBLESTAR: ch = CHILD(n, i+1); /* tfpdef */ assert(TYPE(ch) == tfpdef || TYPE(ch) == vfpdef); kwarg = ast_for_arg(c, ch); if (!kwarg) return NULL; i += 2; /* the double star and the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: assert(i); if (kwarg) arg = kwarg; /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; default: PyErr_Format(PyExc_SystemError, "unexpected node in varargslist: %d @ %d", TYPE(ch), i); return NULL; } } return arguments(posargs, vararg, kwonlyargs, kwdefaults, kwarg, posdefaults, c->c_arena); } static expr_ty ast_for_dotted_name(struct compiling *c, const node *n) { expr_ty e; identifier id; int lineno, col_offset; int i; REQ(n, dotted_name); lineno = LINENO(n); col_offset = n->n_col_offset; id = NEW_IDENTIFIER(CHILD(n, 0)); if (!id) return NULL; e = Name(id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; for (i = 2; i < NCH(n); i+=2) { id = NEW_IDENTIFIER(CHILD(n, i)); if (!id) return NULL; e = Attribute(e, id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; } return e; } static expr_ty ast_for_decorator(struct compiling *c, const node *n) { /* decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE */ expr_ty d = NULL; expr_ty name_expr; REQ(n, decorator); REQ(CHILD(n, 0), AT); REQ(RCHILD(n, -1), NEWLINE); name_expr = ast_for_dotted_name(c, 
CHILD(n, 1)); if (!name_expr) return NULL; if (NCH(n) == 3) { /* No arguments */ d = name_expr; name_expr = NULL; } else if (NCH(n) == 5) { /* Call with no arguments */ d = Call(name_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!d) return NULL; name_expr = NULL; } else { d = ast_for_call(c, CHILD(n, 3), name_expr); if (!d) return NULL; name_expr = NULL; } return d; } static asdl_seq* ast_for_decorators(struct compiling *c, const node *n) { asdl_seq* decorator_seq; expr_ty d; int i; REQ(n, decorators); decorator_seq = _Ta3_asdl_seq_new(NCH(n), c->c_arena); if (!decorator_seq) return NULL; for (i = 0; i < NCH(n); i++) { d = ast_for_decorator(c, CHILD(n, i)); if (!d) return NULL; asdl_seq_SET(decorator_seq, i, d); } return decorator_seq; } static stmt_ty ast_for_funcdef_impl(struct compiling *c, const node *n, asdl_seq *decorator_seq, int is_async) { /* funcdef: 'def' NAME parameters ['->' test] ':' [TYPE_COMMENT] suite */ identifier name; arguments_ty args; asdl_seq *body; expr_ty returns = NULL; int name_i = 1; node *tc; string type_comment = NULL; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async functions are only supported in Python 3.5 and greater"); return NULL; } REQ(n, funcdef); name = NEW_IDENTIFIER(CHILD(n, name_i)); if (!name) return NULL; if (forbidden_name(c, name, CHILD(n, name_i), 0)) return NULL; args = ast_for_arguments(c, CHILD(n, name_i + 1)); if (!args) return NULL; if (TYPE(CHILD(n, name_i+2)) == RARROW) { returns = ast_for_expr(c, CHILD(n, name_i + 3)); if (!returns) return NULL; name_i += 2; } if (TYPE(CHILD(n, name_i + 3)) == TYPE_COMMENT) { type_comment = NEW_TYPE_COMMENT(CHILD(n, name_i + 3)); name_i += 1; } body = ast_for_suite(c, CHILD(n, name_i + 3)); if (!body) return NULL; if (!type_comment && NCH(CHILD(n, name_i + 3)) > 1) { /* If the function doesn't have a type comment on the same line, check * if the suite has a type comment in it. 
*/ tc = CHILD(CHILD(n, name_i + 3), 1); if (TYPE(tc) == TYPE_COMMENT) type_comment = NEW_TYPE_COMMENT(tc); } if (is_async) return AsyncFunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n), n->n_col_offset, c->c_arena); else return FunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_async_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* async_funcdef: ASYNC funcdef */ REQ(n, async_funcdef); REQ(CHILD(n, 0), ASYNC); REQ(CHILD(n, 1), funcdef); return ast_for_funcdef_impl(c, CHILD(n, 1), decorator_seq, 1 /* is_async */); } static stmt_ty ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ return ast_for_funcdef_impl(c, n, decorator_seq, 0 /* is_async */); } static stmt_ty ast_for_async_stmt(struct compiling *c, const node *n) { /* async_stmt: ASYNC (funcdef | with_stmt | for_stmt) */ REQ(n, async_stmt); REQ(CHILD(n, 0), ASYNC); switch (TYPE(CHILD(n, 1))) { case funcdef: return ast_for_funcdef_impl(c, CHILD(n, 1), NULL, 1 /* is_async */); case with_stmt: return ast_for_with_stmt(c, CHILD(n, 1), 1 /* is_async */); case for_stmt: return ast_for_for_stmt(c, CHILD(n, 1), 1 /* is_async */); default: PyErr_Format(PyExc_SystemError, "invalid async stament: %s", STR(CHILD(n, 1))); return NULL; } } static stmt_ty ast_for_decorated(struct compiling *c, const node *n) { /* decorated: decorators (classdef | funcdef | async_funcdef) */ stmt_ty thing = NULL; asdl_seq *decorator_seq = NULL; REQ(n, decorated); decorator_seq = ast_for_decorators(c, CHILD(n, 0)); if (!decorator_seq) return NULL; assert(TYPE(CHILD(n, 1)) == funcdef || TYPE(CHILD(n, 1)) == async_funcdef || TYPE(CHILD(n, 1)) == classdef); if (TYPE(CHILD(n, 1)) == funcdef) { thing = ast_for_funcdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == classdef) { thing = ast_for_classdef(c, CHILD(n, 
1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == async_funcdef) { thing = ast_for_async_funcdef(c, CHILD(n, 1), decorator_seq); } /* we count the decorators in when talking about the class' or * function's line number */ if (thing) { thing->lineno = LINENO(n); thing->col_offset = n->n_col_offset; } return thing; } static expr_ty ast_for_lambdef(struct compiling *c, const node *n) { /* lambdef: 'lambda' [varargslist] ':' test lambdef_nocond: 'lambda' [varargslist] ':' test_nocond */ arguments_ty args; expr_ty expression; if (NCH(n) == 3) { args = arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; } else { args = ast_for_arguments(c, CHILD(n, 1)); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 3)); if (!expression) return NULL; } return Lambda(args, expression, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_ifexpr(struct compiling *c, const node *n) { /* test: or_test 'if' or_test 'else' test */ expr_ty expression, body, orelse; assert(NCH(n) == 5); body = ast_for_expr(c, CHILD(n, 0)); if (!body) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; orelse = ast_for_expr(c, CHILD(n, 4)); if (!orelse) return NULL; return IfExp(expression, body, orelse, LINENO(n), n->n_col_offset, c->c_arena); } /* Count the number of 'for' loops in a comprehension. Helper for ast_for_comprehension(). 
*/
static int
count_comp_fors(struct compiling *c, const node *n)
{
    int n_fors = 0;
    int is_async;

  count_comp_for:
    is_async = 0;
    n_fors++;
    REQ(n, comp_for);
    /* An optional leading ASYNC token shifts every child index by one. */
    if (TYPE(CHILD(n, 0)) == ASYNC) {
        is_async = 1;
    }
    if (NCH(n) == (5 + is_async)) {
        /* A trailing comp_iter child is present; keep walking the chain. */
        n = CHILD(n, 4 + is_async);
    }
    else {
        return n_fors;
    }
  count_comp_iter:
    REQ(n, comp_iter);
    n = CHILD(n, 0);
    if (TYPE(n) == comp_for)
        goto count_comp_for;
    else if (TYPE(n) == comp_if) {
        /* comp_if with 3 children carries a nested comp_iter to follow. */
        if (NCH(n) == 3) {
            n = CHILD(n, 2);
            goto count_comp_iter;
        }
        else
            return n_fors;
    }

    /* Should never be reached */
    PyErr_SetString(PyExc_SystemError,
                    "logic error in count_comp_fors");
    return -1;
}

/* Count the number of 'if' statements in a comprehension.

   Helper for ast_for_comprehension().
   NOTE(review): assumes `n` is a comp_iter node on entry — the REQ below
   enforces this at runtime.
*/

static int
count_comp_ifs(struct compiling *c, const node *n)
{
    int n_ifs = 0;

    while (1) {
        REQ(n, comp_iter);
        if (TYPE(CHILD(n, 0)) == comp_for)
            return n_ifs;
        n = CHILD(n, 0);
        REQ(n, comp_if);
        n_ifs++;
        /* A 2-child comp_if has no further comp_iter; stop counting. */
        if (NCH(n) == 2)
            return n_ifs;
        n = CHILD(n, 2);
    }
}

/* Build the asdl_seq of comprehension_ty clauses ('for ... [if ...]')
   for a comprehension rooted at comp_for node `n`. */
static asdl_seq *
ast_for_comprehension(struct compiling *c, const node *n)
{
    int i, n_fors;
    asdl_seq *comps;

    n_fors = count_comp_fors(c, n);
    if (n_fors == -1)
        return NULL;

    comps = _Ta3_asdl_seq_new(n_fors, c->c_arena);
    if (!comps)
        return NULL;

    for (i = 0; i < n_fors; i++) {
        comprehension_ty comp;
        asdl_seq *t;
        expr_ty expression, first;
        node *for_ch;
        int is_async = 0;

        REQ(n, comp_for);

        if (TYPE(CHILD(n, 0)) == ASYNC) {
            is_async = 1;
        }

        /* Async comprehensions only allowed in Python 3.6 and greater */
        if (is_async && c->c_feature_version < 6) {
            ast_error(c, n, "Async comprehensions are only supported in Python 3.6 and greater");
            return NULL;
        }

        for_ch = CHILD(n, 1 + is_async);
        t = ast_for_exprlist(c, for_ch, Store);
        if (!t)
            return NULL;
        expression = ast_for_expr(c, CHILD(n, 3 + is_async));
        if (!expression)
            return NULL;

        /* Check the # of children rather than the length of t, since
           (x for x, in ...) has 1 element in t, but still requires a Tuple.
*/ first = (expr_ty)asdl_seq_GET(t, 0); if (NCH(for_ch) == 1) comp = comprehension(first, expression, NULL, is_async, c->c_arena); else comp = comprehension(Tuple(t, Store, first->lineno, first->col_offset, c->c_arena), expression, NULL, is_async, c->c_arena); if (!comp) return NULL; if (NCH(n) == (5 + is_async)) { int j, n_ifs; asdl_seq *ifs; n = CHILD(n, 4 + is_async); n_ifs = count_comp_ifs(c, n); if (n_ifs == -1) return NULL; ifs = _Ta3_asdl_seq_new(n_ifs, c->c_arena); if (!ifs) return NULL; for (j = 0; j < n_ifs; j++) { REQ(n, comp_iter); n = CHILD(n, 0); REQ(n, comp_if); expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; asdl_seq_SET(ifs, j, expression); if (NCH(n) == 3) n = CHILD(n, 2); } /* on exit, must guarantee that n is a comp_for */ if (TYPE(n) == comp_iter) n = CHILD(n, 0); comp->ifs = ifs; } asdl_seq_SET(comps, i, comp); } return comps; } static expr_ty ast_for_itercomp(struct compiling *c, const node *n, int type) { /* testlist_comp: (test|star_expr) * ( comp_for | (',' (test|star_expr))* [','] ) */ expr_ty elt; asdl_seq *comps; node *ch; assert(NCH(n) > 1); ch = CHILD(n, 0); elt = ast_for_expr(c, ch); if (!elt) return NULL; if (elt->kind == Starred_kind) { ast_error(c, ch, "iterable unpacking cannot be used in comprehension"); return NULL; } comps = ast_for_comprehension(c, CHILD(n, 1)); if (!comps) return NULL; if (type == COMP_GENEXP) return GeneratorExp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else if (type == COMP_LISTCOMP) return ListComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else if (type == COMP_SETCOMP) return SetComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else /* Should never happen */ return NULL; } /* Fills in the key, value pair corresponding to the dict element. In case * of an unpacking, key is NULL. *i is advanced by the number of ast * elements. Iff successful, nonzero is returned. 
*/ static int ast_for_dictelement(struct compiling *c, const node *n, int *i, expr_ty *key, expr_ty *value) { expr_ty expression; if (TYPE(CHILD(n, *i)) == DOUBLESTAR) { assert(NCH(n) - *i >= 2); expression = ast_for_expr(c, CHILD(n, *i + 1)); if (!expression) return 0; *key = NULL; *value = expression; *i += 2; } else { assert(NCH(n) - *i >= 3); expression = ast_for_expr(c, CHILD(n, *i)); if (!expression) return 0; *key = expression; REQ(CHILD(n, *i + 1), COLON); expression = ast_for_expr(c, CHILD(n, *i + 2)); if (!expression) return 0; *value = expression; *i += 3; } return 1; } static expr_ty ast_for_dictcomp(struct compiling *c, const node *n) { expr_ty key, value; asdl_seq *comps; int i = 0; if (!ast_for_dictelement(c, n, &i, &key, &value)) return NULL; assert(key); assert(NCH(n) - i >= 1); comps = ast_for_comprehension(c, CHILD(n, i)); if (!comps) return NULL; return DictComp(key, value, comps, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_dictdisplay(struct compiling *c, const node *n) { int i; int j; int size; asdl_seq *keys, *values; size = (NCH(n) + 1) / 3; /* +1 in case no trailing comma */ keys = _Ta3_asdl_seq_new(size, c->c_arena); if (!keys) return NULL; values = _Ta3_asdl_seq_new(size, c->c_arena); if (!values) return NULL; j = 0; for (i = 0; i < NCH(n); i++) { expr_ty key, value; if (!ast_for_dictelement(c, n, &i, &key, &value)) return NULL; asdl_seq_SET(keys, j, key); asdl_seq_SET(values, j, value); j++; } keys->size = j; values->size = j; return Dict(keys, values, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_genexp(struct compiling *c, const node *n) { assert(TYPE(n) == (testlist_comp) || TYPE(n) == (argument)); return ast_for_itercomp(c, n, COMP_GENEXP); } static expr_ty ast_for_listcomp(struct compiling *c, const node *n) { assert(TYPE(n) == (testlist_comp)); return ast_for_itercomp(c, n, COMP_LISTCOMP); } static expr_ty ast_for_setcomp(struct compiling *c, const node *n) { assert(TYPE(n) == 
(dictorsetmaker)); return ast_for_itercomp(c, n, COMP_SETCOMP); } static expr_ty ast_for_setdisplay(struct compiling *c, const node *n) { int i; int size; asdl_seq *elts; assert(TYPE(n) == (dictorsetmaker)); size = (NCH(n) + 1) / 2; /* +1 in case no trailing comma */ elts = _Ta3_asdl_seq_new(size, c->c_arena); if (!elts) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, i)); if (!expression) return NULL; asdl_seq_SET(elts, i / 2, expression); } return Set(elts, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_atom(struct compiling *c, const node *n) { /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False' */ node *ch = CHILD(n, 0); switch (TYPE(ch)) { case NAME: { PyObject *name; const char *s = STR(ch); size_t len = strlen(s); if (len >= 4 && len <= 5) { if (!strcmp(s, "None")) return NameConstant(Py_None, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "True")) return NameConstant(Py_True, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "False")) return NameConstant(Py_False, LINENO(n), n->n_col_offset, c->c_arena); } name = new_identifier(s, c); if (!name) return NULL; /* All names start in Load context, but may later be changed. 
*/ return Name(name, Load, LINENO(n), n->n_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { char buf[128]; const char *s = NULL; PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) s = PyUnicode_AsUTF8(errstr); if (s) { PyOS_snprintf(buf, sizeof(buf), "(%s) %s", errtype, s); } else { PyErr_Clear(); PyOS_snprintf(buf, sizeof(buf), "(%s) unknown error", errtype); } Py_XDECREF(errstr); ast_error(c, n, buf); Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum; const char *s = STR(ch); /* Underscores in numeric literals are only allowed in Python 3.6 or greater */ /* Check for underscores here rather than in parse_number so we can report a line number on error */ if (c->c_feature_version < 6 && strchr(s, '_') != NULL) { ast_error(c, ch, "Underscores in numeric literals are only supported in Python 3.6 and greater"); return NULL; } pynum = parsenumber(c, s); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Num(pynum, LINENO(n), n->n_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Ellipsis(LINENO(n), n->n_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if ((NCH(ch) > 1) && (TYPE(CHILD(ch, 1)) == comp_for)) return ast_for_genexp(c, ch); return ast_for_testlist(c, ch); case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), 
n->n_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); } else return ast_for_listcomp(c, ch); case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. 
*/ res = ast_for_dictdisplay(c, ch); } if (res) { res->lineno = LINENO(n); res->col_offset = n->n_col_offset; } return res; } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } } static slice_ty ast_for_slice(struct compiling *c, const node *n) { node *ch; expr_ty lower = NULL, upper = NULL, step = NULL; REQ(n, subscript); /* subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] */ ch = CHILD(n, 0); if (NCH(n) == 1 && TYPE(ch) == test) { /* 'step' variable hold no significance in terms of being used over other vars */ step = ast_for_expr(c, ch); if (!step) return NULL; return Index(step, c->c_arena); } if (TYPE(ch) == test) { lower = ast_for_expr(c, ch); if (!lower) return NULL; } /* If there's an upper bound it's in the second or third position. */ if (TYPE(ch) == COLON) { if (NCH(n) > 1) { node *n2 = CHILD(n, 1); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } } else if (NCH(n) > 2) { node *n2 = CHILD(n, 2); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } ch = CHILD(n, NCH(n) - 1); if (TYPE(ch) == sliceop) { if (NCH(ch) != 1) { ch = CHILD(ch, 1); if (TYPE(ch) == test) { step = ast_for_expr(c, ch); if (!step) return NULL; } } } return Slice(lower, upper, step, c->c_arena); } static expr_ty ast_for_binop(struct compiling *c, const node *n) { /* Must account for a sequence of expressions. How should A op B op C by represented? BinOp(BinOp(A, op, B), op, C). 
*/ int i, nops; expr_ty expr1, expr2, result; operator_ty newoperator; expr1 = ast_for_expr(c, CHILD(n, 0)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 2)); if (!expr2) return NULL; newoperator = get_operator(c, CHILD(n, 1)); if (!newoperator) return NULL; result = BinOp(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); if (!result) return NULL; nops = (NCH(n) - 1) / 2; for (i = 1; i < nops; i++) { expr_ty tmp_result, tmp; const node* next_oper = CHILD(n, i * 2 + 1); newoperator = get_operator(c, next_oper); if (!newoperator) return NULL; tmp = ast_for_expr(c, CHILD(n, i * 2 + 2)); if (!tmp) return NULL; tmp_result = BinOp(result, newoperator, tmp, LINENO(next_oper), next_oper->n_col_offset, c->c_arena); if (!tmp_result) return NULL; result = tmp_result; } return result; } static expr_ty ast_for_trailer(struct compiling *c, const node *n, expr_ty left_expr) { /* trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop] */ REQ(n, trailer); if (TYPE(CHILD(n, 0)) == LPAR) { if (NCH(n) == 2) return Call(left_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); else return ast_for_call(c, CHILD(n, 1), left_expr); } else if (TYPE(CHILD(n, 0)) == DOT) { PyObject *attr_id = NEW_IDENTIFIER(CHILD(n, 1)); if (!attr_id) return NULL; return Attribute(left_expr, attr_id, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { REQ(CHILD(n, 0), LSQB); REQ(CHILD(n, 2), RSQB); n = CHILD(n, 1); if (NCH(n) == 1) { slice_ty slc = ast_for_slice(c, CHILD(n, 0)); if (!slc) return NULL; return Subscript(left_expr, slc, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { /* The grammar is ambiguous here. The ambiguity is resolved by treating the sequence as a tuple literal if there are no slice features. 
*/ int j; slice_ty slc; expr_ty e; int simple = 1; asdl_seq *slices, *elts; slices = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!slices) return NULL; for (j = 0; j < NCH(n); j += 2) { slc = ast_for_slice(c, CHILD(n, j)); if (!slc) return NULL; if (slc->kind != Index_kind) simple = 0; asdl_seq_SET(slices, j / 2, slc); } if (!simple) { return Subscript(left_expr, ExtSlice(slices, c->c_arena), Load, LINENO(n), n->n_col_offset, c->c_arena); } /* extract Index values and put them in a Tuple */ elts = _Ta3_asdl_seq_new(asdl_seq_LEN(slices), c->c_arena); if (!elts) return NULL; for (j = 0; j < asdl_seq_LEN(slices); ++j) { slc = (slice_ty)asdl_seq_GET(slices, j); assert(slc->kind == Index_kind && slc->v.Index.value); asdl_seq_SET(elts, j, slc->v.Index.value); } e = Tuple(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); if (!e) return NULL; return Subscript(left_expr, Index(e, c->c_arena), Load, LINENO(n), n->n_col_offset, c->c_arena); } } } static expr_ty ast_for_factor(struct compiling *c, const node *n) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; switch (TYPE(CHILD(n, 0))) { case PLUS: return UnaryOp(UAdd, expression, LINENO(n), n->n_col_offset, c->c_arena); case MINUS: return UnaryOp(USub, expression, LINENO(n), n->n_col_offset, c->c_arena); case TILDE: return UnaryOp(Invert, expression, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unhandled factor: %d", TYPE(CHILD(n, 0))); return NULL; } static expr_ty ast_for_atom_expr(struct compiling *c, const node *n) { int i, nch, start = 0; expr_ty e, tmp; REQ(n, atom_expr); nch = NCH(n); if (TYPE(CHILD(n, 0)) == AWAIT) { if (c->c_feature_version < 5) { ast_error(c, n, "Await expressions are only supported in Python 3.5 and greater"); return NULL; } start = 1; assert(nch > 1); } e = ast_for_atom(c, CHILD(n, start)); if (!e) return NULL; if (nch == 1) return e; if (start && nch == 2) { return Await(e, LINENO(n), n->n_col_offset, 
c->c_arena); } for (i = start + 1; i < nch; i++) { node *ch = CHILD(n, i); if (TYPE(ch) != trailer) break; tmp = ast_for_trailer(c, ch, e); if (!tmp) return NULL; tmp->lineno = e->lineno; tmp->col_offset = e->col_offset; e = tmp; } if (start) { /* there was an AWAIT */ return Await(e, LINENO(n), n->n_col_offset, c->c_arena); } else { return e; } } static expr_ty ast_for_power(struct compiling *c, const node *n) { /* power: atom trailer* ('**' factor)* */ expr_ty e; REQ(n, power); e = ast_for_atom_expr(c, CHILD(n, 0)); if (!e) return NULL; if (NCH(n) == 1) return e; if (TYPE(CHILD(n, NCH(n) - 1)) == factor) { expr_ty f = ast_for_expr(c, CHILD(n, NCH(n) - 1)); if (!f) return NULL; e = BinOp(e, Pow, f, LINENO(n), n->n_col_offset, c->c_arena); } return e; } static expr_ty ast_for_starred(struct compiling *c, const node *n) { expr_ty tmp; REQ(n, star_expr); tmp = ast_for_expr(c, CHILD(n, 1)); if (!tmp) return NULL; /* The Load context is changed later. */ return Starred(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena); } /* Do not name a variable 'expr'! Will cause a compile error. 
*/ static expr_ty ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: [AWAIT] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, seq, LINENO(n), n->n_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Ta3_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < 
NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. */ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; } static expr_ty ast_for_call(struct compiling *c, const node *n, expr_ty func) { /* arglist: argument (',' argument)* [','] argument: ( test [comp_for] | '*' test | test '=' test | '**' test ) */ int i, nargs, nkeywords, ngens; int ndoublestars; asdl_seq *args; asdl_seq *keywords; REQ(n, arglist); nargs = 0; nkeywords = 0; ngens = 0; for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { if (NCH(ch) == 1) nargs++; 
else if (TYPE(CHILD(ch, 1)) == comp_for) ngens++; else if (TYPE(CHILD(ch, 0)) == STAR) nargs++; else /* TYPE(CHILD(ch, 0)) == DOUBLESTAR or keyword argument */ nkeywords++; } } if (ngens > 1 || (ngens && (nargs || nkeywords))) { ast_error(c, n, "Generator expression must be parenthesized " "if not sole argument"); return NULL; } if (nargs + nkeywords + ngens > 255) { ast_error(c, n, "more than 255 arguments"); return NULL; } args = _Ta3_asdl_seq_new(nargs + ngens, c->c_arena); if (!args) return NULL; keywords = _Ta3_asdl_seq_new(nkeywords, c->c_arena); if (!keywords) return NULL; nargs = 0; /* positional arguments + iterable argument unpackings */ nkeywords = 0; /* keyword arguments + keyword argument unpackings */ ndoublestars = 0; /* just keyword argument unpackings */ for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { expr_ty e; node *chch = CHILD(ch, 0); if (NCH(ch) == 1) { /* a positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_expr(c, chch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(chch) == STAR) { /* an iterable argument unpacking */ expr_ty starred; if (ndoublestars) { ast_error(c, chch, "iterable argument unpacking follows " "keyword argument unpacking"); return NULL; } e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; starred = Starred(e, Load, LINENO(chch), chch->n_col_offset, c->c_arena); if (!starred) return NULL; asdl_seq_SET(args, nargs++, starred); } else if (TYPE(chch) == DOUBLESTAR) { /* a keyword argument unpacking */ keyword_ty kw; i++; e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; kw = keyword(NULL, e, c->c_arena); asdl_seq_SET(keywords, nkeywords++, kw); ndoublestars++; } else if (TYPE(CHILD(ch, 1)) == comp_for) { /* the lone generator expression */ e = ast_for_genexp(c, 
ch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else { /* a keyword argument */ keyword_ty kw; identifier key, tmp; int k; /* chch is test, but must be an identifier? */ e = ast_for_expr(c, chch); if (!e) return NULL; /* f(lambda x: x[0] = 3) ends up getting parsed with * LHS test = lambda x: x[0], and RHS test = 3. * SF bug 132313 points out that complaining about a keyword * then is very confusing. */ if (e->kind == Lambda_kind) { ast_error(c, chch, "lambda cannot contain assignment"); return NULL; } else if (e->kind != Name_kind) { ast_error(c, chch, "keyword can't be an expression"); return NULL; } else if (forbidden_name(c, e->v.Name.id, ch, 1)) { return NULL; } key = e->v.Name.id; for (k = 0; k < nkeywords; k++) { tmp = ((keyword_ty)asdl_seq_GET(keywords, k))->arg; if (tmp && !PyUnicode_Compare(tmp, key)) { ast_error(c, chch, "keyword argument repeated"); return NULL; } } e = ast_for_expr(c, CHILD(ch, 2)); if (!e) return NULL; kw = keyword(key, e, c->c_arena); if (!kw) return NULL; asdl_seq_SET(keywords, nkeywords++, kw); } } } return Call(func, args, keywords, func->lineno, func->col_offset, c->c_arena); } static expr_ty ast_for_testlist(struct compiling *c, const node* n) { /* testlist_comp: test (comp_for | (',' test)* [',']) */ /* testlist: test (',' test)* [','] */ assert(NCH(n) > 0); if (TYPE(n) == testlist_comp) { if (NCH(n) > 1) assert(TYPE(CHILD(n, 1)) != comp_for); } else { assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr); } if (NCH(n) == 1) return ast_for_expr(c, CHILD(n, 0)); else { asdl_seq *tmp = seq_for_testlist(c, n); if (!tmp) return NULL; return Tuple(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena); } } static stmt_ty ast_for_expr_stmt(struct compiling *c, const node *n) { int num; REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))* [TYPE_COMMENT]) annassign: ':' test ['=' test] testlist_star_expr: (test|star_expr) (',' 
test|star_expr)* [','] augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=' test: ... here starts the operator precedence dance */ num = NCH(n); if (num == 1 || (num == 2 && TYPE(CHILD(n, 1)) == TYPE_COMMENT)) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* AnnAssigns are only allowed in Python 3.6 or greater */ if (c->c_feature_version < 6) { ast_error(c, ch, "Variable annotation syntax is only supported in Python 3.6 and greater"); return NULL; } /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) { return NULL; } 
expr1->v.Name.ctx = Store; break; case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); expr3 = ast_for_expr(c, ch); if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, c->c_arena); } } else { int i, nch_minus_type, has_type_comment; asdl_seq *targets; node *value; expr_ty expression; string type_comment; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); has_type_comment = TYPE(CHILD(n, num - 1)) == TYPE_COMMENT; nch_minus_type = num - has_type_comment; targets = _Ta3_asdl_seq_new(nch_minus_type / 2, c->c_arena); if (!targets) return NULL; for (i = 0; i < nch_minus_type - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, nch_minus_type - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, nch_minus_type)); else type_comment = NULL; return Assign(targets, expression, type_comment, LINENO(n), 
n->n_col_offset, c->c_arena); } } static asdl_seq * ast_for_exprlist(struct compiling *c, const node *n, expr_context_ty context) { asdl_seq *seq; int i; expr_ty e; REQ(n, exprlist); seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); if (context && !set_context(c, e, context, CHILD(n, i))) return NULL; } return seq; } static stmt_ty ast_for_del_stmt(struct compiling *c, const node *n) { asdl_seq *expr_list; /* del_stmt: 'del' exprlist */ REQ(n, del_stmt); expr_list = ast_for_exprlist(c, CHILD(n, 1), Del); if (!expr_list) return NULL; return Delete(expr_list, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_flow_stmt(struct compiling *c, const node *n) { /* flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt break_stmt: 'break' continue_stmt: 'continue' return_stmt: 'return' [testlist] yield_stmt: yield_expr yield_expr: 'yield' testlist | 'yield' 'from' test raise_stmt: 'raise' [test [',' test [',' test]]] */ node *ch; REQ(n, flow_stmt); ch = CHILD(n, 0); switch (TYPE(ch)) { case break_stmt: return Break(LINENO(n), n->n_col_offset, c->c_arena); case continue_stmt: return Continue(LINENO(n), n->n_col_offset, c->c_arena); case yield_stmt: { /* will reduce to yield_expr */ expr_ty exp = ast_for_expr(c, CHILD(ch, 0)); if (!exp) return NULL; return Expr(exp, LINENO(n), n->n_col_offset, c->c_arena); } case return_stmt: if (NCH(ch) == 1) return Return(NULL, LINENO(n), n->n_col_offset, c->c_arena); else { expr_ty expression = ast_for_testlist(c, CHILD(ch, 1)); if (!expression) return NULL; return Return(expression, LINENO(n), n->n_col_offset, c->c_arena); } case raise_stmt: if (NCH(ch) == 1) return Raise(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); else if (NCH(ch) >= 2) { expr_ty cause = NULL; expr_ty expression = ast_for_expr(c, CHILD(ch, 1)); if (!expression) return NULL; if 
(NCH(ch) == 4) { cause = ast_for_expr(c, CHILD(ch, 3)); if (!cause) return NULL; } return Raise(expression, cause, LINENO(n), n->n_col_offset, c->c_arena); } default: PyErr_Format(PyExc_SystemError, "unexpected flow_stmt: %d", TYPE(ch)); return NULL; } } static alias_ty alias_for_import_name(struct compiling *c, const node *n, int store) { /* import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] dotted_name: NAME ('.' NAME)* */ identifier str, name; loop: switch (TYPE(n)) { case import_as_name: { node *name_node = CHILD(n, 0); str = NULL; name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (NCH(n) == 3) { node *str_node = CHILD(n, 2); str = NEW_IDENTIFIER(str_node); if (!str) return NULL; if (store && forbidden_name(c, str, str_node, 0)) return NULL; } else { if (forbidden_name(c, name, name_node, 0)) return NULL; } return alias(name, str, c->c_arena); } case dotted_as_name: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { node *asname_node = CHILD(n, 2); alias_ty a = alias_for_import_name(c, CHILD(n, 0), 0); if (!a) return NULL; assert(!a->asname); a->asname = NEW_IDENTIFIER(asname_node); if (!a->asname) return NULL; if (forbidden_name(c, a->asname, asname_node, 0)) return NULL; return a; } break; case dotted_name: if (NCH(n) == 1) { node *name_node = CHILD(n, 0); name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (store && forbidden_name(c, name, name_node, 0)) return NULL; return alias(name, NULL, c->c_arena); } else { /* Create a string of the form "a.b.c" */ int i; size_t len; char *s; PyObject *uni; len = 0; for (i = 0; i < NCH(n); i += 2) /* length of string plus one for the dot */ len += strlen(STR(CHILD(n, i))) + 1; len--; /* the last name doesn't have a dot */ str = PyBytes_FromStringAndSize(NULL, len); if (!str) return NULL; s = PyBytes_AS_STRING(str); if (!s) return NULL; for (i = 0; i < NCH(n); i += 2) { char *sch = STR(CHILD(n, i)); strcpy(s, STR(CHILD(n, i))); s += strlen(sch); *s++ = '.'; } --s; *s = 
'\0'; uni = PyUnicode_DecodeUTF8(PyBytes_AS_STRING(str), PyBytes_GET_SIZE(str), NULL); Py_DECREF(str); if (!uni) return NULL; str = uni; PyUnicode_InternInPlace(&str); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); } break; case STAR: str = PyUnicode_InternFromString("*"); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); default: PyErr_Format(PyExc_SystemError, "unexpected import name: %d", TYPE(n)); return NULL; } PyErr_SetString(PyExc_SystemError, "unhandled import name condition"); return NULL; } static stmt_ty ast_for_import_stmt(struct compiling *c, const node *n) { /* import_stmt: import_name | import_from import_name: 'import' dotted_as_names import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names) */ int lineno; int col_offset; int i; asdl_seq *aliases; REQ(n, import_stmt); lineno = LINENO(n); col_offset = n->n_col_offset; n = CHILD(n, 0); if (TYPE(n) == import_name) { n = CHILD(n, 1); REQ(n, dotted_as_names); aliases = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!aliases) return NULL; for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } return Import(aliases, lineno, col_offset, c->c_arena); } else if (TYPE(n) == import_from) { int n_children; int idx, ndots = 0; alias_ty mod = NULL; identifier modname = NULL; /* Count the number of dots (for relative imports) and check for the optional module name */ for (idx = 1; idx < NCH(n); idx++) { if (TYPE(CHILD(n, idx)) == dotted_name) { mod = alias_for_import_name(c, CHILD(n, idx), 0); if (!mod) return NULL; idx++; break; } else if (TYPE(CHILD(n, idx)) == ELLIPSIS) { /* three consecutive dots are tokenized as one ELLIPSIS */ ndots += 3; continue; } else if (TYPE(CHILD(n, idx)) != 
DOT) { break; } ndots++; } idx++; /* skip over the 'import' keyword */ switch (TYPE(CHILD(n, idx))) { case STAR: /* from ... import * */ n = CHILD(n, idx); n_children = 1; break; case LPAR: /* from ... import (x, y, z) */ n = CHILD(n, idx + 1); n_children = NCH(n); break; case import_as_names: /* from ... import x, y, z */ n = CHILD(n, idx); n_children = NCH(n); if (n_children % 2 == 0) { ast_error(c, n, "trailing comma not allowed without" " surrounding parentheses"); return NULL; } break; default: ast_error(c, n, "Unexpected node-type in from-import"); return NULL; } aliases = _Ta3_asdl_seq_new((n_children + 1) / 2, c->c_arena); if (!aliases) return NULL; /* handle "from ... import *" special b/c there's no children */ if (TYPE(n) == STAR) { alias_ty import_alias = alias_for_import_name(c, n, 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, 0, import_alias); } else { for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } } if (mod != NULL) modname = mod->name; return ImportFrom(modname, aliases, ndots, lineno, col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unknown import statement: starts with command '%s'", STR(CHILD(n, 0))); return NULL; } static stmt_ty ast_for_global_stmt(struct compiling *c, const node *n) { /* global_stmt: 'global' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, global_stmt); s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Global(s, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_nonlocal_stmt(struct compiling *c, const node *n) { /* nonlocal_stmt: 'nonlocal' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, nonlocal_stmt); s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 
1; i < NCH(n); i += 2) {
        name = NEW_IDENTIFIER(CHILD(n, i));
        if (!name)
            return NULL;
        asdl_seq_SET(s, i / 2, name);
    }
    return Nonlocal(s, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Convert an assert_stmt node into an Assert AST node. */
static stmt_ty
ast_for_assert_stmt(struct compiling *c, const node *n)
{
    /* assert_stmt: 'assert' test [',' test] */
    REQ(n, assert_stmt);
    if (NCH(n) == 2) {
        /* bare "assert test" -- no message expression */
        expr_ty expression = ast_for_expr(c, CHILD(n, 1));
        if (!expression)
            return NULL;
        return Assert(expression, NULL, LINENO(n), n->n_col_offset,
                      c->c_arena);
    }
    else if (NCH(n) == 4) {
        /* "assert test, msg" -- condition plus message expression */
        expr_ty expr1, expr2;

        expr1 = ast_for_expr(c, CHILD(n, 1));
        if (!expr1)
            return NULL;
        expr2 = ast_for_expr(c, CHILD(n, 3));
        if (!expr2)
            return NULL;

        return Assert(expr1, expr2, LINENO(n), n->n_col_offset, c->c_arena);
    }
    PyErr_Format(PyExc_SystemError,
                 "improper number of parts to 'assert' statement: %d",
                 NCH(n));
    return NULL;
}

/* Flatten a suite node into a sequence of statements. */
static asdl_seq *
ast_for_suite(struct compiling *c, const node *n)
{
    /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */
    asdl_seq *seq;
    stmt_ty s;
    int i, total, num, end, pos = 0;
    node *ch;

    REQ(n, suite);

    total = num_stmts(n);
    seq = _Ta3_asdl_seq_new(total, c->c_arena);
    if (!seq)
        return NULL;
    if (TYPE(CHILD(n, 0)) == simple_stmt) {
        n = CHILD(n, 0);
        /* simple_stmt always ends with a NEWLINE,
           and may have a trailing SEMI
        */
        end = NCH(n) - 1;
        if (TYPE(CHILD(n, end - 1)) == SEMI)
            end--;
        /* loop by 2 to skip semi-colons */
        for (i = 0; i < end; i += 2) {
            ch = CHILD(n, i);
            s = ast_for_stmt(c, ch);
            if (!s)
                return NULL;
            asdl_seq_SET(seq, pos++, s);
        }
    }
    else {
        /* indented block: skip NEWLINE (+optional TYPE_COMMENT NEWLINE)
           and INDENT, stop before the closing DEDENT */
        i = 2;
        if (TYPE(CHILD(n, 1)) == TYPE_COMMENT)
            i += 2;
        for (; i < (NCH(n) - 1); i++) {
            ch = CHILD(n, i);
            REQ(ch, stmt);
            num = num_stmts(ch);
            if (num == 1) {
                /* small_stmt or compound_stmt with only one child */
                s = ast_for_stmt(c, ch);
                if (!s)
                    return NULL;
                asdl_seq_SET(seq, pos++, s);
            }
            else {
                int j;
                ch = CHILD(ch, 0);
                REQ(ch, simple_stmt);
                for (j = 0; j < NCH(ch); j += 2) {
                    /* statement terminates with a semi-colon ';' */
                    if (NCH(CHILD(ch, j)) == 0) {
                        assert((j +
*/
        if (TYPE(CHILD(n, (n_elif + 1))) == NAME
            && STR(CHILD(n, (n_elif + 1)))[2] == 's') {
            has_else = 1;
            n_elif -= 3;
        }
        n_elif /= 4;

        if (has_else) {
            asdl_seq *suite_seq2;

            orelse = _Ta3_asdl_seq_new(1, c->c_arena);
            if (!orelse)
                return NULL;
            expression = ast_for_expr(c, CHILD(n, NCH(n) - 6));
            if (!expression)
                return NULL;
            suite_seq = ast_for_suite(c, CHILD(n, NCH(n) - 4));
            if (!suite_seq)
                return NULL;
            suite_seq2 = ast_for_suite(c, CHILD(n, NCH(n) - 1));
            if (!suite_seq2)
                return NULL;

            asdl_seq_SET(orelse, 0,
                         If(expression, suite_seq, suite_seq2,
                            LINENO(CHILD(n, NCH(n) - 6)),
                            CHILD(n, NCH(n) - 6)->n_col_offset,
                            c->c_arena));
            /* the just-created orelse handled the last elif */
            n_elif--;
        }

        /* build the If chain inside-out: innermost (last) elif first,
           each new If taking the previous one as its orelse */
        for (i = 0; i < n_elif; i++) {
            int off = 5 + (n_elif - i - 1) * 4;
            asdl_seq *newobj = _Ta3_asdl_seq_new(1, c->c_arena);
            if (!newobj)
                return NULL;
            expression = ast_for_expr(c, CHILD(n, off));
            if (!expression)
                return NULL;
            suite_seq = ast_for_suite(c, CHILD(n, off + 2));
            if (!suite_seq)
                return NULL;

            asdl_seq_SET(newobj, 0,
                         If(expression, suite_seq, orelse,
                            LINENO(CHILD(n, off)),
                            CHILD(n, off)->n_col_offset, c->c_arena));
            orelse = newobj;
        }
        expression = ast_for_expr(c, CHILD(n, 1));
        if (!expression)
            return NULL;
        suite_seq = ast_for_suite(c, CHILD(n, 3));
        if (!suite_seq)
            return NULL;
        return If(expression, suite_seq, orelse,
                  LINENO(n), n->n_col_offset, c->c_arena);
    }

    PyErr_Format(PyExc_SystemError,
                 "unexpected token in 'if' statement: %s", s);
    return NULL;
}

/* Convert a while_stmt node into a While AST node. */
static stmt_ty
ast_for_while_stmt(struct compiling *c, const node *n)
{
    /* while_stmt: 'while' test ':' suite ['else' ':' suite] */
    REQ(n, while_stmt);

    if (NCH(n) == 4) {
        expr_ty expression;
        asdl_seq *suite_seq;

        expression = ast_for_expr(c, CHILD(n, 1));
        if (!expression)
            return NULL;
        suite_seq = ast_for_suite(c, CHILD(n, 3));
        if (!suite_seq)
            return NULL;
        return While(expression, suite_seq, NULL,
                     LINENO(n), n->n_col_offset, c->c_arena);
    }
    else if (NCH(n) == 7) {
        /* while ... else ... */
        expr_ty expression;
        asdl_seq *seq1, *seq2;

        expression = ast_for_expr(c, CHILD(n,
*/
    first = (expr_ty)asdl_seq_GET(_target, 0);
    if (NCH(node_target) == 1)
        target = first;
    else
        target = Tuple(_target, Store, first->lineno, first->col_offset,
                       c->c_arena);

    expression = ast_for_testlist(c, CHILD(n, 3));
    if (!expression)
        return NULL;
    suite_seq = ast_for_suite(c, CHILD(n, 5 + has_type_comment));
    if (!suite_seq)
        return NULL;

    if (has_type_comment)
        type_comment = NEW_TYPE_COMMENT(CHILD(n, 5));
    else
        type_comment = NULL;

    if (is_async)
        return AsyncFor(target, expression, suite_seq, seq, type_comment,
                        LINENO(n), n->n_col_offset,
                        c->c_arena);
    else
        return For(target, expression, suite_seq, seq, type_comment,
                   LINENO(n), n->n_col_offset,
                   c->c_arena);
}

/* Convert one except clause + its suite into an ExceptHandler node. */
static excepthandler_ty
ast_for_except_clause(struct compiling *c, const node *exc, node *body)
{
    /* except_clause: 'except' [test ['as' test]] */
    REQ(exc, except_clause);
    REQ(body, suite);

    if (NCH(exc) == 1) {
        /* bare "except:" -- catches everything */
        asdl_seq *suite_seq = ast_for_suite(c, body);
        if (!suite_seq)
            return NULL;

        return ExceptHandler(NULL, NULL, suite_seq, LINENO(exc),
                             exc->n_col_offset, c->c_arena);
    }
    else if (NCH(exc) == 2) {
        /* "except SomeType:" */
        expr_ty expression;
        asdl_seq *suite_seq;

        expression = ast_for_expr(c, CHILD(exc, 1));
        if (!expression)
            return NULL;
        suite_seq = ast_for_suite(c, body);
        if (!suite_seq)
            return NULL;

        return ExceptHandler(expression, NULL, suite_seq, LINENO(exc),
                             exc->n_col_offset, c->c_arena);
    }
    else if (NCH(exc) == 4) {
        /* "except SomeType as name:" */
        asdl_seq *suite_seq;
        expr_ty expression;
        identifier e = NEW_IDENTIFIER(CHILD(exc, 3));
        if (!e)
            return NULL;
        if (forbidden_name(c, e, CHILD(exc, 3), 0))
            return NULL;
        expression = ast_for_expr(c, CHILD(exc, 1));
        if (!expression)
            return NULL;
        suite_seq = ast_for_suite(c, body);
        if (!suite_seq)
            return NULL;

        return ExceptHandler(expression, e, suite_seq, LINENO(exc),
                             exc->n_col_offset, c->c_arena);
    }

    PyErr_Format(PyExc_SystemError,
                 "wrong number of children for 'except' clause: %d",
                 NCH(exc));
    return NULL;
}

/* Convert a try_stmt node into a Try AST node (handlers/else/finally). */
static stmt_ty
ast_for_try_stmt(struct compiling *c, const node *n)
{
    const int nch = NCH(n);
    /* each except clause contributes 3 children ('except' test ':' is
       grouped); the fixed 'try' ':' suite prefix is 3 children */
    int n_except = (nch - 3)/3;
    asdl_seq *body, *handlers = NULL, *orelse = NULL, *finally = NULL;

    REQ(n, try_stmt);

    body = ast_for_suite(c, CHILD(n, 2));
    if (body == NULL)
        return NULL;

    if (TYPE(CHILD(n, nch - 3)) == NAME) {
        if (strcmp(STR(CHILD(n, nch - 3)), "finally") == 0) {
            if (nch >= 9 && TYPE(CHILD(n, nch - 6)) == NAME) {
                /* we can assume it's an "else",
                   because nch >= 9 for try-else-finally and
                   it would otherwise have a type of except_clause */
                orelse = ast_for_suite(c, CHILD(n, nch - 4));
                if (orelse == NULL)
                    return NULL;
                n_except--;
            }

            finally = ast_for_suite(c, CHILD(n, nch - 1));
            if (finally == NULL)
                return NULL;
            n_except--;
        }
        else {
            /* we can assume it's an "else",
               otherwise it would have a type of except_clause */
            orelse = ast_for_suite(c, CHILD(n, nch - 1));
            if (orelse == NULL)
                return NULL;
            n_except--;
        }
    }
    else if (TYPE(CHILD(n, nch - 3)) != except_clause) {
        ast_error(c, n, "malformed 'try' statement");
        return NULL;
    }

    if (n_except > 0) {
        int i;
        /* process except statements to create a try ... except */
        handlers = _Ta3_asdl_seq_new(n_except, c->c_arena);
        if (handlers == NULL)
            return NULL;

        for (i = 0; i < n_except; i++) {
            excepthandler_ty e = ast_for_except_clause(c, CHILD(n, 3 + i * 3),
                                                       CHILD(n, 5 + i * 3));
            if (!e)
                return NULL;
            asdl_seq_SET(handlers, i, e);
        }
    }

    assert(finally != NULL || asdl_seq_LEN(handlers));
    return Try(body, handlers, orelse, finally, LINENO(n), n->n_col_offset,
               c->c_arena);
}

/* with_item: test ['as' expr] */
static withitem_ty
ast_for_with_item(struct compiling *c, const node *n)
{
    expr_ty context_expr, optional_vars = NULL;

    REQ(n, with_item);
    context_expr = ast_for_expr(c, CHILD(n, 0));
    if (!context_expr)
        return NULL;
    if (NCH(n) == 3) {
        optional_vars = ast_for_expr(c, CHILD(n, 2));

        if (!optional_vars) {
            return NULL;
        }
        if (!set_context(c, optional_vars, Store, n)) {
            return NULL;
        }
    }

    return withitem(context_expr, optional_vars, c->c_arena);
}

/* with_stmt: 'with' with_item (',' with_item)* ':' [TYPE_COMMENT] suite */
static stmt_ty
ast_for_with_stmt(struct
compiling *c, const node *n, int is_async)
{
    int i, n_items, nch_minus_type, has_type_comment;
    asdl_seq *items, *body;
    string type_comment;

    if (is_async && c->c_feature_version < 5) {
        ast_error(c, n,
                  "Async with statements are only supported in Python 3.5 and greater");
        return NULL;
    }

    REQ(n, with_stmt);

    has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT;
    nch_minus_type = NCH(n) - has_type_comment;

    n_items = (nch_minus_type - 2) / 2;
    items = _Ta3_asdl_seq_new(n_items, c->c_arena);
    if (!items)
        return NULL;
    for (i = 1; i < nch_minus_type - 2; i += 2) {
        withitem_ty item = ast_for_with_item(c, CHILD(n, i));
        if (!item)
            return NULL;
        asdl_seq_SET(items, (i - 1) / 2, item);
    }

    body = ast_for_suite(c, CHILD(n, NCH(n) - 1));
    if (!body)
        return NULL;

    if (has_type_comment)
        type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2));
    else
        type_comment = NULL;

    if (is_async)
        return AsyncWith(items, body, type_comment, LINENO(n),
                         n->n_col_offset, c->c_arena);
    else
        return With(items, body, type_comment, LINENO(n), n->n_col_offset,
                    c->c_arena);
}

/* Convert a classdef node into a ClassDef AST node. */
static stmt_ty
ast_for_classdef(struct compiling *c, const node *n, asdl_seq *decorator_seq)
{
    /* classdef: 'class' NAME ['(' arglist ')'] ':' suite */
    PyObject *classname;
    asdl_seq *s;
    expr_ty call;

    REQ(n, classdef);

    if (NCH(n) == 4) { /* class NAME ':' suite */
        s = ast_for_suite(c, CHILD(n, 3));
        if (!s)
            return NULL;
        classname = NEW_IDENTIFIER(CHILD(n, 1));
        if (!classname)
            return NULL;
        /* NOTE(review): the error-location node passed to forbidden_name is
           CHILD(n, 3) (the ':' / suite) although the name itself came from
           CHILD(n, 1); looks like it should be CHILD(n, 1) as in the arglist
           branch below -- verify against upstream CPython ast.c. */
        if (forbidden_name(c, classname, CHILD(n, 3), 0))
            return NULL;
        return ClassDef(classname, NULL, NULL, s, decorator_seq,
                        LINENO(n), n->n_col_offset, c->c_arena);
    }

    if (TYPE(CHILD(n, 3)) == RPAR) { /* class NAME '(' ')' ':' suite */
        s = ast_for_suite(c, CHILD(n,5));
        if (!s)
            return NULL;
        classname = NEW_IDENTIFIER(CHILD(n, 1));
        if (!classname)
            return NULL;
        /* NOTE(review): same questionable CHILD(n, 3) error node as above. */
        if (forbidden_name(c, classname, CHILD(n, 3), 0))
            return NULL;
        return ClassDef(classname, NULL, NULL, s, decorator_seq,
                        LINENO(n), n->n_col_offset, c->c_arena);
    }

    /* class NAME '(' arglist ')' ':' suite */
    /* build up a fake Call node so we can extract its pieces */
    {
        PyObject *dummy_name;
        expr_ty dummy;
        dummy_name = NEW_IDENTIFIER(CHILD(n, 1));
        if (!dummy_name)
            return NULL;
        dummy = Name(dummy_name, Load, LINENO(n), n->n_col_offset, c->c_arena);
        call = ast_for_call(c, CHILD(n, 3), dummy);
        if (!call)
            return NULL;
    }
    s = ast_for_suite(c, CHILD(n, 6));
    if (!s)
        return NULL;
    classname = NEW_IDENTIFIER(CHILD(n, 1));
    if (!classname)
        return NULL;
    if (forbidden_name(c, classname, CHILD(n, 1), 0))
        return NULL;

    return ClassDef(classname, call->v.Call.args, call->v.Call.keywords, s,
                    decorator_seq, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Dispatch a stmt/simple_stmt/compound_stmt node to the matching
   ast_for_* converter. */
static stmt_ty
ast_for_stmt(struct compiling *c, const node *n)
{
    if (TYPE(n) == stmt) {
        assert(NCH(n) == 1);
        n = CHILD(n, 0);
    }
    if (TYPE(n) == simple_stmt) {
        assert(num_stmts(n) == 1);
        n = CHILD(n, 0);
    }
    if (TYPE(n) == small_stmt) {
        n = CHILD(n, 0);
        /* small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt
                  | import_stmt | global_stmt | nonlocal_stmt | assert_stmt
        */
        switch (TYPE(n)) {
            case expr_stmt:
                return ast_for_expr_stmt(c, n);
            case del_stmt:
                return ast_for_del_stmt(c, n);
            case pass_stmt:
                return Pass(LINENO(n), n->n_col_offset, c->c_arena);
            case flow_stmt:
                return ast_for_flow_stmt(c, n);
            case import_stmt:
                return ast_for_import_stmt(c, n);
            case global_stmt:
                return ast_for_global_stmt(c, n);
            case nonlocal_stmt:
                return ast_for_nonlocal_stmt(c, n);
            case assert_stmt:
                return ast_for_assert_stmt(c, n);
            default:
                PyErr_Format(PyExc_SystemError,
                             "unhandled small_stmt: TYPE=%d NCH=%d\n",
                             TYPE(n), NCH(n));
                return NULL;
        }
    }
    else {
        /* compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt
                        | funcdef | classdef | decorated | async_stmt
        */
        node *ch = CHILD(n, 0);
        REQ(n, compound_stmt);
        switch (TYPE(ch)) {
            case if_stmt:
                return ast_for_if_stmt(c, ch);
            case while_stmt:
                return ast_for_while_stmt(c, ch);
            case for_stmt:
                return ast_for_for_stmt(c, ch, 0);
            case try_stmt:
                return ast_for_try_stmt(c, ch);
            case with_stmt:
                return ast_for_with_stmt(c, ch, 0);
            case
funcdef:
                return ast_for_funcdef(c, ch, NULL);
            case classdef:
                return ast_for_classdef(c, ch, NULL);
            case decorated:
                return ast_for_decorated(c, ch);
            case async_stmt:
                return ast_for_async_stmt(c, ch);
            default:
                PyErr_Format(PyExc_SystemError,
                             "unhandled small_stmt: TYPE=%d NCH=%d\n",
                             TYPE(n), NCH(n));
                return NULL;
        }
    }
}

/* Parse a numeric literal (no underscores) into an int/float/complex
   Python object. */
static PyObject *
parsenumber_raw(struct compiling *c, const char *s)
{
    const char *end;
    long x;
    double dx;
    Py_complex compl;
    int imflag;

    assert(s != NULL);
    errno = 0;
    end = s + strlen(s) - 1;
    /* a trailing 'j'/'J' marks an imaginary literal */
    imflag = *end == 'j' || *end == 'J';
    if (s[0] == '0') {
        x = (long) PyOS_strtoul(s, (char **)&end, 0);
        if (x < 0 && errno == 0) {
            /* unsigned value too large for long: fall back to big int */
            return PyLong_FromString(s, (char **)0, 0);
        }
    }
    else
        x = PyOS_strtol(s, (char **)&end, 0);
    if (*end == '\0') {
        if (errno != 0)
            return PyLong_FromString(s, (char **)0, 0);
        return PyLong_FromLong(x);
    }
    /* XXX Huge floats may silently fail */
    if (imflag) {
        compl.real = 0.;
        compl.imag = PyOS_string_to_double(s, (char **)&end, NULL);
        if (compl.imag == -1.0 && PyErr_Occurred())
            return NULL;
        return PyComplex_FromCComplex(compl);
    }
    else
    {
        dx = PyOS_string_to_double(s, NULL, NULL);
        if (dx == -1.0 && PyErr_Occurred())
            return NULL;
        return PyFloat_FromDouble(dx);
    }
}

/* Parse a numeric literal, first stripping PEP 515 '_' separators. */
static PyObject *
parsenumber(struct compiling *c, const char *s)
{
    char *dup, *end;
    PyObject *res = NULL;

    assert(s != NULL);

    if (strchr(s, '_') == NULL) {
        return parsenumber_raw(c, s);
    }
    /* Create a duplicate without underscores. */
    /* NOTE(review): PyMem_Malloc result is not checked for NULL before
       being written through -- on OOM this dereferences NULL.  Upstream
       adds "if (dup == NULL) return PyErr_NoMemory();" here; verify. */
    dup = PyMem_Malloc(strlen(s) + 1);
    end = dup;
    for (; *s; s++) {
        if (*s != '_') {
            *end++ = *s;
        }
    }
    *end = '\0';
    res = parsenumber_raw(c, dup);
    PyMem_Free(dup);
    return res;
}

/* Decode the longest run of non-ASCII (high-bit) bytes starting at *sPtr
   as UTF-8; advances *sPtr past the consumed bytes. */
static PyObject *
decode_utf8(struct compiling *c, const char **sPtr, const char *end)
{
    const char *s, *t;
    t = s = *sPtr;
    /* while (s < end && *s != '\\') s++; */ /* inefficient for u".."
|| *expr_end == ':');

    /* We know there are no escapes here, because backslashes are not allowed,
       and we know it's utf-8 encoded (per PEP 263).  But, in order to check
       that each char is not whitespace, we need to decode it to unicode.
       Which is unfortunate, but such is life. */

    /* If the substring is all whitespace, it's an error.  We need to catch
       this here, and not when we call PyParser_ASTFromString, because turning
       the expression '' in to '()' would go from being invalid to valid. */
    /* Note that this code says an empty string is all whitespace.  That's
       important.  There's a test for it: f'{}'. */
    o = PyUnicode_DecodeUTF8(expr_start, expr_end-expr_start, NULL);
    if (o == NULL)
        return NULL;
    len = PyUnicode_GET_LENGTH(o);
    kind = PyUnicode_KIND(o);
    data = PyUnicode_DATA(o);
    for (i = 0; i < len; i++) {
        if (!Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, i))) {
            all_whitespace = 0;
            break;
        }
    }
    Py_DECREF(o);
    if (all_whitespace) {
        ast_error(c, n, "f-string: empty expression not allowed");
        return NULL;
    }

    /* Reuse len to be the length of the utf-8 input string. */
    len = expr_end - expr_start;
    /* Allocate 3 extra bytes: open paren, close paren, null byte. */
    str = PyMem_RawMalloc(len + 3);
    if (str == NULL)
        return NULL;

    /* wrap in parens so leading whitespace in the expression is legal */
    str[0] = '(';
    memcpy(str+1, expr_start, len);
    str[len+1] = ')';
    str[len+2] = 0;

    cf.cf_flags = PyCF_ONLY_AST;
    /* NOTE(review): PyUnicode_FromString's result is not NULL-checked
       before use -- confirm string_object_to_c_ast tolerates NULL. */
    fstring_name = PyUnicode_FromString("<fstring>");
    mod = string_object_to_c_ast(str, fstring_name,
                                 Py_eval_input, &cf,
                                 c->c_feature_version, c->c_arena);
    Py_DECREF(fstring_name);
    PyMem_RawFree(str);
    if (!mod)
        return NULL;
    return mod->v.Expression.body;
}

/* Return -1 on error.

   Return 0 if we reached the end of the literal.

   Return 1 if we haven't reached the end of the literal, but we want
   the caller to process the literal up to this point. Used for
   doubled braces.
*/
static int
fstring_find_literal(const char **str, const char *end, int raw,
                     PyObject **literal, int recurse_lvl,
                     struct compiling *c, const node *n)
{
    /* Get any literal string.
Returns the FormattedValue node, which includes the
   expression, conversion character, and format_spec expression.

   Note that I don't do a perfect job here: I don't make sure that a
   closing brace doesn't match an opening paren, for example. It
   doesn't need to error on all invalid expressions, just correctly
   find the end of all valid ones. Any errors inside the expression
   will be caught when we parse it later. */
static int
fstring_find_expr(const char **str, const char *end, int raw, int recurse_lvl,
                  expr_ty *expression, struct compiling *c, const node *n)
{
    /* Return -1 on error, else 0. */

    const char *expr_start;
    const char *expr_end;
    expr_ty simple_expression;
    expr_ty format_spec = NULL; /* Optional format specifier. */
    int conversion = -1; /* The conversion char. -1 if not specified. */

    /* 0 if we're not in a string, else the quote char we're trying to
       match (single or double quote). */
    char quote_char = 0;

    /* If we're inside a string, 1=normal, 3=triple-quoted. */
    int string_type = 0;

    /* Keep track of nesting level for braces/parens/brackets in
       expressions. */
    Py_ssize_t nested_depth = 0;

    /* Can only nest one level deep. */
    if (recurse_lvl >= 2) {
        ast_error(c, n, "f-string: expressions nested too deeply");
        return -1;
    }

    /* The first char must be a left brace, or we wouldn't have gotten
       here. Skip over it. */
    assert(**str == '{');
    *str += 1;

    expr_start = *str;
    for (; *str < end; (*str)++) {
        char ch;

        /* Loop invariants. */
        assert(nested_depth >= 0);
        assert(*str >= expr_start && *str < end);
        if (quote_char)
            assert(string_type == 1 || string_type == 3);
        else
            assert(string_type == 0);

        ch = **str;
        /* Nowhere inside an expression is a backslash allowed. */
        if (ch == '\\') {
            /* Error: can't include a backslash character, inside
               parens or strings or not. */
            ast_error(c, n, "f-string expression part "
                            "cannot include a backslash");
            return -1;
        }
        if (quote_char) {
            /* We're inside a string. See if we're at the end. */
            /* This code needs to implement the same non-error logic
               as tok_get from tokenizer.c, at the letter_quote
               label. To actually share that code would be a
               nightmare. But, it's unlikely to change and is small,
               so duplicate it here. Note we don't need to catch all
               of the errors, since they'll be caught when parsing the
               expression. We just need to match the non-error
               cases. Thus we can ignore \n in single-quoted strings,
               for example. Or non-terminated strings. */
            if (ch == quote_char) {
                /* Does this match the string_type (single or triple
                   quoted)? */
                if (string_type == 3) {
                    if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) {
                        /* We're at the end of a triple quoted string. */
                        *str += 2;
                        string_type = 0;
                        quote_char = 0;
                        continue;
                    }
                } else {
                    /* We're at the end of a normal string. */
                    quote_char = 0;
                    string_type = 0;
                    continue;
                }
            }
        } else if (ch == '\'' || ch == '"') {
            /* Is this a triple quoted string? */
            if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) {
                string_type = 3;
                *str += 2;
            } else {
                /* Start of a normal string. */
                string_type = 1;
            }
            /* Start looking for the end of the string. */
            quote_char = ch;
        } else if (ch == '[' || ch == '{' || ch == '(') {
            nested_depth++;
        } else if (nested_depth != 0 &&
                   (ch == ']' || ch == '}' || ch == ')')) {
            nested_depth--;
        } else if (ch == '#') {
            /* Error: can't include a comment character, inside parens
               or not. */
            ast_error(c, n, "f-string expression part cannot include '#'");
            return -1;
        } else if (nested_depth == 0 &&
                   (ch == '!' || ch == ':' || ch == '}')) {
            /* First, test for the special case of "!=". Since '=' is
               not an allowed conversion character, nothing is lost in
               this test. */
            if (ch == '!' && *str+1 < end && *(*str+1) == '=') {
                /* This isn't a conversion character, just continue. */
                continue;
            }
            /* Normal way out of this loop. */
            break;
        } else {
            /* Just consume this char and loop around. */
        }
    }
    expr_end = *str;
    /* If we leave this loop in a string or with mismatched parens, we
       don't care. We'll get a syntax error when compiling the
       expression. But, we can produce a better error message, so
       let's just do that.*/
    if (quote_char) {
        ast_error(c, n, "f-string: unterminated string");
        return -1;
    }
    if (nested_depth) {
        ast_error(c, n, "f-string: mismatched '(', '{', or '['");
        return -1;
    }

    if (*str >= end)
        goto unexpected_end_of_string;

    /* Compile the expression as soon as possible, so we show errors
       related to the expression before errors related to the
       conversion or format_spec. */
    simple_expression = fstring_compile_expr(expr_start, expr_end, c, n);
    if (!simple_expression)
        return -1;

    /* Check for a conversion char, if present. */
    if (**str == '!') {
        *str += 1;
        if (*str >= end)
            goto unexpected_end_of_string;

        conversion = **str;
        *str += 1;

        /* Validate the conversion. */
        if (!(conversion == 's' || conversion == 'r'
              || conversion == 'a')) {
            ast_error(c, n, "f-string: invalid conversion character: "
                            "expected 's', 'r', or 'a'");
            return -1;
        }
    }

    /* Check for the format spec, if present. */
    if (*str >= end)
        goto unexpected_end_of_string;
    if (**str == ':') {
        *str += 1;
        if (*str >= end)
            goto unexpected_end_of_string;

        /* Parse the format spec. */
        format_spec = fstring_parse(str, end, raw, recurse_lvl+1, c, n);
        if (!format_spec)
            return -1;
    }

    if (*str >= end || **str != '}')
        goto unexpected_end_of_string;

    /* We're at a right brace. Consume it. */
    assert(*str < end);
    assert(**str == '}');
    *str += 1;

    /* And now create the FormattedValue node that represents this
       entire expression with the conversion and format spec. */
    *expression = FormattedValue(simple_expression, conversion,
                                 format_spec, LINENO(n), n->n_col_offset,
                                 c->c_arena);
    if (!*expression)
        return -1;

    return 0;

unexpected_end_of_string:
    ast_error(c, n, "f-string: expecting '}'");
    return -1;
}

/* Return -1 on error.

   Return 0 if we have a literal (possible zero length) and an
   expression (zero length if at the end of the string.
Return 1 if we have a literal, but no expression, and we want the
   caller to call us again. This is used to deal with doubled
   braces.

   When called multiple times on the string 'a{{b{0}c', this function
   will return:

   1. the literal 'a{' with no expression, and a return value
      of 1. Despite the fact that there's no expression, the return
      value of 1 means we're not finished yet.

   2. the literal 'b' and the expression '0', with a return value of
      0. The fact that there's an expression means we're not finished.

   3. literal 'c' with no expression and a return value of 0. The
      combination of the return value of 0 with no expression means
      we're finished.
*/
static int
fstring_find_literal_and_expr(const char **str, const char *end, int raw,
                              int recurse_lvl, PyObject **literal,
                              expr_ty *expression,
                              struct compiling *c, const node *n)
{
    int result;

    assert(*literal == NULL && *expression == NULL);

    /* Get any literal string. */
    result = fstring_find_literal(str, end, raw, literal, recurse_lvl, c, n);
    if (result < 0)
        goto error;

    assert(result == 0 || result == 1);

    if (result == 1)
        /* We have a literal, but don't look at the expression. */
        return 1;

    if (*str >= end || **str == '}')
        /* We're at the end of the string or the end of a nested
           f-string: no expression. The top-level error case where we
           expect to be at the end of the string but we're at a '}' is
           handled later. */
        return 0;

    /* We must now be the start of an expression, on a '{'. */
    assert(**str == '{');

    if (fstring_find_expr(str, end, raw, recurse_lvl, expression, c, n) < 0)
        goto error;

    return 0;

error:
    Py_CLEAR(*literal);
    return -1;
}

#define EXPRLIST_N_CACHED  64

typedef struct {
    /* Incrementally build an array of expr_ty, so be used in an
       asdl_seq. Cache some small but reasonably sized number of
       expr_ty's, and then after that start dynamically allocating,
       doubling the number allocated each time. Note that the f-string
       f'{0}a{1}' contains 3 expr_ty's: 2 FormattedValue's, and one
       Str for the literal 'a'. So you add expr_ty's about twice as
       fast as you add exressions in an f-string. */

    Py_ssize_t allocated;  /* Number we've allocated. */
    Py_ssize_t size;       /* Number we've used. */
    expr_ty    *p;         /* Pointer to the memory we're actually
                              using. Will point to 'data' until we
                              start dynamically allocating. */
    expr_ty    data[EXPRLIST_N_CACHED];
} ExprList;

#ifdef NDEBUG
#define ExprList_check_invariants(l)
#else
static void
ExprList_check_invariants(ExprList *l)
{
    /* Check our invariants. Make sure this object is "live", and
       hasn't been deallocated. */
    assert(l->size >= 0);
    assert(l->p != NULL);
    if (l->size <= EXPRLIST_N_CACHED)
        assert(l->data == l->p);
}
#endif

/* Initialize an ExprList to use its inline cache (no heap allocation). */
static void
ExprList_Init(ExprList *l)
{
    l->allocated = EXPRLIST_N_CACHED;
    l->size = 0;

    /* Until we start allocating dynamically, p points to data. */
    l->p = l->data;

    ExprList_check_invariants(l);
}

/* Append one expr_ty, growing (doubling) the backing store as needed.
   Returns 0 on success, -1 on allocation failure. */
static int
ExprList_Append(ExprList *l, expr_ty exp)
{
    ExprList_check_invariants(l);
    if (l->size >= l->allocated) {
        /* We need to alloc (or realloc) the memory. */
        Py_ssize_t new_size = l->allocated * 2;

        /* See if we've ever allocated anything dynamically. */
        if (l->p == l->data) {
            Py_ssize_t i;
            /* We're still using the cached data. Switch to
               alloc-ing. */
            l->p = PyMem_RawMalloc(sizeof(expr_ty) * new_size);
            if (!l->p)
                return -1;
            /* Copy the cached data into the new buffer. */
            for (i = 0; i < l->size; i++)
                l->p[i] = l->data[i];
        } else {
            /* Just realloc. */
            expr_ty *tmp = PyMem_RawRealloc(l->p, sizeof(expr_ty) * new_size);
            if (!tmp) {
                PyMem_RawFree(l->p);
                l->p = NULL;
                return -1;
            }
            l->p = tmp;
        }

        l->allocated = new_size;
        assert(l->allocated == 2 * l->size);
    }

    l->p[l->size++] = exp;

    ExprList_check_invariants(l);
    return 0;
}

/* Release any heap storage and mark the list dead (size = -1). */
static void
ExprList_Dealloc(ExprList *l)
{
    ExprList_check_invariants(l);

    /* If there's been an error, or we've never dynamically allocated,
       do nothing. */
    if (!l->p || l->p == l->data) {
        /* Do nothing. */
    } else {
        /* We have dynamically allocated. Free the memory. */
        PyMem_RawFree(l->p);
    }
    l->p = NULL;
    l->size = -1;
}

/* Copy the accumulated expressions into an arena-owned asdl_seq and
   deallocate the ExprList.  Returns NULL on allocation failure. */
static asdl_seq *
ExprList_Finish(ExprList *l, PyArena *arena)
{
    asdl_seq *seq;

    ExprList_check_invariants(l);

    /* Allocate the asdl_seq and copy the expressions in to it. */
    seq = _Ta3_asdl_seq_new(l->size, arena);
    if (seq) {
        Py_ssize_t i;
        for (i = 0; i < l->size; i++)
            asdl_seq_SET(seq, i, l->p[i]);
    }
    ExprList_Dealloc(l);
    return seq;
}

/* The FstringParser is designed to add a mix of strings and
   f-strings, and concat them together as needed. Ultimately, it
   generates an expr_ty. */
typedef struct {
    PyObject *last_str;
    ExprList expr_list;
} FstringParser;

#ifdef NDEBUG
#define FstringParser_check_invariants(state)
#else
static void
FstringParser_check_invariants(FstringParser *state)
{
    if (state->last_str)
        assert(PyUnicode_CheckExact(state->last_str));
    ExprList_check_invariants(&state->expr_list);
}
#endif

/* Initialize parser state: no pending literal, empty expression list. */
static void
FstringParser_Init(FstringParser *state)
{
    state->last_str = NULL;
    ExprList_Init(&state->expr_list);
    FstringParser_check_invariants(state);
}

/* Release any pending literal and the expression list. */
static void
FstringParser_Dealloc(FstringParser *state)
{
    FstringParser_check_invariants(state);

    Py_XDECREF(state->last_str);
    ExprList_Dealloc(&state->expr_list);
}

/* Make a Str node, but decref the PyUnicode object being added. */
static expr_ty
make_str_node_and_del(PyObject **str, struct compiling *c, const node* n)
{
    PyObject *kind, *s = *str;
    const char *raw = STR(CHILD(n, 0));
    /* currently Python allows up to 2 string modifiers */
    /* NOTE(review): s_kind has room for 2 chars + NUL, but the loop below
       is bounded only by hitting a quote character in `raw` -- confirm the
       tokenizer guarantees at most 2 prefix characters here. */
    char *ch, s_kind[3] = {0, 0, 0};
    ch = s_kind;
    while (*raw && *raw != '\'' && *raw != '"') {
        *ch++ = *raw++;
    }
    kind = PyUnicode_FromString(s_kind);
    if (!kind) {
        return NULL;
    }
    *str = NULL;
    assert(PyUnicode_CheckExact(s));
    if (PyArena_AddPyObject(c->c_arena, s) < 0) {
        Py_DECREF(s);
        return NULL;
    }
    return Str(s, kind, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Add a non-f-string (that is, a regular literal string). str is
   decref'd.
*/
/* Fold str into the parser's pending literal. Always consumes (decrefs)
   str, even on success. Returns 0 on success, -1 on error. */
static int
FstringParser_ConcatAndDel(FstringParser *state, PyObject *str)
{
    FstringParser_check_invariants(state);
    assert(PyUnicode_CheckExact(str));

    /* Zero-length literals are dropped entirely. */
    if (PyUnicode_GET_LENGTH(str) == 0) {
        Py_DECREF(str);
        return 0;
    }

    if (!state->last_str) {
        /* We didn't have a string before, so just remember this one. */
        state->last_str = str;
    }
    else {
        /* Concatenate this with the previous string. */
        PyUnicode_AppendAndDel(&state->last_str, str);
        if (!state->last_str)
            return -1;
    }
    FstringParser_check_invariants(state);
    return 0;
}

/* Parse an f-string. The f-string is in *str to end, with no
   'f' or quotes. Alternating literal runs and {...} expressions are fed
   into state; *str is advanced past the parsed portion. Returns 0 on
   success, -1 on error. */
static int
FstringParser_ConcatFstring(FstringParser *state, const char **str,
                            const char *end, int raw, int recurse_lvl,
                            struct compiling *c, const node *n)
{
    FstringParser_check_invariants(state);

    /* Parse the f-string. */
    while (1) {
        PyObject *literal = NULL;
        expr_ty expression = NULL;

        /* If there's a zero length literal in front of the
           expression, literal will be NULL. If we're at the end of
           the f-string, expression will be NULL (unless result == 1,
           see below). */
        int result = fstring_find_literal_and_expr(str, end, raw, recurse_lvl,
                                                   &literal, &expression,
                                                   c, n);
        if (result < 0)
            return -1;

        /* Add the literal, if any. */
        if (!literal) {
            /* Do nothing. Just leave last_str alone (and possibly
               NULL). */
        }
        else if (!state->last_str) {
            state->last_str = literal;
            literal = NULL;
        }
        else {
            /* We have a literal, concatenate it. */
            assert(PyUnicode_GET_LENGTH(literal) != 0);
            if (FstringParser_ConcatAndDel(state, literal) < 0)
                return -1;
            literal = NULL;
        }
        assert(!state->last_str ||
               PyUnicode_GET_LENGTH(state->last_str) != 0);

        /* We've dealt with the literal now. It can't be leaked on further
           errors. */
        assert(literal == NULL);

        /* See if we should just loop around to get the next literal
           and expression, while ignoring the expression this
           time. This is used for un-doubling braces, as an
           optimization. */
        if (result == 1)
            continue;

        if (!expression)
            /* We're done with this f-string.
*/
            break;

        /* We know we have an expression. Convert any existing string
           to a Str node. */
        if (!state->last_str) {
            /* Do nothing. No previous literal. */
        }
        else {
            /* Convert the existing last_str literal to a Str node. */
            expr_ty str = make_str_node_and_del(&state->last_str, c, n);
            if (!str || ExprList_Append(&state->expr_list, str) < 0)
                return -1;
        }

        if (ExprList_Append(&state->expr_list, expression) < 0)
            return -1;
    }

    /* If recurse_lvl is zero, then we must be at the end of the
       string. Otherwise, we must be at a right brace. */

    if (recurse_lvl == 0 && *str < end-1) {
        ast_error(c, n, "f-string: unexpected end of string");
        return -1;
    }
    if (recurse_lvl != 0 && **str != '}') {
        ast_error(c, n, "f-string: expecting '}'");
        return -1;
    }

    FstringParser_check_invariants(state);
    return 0;
}

/* Convert the partial state reflected in last_str and expr_list to an
   expr_ty. The expr_ty can be a Str, or a JoinedStr. On error the
   state is deallocated and NULL is returned. */
static expr_ty
FstringParser_Finish(FstringParser *state, struct compiling *c,
                     const node *n)
{
    asdl_seq *seq;

    FstringParser_check_invariants(state);

    /* If we're just a constant string with no expressions, return
       that. */
    if (state->expr_list.size == 0) {
        if (!state->last_str) {
            /* Create a zero length string. */
            state->last_str = PyUnicode_FromStringAndSize(NULL, 0);
            if (!state->last_str)
                goto error;
        }
        return make_str_node_and_del(&state->last_str, c, n);
    }

    /* Create a Str node out of last_str, if needed. It will be the
       last node in our expression list. */
    if (state->last_str) {
        expr_ty str = make_str_node_and_del(&state->last_str, c, n);
        if (!str || ExprList_Append(&state->expr_list, str) < 0)
            goto error;
    }
    /* This has already been freed. */
    assert(state->last_str == NULL);

    seq = ExprList_Finish(&state->expr_list, c->c_arena);
    if (!seq)
        goto error;

    /* If there's only one expression, return it. Otherwise, we need
       to join them together.
*/
    if (seq->size == 1)
        return seq->elements[0];

    return JoinedStr(seq, LINENO(n), n->n_col_offset, c->c_arena);

error:
    FstringParser_Dealloc(state);
    return NULL;
}

/* Given an f-string (with no 'f' or quotes) that's in *str and ends
   at end, parse it into an expr_ty. Return NULL on error. Adjust
   str to point past the parsed portion. */
static expr_ty
fstring_parse(const char **str, const char *end, int raw, int recurse_lvl,
              struct compiling *c, const node *n)
{
    FstringParser state;

    FstringParser_Init(&state);
    if (FstringParser_ConcatFstring(&state, str, end, raw, recurse_lvl,
                                    c, n) < 0) {
        FstringParser_Dealloc(&state);
        return NULL;
    }

    return FstringParser_Finish(&state, c, n);
}

/* n is a Python string literal, including the bracketing quote
   characters, and r, b, u, &/or f prefixes (if any), and embedded
   escape sequences (if any). parsestr parses it, and sets *result to
   decoded Python string object. If the string is an f-string, set
   *fstr and *fstrlen to the unparsed string object. Return 0 if no
   errors occurred.
*/
static int
parsestr(struct compiling *c, const node *n, int *bytesmode, int *rawmode,
         PyObject **result, const char **fstr, Py_ssize_t *fstrlen)
{
    size_t len;
    const char *s = STR(n);
    int quote = Py_CHARMASK(*s);
    int fmode = 0;
    *bytesmode = 0;
    *rawmode = 0;
    *result = NULL;
    *fstr = NULL;
    /* Consume the prefix letters (b/B, u/U, r/R, f/F), recording the
       literal's mode. The tokenizer has already validated the prefix
       combination, so the loop only sees legal sequences. */
    if (Py_ISALPHA(quote)) {
        while (!*bytesmode || !*rawmode) {
            if (quote == 'b' || quote == 'B') {
                quote = *++s;
                *bytesmode = 1;
            }
            else if (quote == 'u' || quote == 'U') {
                quote = *++s;
            }
            else if (quote == 'r' || quote == 'R') {
                quote = *++s;
                *rawmode = 1;
            }
            else if (quote == 'f' || quote == 'F') {
                quote = *++s;
                fmode = 1;
            }
            else {
                break;
            }
        }
    }

    /* fstrings are only allowed in Python 3.6 and greater */
    if (fmode && c->c_feature_version < 6) {
        ast_error(c, n, "Format strings are only supported in Python 3.6 and greater");
        return -1;
    }

    if (fmode && *bytesmode) {
        PyErr_BadInternalCall();
        return -1;
    }
    if (quote != '\'' && quote != '\"') {
        PyErr_BadInternalCall();
        return -1;
    }
    /* Skip the leading quote char. */
    s++;
    len = strlen(s);
    if (len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "string to parse is too long");
        return -1;
    }
    if (s[--len] != quote) {
        /* Last quote char must match the first. */
        PyErr_BadInternalCall();
        return -1;
    }
    if (len >= 4 && s[0] == quote && s[1] == quote) {
        /* A triple quoted string. We've already skipped one quote at
           the start and one at the end of the string. Now skip the
           two at the start. */
        s += 2;
        len -= 2;

        /* And check that the last two match. */
        if (s[--len] != quote || s[--len] != quote) {
            PyErr_BadInternalCall();
            return -1;
        }
    }

    if (fmode) {
        /* Just return the bytes. The caller will parse the resulting
           string. */
        *fstr = s;
        *fstrlen = len;
        return 0;
    }

    /* Not an f-string. */
    /* Avoid invoking escape decoding routines if possible. */
    *rawmode = *rawmode || strchr(s, '\\') == NULL;
    if (*bytesmode) {
        /* Disallow non-ASCII characters.
*/
        const char *ch;
        for (ch = s; *ch; ch++) {
            if (Py_CHARMASK(*ch) >= 0x80) {
                ast_error(c, n, "bytes can only contain ASCII "
                          "literal characters.");
                return -1;
            }
        }
        if (*rawmode)
            *result = PyBytes_FromStringAndSize(s, len);
        else
            *result = decode_bytes_with_escapes(c, n, s, len);
    }
    else {
        if (*rawmode)
            *result = PyUnicode_DecodeUTF8Stateful(s, len, NULL, NULL);
        else
            *result = decode_unicode_with_escapes(c, n, s, len);
    }
    return *result == NULL ? -1 : 0;
}

/* Accepts a STRING+ atom, and produces an expr_ty node. Run through
   each STRING atom, and process it as needed. For bytes, just
   concatenate them together, and the result will be a Bytes node. For
   normal strings and f-strings, concatenate them together. The result
   will be a Str node if there were no f-strings; a FormattedValue
   node if there's just an f-string (with no leading or trailing
   literals), or a JoinedStr node if there are multiple f-strings or
   any literals involved. */
static expr_ty
parsestrplus(struct compiling *c, const node *n)
{
    int bytesmode = 0;
    PyObject *bytes_str = NULL;
    int i;

    FstringParser state;
    FstringParser_Init(&state);

    for (i = 0; i < NCH(n); i++) {
        int this_bytesmode;
        int this_rawmode;
        PyObject *s;
        const char *fstr;
        Py_ssize_t fstrlen = -1;  /* Silence a compiler warning. */

        REQ(CHILD(n, i), STRING);
        if (parsestr(c, CHILD(n, i), &this_bytesmode, &this_rawmode, &s,
                     &fstr, &fstrlen) != 0)
            goto error;

        /* Check that we're not mixing bytes with unicode. */
        if (i != 0 && bytesmode != this_bytesmode) {
            ast_error(c, n, "cannot mix bytes and nonbytes literals");
            /* s is NULL if the current string part is an f-string. */
            Py_XDECREF(s);
            goto error;
        }
        bytesmode = this_bytesmode;

        if (fstr != NULL) {
            int result;
            assert(s == NULL && !bytesmode);
            /* This is an f-string. Parse and concatenate it. */
            result = FstringParser_ConcatFstring(&state, &fstr, fstr+fstrlen,
                                                 this_rawmode, 0, c, n);
            if (result < 0)
                goto error;
        }
        else {
            /* A string or byte string. */
            assert(s != NULL && fstr == NULL);

            assert(bytesmode ?
                   PyBytes_CheckExact(s) :
                   PyUnicode_CheckExact(s));

            if (bytesmode) {
                /* For bytes, concat as we go. */
                if (i == 0) {
                    /* First time, just remember this value. */
                    bytes_str = s;
                }
                else {
                    PyBytes_ConcatAndDel(&bytes_str, s);
                    if (!bytes_str)
                        goto error;
                }
            }
            else {
                /* This is a regular string. Concatenate it. */
                if (FstringParser_ConcatAndDel(&state, s) < 0)
                    goto error;
            }
        }
    }
    if (bytesmode) {
        /* Just return the bytes object and we're done. */
        if (PyArena_AddPyObject(c->c_arena, bytes_str) < 0)
            goto error;
        return Bytes(bytes_str, LINENO(n), n->n_col_offset, c->c_arena);
    }

    /* We're not a bytes string, bytes_str should never have been set. */
    assert(bytes_str == NULL);

    return FstringParser_Finish(&state, c, n);

error:
    Py_XDECREF(bytes_str);
    FstringParser_Dealloc(&state);
    return NULL;
}
/*
 * This file includes functions to transform a concrete syntax tree (CST) to
 * an abstract syntax tree (AST). The main function is Ta3AST_FromNode().
 *
 */
#include "Python.h"
#include "Python-ast.h"
#include "node.h"
#include "ast.h"
#include "token.h"
#include "pythonrun.h"

#include <assert.h>

// VS 2010 doesn't have <stdbool.h>...
typedef int bool;
#define false 0
#define true 1

#ifndef _PyObject_FastCall
/* Fallback for Python versions without _PyObject_FastCall: pack the
   C array of arguments into a tuple and call normally. */
static PyObject *
_PyObject_FastCall(PyObject *func, PyObject *const *args, int nargs)
{
    PyObject *t, *res;
    int i;

    t = PyTuple_New(nargs);
    if (t == NULL) {
        return NULL;
    }
    for (i = 0; i < nargs; i++) {
        if (PyTuple_SetItem(t, i, args[i]) < 0) {
            Py_DECREF(t);
            return NULL;
        }
    }
    res = PyObject_CallObject(func, t);
    Py_DECREF(t);
    return res;
}
#endif

#if PY_MINOR_VERSION < 6
/* Shims for private C-API helpers that only exist in 3.6+; the
   first_invalid_escape out-parameter is simply reported as absent. */
#define _PyUnicode_EqualToASCIIString(a, b) (PyUnicode_CompareWithASCIIString((a), (b)) == 0)

static PyObject *
_PyBytes_DecodeEscape(const char *s, Py_ssize_t len, const char *errors,
                      Py_ssize_t unicode, const char *recode_encoding,
                      const char **first_invalid_escape)
{
    *first_invalid_escape = NULL;
    return PyBytes_DecodeEscape(s, len, errors, unicode, recode_encoding);
}

PyObject *
_PyUnicode_DecodeUnicodeEscape(const char *s, Py_ssize_t size,
                               const char *errors,
                               const char **first_invalid_escape)
{
    *first_invalid_escape = NULL;
    return PyUnicode_DecodeUnicodeEscape(s, size, errors);
}
#endif

static int validate_stmts(asdl_seq *);
static int validate_exprs(asdl_seq *, expr_context_ty, int);
static int validate_nonempty_seq(asdl_seq *, const char *, const char *);
static int validate_stmt(stmt_ty);
static int validate_expr(expr_ty, expr_context_ty);

mod_ty
string_object_to_c_ast(const char *s, PyObject *filename, int start,
                       PyCompilerFlags *flags, int feature_version,
                       PyArena *arena);

/* All validate_* helpers below return 1 when the (sub)tree is well
   formed and 0 with an exception set otherwise. */

static int
validate_comprehension(asdl_seq *gens)
{
    int i;
    if (!asdl_seq_LEN(gens)) {
        PyErr_SetString(PyExc_ValueError, "comprehension with no generators");
        return 0;
    }
    for (i = 0; i < asdl_seq_LEN(gens); i++) {
        comprehension_ty comp = asdl_seq_GET(gens, i);
        if (!validate_expr(comp->target, Store) ||
            !validate_expr(comp->iter, Load) ||
            !validate_exprs(comp->ifs, Load, 0))
            return 0;
    }
    return 1;
}

static int
validate_slice(slice_ty slice)
{
    switch (slice->kind) {
    case Slice_kind:
        /* All three components are optional. */
        return (!slice->v.Slice.lower ||
                validate_expr(slice->v.Slice.lower, Load)) &&
            (!slice->v.Slice.upper ||
             validate_expr(slice->v.Slice.upper, Load)) &&
            (!slice->v.Slice.step ||
             validate_expr(slice->v.Slice.step, Load));
    case ExtSlice_kind: {
        int i;
        if (!validate_nonempty_seq(slice->v.ExtSlice.dims, "dims", "ExtSlice"))
            return 0;
        for (i = 0; i < asdl_seq_LEN(slice->v.ExtSlice.dims); i++)
            if (!validate_slice(asdl_seq_GET(slice->v.ExtSlice.dims, i)))
                return 0;
        return 1;
    }
    case Index_kind:
        return validate_expr(slice->v.Index.value, Load);
    default:
        PyErr_SetString(PyExc_SystemError, "unknown slice node");
        return 0;
    }
}

static int
validate_keywords(asdl_seq *keywords)
{
    int i;
    for (i = 0; i < asdl_seq_LEN(keywords); i++)
        if (!validate_expr(((keyword_ty)asdl_seq_GET(keywords, i))->value,
                           Load))
            return 0;
    return 1;
}

/* Only annotations need checking here; plain arg names are always valid. */
static int
validate_args(asdl_seq *args)
{
    int i;
    for (i = 0; i < asdl_seq_LEN(args); i++) {
        arg_ty arg = asdl_seq_GET(args, i);
        if (arg->annotation && !validate_expr(arg->annotation, Load))
            return 0;
    }
    return 1;
}

static const char *
expr_context_name(expr_context_ty ctx)
{
    switch (ctx) {
    case Load:
        return "Load";
    case Store:
        return "Store";
    case Del:
        return "Del";
    case AugLoad:
        return "AugLoad";
    case AugStore:
        return "AugStore";
    case Param:
        return "Param";
    default:
        abort();
    }
}

static int
validate_arguments(arguments_ty args)
{
    if (!validate_args(args->args))
        return 0;
    if (args->vararg && args->vararg->annotation
        && !validate_expr(args->vararg->annotation, Load)) {
        return 0;
    }
    if (!validate_args(args->kwonlyargs))
        return 0;
    if (args->kwarg && args->kwarg->annotation
        && !validate_expr(args->kwarg->annotation, Load)) {
        return 0;
    }
    if (asdl_seq_LEN(args->defaults) > asdl_seq_LEN(args->args)) {
        PyErr_SetString(PyExc_ValueError,
                        "more positional defaults than args on arguments");
        return 0;
    }
    if (asdl_seq_LEN(args->kw_defaults) != asdl_seq_LEN(args->kwonlyargs)) {
        PyErr_SetString(PyExc_ValueError,
                        "length of kwonlyargs is not the same as "
                        "kw_defaults on arguments");
        return 0;
    }
    return validate_exprs(args->defaults, Load, 0) &&
        validate_exprs(args->kw_defaults, Load, 1);
}

/* Recursively check that value is a type the AST allows inside a
   Constant node (None/Ellipsis, exact numeric/str/bytes, or
   tuples/frozensets of the same). */
static int
validate_constant(PyObject *value)
{
    if (value == Py_None || value == Py_Ellipsis)
        return 1;

    if (PyLong_CheckExact(value)
            || PyFloat_CheckExact(value)
            || PyComplex_CheckExact(value)
            || PyBool_Check(value)
            || PyUnicode_CheckExact(value)
            || PyBytes_CheckExact(value))
        return 1;

    if (PyTuple_CheckExact(value) || PyFrozenSet_CheckExact(value)) {
        PyObject *it;

        it = PyObject_GetIter(value);
        if (it == NULL)
            return 0;

        while (1) {
            PyObject *item = PyIter_Next(it);
            if (item == NULL) {
                if (PyErr_Occurred()) {
                    Py_DECREF(it);
                    return 0;
                }
                break;
            }

            if (!validate_constant(item)) {
                Py_DECREF(it);
                Py_DECREF(item);
                return 0;
            }
            Py_DECREF(item);
        }

        Py_DECREF(it);
        return 1;
    }

    return 0;
}

static int
validate_expr(expr_ty exp, expr_context_ty ctx)
{
    int check_ctx = 1;
    expr_context_ty actual_ctx;

    /* First check expression context.
*/
    switch (exp->kind) {
    case Attribute_kind:
        actual_ctx = exp->v.Attribute.ctx;
        break;
    case Subscript_kind:
        actual_ctx = exp->v.Subscript.ctx;
        break;
    case Starred_kind:
        actual_ctx = exp->v.Starred.ctx;
        break;
    case Name_kind:
        actual_ctx = exp->v.Name.ctx;
        break;
    case List_kind:
        actual_ctx = exp->v.List.ctx;
        break;
    case Tuple_kind:
        actual_ctx = exp->v.Tuple.ctx;
        break;
    default:
        /* Every other expression kind only makes sense in Load context. */
        if (ctx != Load) {
            PyErr_Format(PyExc_ValueError, "expression which can't be "
                         "assigned to in %s context", expr_context_name(ctx));
            return 0;
        }
        check_ctx = 0;
        /* set actual_ctx to prevent gcc warning */
        actual_ctx = 0;
    }
    if (check_ctx && actual_ctx != ctx) {
        PyErr_Format(PyExc_ValueError,
                     "expression must have %s context but has %s instead",
                     expr_context_name(ctx), expr_context_name(actual_ctx));
        return 0;
    }

    /* Now validate expression. */
    switch (exp->kind) {
    case BoolOp_kind:
        if (asdl_seq_LEN(exp->v.BoolOp.values) < 2) {
            PyErr_SetString(PyExc_ValueError,
                            "BoolOp with less than 2 values");
            return 0;
        }
        return validate_exprs(exp->v.BoolOp.values, Load, 0);
    case BinOp_kind:
        return validate_expr(exp->v.BinOp.left, Load) &&
            validate_expr(exp->v.BinOp.right, Load);
    case UnaryOp_kind:
        return validate_expr(exp->v.UnaryOp.operand, Load);
    case Lambda_kind:
        return validate_arguments(exp->v.Lambda.args) &&
            validate_expr(exp->v.Lambda.body, Load);
    case IfExp_kind:
        return validate_expr(exp->v.IfExp.test, Load) &&
            validate_expr(exp->v.IfExp.body, Load) &&
            validate_expr(exp->v.IfExp.orelse, Load);
    case Dict_kind:
        if (asdl_seq_LEN(exp->v.Dict.keys) !=
            asdl_seq_LEN(exp->v.Dict.values)) {
            PyErr_SetString(PyExc_ValueError,
                            "Dict doesn't have the same number of keys as values");
            return 0;
        }
        /* null_ok=1 for keys expressions to allow dict unpacking to work in
           dict literals, i.e. ``{**{a:b}}`` */
        return validate_exprs(exp->v.Dict.keys, Load, /*null_ok=*/ 1) &&
            validate_exprs(exp->v.Dict.values, Load, /*null_ok=*/ 0);
    case Set_kind:
        return validate_exprs(exp->v.Set.elts, Load, 0);
#define COMP(NAME) \
        case NAME ## _kind: \
            return validate_comprehension(exp->v.NAME.generators) && \
                validate_expr(exp->v.NAME.elt, Load);
    COMP(ListComp)
    COMP(SetComp)
    COMP(GeneratorExp)
#undef COMP
    case DictComp_kind:
        return validate_comprehension(exp->v.DictComp.generators) &&
            validate_expr(exp->v.DictComp.key, Load) &&
            validate_expr(exp->v.DictComp.value, Load);
    case Yield_kind:
        return !exp->v.Yield.value ||
            validate_expr(exp->v.Yield.value, Load);
    case YieldFrom_kind:
        return validate_expr(exp->v.YieldFrom.value, Load);
    case Await_kind:
        return validate_expr(exp->v.Await.value, Load);
    case Compare_kind:
        if (!asdl_seq_LEN(exp->v.Compare.comparators)) {
            PyErr_SetString(PyExc_ValueError,
                            "Compare with no comparators");
            return 0;
        }
        if (asdl_seq_LEN(exp->v.Compare.comparators) !=
            asdl_seq_LEN(exp->v.Compare.ops)) {
            PyErr_SetString(PyExc_ValueError,
                            "Compare has a different number "
                            "of comparators and operands");
            return 0;
        }
        return validate_exprs(exp->v.Compare.comparators, Load, 0) &&
            validate_expr(exp->v.Compare.left, Load);
    case Call_kind:
        return validate_expr(exp->v.Call.func, Load) &&
            validate_exprs(exp->v.Call.args, Load, 0) &&
            validate_keywords(exp->v.Call.keywords);
    case Constant_kind:
        if (!validate_constant(exp->v.Constant.value)) {
            PyErr_Format(PyExc_TypeError,
                         "got an invalid type in Constant: %s",
                         Py_TYPE(exp->v.Constant.value)->tp_name);
            return 0;
        }
        return 1;
    case Num_kind: {
        PyObject *n = exp->v.Num.n;
        if (!PyLong_CheckExact(n) && !PyFloat_CheckExact(n) &&
            !PyComplex_CheckExact(n)) {
            PyErr_SetString(PyExc_TypeError, "non-numeric type in Num");
            return 0;
        }
        return 1;
    }
    case Str_kind: {
        PyObject *s = exp->v.Str.s;
        if (!PyUnicode_CheckExact(s)) {
            PyErr_SetString(PyExc_TypeError, "non-string type in Str");
            return 0;
        }
        return 1;
    }
    case JoinedStr_kind:
        return validate_exprs(exp->v.JoinedStr.values, Load, 0);
    case FormattedValue_kind:
        if (validate_expr(exp->v.FormattedValue.value, Load) == 0)
            return 0;
        if (exp->v.FormattedValue.format_spec)
            return validate_expr(exp->v.FormattedValue.format_spec, Load);
        return 1;
    case Bytes_kind: {
        PyObject *b = exp->v.Bytes.s;
        if (!PyBytes_CheckExact(b)) {
            PyErr_SetString(PyExc_TypeError, "non-bytes type in Bytes");
            return 0;
        }
        return 1;
    }
    case Attribute_kind:
        return validate_expr(exp->v.Attribute.value, Load);
    case Subscript_kind:
        return validate_slice(exp->v.Subscript.slice) &&
            validate_expr(exp->v.Subscript.value, Load);
    case Starred_kind:
        return validate_expr(exp->v.Starred.value, ctx);
    case List_kind:
        return validate_exprs(exp->v.List.elts, ctx, 0);
    case Tuple_kind:
        return validate_exprs(exp->v.Tuple.elts, ctx, 0);
    /* These last cases don't have any checking. */
    case Name_kind:
    case NameConstant_kind:
    case Ellipsis_kind:
        return 1;
    default:
        PyErr_SetString(PyExc_SystemError, "unexpected expression");
        return 0;
    }
}

static int
validate_nonempty_seq(asdl_seq *seq, const char *what, const char *owner)
{
    if (asdl_seq_LEN(seq))
        return 1;
    PyErr_Format(PyExc_ValueError, "empty %s on %s", what, owner);
    return 0;
}

static int
validate_assignlist(asdl_seq *targets, expr_context_ty ctx)
{
    return validate_nonempty_seq(targets, "targets", ctx == Del ?
                                 "Delete" : "Assign") &&
        validate_exprs(targets, ctx, 0);
}

static int
validate_body(asdl_seq *body, const char *owner)
{
    return validate_nonempty_seq(body, "body", owner) && validate_stmts(body);
}

/* Check one statement node; returns 1 if well formed, 0 with an
   exception set otherwise. */
static int
validate_stmt(stmt_ty stmt)
{
    int i;
    switch (stmt->kind) {
    case FunctionDef_kind:
        return validate_body(stmt->v.FunctionDef.body, "FunctionDef") &&
            validate_arguments(stmt->v.FunctionDef.args) &&
            validate_exprs(stmt->v.FunctionDef.decorator_list, Load, 0) &&
            (!stmt->v.FunctionDef.returns ||
             validate_expr(stmt->v.FunctionDef.returns, Load));
    case ClassDef_kind:
        return validate_body(stmt->v.ClassDef.body, "ClassDef") &&
            validate_exprs(stmt->v.ClassDef.bases, Load, 0) &&
            validate_keywords(stmt->v.ClassDef.keywords) &&
            validate_exprs(stmt->v.ClassDef.decorator_list, Load, 0);
    case Return_kind:
        return !stmt->v.Return.value ||
            validate_expr(stmt->v.Return.value, Load);
    case Delete_kind:
        return validate_assignlist(stmt->v.Delete.targets, Del);
    case Assign_kind:
        return validate_assignlist(stmt->v.Assign.targets, Store) &&
            validate_expr(stmt->v.Assign.value, Load);
    case AugAssign_kind:
        return validate_expr(stmt->v.AugAssign.target, Store) &&
            validate_expr(stmt->v.AugAssign.value, Load);
    case AnnAssign_kind:
        if (stmt->v.AnnAssign.target->kind != Name_kind &&
            stmt->v.AnnAssign.simple) {
            PyErr_SetString(PyExc_TypeError,
                            "AnnAssign with simple non-Name target");
            return 0;
        }
        return validate_expr(stmt->v.AnnAssign.target, Store) &&
               (!stmt->v.AnnAssign.value ||
                validate_expr(stmt->v.AnnAssign.value, Load)) &&
               validate_expr(stmt->v.AnnAssign.annotation, Load);
    case For_kind:
        return validate_expr(stmt->v.For.target, Store) &&
            validate_expr(stmt->v.For.iter, Load) &&
            validate_body(stmt->v.For.body, "For") &&
            validate_stmts(stmt->v.For.orelse);
    case AsyncFor_kind:
        return validate_expr(stmt->v.AsyncFor.target, Store) &&
            validate_expr(stmt->v.AsyncFor.iter, Load) &&
            validate_body(stmt->v.AsyncFor.body, "AsyncFor") &&
            validate_stmts(stmt->v.AsyncFor.orelse);
    case While_kind:
        return validate_expr(stmt->v.While.test, Load) &&
            validate_body(stmt->v.While.body, "While") &&
            validate_stmts(stmt->v.While.orelse);
    case If_kind:
        return validate_expr(stmt->v.If.test, Load) &&
            validate_body(stmt->v.If.body, "If") &&
            validate_stmts(stmt->v.If.orelse);
    case With_kind:
        if (!validate_nonempty_seq(stmt->v.With.items, "items", "With"))
            return 0;
        for (i = 0; i < asdl_seq_LEN(stmt->v.With.items); i++) {
            withitem_ty item = asdl_seq_GET(stmt->v.With.items, i);
            if (!validate_expr(item->context_expr, Load) ||
                (item->optional_vars &&
                 !validate_expr(item->optional_vars, Store)))
                return 0;
        }
        return validate_body(stmt->v.With.body, "With");
    case AsyncWith_kind:
        if (!validate_nonempty_seq(stmt->v.AsyncWith.items, "items",
                                   "AsyncWith"))
            return 0;
        for (i = 0; i < asdl_seq_LEN(stmt->v.AsyncWith.items); i++) {
            withitem_ty item = asdl_seq_GET(stmt->v.AsyncWith.items, i);
            if (!validate_expr(item->context_expr, Load) ||
                (item->optional_vars &&
                 !validate_expr(item->optional_vars, Store)))
                return 0;
        }
        return validate_body(stmt->v.AsyncWith.body, "AsyncWith");
    case Raise_kind:
        if (stmt->v.Raise.exc) {
            return validate_expr(stmt->v.Raise.exc, Load) &&
                (!stmt->v.Raise.cause ||
                 validate_expr(stmt->v.Raise.cause, Load));
        }
        /* A cause without an exception ("raise from x") is invalid. */
        if (stmt->v.Raise.cause) {
            PyErr_SetString(PyExc_ValueError,
                            "Raise with cause but no exception");
            return 0;
        }
        return 1;
    case Try_kind:
        if (!validate_body(stmt->v.Try.body, "Try"))
            return 0;
        if (!asdl_seq_LEN(stmt->v.Try.handlers) &&
            !asdl_seq_LEN(stmt->v.Try.finalbody)) {
            PyErr_SetString(PyExc_ValueError,
                            "Try has neither except handlers nor finalbody");
            return 0;
        }
        if (!asdl_seq_LEN(stmt->v.Try.handlers) &&
            asdl_seq_LEN(stmt->v.Try.orelse)) {
            PyErr_SetString(PyExc_ValueError,
                            "Try has orelse but no except handlers");
            return 0;
        }
        for (i = 0; i < asdl_seq_LEN(stmt->v.Try.handlers); i++) {
            excepthandler_ty handler = asdl_seq_GET(stmt->v.Try.handlers, i);
            if ((handler->v.ExceptHandler.type &&
                 !validate_expr(handler->v.ExceptHandler.type, Load)) ||
                !validate_body(handler->v.ExceptHandler.body,
                               "ExceptHandler"))
                return 0;
        }
        return (!asdl_seq_LEN(stmt->v.Try.finalbody) ||
                validate_stmts(stmt->v.Try.finalbody)) &&
            (!asdl_seq_LEN(stmt->v.Try.orelse) ||
             validate_stmts(stmt->v.Try.orelse));
    case Assert_kind:
        return validate_expr(stmt->v.Assert.test, Load) &&
            (!stmt->v.Assert.msg ||
             validate_expr(stmt->v.Assert.msg, Load));
    case Import_kind:
        return validate_nonempty_seq(stmt->v.Import.names, "names", "Import");
    case ImportFrom_kind:
        if (stmt->v.ImportFrom.level < 0) {
            PyErr_SetString(PyExc_ValueError, "Negative ImportFrom level");
            return 0;
        }
        return validate_nonempty_seq(stmt->v.ImportFrom.names, "names",
                                     "ImportFrom");
    case Global_kind:
        return validate_nonempty_seq(stmt->v.Global.names, "names", "Global");
    case Nonlocal_kind:
        return validate_nonempty_seq(stmt->v.Nonlocal.names, "names",
                                     "Nonlocal");
    case Expr_kind:
        return validate_expr(stmt->v.Expr.value, Load);
    case AsyncFunctionDef_kind:
        return validate_body(stmt->v.AsyncFunctionDef.body,
                             "AsyncFunctionDef") &&
            validate_arguments(stmt->v.AsyncFunctionDef.args) &&
            validate_exprs(stmt->v.AsyncFunctionDef.decorator_list,
                           Load, 0) &&
            (!stmt->v.AsyncFunctionDef.returns ||
             validate_expr(stmt->v.AsyncFunctionDef.returns, Load));
    case Pass_kind:
    case Break_kind:
    case Continue_kind:
        return 1;
    default:
        PyErr_SetString(PyExc_SystemError, "unexpected statement");
        return 0;
    }
}

static int
validate_stmts(asdl_seq *seq)
{
    int i;
    for (i = 0; i < asdl_seq_LEN(seq); i++) {
        stmt_ty stmt = asdl_seq_GET(seq, i);
        if (stmt) {
            if (!validate_stmt(stmt))
                return 0;
        }
        else {
            PyErr_SetString(PyExc_ValueError,
                            "None disallowed in statement list");
            return 0;
        }
    }
    return 1;
}

/* null_ok permits NULL entries (used for dict-unpacking keys). */
static int
validate_exprs(asdl_seq *exprs, expr_context_ty ctx, int null_ok)
{
    int i;
    for (i = 0; i < asdl_seq_LEN(exprs); i++) {
        expr_ty expr = asdl_seq_GET(exprs, i);
        if (expr) {
            if (!validate_expr(expr, ctx))
                return 0;
        }
        else if (!null_ok) {
            PyErr_SetString(PyExc_ValueError,
                            "None disallowed in expression list");
            return 0;
        }
    }
    return 1;
}

/* Entry point: validate a whole module object. Returns 1 on success,
   0 with an exception set otherwise. */
int
Ta3AST_Validate(mod_ty mod)
{
    int res = 0;

    switch (mod->kind) {
    case Module_kind:
        res = validate_stmts(mod->v.Module.body);
        break;
    case Interactive_kind:
        res = validate_stmts(mod->v.Interactive.body);
        break;
    case Expression_kind:
        res = validate_expr(mod->v.Expression.body, Load);
        break;
    case Suite_kind:
        PyErr_SetString(PyExc_ValueError,
                        "Suite is not valid in the CPython compiler");
        break;
    default:
        PyErr_SetString(PyExc_SystemError, "impossible module node");
        res = 0;
        break;
    }
    return res;
}

/* This is done here, so defines like "test" don't interfere with AST use
   above. */
#include "grammar.h"
#include "parsetok.h"
#include "graminit.h"

/* Data structure used internally */
struct compiling {
    PyArena *c_arena; /* Arena for allocating memory. */
    PyObject *c_filename; /* filename */
    PyObject *c_normalize; /* Normalization function from unicodedata. */
    int c_feature_version; /* Latest minor version of Python for allowed features */
};

static asdl_seq *seq_for_testlist(struct compiling *, const node *);
static expr_ty ast_for_expr(struct compiling *, const node *);
static stmt_ty ast_for_stmt(struct compiling *, const node *);
static asdl_seq *ast_for_suite(struct compiling *c, const node *n);
static asdl_seq *ast_for_exprlist(struct compiling *, const node *,
                                  expr_context_ty);
static expr_ty ast_for_testlist(struct compiling *, const node *);
static stmt_ty ast_for_classdef(struct compiling *, const node *, asdl_seq *);

static stmt_ty ast_for_with_stmt(struct compiling *, const node *, bool);
static stmt_ty ast_for_for_stmt(struct compiling *, const node *, bool);

/* Note different signature for ast_for_call */
static expr_ty ast_for_call(struct compiling *, const node *, expr_ty, bool);

static PyObject *parsenumber(struct compiling *, const char *);
static expr_ty parsestrplus(struct compiling *, const node *n);

#define COMP_GENEXP   0
#define COMP_LISTCOMP 1
#define COMP_SETCOMP  2

/* Lazily import unicodedata.normalize into c->c_normalize.
   Returns 1 on success, 0 on failure (exception set). */
static int
init_normalization(struct compiling *c)
{
    PyObject *m
= PyImport_ImportModuleNoBlock("unicodedata"); if (!m) return 0; c->c_normalize = PyObject_GetAttrString(m, "normalize"); Py_DECREF(m); if (!c->c_normalize) return 0; return 1; } static identifier new_identifier(const char *n, struct compiling *c) { PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL); if (!id) return NULL; /* PyUnicode_DecodeUTF8 should always return a ready string. */ assert(PyUnicode_IS_READY(id)); /* Check whether there are non-ASCII characters in the identifier; if so, normalize to NFKC. */ if (!PyUnicode_IS_ASCII(id)) { PyObject *id2; PyObject *form; PyObject *args[2]; _Py_IDENTIFIER(NFKC); if (!c->c_normalize && !init_normalization(c)) { Py_DECREF(id); return NULL; } form = _PyUnicode_FromId(&PyId_NFKC); if (form == NULL) { Py_DECREF(id); return NULL; } args[0] = form; args[1] = id; id2 = _PyObject_FastCall(c->c_normalize, args, 2); Py_DECREF(id); if (!id2) return NULL; if (!PyUnicode_Check(id2)) { PyErr_Format(PyExc_TypeError, "unicodedata.normalize() must return a string, not " "%.200s", Py_TYPE(id2)->tp_name); Py_DECREF(id2); return NULL; } id = id2; } PyUnicode_InternInPlace(&id); if (PyArena_AddPyObject(c->c_arena, id) < 0) { Py_DECREF(id); return NULL; } return id; } #define NEW_IDENTIFIER(n) new_identifier(STR(n), c) static string new_type_comment(const char *s, struct compiling *c) { return PyUnicode_DecodeUTF8(s, strlen(s), NULL); } #define NEW_TYPE_COMMENT(n) new_type_comment(STR(n), c) static int ast_error(struct compiling *c, const node *n, const char *errmsg) { PyObject *value, *errstr, *loc, *tmp; loc = PyErr_ProgramTextObject(c->c_filename, LINENO(n)); if (!loc) { Py_INCREF(Py_None); loc = Py_None; } tmp = Py_BuildValue("(OiiN)", c->c_filename, LINENO(n), n->n_col_offset, loc); if (!tmp) return 0; errstr = PyUnicode_FromString(errmsg); if (!errstr) { Py_DECREF(tmp); return 0; } value = PyTuple_Pack(2, errstr, tmp); Py_DECREF(errstr); Py_DECREF(tmp); if (value) { PyErr_SetObject(PyExc_SyntaxError, value); Py_DECREF(value); } 
return 0; } /* num_stmts() returns number of contained statements. Use this routine to determine how big a sequence is needed for the statements in a parse tree. Its raison d'etre is this bit of grammar: stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE A simple_stmt can contain multiple small_stmt elements joined by semicolons. If the arg is a simple_stmt, the number of small_stmt elements is returned. */ static int num_stmts(const node *n) { int i, l; node *ch; switch (TYPE(n)) { case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) return 0; else return num_stmts(CHILD(n, 0)); case file_input: l = 0; for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == stmt) l += num_stmts(ch); } return l; case stmt: return num_stmts(CHILD(n, 0)); case compound_stmt: return 1; case simple_stmt: return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */ case suite: /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ if (NCH(n) == 1) return num_stmts(CHILD(n, 0)); else { i = 2; l = 0; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) l += num_stmts(CHILD(n, i)); return l; } default: { char buf[128]; sprintf(buf, "Non-statement found: %d %d", TYPE(n), NCH(n)); Py_FatalError(buf); } } abort(); } /* Transform the CST rooted at node * to the appropriate AST */ mod_ty Ta3AST_FromNodeObject(const node *n, PyCompilerFlags *flags, PyObject *filename, int feature_version, PyArena *arena) { int i, j, k, num; asdl_seq *stmts = NULL; asdl_seq *type_ignores = NULL; stmt_ty s; node *ch; struct compiling c; mod_ty res = NULL; asdl_seq *argtypes = NULL; expr_ty ret, arg; c.c_arena = arena; /* borrowed reference */ c.c_filename = filename; c.c_normalize = NULL; c.c_feature_version = feature_version; if (TYPE(n) == encoding_decl) n = CHILD(n, 0); k = 0; switch (TYPE(n)) { case file_input: stmts = _Ta3_asdl_seq_new(num_stmts(n), arena); if (!stmts) goto out; for (i = 0; i < NCH(n) - 1; 
i++) { ch = CHILD(n, i); if (TYPE(ch) == NEWLINE) continue; REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { s = ast_for_stmt(&c, ch); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } else { ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < num; j++) { s = ast_for_stmt(&c, CHILD(ch, j * 2)); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } } } /* Type ignores are stored under the ENDMARKER in file_input. */ ch = CHILD(n, NCH(n) - 1); REQ(ch, ENDMARKER); num = NCH(ch); type_ignores = _Ta3_asdl_seq_new(num, arena); if (!type_ignores) goto out; for (i = 0; i < num; i++) { type_ignore_ty ti = TypeIgnore(LINENO(CHILD(ch, i)), arena); if (!ti) goto out; asdl_seq_SET(type_ignores, i, ti); } res = Module(stmts, type_ignores, arena); break; case eval_input: { expr_ty testlist_ast; /* XXX Why not comp_for here? */ testlist_ast = ast_for_testlist(&c, CHILD(n, 0)); if (!testlist_ast) goto out; res = Expression(testlist_ast, arena); break; } case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) { stmts = _Ta3_asdl_seq_new(1, arena); if (!stmts) goto out; asdl_seq_SET(stmts, 0, Pass(n->n_lineno, n->n_col_offset, arena)); if (!asdl_seq_GET(stmts, 0)) goto out; res = Interactive(stmts, arena); } else { n = CHILD(n, 0); num = num_stmts(n); stmts = _Ta3_asdl_seq_new(num, arena); if (!stmts) goto out; if (num == 1) { s = ast_for_stmt(&c, n); if (!s) goto out; asdl_seq_SET(stmts, 0, s); } else { /* Only a simple_stmt can contain multiple statements. 
*/ REQ(n, simple_stmt); for (i = 0; i < NCH(n); i += 2) { if (TYPE(CHILD(n, i)) == NEWLINE) break; s = ast_for_stmt(&c, CHILD(n, i)); if (!s) goto out; asdl_seq_SET(stmts, i / 2, s); } } res = Interactive(stmts, arena); } break; case func_type_input: n = CHILD(n, 0); REQ(n, func_type); if (TYPE(CHILD(n, 1)) == typelist) { ch = CHILD(n, 1); /* this is overly permissive -- we don't pay any attention to * stars on the args -- just parse them into an ordered list */ num = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) num++; } argtypes = _Ta3_asdl_seq_new(num, arena); j = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) { arg = ast_for_expr(&c, CHILD(ch, i)); if (!arg) goto out; asdl_seq_SET(argtypes, j++, arg); } } } else argtypes = _Ta3_asdl_seq_new(0, arena); ret = ast_for_expr(&c, CHILD(n, NCH(n) - 1)); if (!ret) goto out; res = FunctionType(argtypes, ret, arena); break; default: PyErr_Format(PyExc_SystemError, "invalid node %d for Ta3AST_FromNode", TYPE(n)); goto out; } out: if (c.c_normalize) { Py_DECREF(c.c_normalize); } return res; } mod_ty Ta3AST_FromNode(const node *n, PyCompilerFlags *flags, const char *filename_str, int feature_version, PyArena *arena) { mod_ty mod; PyObject *filename; filename = PyUnicode_DecodeFSDefault(filename_str); if (filename == NULL) return NULL; mod = Ta3AST_FromNodeObject(n, flags, filename, feature_version, arena); Py_DECREF(filename); return mod; } /* Return the AST repr. of the operator represented as syntax (|, ^, etc.) 
*/ static operator_ty get_operator(struct compiling *c, const node *n) { switch (TYPE(n)) { case VBAR: return BitOr; case CIRCUMFLEX: return BitXor; case AMPER: return BitAnd; case LEFTSHIFT: return LShift; case RIGHTSHIFT: return RShift; case PLUS: return Add; case MINUS: return Sub; case STAR: return Mult; case AT: if (c->c_feature_version < 5) { ast_error(c, n, "The '@' operator is only supported in Python 3.5 and greater"); return (operator_ty)0; } return MatMult; case SLASH: return Div; case DOUBLESLASH: return FloorDiv; case PERCENT: return Mod; default: return (operator_ty)0; } } static const char * const FORBIDDEN[] = { "None", "True", "False", NULL, }; static int forbidden_name(struct compiling *c, identifier name, const node *n, int full_checks) { assert(PyUnicode_Check(name)); if (_PyUnicode_EqualToASCIIString(name, "__debug__")) { ast_error(c, n, "assignment to keyword"); return 1; } if (full_checks) { const char * const *p; for (p = FORBIDDEN; *p; p++) { if (_PyUnicode_EqualToASCIIString(name, *p)) { ast_error(c, n, "assignment to keyword"); return 1; } } } return 0; } /* Set the context ctx for expr_ty e, recursively traversing e. Only sets context for expr kinds that "can appear in assignment context" (according to ../Parser/Python.asdl). For other expr kinds, it sets an appropriate syntax error and returns false. */ static int set_context(struct compiling *c, expr_ty e, expr_context_ty ctx, const node *n) { asdl_seq *s = NULL; /* If a particular expression type can't be used for assign / delete, set expr_name to its name and an error message will be generated. */ const char* expr_name = NULL; /* The ast defines augmented store and load contexts, but the implementation here doesn't actually use them. The code may be a little more complex than necessary as a result. It also means that expressions in an augmented assignment have a Store context. Consider restructuring so that augmented assignment uses set_context(), too. 
*/ assert(ctx != AugStore && ctx != AugLoad); switch (e->kind) { case Attribute_kind: e->v.Attribute.ctx = ctx; if (ctx == Store && forbidden_name(c, e->v.Attribute.attr, n, 1)) return 0; break; case Subscript_kind: e->v.Subscript.ctx = ctx; break; case Starred_kind: e->v.Starred.ctx = ctx; if (!set_context(c, e->v.Starred.value, ctx, n)) return 0; break; case Name_kind: if (ctx == Store) { if (forbidden_name(c, e->v.Name.id, n, 0)) return 0; /* forbidden_name() calls ast_error() */ } e->v.Name.ctx = ctx; break; case List_kind: e->v.List.ctx = ctx; s = e->v.List.elts; break; case Tuple_kind: e->v.Tuple.ctx = ctx; s = e->v.Tuple.elts; break; case Lambda_kind: expr_name = "lambda"; break; case Call_kind: expr_name = "function call"; break; case BoolOp_kind: case BinOp_kind: case UnaryOp_kind: expr_name = "operator"; break; case GeneratorExp_kind: expr_name = "generator expression"; break; case Yield_kind: case YieldFrom_kind: expr_name = "yield expression"; break; case Await_kind: expr_name = "await expression"; break; case ListComp_kind: expr_name = "list comprehension"; break; case SetComp_kind: expr_name = "set comprehension"; break; case DictComp_kind: expr_name = "dict comprehension"; break; case Dict_kind: case Set_kind: case Num_kind: case Str_kind: case Bytes_kind: case JoinedStr_kind: case FormattedValue_kind: expr_name = "literal"; break; case NameConstant_kind: expr_name = "keyword"; break; case Ellipsis_kind: expr_name = "Ellipsis"; break; case Compare_kind: expr_name = "comparison"; break; case IfExp_kind: expr_name = "conditional expression"; break; default: PyErr_Format(PyExc_SystemError, "unexpected expression in assignment %d (line %d)", e->kind, e->lineno); return 0; } /* Check for error string set by switch */ if (expr_name) { char buf[300]; PyOS_snprintf(buf, sizeof(buf), "can't %s %s", ctx == Store ? 
"assign to" : "delete", expr_name); return ast_error(c, n, buf); } /* If the LHS is a list or tuple, we need to set the assignment context for all the contained elements. */ if (s) { int i; for (i = 0; i < asdl_seq_LEN(s); i++) { if (!set_context(c, (expr_ty)asdl_seq_GET(s, i), ctx, n)) return 0; } } return 1; } static operator_ty ast_for_augassign(struct compiling *c, const node *n) { REQ(n, augassign); n = CHILD(n, 0); switch (STR(n)[0]) { case '+': return Add; case '-': return Sub; case '/': if (STR(n)[1] == '/') return FloorDiv; else return Div; case '%': return Mod; case '<': return LShift; case '>': return RShift; case '&': return BitAnd; case '^': return BitXor; case '|': return BitOr; case '*': if (STR(n)[1] == '*') return Pow; else return Mult; case '@': if (c->c_feature_version < 5) { ast_error(c, n, "The '@' operator is only supported in Python 3.5 and greater"); return (operator_ty)0; } return MatMult; default: PyErr_Format(PyExc_SystemError, "invalid augassign: %s", STR(n)); return (operator_ty)0; } } static cmpop_ty ast_for_comp_op(struct compiling *c, const node *n) { /* comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is' |'is' 'not' */ REQ(n, comp_op); if (NCH(n) == 1) { n = CHILD(n, 0); switch (TYPE(n)) { case LESS: return Lt; case GREATER: return Gt; case EQEQUAL: /* == */ return Eq; case LESSEQUAL: return LtE; case GREATEREQUAL: return GtE; case NOTEQUAL: return NotEq; case NAME: if (strcmp(STR(n), "in") == 0) return In; if (strcmp(STR(n), "is") == 0) return Is; /* fall through */ default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s", STR(n)); return (cmpop_ty)0; } } else if (NCH(n) == 2) { /* handle "not in" and "is not" */ switch (TYPE(CHILD(n, 0))) { case NAME: if (strcmp(STR(CHILD(n, 1)), "in") == 0) return NotIn; if (strcmp(STR(CHILD(n, 0)), "is") == 0) return IsNot; /* fall through */ default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s %s", STR(CHILD(n, 0)), STR(CHILD(n, 1))); return (cmpop_ty)0; } } 
PyErr_Format(PyExc_SystemError, "invalid comp_op: has %d children", NCH(n)); return (cmpop_ty)0; } static asdl_seq * seq_for_testlist(struct compiling *c, const node *n) { /* testlist: test (',' test)* [','] testlist_star_expr: test|star_expr (',' test|star_expr)* [','] */ asdl_seq *seq; expr_ty expression; int i; assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr || TYPE(n) == testlist_comp); seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { const node *ch = CHILD(n, i); assert(TYPE(ch) == test || TYPE(ch) == test_nocond || TYPE(ch) == star_expr); expression = ast_for_expr(c, ch); if (!expression) return NULL; assert(i / 2 < seq->size); asdl_seq_SET(seq, i / 2, expression); } return seq; } static arg_ty ast_for_arg(struct compiling *c, const node *n) { identifier name; expr_ty annotation = NULL; node *ch; arg_ty ret; assert(TYPE(n) == tfpdef || TYPE(n) == vfpdef); ch = CHILD(n, 0); name = NEW_IDENTIFIER(ch); if (!name) return NULL; if (forbidden_name(c, name, ch, 0)) return NULL; if (NCH(n) == 3 && TYPE(CHILD(n, 1)) == COLON) { annotation = ast_for_expr(c, CHILD(n, 2)); if (!annotation) return NULL; } ret = arg(name, annotation, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!ret) return NULL; return ret; } /* returns -1 if failed to handle keyword only arguments returns new position to keep processing if successful (',' tfpdef ['=' test])* ^^^ start pointing here */ static int handle_keywordonly_args(struct compiling *c, const node *n, int start, asdl_seq *kwonlyargs, asdl_seq *kwdefaults) { PyObject *argname; node *ch; expr_ty expression, annotation; arg_ty arg; int i = start; int j = 0; /* index for kwdefaults and kwonlyargs */ if (kwonlyargs == NULL) { ast_error(c, CHILD(n, start), "named arguments must follow bare *"); return -1; } assert(kwdefaults != NULL); while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case vfpdef: case tfpdef: if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) 
== EQUAL) { expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) goto error; asdl_seq_SET(kwdefaults, j, expression); i += 2; /* '=' and test */ } else { /* setting NULL if no default value exists */ asdl_seq_SET(kwdefaults, j, NULL); } if (NCH(ch) == 3) { /* ch is NAME ':' test */ annotation = ast_for_expr(c, CHILD(ch, 2)); if (!annotation) goto error; } else { annotation = NULL; } ch = CHILD(ch, 0); argname = NEW_IDENTIFIER(ch); if (!argname) goto error; if (forbidden_name(c, argname, ch, 0)) goto error; arg = arg(argname, annotation, NULL, LINENO(ch), ch->n_col_offset, c->c_arena); if (!arg) goto error; asdl_seq_SET(kwonlyargs, j++, arg); i += 1; /* the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; case DOUBLESTAR: return i; default: ast_error(c, ch, "unexpected node"); goto error; } } return i; error: return -1; } /* Create AST for argument list. */ static arguments_ty ast_for_arguments(struct compiling *c, const node *n) { /* This function handles both typedargslist (function definition) and varargslist (lambda definition). 
parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']) tfpdef: NAME [':' test] varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [',']]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [','] ) vfpdef: NAME */ int i, j, k, nposargs = 0, nkwonlyargs = 0; int nposdefaults = 0, found_default = 0; asdl_seq *posargs, *posdefaults, *kwonlyargs, *kwdefaults; arg_ty vararg = NULL, kwarg = NULL; arg_ty arg; node *ch; if (TYPE(n) == parameters) { if (NCH(n) == 2) /* () as argument list */ return arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); n = CHILD(n, 1); } assert(TYPE(n) == typedargslist || TYPE(n) == varargslist); /* First count the number of positional args & defaults. The variable i is the loop index for this for loop and the next. The next loop picks up where the first leaves off. */ for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == STAR) { /* skip star */ i++; if (i < NCH(n) && /* skip argument following star */ (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { i++; } break; } if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == vfpdef || TYPE(ch) == tfpdef) nposargs++; if (TYPE(ch) == EQUAL) nposdefaults++; } /* count the number of keyword only args & defaults for keyword only args */ for ( ; i < NCH(n); ++i) { ch = CHILD(n, i); if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == tfpdef || TYPE(ch) == vfpdef) nkwonlyargs++; } posargs = (nposargs ? _Ta3_asdl_seq_new(nposargs, c->c_arena) : NULL); if (!posargs && nposargs) return NULL; kwonlyargs = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwonlyargs && nkwonlyargs) return NULL; posdefaults = (nposdefaults ? 
_Ta3_asdl_seq_new(nposdefaults, c->c_arena) : NULL); if (!posdefaults && nposdefaults) return NULL; /* The length of kwonlyargs and kwdefaults are same since we set NULL as default for keyword only argument w/o default - we have sequence data structure, but no dictionary */ kwdefaults = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwdefaults && nkwonlyargs) return NULL; /* tfpdef: NAME [':' test] vfpdef: NAME */ i = 0; j = 0; /* index for defaults */ k = 0; /* index for args */ while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case tfpdef: case vfpdef: /* XXX Need to worry about checking if TYPE(CHILD(n, i+1)) is anything other than EQUAL or a comma? */ /* XXX Should NCH(n) check be made a separate check? */ if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expr_ty expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) return NULL; assert(posdefaults != NULL); asdl_seq_SET(posdefaults, j++, expression); i += 2; found_default = 1; } else if (found_default) { ast_error(c, n, "non-default argument follows default argument"); return NULL; } arg = ast_for_arg(c, ch); if (!arg) return NULL; asdl_seq_SET(posargs, k++, arg); i += 1; /* the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case STAR: if (i+1 >= NCH(n) || (i+2 == NCH(n) && (TYPE(CHILD(n, i+1)) == COMMA || TYPE(CHILD(n, i+1)) == TYPE_COMMENT))) { ast_error(c, CHILD(n, i), "named arguments must follow bare *"); return NULL; } ch = CHILD(n, i+1); /* tfpdef or COMMA */ if (TYPE(ch) == COMMA) { int res = 0; i += 2; /* now follows keyword only arguments */ if (TYPE(CHILD(n, i)) == TYPE_COMMENT) { ast_error(c, CHILD(n, i), "bare * has associated type comment"); return NULL; } res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } else { vararg = ast_for_arg(c, ch); if (!vararg) return NULL; i += 2; /* the star and the name */ if (TYPE(CHILD(n, i)) 
== COMMA) i += 1; /* the comma, if present */ if (TYPE(CHILD(n, i)) == TYPE_COMMENT) { vararg->type_comment = NEW_TYPE_COMMENT(CHILD(n, i)); i += 1; } if (i < NCH(n) && (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { int res = 0; res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } } break; case DOUBLESTAR: ch = CHILD(n, i+1); /* tfpdef */ assert(TYPE(ch) == tfpdef || TYPE(ch) == vfpdef); kwarg = ast_for_arg(c, ch); if (!kwarg) return NULL; i += 2; /* the double star and the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: assert(i); if (kwarg) arg = kwarg; /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; default: PyErr_Format(PyExc_SystemError, "unexpected node in varargslist: %d @ %d", TYPE(ch), i); return NULL; } } return arguments(posargs, vararg, kwonlyargs, kwdefaults, kwarg, posdefaults, c->c_arena); } static expr_ty ast_for_dotted_name(struct compiling *c, const node *n) { expr_ty e; identifier id; int lineno, col_offset; int i; REQ(n, dotted_name); lineno = LINENO(n); col_offset = n->n_col_offset; id = NEW_IDENTIFIER(CHILD(n, 0)); if (!id) return NULL; e = Name(id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; for (i = 2; i < NCH(n); i+=2) { id = NEW_IDENTIFIER(CHILD(n, i)); if (!id) return NULL; e = Attribute(e, id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; } return e; } static expr_ty ast_for_decorator(struct compiling *c, const node *n) { /* decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE */ expr_ty d = NULL; expr_ty name_expr; REQ(n, decorator); REQ(CHILD(n, 0), AT); REQ(RCHILD(n, -1), NEWLINE); name_expr = ast_for_dotted_name(c, CHILD(n, 1)); if (!name_expr) return NULL; if (NCH(n) == 3) { /* No arguments */ d = name_expr; name_expr = NULL; } else if (NCH(n) == 5) { /* Call with no arguments */ 
d = Call(name_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!d) return NULL; name_expr = NULL; } else { d = ast_for_call(c, CHILD(n, 3), name_expr, true); if (!d) return NULL; name_expr = NULL; } return d; } static asdl_seq* ast_for_decorators(struct compiling *c, const node *n) { asdl_seq* decorator_seq; expr_ty d; int i; REQ(n, decorators); decorator_seq = _Ta3_asdl_seq_new(NCH(n), c->c_arena); if (!decorator_seq) return NULL; for (i = 0; i < NCH(n); i++) { d = ast_for_decorator(c, CHILD(n, i)); if (!d) return NULL; asdl_seq_SET(decorator_seq, i, d); } return decorator_seq; } static stmt_ty ast_for_funcdef_impl(struct compiling *c, const node *n0, asdl_seq *decorator_seq, bool is_async) { /* funcdef: 'def' NAME parameters ['->' test] ':' [TYPE_COMMENT] suite */ const node * const n = is_async ? CHILD(n0, 1) : n0; identifier name; arguments_ty args; asdl_seq *body; expr_ty returns = NULL; int name_i = 1; node *tc; string type_comment = NULL; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async functions are only supported in Python 3.5 and greater"); return NULL; } REQ(n, funcdef); name = NEW_IDENTIFIER(CHILD(n, name_i)); if (!name) return NULL; if (forbidden_name(c, name, CHILD(n, name_i), 0)) return NULL; args = ast_for_arguments(c, CHILD(n, name_i + 1)); if (!args) return NULL; if (TYPE(CHILD(n, name_i+2)) == RARROW) { returns = ast_for_expr(c, CHILD(n, name_i + 3)); if (!returns) return NULL; name_i += 2; } if (TYPE(CHILD(n, name_i + 3)) == TYPE_COMMENT) { type_comment = NEW_TYPE_COMMENT(CHILD(n, name_i + 3)); name_i += 1; } body = ast_for_suite(c, CHILD(n, name_i + 3)); if (!body) return NULL; if (!type_comment && NCH(CHILD(n, name_i + 3)) > 1) { /* If the function doesn't have a type comment on the same line, check * if the suite has a type comment in it. 
*/ tc = CHILD(CHILD(n, name_i + 3), 1); if (TYPE(tc) == TYPE_COMMENT) type_comment = NEW_TYPE_COMMENT(tc); } if (is_async) return AsyncFunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena); else return FunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_async_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* async_funcdef: 'async' funcdef */ REQ(n, async_funcdef); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); REQ(CHILD(n, 1), funcdef); return ast_for_funcdef_impl(c, n, decorator_seq, true /* is_async */); } static stmt_ty ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ return ast_for_funcdef_impl(c, n, decorator_seq, false /* is_async */); } static stmt_ty ast_for_async_stmt(struct compiling *c, const node *n) { /* async_stmt: 'async' (funcdef | with_stmt | for_stmt) */ REQ(n, async_stmt); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); switch (TYPE(CHILD(n, 1))) { case funcdef: return ast_for_funcdef_impl(c, n, NULL, true /* is_async */); case with_stmt: return ast_for_with_stmt(c, n, true /* is_async */); case for_stmt: return ast_for_for_stmt(c, n, true /* is_async */); default: PyErr_Format(PyExc_SystemError, "invalid async stament: %s", STR(CHILD(n, 1))); return NULL; } } static stmt_ty ast_for_decorated(struct compiling *c, const node *n) { /* decorated: decorators (classdef | funcdef | async_funcdef) */ stmt_ty thing = NULL; asdl_seq *decorator_seq = NULL; REQ(n, decorated); decorator_seq = ast_for_decorators(c, CHILD(n, 0)); if (!decorator_seq) return NULL; assert(TYPE(CHILD(n, 1)) == funcdef || TYPE(CHILD(n, 1)) == async_funcdef || TYPE(CHILD(n, 1)) == classdef); if (TYPE(CHILD(n, 1)) == funcdef) { thing = ast_for_funcdef(c, CHILD(n, 1), decorator_seq); } else 
if (TYPE(CHILD(n, 1)) == classdef) { thing = ast_for_classdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == async_funcdef) { thing = ast_for_async_funcdef(c, CHILD(n, 1), decorator_seq); } /* we count the decorators in when talking about the class' or * function's line number */ if (thing) { thing->lineno = LINENO(n); thing->col_offset = n->n_col_offset; } return thing; } static expr_ty ast_for_lambdef(struct compiling *c, const node *n) { /* lambdef: 'lambda' [varargslist] ':' test lambdef_nocond: 'lambda' [varargslist] ':' test_nocond */ arguments_ty args; expr_ty expression; if (NCH(n) == 3) { args = arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; } else { args = ast_for_arguments(c, CHILD(n, 1)); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 3)); if (!expression) return NULL; } return Lambda(args, expression, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_ifexpr(struct compiling *c, const node *n) { /* test: or_test 'if' or_test 'else' test */ expr_ty expression, body, orelse; assert(NCH(n) == 5); body = ast_for_expr(c, CHILD(n, 0)); if (!body) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; orelse = ast_for_expr(c, CHILD(n, 4)); if (!orelse) return NULL; return IfExp(expression, body, orelse, LINENO(n), n->n_col_offset, c->c_arena); } /* Count the number of 'for' loops in a comprehension. Helper for ast_for_comprehension(). 
*/ static int count_comp_fors(struct compiling *c, const node *n) { int n_fors = 0; count_comp_for: n_fors++; REQ(n, comp_for); if (NCH(n) == 2) { REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); n = CHILD(n, 1); } else if (NCH(n) == 1) { n = CHILD(n, 0); } else { goto error; } if (NCH(n) == (5)) { n = CHILD(n, 4); } else { return n_fors; } count_comp_iter: REQ(n, comp_iter); n = CHILD(n, 0); if (TYPE(n) == comp_for) goto count_comp_for; else if (TYPE(n) == comp_if) { if (NCH(n) == 3) { n = CHILD(n, 2); goto count_comp_iter; } else return n_fors; } error: /* Should never be reached */ PyErr_SetString(PyExc_SystemError, "logic error in count_comp_fors"); return -1; } /* Count the number of 'if' statements in a comprehension. Helper for ast_for_comprehension(). */ static int count_comp_ifs(struct compiling *c, const node *n) { int n_ifs = 0; while (1) { REQ(n, comp_iter); if (TYPE(CHILD(n, 0)) == comp_for) return n_ifs; n = CHILD(n, 0); REQ(n, comp_if); n_ifs++; if (NCH(n) == 2) return n_ifs; n = CHILD(n, 2); } } static asdl_seq * ast_for_comprehension(struct compiling *c, const node *n) { int i, n_fors; asdl_seq *comps; n_fors = count_comp_fors(c, n); if (n_fors == -1) return NULL; comps = _Ta3_asdl_seq_new(n_fors, c->c_arena); if (!comps) return NULL; for (i = 0; i < n_fors; i++) { comprehension_ty comp; asdl_seq *t; expr_ty expression, first; node *for_ch; node *sync_n; int is_async = 0; REQ(n, comp_for); if (NCH(n) == 2) { is_async = 1; REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); sync_n = CHILD(n, 1); } else { sync_n = CHILD(n, 0); } REQ(sync_n, sync_comp_for); /* Async comprehensions only allowed in Python 3.6 and greater */ if (is_async && c->c_feature_version < 6) { ast_error(c, n, "Async comprehensions are only supported in Python 3.6 and greater"); return NULL; } for_ch = CHILD(sync_n, 1); t = ast_for_exprlist(c, for_ch, Store); if (!t) return NULL; expression = ast_for_expr(c, CHILD(sync_n, 3)); if 
(!expression) return NULL; /* Check the # of children rather than the length of t, since (x for x, in ...) has 1 element in t, but still requires a Tuple. */ first = (expr_ty)asdl_seq_GET(t, 0); if (NCH(for_ch) == 1) comp = comprehension(first, expression, NULL, is_async, c->c_arena); else comp = comprehension(Tuple(t, Store, first->lineno, first->col_offset, c->c_arena), expression, NULL, is_async, c->c_arena); if (!comp) return NULL; if (NCH(sync_n) == 5) { int j, n_ifs; asdl_seq *ifs; n = CHILD(sync_n, 4); n_ifs = count_comp_ifs(c, n); if (n_ifs == -1) return NULL; ifs = _Ta3_asdl_seq_new(n_ifs, c->c_arena); if (!ifs) return NULL; for (j = 0; j < n_ifs; j++) { REQ(n, comp_iter); n = CHILD(n, 0); REQ(n, comp_if); expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; asdl_seq_SET(ifs, j, expression); if (NCH(n) == 3) n = CHILD(n, 2); } /* on exit, must guarantee that n is a comp_for */ if (TYPE(n) == comp_iter) n = CHILD(n, 0); comp->ifs = ifs; } asdl_seq_SET(comps, i, comp); } return comps; } static expr_ty ast_for_itercomp(struct compiling *c, const node *n, int type) { /* testlist_comp: (test|star_expr) * ( comp_for | (',' (test|star_expr))* [','] ) */ expr_ty elt; asdl_seq *comps; node *ch; assert(NCH(n) > 1); ch = CHILD(n, 0); elt = ast_for_expr(c, ch); if (!elt) return NULL; if (elt->kind == Starred_kind) { ast_error(c, ch, "iterable unpacking cannot be used in comprehension"); return NULL; } comps = ast_for_comprehension(c, CHILD(n, 1)); if (!comps) return NULL; if (type == COMP_GENEXP) return GeneratorExp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else if (type == COMP_LISTCOMP) return ListComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else if (type == COMP_SETCOMP) return SetComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else /* Should never happen */ return NULL; } /* Fills in the key, value pair corresponding to the dict element. In case * of an unpacking, key is NULL. 
*i is advanced by the number of ast * elements. Iff successful, nonzero is returned. */ static int ast_for_dictelement(struct compiling *c, const node *n, int *i, expr_ty *key, expr_ty *value) { expr_ty expression; if (TYPE(CHILD(n, *i)) == DOUBLESTAR) { assert(NCH(n) - *i >= 2); expression = ast_for_expr(c, CHILD(n, *i + 1)); if (!expression) return 0; *key = NULL; *value = expression; *i += 2; } else { assert(NCH(n) - *i >= 3); expression = ast_for_expr(c, CHILD(n, *i)); if (!expression) return 0; *key = expression; REQ(CHILD(n, *i + 1), COLON); expression = ast_for_expr(c, CHILD(n, *i + 2)); if (!expression) return 0; *value = expression; *i += 3; } return 1; } static expr_ty ast_for_dictcomp(struct compiling *c, const node *n) { expr_ty key, value; asdl_seq *comps; int i = 0; if (!ast_for_dictelement(c, n, &i, &key, &value)) return NULL; assert(key); assert(NCH(n) - i >= 1); comps = ast_for_comprehension(c, CHILD(n, i)); if (!comps) return NULL; return DictComp(key, value, comps, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_dictdisplay(struct compiling *c, const node *n) { int i; int j; int size; asdl_seq *keys, *values; size = (NCH(n) + 1) / 3; /* +1 in case no trailing comma */ keys = _Ta3_asdl_seq_new(size, c->c_arena); if (!keys) return NULL; values = _Ta3_asdl_seq_new(size, c->c_arena); if (!values) return NULL; j = 0; for (i = 0; i < NCH(n); i++) { expr_ty key, value; if (!ast_for_dictelement(c, n, &i, &key, &value)) return NULL; asdl_seq_SET(keys, j, key); asdl_seq_SET(values, j, value); j++; } keys->size = j; values->size = j; return Dict(keys, values, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_genexp(struct compiling *c, const node *n) { assert(TYPE(n) == (testlist_comp) || TYPE(n) == (argument)); return ast_for_itercomp(c, n, COMP_GENEXP); } static expr_ty ast_for_listcomp(struct compiling *c, const node *n) { assert(TYPE(n) == (testlist_comp)); return ast_for_itercomp(c, n, COMP_LISTCOMP); } static expr_ty 
ast_for_setcomp(struct compiling *c, const node *n) { assert(TYPE(n) == (dictorsetmaker)); return ast_for_itercomp(c, n, COMP_SETCOMP); } static expr_ty ast_for_setdisplay(struct compiling *c, const node *n) { int i; int size; asdl_seq *elts; assert(TYPE(n) == (dictorsetmaker)); size = (NCH(n) + 1) / 2; /* +1 in case no trailing comma */ elts = _Ta3_asdl_seq_new(size, c->c_arena); if (!elts) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, i)); if (!expression) return NULL; asdl_seq_SET(elts, i / 2, expression); } return Set(elts, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_atom(struct compiling *c, const node *n) { /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False' */ node *ch = CHILD(n, 0); switch (TYPE(ch)) { case NAME: { PyObject *name; const char *s = STR(ch); size_t len = strlen(s); if (len >= 4 && len <= 5) { if (!strcmp(s, "None")) return NameConstant(Py_None, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "True")) return NameConstant(Py_True, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "False")) return NameConstant(Py_False, LINENO(n), n->n_col_offset, c->c_arena); } name = new_identifier(s, c); if (!name) return NULL; /* All names start in Load context, but may later be changed. 
*/ return Name(name, Load, LINENO(n), n->n_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { char buf[128]; const char *s = NULL; PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) s = PyUnicode_AsUTF8(errstr); if (s) { PyOS_snprintf(buf, sizeof(buf), "(%s) %s", errtype, s); } else { PyErr_Clear(); PyOS_snprintf(buf, sizeof(buf), "(%s) unknown error", errtype); } Py_XDECREF(errstr); ast_error(c, n, buf); Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum; const char *s = STR(ch); /* Underscores in numeric literals are only allowed in Python 3.6 or greater */ /* Check for underscores here rather than in parse_number so we can report a line number on error */ if (c->c_feature_version < 6 && strchr(s, '_') != NULL) { ast_error(c, ch, "Underscores in numeric literals are only supported in Python 3.6 and greater"); return NULL; } pynum = parsenumber(c, STR(ch)); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Num(pynum, LINENO(n), n->n_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Ellipsis(LINENO(n), n->n_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if ((NCH(ch) > 1) && (TYPE(CHILD(ch, 1)) == comp_for)) return ast_for_genexp(c, ch); return ast_for_testlist(c, ch); case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), 
n->n_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); } else return ast_for_listcomp(c, ch); case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. 
*/ res = ast_for_dictdisplay(c, ch); } if (res) { res->lineno = LINENO(n); res->col_offset = n->n_col_offset; } return res; } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } } static slice_ty ast_for_slice(struct compiling *c, const node *n) { node *ch; expr_ty lower = NULL, upper = NULL, step = NULL; REQ(n, subscript); /* subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] */ ch = CHILD(n, 0); if (NCH(n) == 1 && TYPE(ch) == test) { /* 'step' variable hold no significance in terms of being used over other vars */ step = ast_for_expr(c, ch); if (!step) return NULL; return Index(step, c->c_arena); } if (TYPE(ch) == test) { lower = ast_for_expr(c, ch); if (!lower) return NULL; } /* If there's an upper bound it's in the second or third position. */ if (TYPE(ch) == COLON) { if (NCH(n) > 1) { node *n2 = CHILD(n, 1); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } } else if (NCH(n) > 2) { node *n2 = CHILD(n, 2); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } ch = CHILD(n, NCH(n) - 1); if (TYPE(ch) == sliceop) { if (NCH(ch) != 1) { ch = CHILD(ch, 1); if (TYPE(ch) == test) { step = ast_for_expr(c, ch); if (!step) return NULL; } } } return Slice(lower, upper, step, c->c_arena); } static expr_ty ast_for_binop(struct compiling *c, const node *n) { /* Must account for a sequence of expressions. How should A op B op C by represented? BinOp(BinOp(A, op, B), op, C). 
*/ int i, nops; expr_ty expr1, expr2, result; operator_ty newoperator; expr1 = ast_for_expr(c, CHILD(n, 0)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 2)); if (!expr2) return NULL; newoperator = get_operator(c, CHILD(n, 1)); if (!newoperator) return NULL; result = BinOp(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); if (!result) return NULL; nops = (NCH(n) - 1) / 2; for (i = 1; i < nops; i++) { expr_ty tmp_result, tmp; const node* next_oper = CHILD(n, i * 2 + 1); newoperator = get_operator(c, next_oper); if (!newoperator) return NULL; tmp = ast_for_expr(c, CHILD(n, i * 2 + 2)); if (!tmp) return NULL; tmp_result = BinOp(result, newoperator, tmp, LINENO(next_oper), next_oper->n_col_offset, c->c_arena); if (!tmp_result) return NULL; result = tmp_result; } return result; } static expr_ty ast_for_trailer(struct compiling *c, const node *n, expr_ty left_expr) { /* trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop] */ REQ(n, trailer); if (TYPE(CHILD(n, 0)) == LPAR) { if (NCH(n) == 2) return Call(left_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); else return ast_for_call(c, CHILD(n, 1), left_expr, true); } else if (TYPE(CHILD(n, 0)) == DOT) { PyObject *attr_id = NEW_IDENTIFIER(CHILD(n, 1)); if (!attr_id) return NULL; return Attribute(left_expr, attr_id, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { REQ(CHILD(n, 0), LSQB); REQ(CHILD(n, 2), RSQB); n = CHILD(n, 1); if (NCH(n) == 1) { slice_ty slc = ast_for_slice(c, CHILD(n, 0)); if (!slc) return NULL; return Subscript(left_expr, slc, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { /* The grammar is ambiguous here. The ambiguity is resolved by treating the sequence as a tuple literal if there are no slice features. 
 */
            int j;
            slice_ty slc;
            expr_ty e;
            int simple = 1;   /* stays 1 while every subscript is a plain Index */
            asdl_seq *slices, *elts;
            slices = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena);
            if (!slices)
                return NULL;
            /* Convert each comma-separated subscript; any non-Index slice
               forces an ExtSlice rather than a Tuple-of-Index. */
            for (j = 0; j < NCH(n); j += 2) {
                slc = ast_for_slice(c, CHILD(n, j));
                if (!slc)
                    return NULL;
                if (slc->kind != Index_kind)
                    simple = 0;
                asdl_seq_SET(slices, j / 2, slc);
            }
            if (!simple) {
                return Subscript(left_expr, ExtSlice(slices, c->c_arena),
                                 Load, LINENO(n), n->n_col_offset, c->c_arena);
            }
            /* extract Index values and put them in a Tuple */
            elts = _Ta3_asdl_seq_new(asdl_seq_LEN(slices), c->c_arena);
            if (!elts)
                return NULL;
            for (j = 0; j < asdl_seq_LEN(slices); ++j) {
                slc = (slice_ty)asdl_seq_GET(slices, j);
                assert(slc->kind == Index_kind && slc->v.Index.value);
                asdl_seq_SET(elts, j, slc->v.Index.value);
            }
            e = Tuple(elts, Load, LINENO(n), n->n_col_offset, c->c_arena);
            if (!e)
                return NULL;
            return Subscript(left_expr, Index(e, c->c_arena), Load,
                             LINENO(n), n->n_col_offset, c->c_arena);
        }
    }
}

/* Build a UnaryOp node for factor: ('+'|'-'|'~') factor. */
static expr_ty
ast_for_factor(struct compiling *c, const node *n)
{
    expr_ty expression;

    expression = ast_for_expr(c, CHILD(n, 1));
    if (!expression)
        return NULL;

    switch (TYPE(CHILD(n, 0))) {
        case PLUS:
            return UnaryOp(UAdd, expression, LINENO(n), n->n_col_offset,
                           c->c_arena);
        case MINUS:
            return UnaryOp(USub, expression, LINENO(n), n->n_col_offset,
                           c->c_arena);
        case TILDE:
            return UnaryOp(Invert, expression, LINENO(n), n->n_col_offset,
                           c->c_arena);
    }
    PyErr_Format(PyExc_SystemError, "unhandled factor: %d",
                 TYPE(CHILD(n, 0)));
    return NULL;
}

/* Build the AST for atom_expr: ['await'] atom trailer*.
   Trailers (calls, subscripts, attribute accesses) are folded onto the
   atom left-to-right; the result is wrapped in Await when the 'await'
   keyword is present. */
static expr_ty
ast_for_atom_expr(struct compiling *c, const node *n)
{
    int i, nch, start = 0;
    expr_ty e, tmp;

    REQ(n, atom_expr);
    nch = NCH(n);

    if (TYPE(CHILD(n, 0)) == AWAIT) {
        /* 'await' is only valid syntax from Python 3.5 on. */
        if (c->c_feature_version < 5) {
            ast_error(c, n,
                      "Await expressions are only supported in Python 3.5 and greater");
            return NULL;
        }
        start = 1;
        assert(nch > 1);
    }

    e = ast_for_atom(c, CHILD(n, start));
    if (!e)
        return NULL;
    if (nch == 1)
        return e;
    if (start && nch == 2) {
        /* 'await atom' with no trailers */
        return Await(e, LINENO(n), n->n_col_offset,
c->c_arena);
    }

    /* Apply each trailer to the expression built so far, keeping the
       source position of the original atom. */
    for (i = start + 1; i < nch; i++) {
        node *ch = CHILD(n, i);
        if (TYPE(ch) != trailer)
            break;
        tmp = ast_for_trailer(c, ch, e);
        if (!tmp)
            return NULL;
        tmp->lineno = e->lineno;
        tmp->col_offset = e->col_offset;
        e = tmp;
    }

    if (start) {
        /* there was an 'await' */
        return Await(e, LINENO(n), n->n_col_offset, c->c_arena);
    }
    else {
        return e;
    }
}

/* Build the AST for power: atom_expr ['**' factor], producing a Pow
   BinOp when the exponent is present. */
static expr_ty
ast_for_power(struct compiling *c, const node *n)
{
    /* power: atom trailer* ('**' factor)*
     */
    expr_ty e;

    REQ(n, power);
    e = ast_for_atom_expr(c, CHILD(n, 0));
    if (!e)
        return NULL;
    if (NCH(n) == 1)
        return e;
    if (TYPE(CHILD(n, NCH(n) - 1)) == factor) {
        expr_ty f = ast_for_expr(c, CHILD(n, NCH(n) - 1));
        if (!f)
            return NULL;
        e = BinOp(e, Pow, f, LINENO(n), n->n_col_offset, c->c_arena);
    }
    return e;
}

/* Build a Starred node for star_expr: '*' expr. */
static expr_ty
ast_for_starred(struct compiling *c, const node *n)
{
    expr_ty tmp;
    REQ(n, star_expr);

    tmp = ast_for_expr(c, CHILD(n, 1));
    if (!tmp)
        return NULL;

    /* The Load context is changed later. */
    return Starred(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Do not name a variable 'expr'!  Will cause a compile error.
*/ static expr_ty ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: ['await'] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, seq, LINENO(n), n->n_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Ta3_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < 
NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. */ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; } static expr_ty ast_for_call(struct compiling *c, const node *n, expr_ty func, bool allowgen) { /* arglist: argument (',' argument)* [','] argument: ( test [comp_for] | '*' test | test '=' test | '**' test ) */ int i, nargs, nkeywords; int ndoublestars; asdl_seq *args; asdl_seq *keywords; REQ(n, arglist); nargs = 0; nkeywords = 0; for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { if (NCH(ch) == 1) nargs++; else 
if (TYPE(CHILD(ch, 1)) == comp_for) { nargs++; if (!allowgen) { ast_error(c, ch, "invalid syntax"); return NULL; } if (NCH(n) > 1) { ast_error(c, ch, "Generator expression must be parenthesized"); return NULL; } } else if (TYPE(CHILD(ch, 0)) == STAR) nargs++; else /* TYPE(CHILD(ch, 0)) == DOUBLESTAR or keyword argument */ nkeywords++; } } args = _Ta3_asdl_seq_new(nargs, c->c_arena); if (!args) return NULL; keywords = _Ta3_asdl_seq_new(nkeywords, c->c_arena); if (!keywords) return NULL; nargs = 0; /* positional arguments + iterable argument unpackings */ nkeywords = 0; /* keyword arguments + keyword argument unpackings */ ndoublestars = 0; /* just keyword argument unpackings */ for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { expr_ty e; node *chch = CHILD(ch, 0); if (NCH(ch) == 1) { /* a positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_expr(c, chch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(chch) == STAR) { /* an iterable argument unpacking */ expr_ty starred; if (ndoublestars) { ast_error(c, chch, "iterable argument unpacking follows " "keyword argument unpacking"); return NULL; } e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; starred = Starred(e, Load, LINENO(chch), chch->n_col_offset, c->c_arena); if (!starred) return NULL; asdl_seq_SET(args, nargs++, starred); } else if (TYPE(chch) == DOUBLESTAR) { /* a keyword argument unpacking */ keyword_ty kw; i++; e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; kw = keyword(NULL, e, c->c_arena); asdl_seq_SET(keywords, nkeywords++, kw); ndoublestars++; } else if (TYPE(CHILD(ch, 1)) == comp_for) { /* the lone generator expression */ e = ast_for_genexp(c, ch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else { /* a keyword argument */ 
keyword_ty kw; identifier key, tmp; int k; /* chch is test, but must be an identifier? */ e = ast_for_expr(c, chch); if (!e) return NULL; /* f(lambda x: x[0] = 3) ends up getting parsed with * LHS test = lambda x: x[0], and RHS test = 3. * SF bug 132313 points out that complaining about a keyword * then is very confusing. */ if (e->kind == Lambda_kind) { ast_error(c, chch, "lambda cannot contain assignment"); return NULL; } else if (e->kind != Name_kind) { ast_error(c, chch, "keyword can't be an expression"); return NULL; } else if (forbidden_name(c, e->v.Name.id, ch, 1)) { return NULL; } key = e->v.Name.id; for (k = 0; k < nkeywords; k++) { tmp = ((keyword_ty)asdl_seq_GET(keywords, k))->arg; if (tmp && !PyUnicode_Compare(tmp, key)) { ast_error(c, chch, "keyword argument repeated"); return NULL; } } e = ast_for_expr(c, CHILD(ch, 2)); if (!e) return NULL; kw = keyword(key, e, c->c_arena); if (!kw) return NULL; asdl_seq_SET(keywords, nkeywords++, kw); } } } return Call(func, args, keywords, func->lineno, func->col_offset, c->c_arena); } static expr_ty ast_for_testlist(struct compiling *c, const node* n) { /* testlist_comp: test (comp_for | (',' test)* [',']) */ /* testlist: test (',' test)* [','] */ assert(NCH(n) > 0); if (TYPE(n) == testlist_comp) { if (NCH(n) > 1) assert(TYPE(CHILD(n, 1)) != comp_for); } else { assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr); } if (NCH(n) == 1) return ast_for_expr(c, CHILD(n, 0)); else { asdl_seq *tmp = seq_for_testlist(c, n); if (!tmp) return NULL; return Tuple(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena); } } static stmt_ty ast_for_expr_stmt(struct compiling *c, const node *n) { int num; REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))* [TYPE_COMMENT]) annassign: ':' test ['=' test] testlist_star_expr: (test|star_expr) (',' test|star_expr)* [','] augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' 
| '>>=' | '**=' | '//=' test: ... here starts the operator precedence dance */ num = NCH(n); if (num == 1 || (num == 2 && TYPE(CHILD(n, 1)) == TYPE_COMMENT)) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* AnnAssigns are only allowed in Python 3.6 or greater */ if (c->c_feature_version < 6) { ast_error(c, ch, "Variable annotation syntax is only supported in Python 3.6 and greater"); return NULL; } /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) { return NULL; } expr1->v.Name.ctx = Store; break; case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, 
n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); expr3 = ast_for_expr(c, ch); if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, c->c_arena); } } else { int i, nch_minus_type, has_type_comment; asdl_seq *targets; node *value; expr_ty expression; string type_comment; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); has_type_comment = TYPE(CHILD(n, num - 1)) == TYPE_COMMENT; nch_minus_type = num - has_type_comment; targets = _Ta3_asdl_seq_new(nch_minus_type / 2, c->c_arena); if (!targets) return NULL; for (i = 0; i < nch_minus_type - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, nch_minus_type - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, nch_minus_type)); else type_comment = NULL; return Assign(targets, expression, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } } static asdl_seq * ast_for_exprlist(struct compiling *c, const node *n, 
                 expr_context_ty context)
{
    asdl_seq *seq;
    int i;
    expr_ty e;

    REQ(n, exprlist);

    seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena);
    if (!seq)
        return NULL;
    /* Children alternate expression / COMMA, so step by 2. */
    for (i = 0; i < NCH(n); i += 2) {
        e = ast_for_expr(c, CHILD(n, i));
        if (!e)
            return NULL;
        asdl_seq_SET(seq, i / 2, e);
        /* A zero context means "leave as-is"; otherwise stamp each
           expression with the requested Store/Del/... context. */
        if (context && !set_context(c, e, context, CHILD(n, i)))
            return NULL;
    }
    return seq;
}

/* Build a Delete statement from del_stmt: 'del' exprlist. */
static stmt_ty
ast_for_del_stmt(struct compiling *c, const node *n)
{
    asdl_seq *expr_list;

    /* del_stmt: 'del' exprlist */
    REQ(n, del_stmt);

    expr_list = ast_for_exprlist(c, CHILD(n, 1), Del);
    if (!expr_list)
        return NULL;
    return Delete(expr_list, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Build the AST node for a flow-control statement (break, continue,
   return, raise or yield). */
static stmt_ty
ast_for_flow_stmt(struct compiling *c, const node *n)
{
    /*
      flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
                 | yield_stmt
      break_stmt: 'break'
      continue_stmt: 'continue'
      return_stmt: 'return' [testlist]
      yield_stmt: yield_expr
      yield_expr: 'yield' testlist | 'yield' 'from' test
      raise_stmt: 'raise' [test [',' test [',' test]]]
    */
    node *ch;

    REQ(n, flow_stmt);
    ch = CHILD(n, 0);
    switch (TYPE(ch)) {
        case break_stmt:
            return Break(LINENO(n), n->n_col_offset, c->c_arena);
        case continue_stmt:
            return Continue(LINENO(n), n->n_col_offset, c->c_arena);
        case yield_stmt: { /* will reduce to yield_expr */
            expr_ty exp = ast_for_expr(c, CHILD(ch, 0));
            if (!exp)
                return NULL;
            return Expr(exp, LINENO(n), n->n_col_offset, c->c_arena);
        }
        case return_stmt:
            if (NCH(ch) == 1)
                return Return(NULL, LINENO(n), n->n_col_offset,
                              c->c_arena);
            else {
                expr_ty expression = ast_for_testlist(c, CHILD(ch, 1));
                if (!expression)
                    return NULL;
                return Return(expression, LINENO(n), n->n_col_offset,
                              c->c_arena);
            }
        case raise_stmt:
            if (NCH(ch) == 1)
                return Raise(NULL, NULL, LINENO(n), n->n_col_offset,
                             c->c_arena);
            else if (NCH(ch) >= 2) {
                expr_ty cause = NULL;
                expr_ty expression = ast_for_expr(c, CHILD(ch, 1));
                if (!expression)
                    return NULL;
                if (NCH(ch) == 4) {
                    /* 'raise X from Y' — four children means the
                       optional cause clause is present. */
                    cause = ast_for_expr(c, CHILD(ch, 3));
                    if (!cause)
                        return NULL;
                }
                return Raise(expression,
cause, LINENO(n), n->n_col_offset, c->c_arena); } /* fall through */ default: PyErr_Format(PyExc_SystemError, "unexpected flow_stmt: %d", TYPE(ch)); return NULL; } } static alias_ty alias_for_import_name(struct compiling *c, const node *n, int store) { /* import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] dotted_name: NAME ('.' NAME)* */ identifier str, name; loop: switch (TYPE(n)) { case import_as_name: { node *name_node = CHILD(n, 0); str = NULL; name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (NCH(n) == 3) { node *str_node = CHILD(n, 2); str = NEW_IDENTIFIER(str_node); if (!str) return NULL; if (store && forbidden_name(c, str, str_node, 0)) return NULL; } else { if (forbidden_name(c, name, name_node, 0)) return NULL; } return alias(name, str, c->c_arena); } case dotted_as_name: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { node *asname_node = CHILD(n, 2); alias_ty a = alias_for_import_name(c, CHILD(n, 0), 0); if (!a) return NULL; assert(!a->asname); a->asname = NEW_IDENTIFIER(asname_node); if (!a->asname) return NULL; if (forbidden_name(c, a->asname, asname_node, 0)) return NULL; return a; } break; case dotted_name: if (NCH(n) == 1) { node *name_node = CHILD(n, 0); name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (store && forbidden_name(c, name, name_node, 0)) return NULL; return alias(name, NULL, c->c_arena); } else { /* Create a string of the form "a.b.c" */ int i; size_t len; char *s; PyObject *uni; len = 0; for (i = 0; i < NCH(n); i += 2) /* length of string plus one for the dot */ len += strlen(STR(CHILD(n, i))) + 1; len--; /* the last name doesn't have a dot */ str = PyBytes_FromStringAndSize(NULL, len); if (!str) return NULL; s = PyBytes_AS_STRING(str); if (!s) return NULL; for (i = 0; i < NCH(n); i += 2) { char *sch = STR(CHILD(n, i)); strcpy(s, STR(CHILD(n, i))); s += strlen(sch); *s++ = '.'; } --s; *s = '\0'; uni = PyUnicode_DecodeUTF8(PyBytes_AS_STRING(str), PyBytes_GET_SIZE(str), NULL); 
Py_DECREF(str); if (!uni) return NULL; str = uni; PyUnicode_InternInPlace(&str); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); } break; case STAR: str = PyUnicode_InternFromString("*"); if (!str) return NULL; if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); default: PyErr_Format(PyExc_SystemError, "unexpected import name: %d", TYPE(n)); return NULL; } PyErr_SetString(PyExc_SystemError, "unhandled import name condition"); return NULL; } static stmt_ty ast_for_import_stmt(struct compiling *c, const node *n) { /* import_stmt: import_name | import_from import_name: 'import' dotted_as_names import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names) */ int lineno; int col_offset; int i; asdl_seq *aliases; REQ(n, import_stmt); lineno = LINENO(n); col_offset = n->n_col_offset; n = CHILD(n, 0); if (TYPE(n) == import_name) { n = CHILD(n, 1); REQ(n, dotted_as_names); aliases = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!aliases) return NULL; for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } return Import(aliases, lineno, col_offset, c->c_arena); } else if (TYPE(n) == import_from) { int n_children; int idx, ndots = 0; alias_ty mod = NULL; identifier modname = NULL; /* Count the number of dots (for relative imports) and check for the optional module name */ for (idx = 1; idx < NCH(n); idx++) { if (TYPE(CHILD(n, idx)) == dotted_name) { mod = alias_for_import_name(c, CHILD(n, idx), 0); if (!mod) return NULL; idx++; break; } else if (TYPE(CHILD(n, idx)) == ELLIPSIS) { /* three consecutive dots are tokenized as one ELLIPSIS */ ndots += 3; continue; } else if (TYPE(CHILD(n, idx)) != DOT) { break; } ndots++; } idx++; /* skip over the 'import' 
                   keyword */
        switch (TYPE(CHILD(n, idx))) {
            case STAR:
                /* from ... import * */
                n = CHILD(n, idx);
                n_children = 1;
                break;
            case LPAR:
                /* from ... import (x, y, z) */
                n = CHILD(n, idx + 1);
                n_children = NCH(n);
                break;
            case import_as_names:
                /* from ... import x, y, z */
                n = CHILD(n, idx);
                n_children = NCH(n);
                /* An even child count means a trailing comma with no
                   closing parenthesis, which is a syntax error here. */
                if (n_children % 2 == 0) {
                    ast_error(c, n,
                              "trailing comma not allowed without"
                              " surrounding parentheses");
                    return NULL;
                }
                break;
            default:
                ast_error(c, n, "Unexpected node-type in from-import");
                return NULL;
        }

        aliases = _Ta3_asdl_seq_new((n_children + 1) / 2, c->c_arena);
        if (!aliases)
            return NULL;

        /* handle "from ... import *" special b/c there's no children */
        if (TYPE(n) == STAR) {
            alias_ty import_alias = alias_for_import_name(c, n, 1);
            if (!import_alias)
                return NULL;
            asdl_seq_SET(aliases, 0, import_alias);
        }
        else {
            for (i = 0; i < NCH(n); i += 2) {
                alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1);
                if (!import_alias)
                    return NULL;
                asdl_seq_SET(aliases, i / 2, import_alias);
            }
        }
        if (mod != NULL)
            modname = mod->name;
        return ImportFrom(modname, aliases, ndots, lineno, col_offset,
                          c->c_arena);
    }
    PyErr_Format(PyExc_SystemError,
                 "unknown import statement: starts with command '%s'",
                 STR(CHILD(n, 0)));
    return NULL;
}

/* Build a Global statement from global_stmt: 'global' NAME (',' NAME)*. */
static stmt_ty
ast_for_global_stmt(struct compiling *c, const node *n)
{
    /* global_stmt: 'global' NAME (',' NAME)* */
    identifier name;
    asdl_seq *s;
    int i;

    REQ(n, global_stmt);

    s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena);
    if (!s)
        return NULL;
    /* Names sit at the odd child positions (keyword, NAME, COMMA, NAME, ...). */
    for (i = 1; i < NCH(n); i += 2) {
        name = NEW_IDENTIFIER(CHILD(n, i));
        if (!name)
            return NULL;
        asdl_seq_SET(s, i / 2, name);
    }
    return Global(s, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Build a Nonlocal statement from nonlocal_stmt: 'nonlocal' NAME (',' NAME)*. */
static stmt_ty
ast_for_nonlocal_stmt(struct compiling *c, const node *n)
{
    /* nonlocal_stmt: 'nonlocal' NAME (',' NAME)* */
    identifier name;
    asdl_seq *s;
    int i;

    REQ(n, nonlocal_stmt);

    s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena);
    if (!s)
        return NULL;
    for (i = 1; i < NCH(n); i += 2) {
        name = NEW_IDENTIFIER(CHILD(n, i));
if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Nonlocal(s, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_assert_stmt(struct compiling *c, const node *n) { /* assert_stmt: 'assert' test [',' test] */ REQ(n, assert_stmt); if (NCH(n) == 2) { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return Assert(expression, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else if (NCH(n) == 4) { expr_ty expr1, expr2; expr1 = ast_for_expr(c, CHILD(n, 1)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 3)); if (!expr2) return NULL; return Assert(expr1, expr2, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "improper number of parts to 'assert' statement: %d", NCH(n)); return NULL; } static asdl_seq * ast_for_suite(struct compiling *c, const node *n) { /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ asdl_seq *seq; stmt_ty s; int i, total, num, end, pos = 0; node *ch; REQ(n, suite); total = num_stmts(n); seq = _Ta3_asdl_seq_new(total, c->c_arena); if (!seq) return NULL; if (TYPE(CHILD(n, 0)) == simple_stmt) { n = CHILD(n, 0); /* simple_stmt always ends with a NEWLINE, and may have a trailing SEMI */ end = NCH(n) - 1; if (TYPE(CHILD(n, end - 1)) == SEMI) end--; /* loop by 2 to skip semi-colons */ for (i = 0; i < end; i += 2) { ch = CHILD(n, i); s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } } else { i = 2; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) { ch = CHILD(n, i); REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { /* small_stmt or compound_stmt with only one child */ s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } else { int j; ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < NCH(ch); j += 2) { /* statement terminates with a semi-colon ';' */ if (NCH(CHILD(ch, j)) == 0) { assert((j + 1) == NCH(ch)); break; } s = ast_for_stmt(c, CHILD(ch, j)); 
if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } } } } assert(pos == seq->size); return seq; } static stmt_ty ast_for_if_stmt(struct compiling *c, const node *n) { /* if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] */ char *s; REQ(n, if_stmt); if (NCH(n) == 4) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return If(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, c->c_arena); } s = STR(CHILD(n, 4)); /* s[2], the third character in the string, will be 's' for el_s_e, or 'i' for el_i_f */ if (s[2] == 's') { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; return If(expression, seq1, seq2, LINENO(n), n->n_col_offset, c->c_arena); } else if (s[2] == 'i') { int i, n_elif, has_else = 0; expr_ty expression; asdl_seq *suite_seq; asdl_seq *orelse = NULL; n_elif = NCH(n) - 4; /* must reference the child n_elif+1 since 'else' token is third, not fourth, child from the end. 
*/ if (TYPE(CHILD(n, (n_elif + 1))) == NAME && STR(CHILD(n, (n_elif + 1)))[2] == 's') { has_else = 1; n_elif -= 3; } n_elif /= 4; if (has_else) { asdl_seq *suite_seq2; orelse = _Ta3_asdl_seq_new(1, c->c_arena); if (!orelse) return NULL; expression = ast_for_expr(c, CHILD(n, NCH(n) - 6)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, NCH(n) - 4)); if (!suite_seq) return NULL; suite_seq2 = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!suite_seq2) return NULL; asdl_seq_SET(orelse, 0, If(expression, suite_seq, suite_seq2, LINENO(CHILD(n, NCH(n) - 6)), CHILD(n, NCH(n) - 6)->n_col_offset, c->c_arena)); /* the just-created orelse handled the last elif */ n_elif--; } for (i = 0; i < n_elif; i++) { int off = 5 + (n_elif - i - 1) * 4; asdl_seq *newobj = _Ta3_asdl_seq_new(1, c->c_arena); if (!newobj) return NULL; expression = ast_for_expr(c, CHILD(n, off)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, off + 2)); if (!suite_seq) return NULL; asdl_seq_SET(newobj, 0, If(expression, suite_seq, orelse, LINENO(CHILD(n, off)), CHILD(n, off)->n_col_offset, c->c_arena)); orelse = newobj; } expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return If(expression, suite_seq, orelse, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unexpected token in 'if' statement: %s", s); return NULL; } static stmt_ty ast_for_while_stmt(struct compiling *c, const node *n) { /* while_stmt: 'while' test ':' suite ['else' ':' suite] */ REQ(n, while_stmt); if (NCH(n) == 4) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return While(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else if (NCH(n) == 7) { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 
1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; return While(expression, seq1, seq2, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of tokens for 'while' statement: %d", NCH(n)); return NULL; } static stmt_ty ast_for_for_stmt(struct compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; asdl_seq *_target, *seq = NULL, *suite_seq; expr_ty expression; expr_ty target, first; const node *node_target; int has_type_comment; string type_comment; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async for loops are only supported in Python 3.5 and greater"); return NULL; } /* for_stmt: 'for' exprlist 'in' testlist ':' [TYPE_COMMENT] suite ['else' ':' suite] */ REQ(n, for_stmt); has_type_comment = TYPE(CHILD(n, 5)) == TYPE_COMMENT; if (NCH(n) == 9 + has_type_comment) { seq = ast_for_suite(c, CHILD(n, 8 + has_type_comment)); if (!seq) return NULL; } node_target = CHILD(n, 1); _target = ast_for_exprlist(c, node_target, Store); if (!_target) return NULL; /* Check the # of children rather than the length of _target, since for x, in ... has 1 element in _target, but still requires a Tuple. 
*/ first = (expr_ty)asdl_seq_GET(_target, 0); if (NCH(node_target) == 1) target = first; else target = Tuple(_target, Store, first->lineno, first->col_offset, c->c_arena); expression = ast_for_testlist(c, CHILD(n, 3)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 5 + has_type_comment)); if (!suite_seq) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, 5)); else type_comment = NULL; if (is_async) return AsyncFor(target, expression, suite_seq, seq, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena); else return For(target, expression, suite_seq, seq, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static excepthandler_ty ast_for_except_clause(struct compiling *c, const node *exc, node *body) { /* except_clause: 'except' [test ['as' test]] */ REQ(exc, except_clause); REQ(body, suite); if (NCH(exc) == 1) { asdl_seq *suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(NULL, NULL, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } else if (NCH(exc) == 2) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(expression, NULL, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } else if (NCH(exc) == 4) { asdl_seq *suite_seq; expr_ty expression; identifier e = NEW_IDENTIFIER(CHILD(exc, 3)); if (!e) return NULL; if (forbidden_name(c, e, CHILD(exc, 3), 0)) return NULL; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(expression, e, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of children for 'except' clause: %d", NCH(exc)); return NULL; } static stmt_ty ast_for_try_stmt(struct compiling *c, const node *n) { const int nch = NCH(n); int n_except = (nch - 
3)/3; asdl_seq *body, *handlers = NULL, *orelse = NULL, *finally = NULL; REQ(n, try_stmt); body = ast_for_suite(c, CHILD(n, 2)); if (body == NULL) return NULL; if (TYPE(CHILD(n, nch - 3)) == NAME) { if (strcmp(STR(CHILD(n, nch - 3)), "finally") == 0) { if (nch >= 9 && TYPE(CHILD(n, nch - 6)) == NAME) { /* we can assume it's an "else", because nch >= 9 for try-else-finally and it would otherwise have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 4)); if (orelse == NULL) return NULL; n_except--; } finally = ast_for_suite(c, CHILD(n, nch - 1)); if (finally == NULL) return NULL; n_except--; } else { /* we can assume it's an "else", otherwise it would have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 1)); if (orelse == NULL) return NULL; n_except--; } } else if (TYPE(CHILD(n, nch - 3)) != except_clause) { ast_error(c, n, "malformed 'try' statement"); return NULL; } if (n_except > 0) { int i; /* process except statements to create a try ... except */ handlers = _Ta3_asdl_seq_new(n_except, c->c_arena); if (handlers == NULL) return NULL; for (i = 0; i < n_except; i++) { excepthandler_ty e = ast_for_except_clause(c, CHILD(n, 3 + i * 3), CHILD(n, 5 + i * 3)); if (!e) return NULL; asdl_seq_SET(handlers, i, e); } } assert(finally != NULL || asdl_seq_LEN(handlers)); return Try(body, handlers, orelse, finally, LINENO(n), n->n_col_offset, c->c_arena); } /* with_item: test ['as' expr] */ static withitem_ty ast_for_with_item(struct compiling *c, const node *n) { expr_ty context_expr, optional_vars = NULL; REQ(n, with_item); context_expr = ast_for_expr(c, CHILD(n, 0)); if (!context_expr) return NULL; if (NCH(n) == 3) { optional_vars = ast_for_expr(c, CHILD(n, 2)); if (!optional_vars) { return NULL; } if (!set_context(c, optional_vars, Store, n)) { return NULL; } } return withitem(context_expr, optional_vars, c->c_arena); } /* with_stmt: 'with' with_item (',' with_item)* ':' [TYPE_COMMENT] suite */ static stmt_ty ast_for_with_stmt(struct 
compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; int i, n_items, nch_minus_type, has_type_comment; asdl_seq *items, *body; string type_comment; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async with statements are only supported in Python 3.5 and greater"); return NULL; } REQ(n, with_stmt); has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT; nch_minus_type = NCH(n) - has_type_comment; n_items = (nch_minus_type - 2) / 2; items = _Ta3_asdl_seq_new(n_items, c->c_arena); if (!items) return NULL; for (i = 1; i < nch_minus_type - 2; i += 2) { withitem_ty item = ast_for_with_item(c, CHILD(n, i)); if (!item) return NULL; asdl_seq_SET(items, (i - 1) / 2, item); } body = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!body) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2)); else type_comment = NULL; if (is_async) return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena); else return With(items, body, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_classdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* classdef: 'class' NAME ['(' arglist ')'] ':' suite */ PyObject *classname; asdl_seq *s; expr_ty call; REQ(n, classdef); if (NCH(n) == 4) { /* class NAME ':' suite */ s = ast_for_suite(c, CHILD(n, 3)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, c->c_arena); } if (TYPE(CHILD(n, 3)) == RPAR) { /* class NAME '(' ')' ':' suite */ s = ast_for_suite(c, CHILD(n, 5)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, 
c->c_arena); } /* class NAME '(' arglist ')' ':' suite */ /* build up a fake Call node so we can extract its pieces */ { PyObject *dummy_name; expr_ty dummy; dummy_name = NEW_IDENTIFIER(CHILD(n, 1)); if (!dummy_name) return NULL; dummy = Name(dummy_name, Load, LINENO(n), n->n_col_offset, c->c_arena); call = ast_for_call(c, CHILD(n, 3), dummy, false); if (!call) return NULL; } s = ast_for_suite(c, CHILD(n, 6)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 1), 0)) return NULL; return ClassDef(classname, call->v.Call.args, call->v.Call.keywords, s, decorator_seq, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_stmt(struct compiling *c, const node *n) { if (TYPE(n) == stmt) { assert(NCH(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == simple_stmt) { assert(num_stmts(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == small_stmt) { n = CHILD(n, 0); /* small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt */ switch (TYPE(n)) { case expr_stmt: return ast_for_expr_stmt(c, n); case del_stmt: return ast_for_del_stmt(c, n); case pass_stmt: return Pass(LINENO(n), n->n_col_offset, c->c_arena); case flow_stmt: return ast_for_flow_stmt(c, n); case import_stmt: return ast_for_import_stmt(c, n); case global_stmt: return ast_for_global_stmt(c, n); case nonlocal_stmt: return ast_for_nonlocal_stmt(c, n); case assert_stmt: return ast_for_assert_stmt(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled small_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } else { /* compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef | decorated | async_stmt */ node *ch = CHILD(n, 0); REQ(n, compound_stmt); switch (TYPE(ch)) { case if_stmt: return ast_for_if_stmt(c, ch); case while_stmt: return ast_for_while_stmt(c, ch); case for_stmt: return ast_for_for_stmt(c, ch, 0); case try_stmt: return 
ast_for_try_stmt(c, ch);
            case with_stmt:
                return ast_for_with_stmt(c, ch, 0);
            case funcdef:
                return ast_for_funcdef(c, ch, NULL);
            case classdef:
                return ast_for_classdef(c, ch, NULL);
            case decorated:
                return ast_for_decorated(c, ch);
            case async_stmt:
                return ast_for_async_stmt(c, ch);
            default:
                PyErr_Format(PyExc_SystemError,
                             "unhandled small_stmt: TYPE=%d NCH=%d\n",
                             TYPE(n), NCH(n));
                return NULL;
        }
    }
}

/* Parse the NUL-terminated numeric literal `s` (which must contain no
   underscore separators) into a Python int, float or complex object.
   Returns a new reference, or NULL with an exception set. */
static PyObject *
parsenumber_raw(struct compiling *c, const char *s)
{
    const char *end;
    long x;
    double dx;
    Py_complex compl;
    int imflag;

    assert(s != NULL);
    errno = 0;
    end = s + strlen(s) - 1;
    /* A trailing 'j'/'J' marks an imaginary (complex) literal. */
    imflag = *end == 'j' || *end == 'J';
    if (s[0] == '0') {
        /* Leading 0: may be hex/octal/binary; parse as unsigned. */
        x = (long) PyOS_strtoul(s, (char **)&end, 0);
        if (x < 0 && errno == 0) {
            /* Value did not fit in a signed long: re-parse as big int. */
            return PyLong_FromString(s, (char **)0, 0);
        }
    }
    else
        x = PyOS_strtol(s, (char **)&end, 0);
    if (*end == '\0') {
        /* The whole string was consumed as an integer. */
        if (errno != 0)
            /* Overflowed a long: fall back to arbitrary precision. */
            return PyLong_FromString(s, (char **)0, 0);
        return PyLong_FromLong(x);
    }
    /* XXX Huge floats may silently fail */
    if (imflag) {
        compl.real = 0.;
        compl.imag = PyOS_string_to_double(s, (char **)&end, NULL);
        if (compl.imag == -1.0 && PyErr_Occurred())
            return NULL;
        return PyComplex_FromCComplex(compl);
    }
    else {
        dx = PyOS_string_to_double(s, NULL, NULL);
        if (dx == -1.0 && PyErr_Occurred())
            return NULL;
        return PyFloat_FromDouble(dx);
    }
}

/* Parse a numeric literal, first stripping any PEP 515 underscore
   separators, then delegating to parsenumber_raw(). */
static PyObject *
parsenumber(struct compiling *c, const char *s)
{
    char *dup, *end;
    PyObject *res = NULL;

    assert(s != NULL);

    if (strchr(s, '_') == NULL) {
        /* Fast path: no underscores to strip. */
        return parsenumber_raw(c, s);
    }
    /* Create a duplicate without underscores. */
    dup = PyMem_Malloc(strlen(s) + 1);
    if (dup == NULL) {
        return PyErr_NoMemory();
    }
    end = dup;
    for (; *s; s++) {
        if (*s != '_') {
            *end++ = *s;
        }
    }
    *end = '\0';
    res = parsenumber_raw(c, dup);
    PyMem_Free(dup);
    return res;
}

/* Decode the run of high-bit (non-ASCII) bytes starting at *sPtr, up to
   `end`, as UTF-8; *sPtr is advanced past the decoded bytes. */
static PyObject *
decode_utf8(struct compiling *c, const char **sPtr, const char *end)
{
    const char *s, *t;
    t = s = *sPtr;
    /* while (s < end && *s != '\\') s++; */ /* inefficient for u".."
*/ while (s < end && (*s & 0x80)) s++; *sPtr = s; return PyUnicode_DecodeUTF8(t, s - t, NULL); } static int warn_invalid_escape_sequence(struct compiling *c, const node *n, unsigned char first_invalid_escape_char) { PyObject *msg = PyUnicode_FromFormat("invalid escape sequence \\%c", first_invalid_escape_char); if (msg == NULL) { return -1; } if (PyErr_WarnExplicitObject(PyExc_DeprecationWarning, msg, c->c_filename, LINENO(n), NULL, NULL) < 0) { if (PyErr_ExceptionMatches(PyExc_DeprecationWarning)) { const char *s; /* Replace the DeprecationWarning exception with a SyntaxError to get a more accurate error report */ PyErr_Clear(); s = PyUnicode_AsUTF8(msg); if (s != NULL) { ast_error(c, n, s); } } Py_DECREF(msg); return -1; } Py_DECREF(msg); return 0; } static PyObject * decode_unicode_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { PyObject *v, *u; char *buf; char *p; const char *end; const char *first_invalid_escape; /* check for integer overflow */ if (len > SIZE_MAX / 6) return NULL; /* "ä" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5 "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), or ~1:6 */ u = PyBytes_FromStringAndSize((char *)NULL, len * 6); if (u == NULL) return NULL; p = buf = PyBytes_AsString(u); end = s + len; while (s < end) { if (*s == '\\') { *p++ = *s++; if (s >= end || *s & 0x80) { strcpy(p, "u005c"); p += 5; if (s >= end) break; } } if (*s & 0x80) { /* XXX inefficient */ PyObject *w; int kind; void *data; Py_ssize_t len, i; w = decode_utf8(c, &s, end); if (w == NULL) { Py_DECREF(u); return NULL; } kind = PyUnicode_KIND(w); data = PyUnicode_DATA(w); len = PyUnicode_GET_LENGTH(w); for (i = 0; i < len; i++) { Py_UCS4 chr = PyUnicode_READ(kind, data, i); sprintf(p, "\\U%08x", chr); p += 10; } /* Should be impossible to overflow */ assert(p - buf <= PyBytes_GET_SIZE(u)); Py_DECREF(w); } else { *p++ = *s++; } } len = p - buf; s = buf; v = _PyUnicode_DecodeUnicodeEscape(s, len, NULL, &first_invalid_escape); 
if (v != NULL && first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { /* We have not decref u before because first_invalid_escape points inside u. */ Py_XDECREF(u); Py_DECREF(v); return NULL; } } Py_XDECREF(u); return v; } static PyObject * decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { const char *first_invalid_escape; PyObject *result = _PyBytes_DecodeEscape(s, len, NULL, 0, NULL, &first_invalid_escape); if (result == NULL) return NULL; if (first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { Py_DECREF(result); return NULL; } } return result; } /* Shift locations for the given node and all its children by adding `lineno` and `col_offset` to existing locations. */ static void fstring_shift_node_locations(node *n, int lineno, int col_offset) { int i; n->n_col_offset = n->n_col_offset + col_offset; for (i = 0; i < NCH(n); ++i) { if (n->n_lineno && n->n_lineno < CHILD(n, i)->n_lineno) { /* Shifting column offsets unnecessary if there's been newlines. */ col_offset = 0; } fstring_shift_node_locations(CHILD(n, i), lineno, col_offset); } n->n_lineno = n->n_lineno + lineno; } /* Fix locations for the given node and its children. `parent` is the enclosing node. `n` is the node which locations are going to be fixed relative to parent. `expr_str` is the child node's string representation, including braces. */ static void fstring_fix_node_location(const node *parent, node *n, char *expr_str) { char *substr = NULL; char *start; int lines = LINENO(parent) - 1; int cols = parent->n_col_offset; /* Find the full fstring to fix location information in `n`. 
*/ while (parent && parent->n_type != STRING) parent = parent->n_child; if (parent && parent->n_str) { substr = strstr(parent->n_str, expr_str); if (substr) { start = substr; while (start > parent->n_str) { if (start[0] == '\n') break; start--; } cols += substr - start; /* Fix lineno in mulitline strings. */ while ((substr = strchr(substr + 1, '\n'))) lines--; } } fstring_shift_node_locations(n, lines, cols); } /* Compile this expression in to an expr_ty. Add parens around the expression, in order to allow leading spaces in the expression. */ static expr_ty fstring_compile_expr(const char *expr_start, const char *expr_end, struct compiling *c, const node *n) { PyCompilerFlags cf; node *mod_n; mod_ty mod; char *str; Py_ssize_t len; const char *s; PyObject *fstring_name; assert(expr_end >= expr_start); assert(*(expr_start-1) == '{'); assert(*expr_end == '}' || *expr_end == '!' || *expr_end == ':'); /* If the substring is all whitespace, it's an error. We need to catch this here, and not when we call PyParser_SimpleParseStringFlagsFilename, because turning the expression '' in to '()' would go from being invalid to valid. */ for (s = expr_start; s != expr_end; s++) { char c = *s; /* The Python parser ignores only the following whitespace characters (\r already is converted to \n). */ if (!(c == ' ' || c == '\t' || c == '\n' || c == '\f')) { break; } } if (s == expr_end) { ast_error(c, n, "f-string: empty expression not allowed"); return NULL; } len = expr_end - expr_start; /* Allocate 3 extra bytes: open paren, close paren, null byte. */ str = PyMem_RawMalloc(len + 3); if (str == NULL) { PyErr_NoMemory(); return NULL; } str[0] = '('; memcpy(str+1, expr_start, len); str[len+1] = ')'; str[len+2] = 0; cf.cf_flags = PyCF_ONLY_AST; mod_n = PyParser_SimpleParseStringFlagsFilename(str, "<fstring>", Py_eval_input, 0); if (!mod_n) { PyMem_RawFree(str); return NULL; } /* Reuse str to find the correct column offset. 
*/ str[0] = '{'; str[len+1] = '}'; fstring_fix_node_location(n, mod_n, str); fstring_name = PyUnicode_FromString("<fstring>"); mod = string_object_to_c_ast(str, fstring_name, Py_eval_input, &cf, c->c_feature_version, c->c_arena); Py_DECREF(fstring_name); PyMem_RawFree(str); Ta3Node_Free(mod_n); if (!mod) return NULL; return mod->v.Expression.body; } /* Return -1 on error. Return 0 if we reached the end of the literal. Return 1 if we haven't reached the end of the literal, but we want the caller to process the literal up to this point. Used for doubled braces. */ static int fstring_find_literal(const char **str, const char *end, int raw, PyObject **literal, int recurse_lvl, struct compiling *c, const node *n) { /* Get any literal string. It ends when we hit an un-doubled left brace (which isn't part of a unicode name escape such as "\N{EULER CONSTANT}"), or the end of the string. */ const char *s = *str; const char *literal_start = s; int result = 0; assert(*literal == NULL); while (s < end) { char ch = *s++; if (!raw && ch == '\\' && s < end) { ch = *s++; if (ch == 'N') { if (s < end && *s++ == '{') { while (s < end && *s++ != '}') { } continue; } break; } if (ch == '{' && warn_invalid_escape_sequence(c, n, ch) < 0) { return -1; } } if (ch == '{' || ch == '}') { /* Check for doubled braces, but only at the top level. If we checked at every level, then f'{0:{3}}' would fail with the two closing braces. */ if (recurse_lvl == 0) { if (s < end && *s == ch) { /* We're going to tell the caller that the literal ends here, but that they should continue scanning. But also skip over the second brace when we resume scanning. */ *str = s + 1; result = 1; goto done; } /* Where a single '{' is the start of a new expression, a single '}' is not allowed. 
*/ if (ch == '}') { *str = s - 1; ast_error(c, n, "f-string: single '}' is not allowed"); return -1; } } /* We're either at a '{', which means we're starting another expression; or a '}', which means we're at the end of this f-string (for a nested format_spec). */ s--; break; } } *str = s; assert(s <= end); assert(s == end || *s == '{' || *s == '}'); done: if (literal_start != s) { if (raw) *literal = PyUnicode_DecodeUTF8Stateful(literal_start, s - literal_start, NULL, NULL); else *literal = decode_unicode_with_escapes(c, n, literal_start, s - literal_start); if (!*literal) return -1; } return result; } /* Forward declaration because parsing is recursive. */ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n); /* Parse the f-string at *str, ending at end. We know *str starts an expression (so it must be a '{'). Returns the FormattedValue node, which includes the expression, conversion character, and format_spec expression. Note that I don't do a perfect job here: I don't make sure that a closing brace doesn't match an opening paren, for example. It doesn't need to error on all invalid expressions, just correctly find the end of all valid ones. Any errors inside the expression will be caught when we parse it later. */ static int fstring_find_expr(const char **str, const char *end, int raw, int recurse_lvl, expr_ty *expression, struct compiling *c, const node *n) { /* Return -1 on error, else 0. */ const char *expr_start; const char *expr_end; expr_ty simple_expression; expr_ty format_spec = NULL; /* Optional format specifier. */ int conversion = -1; /* The conversion char. -1 if not specified. */ /* 0 if we're not in a string, else the quote char we're trying to match (single or double quote). */ char quote_char = 0; /* If we're inside a string, 1=normal, 3=triple-quoted. */ int string_type = 0; /* Keep track of nesting level for braces/parens/brackets in expressions. 
*/ Py_ssize_t nested_depth = 0; /* Can only nest one level deep. */ if (recurse_lvl >= 2) { ast_error(c, n, "f-string: expressions nested too deeply"); return -1; } /* The first char must be a left brace, or we wouldn't have gotten here. Skip over it. */ assert(**str == '{'); *str += 1; expr_start = *str; for (; *str < end; (*str)++) { char ch; /* Loop invariants. */ assert(nested_depth >= 0); assert(*str >= expr_start && *str < end); if (quote_char) assert(string_type == 1 || string_type == 3); else assert(string_type == 0); ch = **str; /* Nowhere inside an expression is a backslash allowed. */ if (ch == '\\') { /* Error: can't include a backslash character, inside parens or strings or not. */ ast_error(c, n, "f-string expression part " "cannot include a backslash"); return -1; } if (quote_char) { /* We're inside a string. See if we're at the end. */ /* This code needs to implement the same non-error logic as tok_get from tokenizer.c, at the letter_quote label. To actually share that code would be a nightmare. But, it's unlikely to change and is small, so duplicate it here. Note we don't need to catch all of the errors, since they'll be caught when parsing the expression. We just need to match the non-error cases. Thus we can ignore \n in single-quoted strings, for example. Or non-terminated strings. */ if (ch == quote_char) { /* Does this match the string_type (single or triple quoted)? */ if (string_type == 3) { if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { /* We're at the end of a triple quoted string. */ *str += 2; string_type = 0; quote_char = 0; continue; } } else { /* We're at the end of a normal string. */ quote_char = 0; string_type = 0; continue; } } } else if (ch == '\'' || ch == '"') { /* Is this a triple quoted string? */ if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { string_type = 3; *str += 2; } else { /* Start of a normal string. */ string_type = 1; } /* Start looking for the end of the string. 
*/ quote_char = ch; } else if (ch == '[' || ch == '{' || ch == '(') { nested_depth++; } else if (nested_depth != 0 && (ch == ']' || ch == '}' || ch == ')')) { nested_depth--; } else if (ch == '#') { /* Error: can't include a comment character, inside parens or not. */ ast_error(c, n, "f-string expression part cannot include '#'"); return -1; } else if (nested_depth == 0 && (ch == '!' || ch == ':' || ch == '}')) { /* First, test for the special case of "!=". Since '=' is not an allowed conversion character, nothing is lost in this test. */ if (ch == '!' && *str+1 < end && *(*str+1) == '=') { /* This isn't a conversion character, just continue. */ continue; } /* Normal way out of this loop. */ break; } else { /* Just consume this char and loop around. */ } } expr_end = *str; /* If we leave this loop in a string or with mismatched parens, we don't care. We'll get a syntax error when compiling the expression. But, we can produce a better error message, so let's just do that.*/ if (quote_char) { ast_error(c, n, "f-string: unterminated string"); return -1; } if (nested_depth) { ast_error(c, n, "f-string: mismatched '(', '{', or '['"); return -1; } if (*str >= end) goto unexpected_end_of_string; /* Compile the expression as soon as possible, so we show errors related to the expression before errors related to the conversion or format_spec. */ simple_expression = fstring_compile_expr(expr_start, expr_end, c, n); if (!simple_expression) return -1; /* Check for a conversion char, if present. */ if (**str == '!') { *str += 1; if (*str >= end) goto unexpected_end_of_string; conversion = **str; *str += 1; /* Validate the conversion. */ if (!(conversion == 's' || conversion == 'r' || conversion == 'a')) { ast_error(c, n, "f-string: invalid conversion character: " "expected 's', 'r', or 'a'"); return -1; } } /* Check for the format spec, if present. 
*/ if (*str >= end) goto unexpected_end_of_string; if (**str == ':') { *str += 1; if (*str >= end) goto unexpected_end_of_string; /* Parse the format spec. */ format_spec = fstring_parse(str, end, raw, recurse_lvl+1, c, n); if (!format_spec) return -1; } if (*str >= end || **str != '}') goto unexpected_end_of_string; /* We're at a right brace. Consume it. */ assert(*str < end); assert(**str == '}'); *str += 1; /* And now create the FormattedValue node that represents this entire expression with the conversion and format spec. */ *expression = FormattedValue(simple_expression, conversion, format_spec, LINENO(n), n->n_col_offset, c->c_arena); if (!*expression) return -1; return 0; unexpected_end_of_string: ast_error(c, n, "f-string: expecting '}'"); return -1; } /* Return -1 on error. Return 0 if we have a literal (possible zero length) and an expression (zero length if at the end of the string. Return 1 if we have a literal, but no expression, and we want the caller to call us again. This is used to deal with doubled braces. When called multiple times on the string 'a{{b{0}c', this function will return: 1. the literal 'a{' with no expression, and a return value of 1. Despite the fact that there's no expression, the return value of 1 means we're not finished yet. 2. the literal 'b' and the expression '0', with a return value of 0. The fact that there's an expression means we're not finished. 3. literal 'c' with no expression and a return value of 0. The combination of the return value of 0 with no expression means we're finished. */ static int fstring_find_literal_and_expr(const char **str, const char *end, int raw, int recurse_lvl, PyObject **literal, expr_ty *expression, struct compiling *c, const node *n) { int result; assert(*literal == NULL && *expression == NULL); /* Get any literal string. 
*/ result = fstring_find_literal(str, end, raw, literal, recurse_lvl, c, n); if (result < 0) goto error; assert(result == 0 || result == 1); if (result == 1) /* We have a literal, but don't look at the expression. */ return 1; if (*str >= end || **str == '}') /* We're at the end of the string or the end of a nested f-string: no expression. The top-level error case where we expect to be at the end of the string but we're at a '}' is handled later. */ return 0; /* We must now be the start of an expression, on a '{'. */ assert(**str == '{'); if (fstring_find_expr(str, end, raw, recurse_lvl, expression, c, n) < 0) goto error; return 0; error: Py_CLEAR(*literal); return -1; } #define EXPRLIST_N_CACHED 64 typedef struct { /* Incrementally build an array of expr_ty, so be used in an asdl_seq. Cache some small but reasonably sized number of expr_ty's, and then after that start dynamically allocating, doubling the number allocated each time. Note that the f-string f'{0}a{1}' contains 3 expr_ty's: 2 FormattedValue's, and one Str for the literal 'a'. So you add expr_ty's about twice as fast as you add exressions in an f-string. */ Py_ssize_t allocated; /* Number we've allocated. */ Py_ssize_t size; /* Number we've used. */ expr_ty *p; /* Pointer to the memory we're actually using. Will point to 'data' until we start dynamically allocating. */ expr_ty data[EXPRLIST_N_CACHED]; } ExprList; #ifdef NDEBUG #define ExprList_check_invariants(l) #else static void ExprList_check_invariants(ExprList *l) { /* Check our invariants. Make sure this object is "live", and hasn't been deallocated. */ assert(l->size >= 0); assert(l->p != NULL); if (l->size <= EXPRLIST_N_CACHED) assert(l->data == l->p); } #endif static void ExprList_Init(ExprList *l) { l->allocated = EXPRLIST_N_CACHED; l->size = 0; /* Until we start allocating dynamically, p points to data. 
*/
    l->p = l->data;

    ExprList_check_invariants(l);
}

/* Append `exp` to the list, growing the backing store as needed: first
   by switching from the inline cache to heap memory, then by doubling.
   Returns 0 on success, -1 on allocation failure. */
static int
ExprList_Append(ExprList *l, expr_ty exp)
{
    ExprList_check_invariants(l);
    if (l->size >= l->allocated) {
        /* We need to alloc (or realloc) the memory. */
        Py_ssize_t new_size = l->allocated * 2;

        /* See if we've ever allocated anything dynamically. */
        if (l->p == l->data) {
            Py_ssize_t i;
            /* We're still using the cached data. Switch to
               alloc-ing. */
            l->p = PyMem_RawMalloc(sizeof(expr_ty) * new_size);
            if (!l->p)
                return -1;
            /* Copy the cached data into the new buffer. */
            for (i = 0; i < l->size; i++)
                l->p[i] = l->data[i];
        } else {
            /* Just realloc. */
            expr_ty *tmp = PyMem_RawRealloc(l->p, sizeof(expr_ty) * new_size);
            if (!tmp) {
                /* Free the old buffer and poison l->p so Dealloc is safe. */
                PyMem_RawFree(l->p);
                l->p = NULL;
                return -1;
            }
            l->p = tmp;
        }

        l->allocated = new_size;
        assert(l->allocated == 2 * l->size);
    }

    l->p[l->size++] = exp;
    ExprList_check_invariants(l);
    return 0;
}

/* Release any heap storage held by the list and poison it (size = -1)
   so that accidental further use trips the invariant checks. */
static void
ExprList_Dealloc(ExprList *l)
{
    ExprList_check_invariants(l);

    /* If there's been an error, or we've never dynamically allocated,
       do nothing. */
    if (!l->p || l->p == l->data) {
        /* Do nothing. */
    } else {
        /* We have dynamically allocated. Free the memory. */
        PyMem_RawFree(l->p);
    }
    l->p = NULL;
    l->size = -1;
}

/* Copy the collected expressions into a new arena-backed asdl_seq and
   deallocate the list.  Returns NULL on allocation failure. */
static asdl_seq *
ExprList_Finish(ExprList *l, PyArena *arena)
{
    asdl_seq *seq;

    ExprList_check_invariants(l);

    /* Allocate the asdl_seq and copy the expressions in to it. */
    seq = _Ta3_asdl_seq_new(l->size, arena);
    if (seq) {
        Py_ssize_t i;
        for (i = 0; i < l->size; i++)
            asdl_seq_SET(seq, i, l->p[i]);
    }
    ExprList_Dealloc(l);
    return seq;
}

/* The FstringParser is designed to add a mix of strings and
   f-strings, and concat them together as needed. Ultimately, it
   generates an expr_ty.
*/
typedef struct {
    PyObject *last_str;  /* literal text accumulated but not yet turned
                            into a Str node; NULL if none pending */
    ExprList expr_list;  /* Str / FormattedValue nodes collected so far */
    int fmode;           /* non-zero once an f-string has been added */
} FstringParser;

#ifdef NDEBUG
#define FstringParser_check_invariants(state)
#else
static void
FstringParser_check_invariants(FstringParser *state)
{
    if (state->last_str)
        assert(PyUnicode_CheckExact(state->last_str));
    ExprList_check_invariants(&state->expr_list);
}
#endif

/* Initialize a parser state to "empty": no pending literal, not an
   f-string (yet), empty expression list. */
static void
FstringParser_Init(FstringParser *state)
{
    state->last_str = NULL;
    state->fmode = 0;
    ExprList_Init(&state->expr_list);
    FstringParser_check_invariants(state);
}

/* Release the pending literal (if any) and the expression list. */
static void
FstringParser_Dealloc(FstringParser *state)
{
    FstringParser_check_invariants(state);

    Py_XDECREF(state->last_str);
    ExprList_Dealloc(&state->expr_list);
}

/* Make a Str node, but decref the PyUnicode object being added. */
static expr_ty
make_str_node_and_del(PyObject **str, struct compiling *c, const node* n)
{
    PyObject *kind, *s = *str;
    const char *raw = STR(CHILD(n, 0));
    /* currently Python allows up to 2 string modifiers */
    char *ch, s_kind[3] = {0, 0, 0};
    ch = s_kind;
    /* Copy the literal's prefix characters (e.g. "rb") up to the
       opening quote; this becomes the Str node's `kind`. */
    while (*raw && *raw != '\'' && *raw != '"') {
        *ch++ = *raw++;
    }
    kind = PyUnicode_FromString(s_kind);
    if (!kind) {
        return NULL;
    }
    *str = NULL;
    assert(PyUnicode_CheckExact(s));
    /* Ownership of `s` moves to the arena; on failure we must decref. */
    if (PyArena_AddPyObject(c->c_arena, s) < 0) {
        Py_DECREF(s);
        return NULL;
    }
    return Str(s, kind, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Add a non-f-string (that is, a regular literal string). str is
   decref'd. */
static int
FstringParser_ConcatAndDel(FstringParser *state, PyObject *str)
{
    FstringParser_check_invariants(state);
    assert(PyUnicode_CheckExact(str));

    if (PyUnicode_GET_LENGTH(str) == 0) {
        Py_DECREF(str);
        return 0;
    }

    if (!state->last_str) {
        /* We didn't have a string before, so just remember this one. */
        state->last_str = str;
    } else {
        /* Concatenate this with the previous string. */
        PyUnicode_AppendAndDel(&state->last_str, str);
        if (!state->last_str)
            return -1;
    }
    FstringParser_check_invariants(state);
    return 0;
}

/* Parse an f-string. The f-string is in *str to end, with no 'f' or
   quotes.
*/ static int FstringParser_ConcatFstring(FstringParser *state, const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n) { FstringParser_check_invariants(state); state->fmode = 1; /* Parse the f-string. */ while (1) { PyObject *literal = NULL; expr_ty expression = NULL; /* If there's a zero length literal in front of the expression, literal will be NULL. If we're at the end of the f-string, expression will be NULL (unless result == 1, see below). */ int result = fstring_find_literal_and_expr(str, end, raw, recurse_lvl, &literal, &expression, c, n); if (result < 0) return -1; /* Add the literal, if any. */ if (!literal) { /* Do nothing. Just leave last_str alone (and possibly NULL). */ } else if (!state->last_str) { /* Note that the literal can be zero length, if the input string is "\\\n" or "\\\r", among others. */ state->last_str = literal; literal = NULL; } else { /* We have a literal, concatenate it. */ assert(PyUnicode_GET_LENGTH(literal) != 0); if (FstringParser_ConcatAndDel(state, literal) < 0) return -1; literal = NULL; } /* We've dealt with the literal now. It can't be leaked on further errors. */ assert(literal == NULL); /* See if we should just loop around to get the next literal and expression, while ignoring the expression this time. This is used for un-doubling braces, as an optimization. */ if (result == 1) continue; if (!expression) /* We're done with this f-string. */ break; /* We know we have an expression. Convert any existing string to a Str node. */ if (!state->last_str) { /* Do nothing. No previous literal. */ } else { /* Convert the existing last_str literal to a Str node. */ expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) return -1; } if (ExprList_Append(&state->expr_list, expression) < 0) return -1; } /* If recurse_lvl is zero, then we must be at the end of the string. Otherwise, we must be at a right brace. 
*/ if (recurse_lvl == 0 && *str < end-1) { ast_error(c, n, "f-string: unexpected end of string"); return -1; } if (recurse_lvl != 0 && **str != '}') { ast_error(c, n, "f-string: expecting '}'"); return -1; } FstringParser_check_invariants(state); return 0; } /* Convert the partial state reflected in last_str and expr_list to an expr_ty. The expr_ty can be a Str, or a JoinedStr. */ static expr_ty FstringParser_Finish(FstringParser *state, struct compiling *c, const node *n) { asdl_seq *seq; FstringParser_check_invariants(state); /* If we're just a constant string with no expressions, return that. */ if (!state->fmode) { assert(!state->expr_list.size); if (!state->last_str) { /* Create a zero length string. */ state->last_str = PyUnicode_FromStringAndSize(NULL, 0); if (!state->last_str) goto error; } return make_str_node_and_del(&state->last_str, c, n); } /* Create a Str node out of last_str, if needed. It will be the last node in our expression list. */ if (state->last_str) { expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) goto error; } /* This has already been freed. */ assert(state->last_str == NULL); seq = ExprList_Finish(&state->expr_list, c->c_arena); if (!seq) goto error; return JoinedStr(seq, LINENO(n), n->n_col_offset, c->c_arena); error: FstringParser_Dealloc(state); return NULL; } /* Given an f-string (with no 'f' or quotes) that's in *str and ends at end, parse it into an expr_ty. Return NULL on error. Adjust str to point past the parsed portion. 
*/ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n) { FstringParser state; FstringParser_Init(&state); if (FstringParser_ConcatFstring(&state, str, end, raw, recurse_lvl, c, n) < 0) { FstringParser_Dealloc(&state); return NULL; } return FstringParser_Finish(&state, c, n); } /* n is a Python string literal, including the bracketing quote characters, and r, b, u, &/or f prefixes (if any), and embedded escape sequences (if any). parsestr parses it, and sets *result to decoded Python string object. If the string is an f-string, set *fstr and *fstrlen to the unparsed string object. Return 0 if no errors occurred. */ static int parsestr(struct compiling *c, const node *n, int *bytesmode, int *rawmode, PyObject **result, const char **fstr, Py_ssize_t *fstrlen) { size_t len; const char *s = STR(n); int quote = Py_CHARMASK(*s); int fmode = 0; *bytesmode = 0; *rawmode = 0; *result = NULL; *fstr = NULL; if (Py_ISALPHA(quote)) { while (!*bytesmode || !*rawmode) { if (quote == 'b' || quote == 'B') { quote = *++s; *bytesmode = 1; } else if (quote == 'u' || quote == 'U') { quote = *++s; } else if (quote == 'r' || quote == 'R') { quote = *++s; *rawmode = 1; } else if (quote == 'f' || quote == 'F') { quote = *++s; fmode = 1; } else { break; } } } /* fstrings are only allowed in Python 3.6 and greater */ if (fmode && c->c_feature_version < 6) { ast_error(c, n, "Format strings are only supported in Python 3.6 and greater"); return -1; } if (fmode && *bytesmode) { PyErr_BadInternalCall(); return -1; } if (quote != '\'' && quote != '\"') { PyErr_BadInternalCall(); return -1; } /* Skip the leading quote char. */ s++; len = strlen(s); if (len > INT_MAX) { PyErr_SetString(PyExc_OverflowError, "string to parse is too long"); return -1; } if (s[--len] != quote) { /* Last quote char must match the first. 
*/ PyErr_BadInternalCall(); return -1; } if (len >= 4 && s[0] == quote && s[1] == quote) { /* A triple quoted string. We've already skipped one quote at the start and one at the end of the string. Now skip the two at the start. */ s += 2; len -= 2; /* And check that the last two match. */ if (s[--len] != quote || s[--len] != quote) { PyErr_BadInternalCall(); return -1; } } if (fmode) { /* Just return the bytes. The caller will parse the resulting string. */ *fstr = s; *fstrlen = len; return 0; } /* Not an f-string. */ /* Avoid invoking escape decoding routines if possible. */ *rawmode = *rawmode || strchr(s, '\\') == NULL; if (*bytesmode) { /* Disallow non-ASCII characters. */ const char *ch; for (ch = s; *ch; ch++) { if (Py_CHARMASK(*ch) >= 0x80) { ast_error(c, n, "bytes can only contain ASCII " "literal characters."); return -1; } } if (*rawmode) *result = PyBytes_FromStringAndSize(s, len); else *result = decode_bytes_with_escapes(c, n, s, len); } else { if (*rawmode) *result = PyUnicode_DecodeUTF8Stateful(s, len, NULL, NULL); else *result = decode_unicode_with_escapes(c, n, s, len); } return *result == NULL ? -1 : 0; } /* Accepts a STRING+ atom, and produces an expr_ty node. Run through each STRING atom, and process it as needed. For bytes, just concatenate them together, and the result will be a Bytes node. For normal strings and f-strings, concatenate them together. The result will be a Str node if there were no f-strings; a FormattedValue node if there's just an f-string (with no leading or trailing literals), or a JoinedStr node if there are multiple f-strings or any literals involved. */ static expr_ty parsestrplus(struct compiling *c, const node *n) { int bytesmode = 0; PyObject *bytes_str = NULL; int i; FstringParser state; FstringParser_Init(&state); for (i = 0; i < NCH(n); i++) { int this_bytesmode; int this_rawmode; PyObject *s; const char *fstr; Py_ssize_t fstrlen = -1; /* Silence a compiler warning. 
*/ REQ(CHILD(n, i), STRING); if (parsestr(c, CHILD(n, i), &this_bytesmode, &this_rawmode, &s, &fstr, &fstrlen) != 0) goto error; /* Check that we're not mixing bytes with unicode. */ if (i != 0 && bytesmode != this_bytesmode) { ast_error(c, n, "cannot mix bytes and nonbytes literals"); /* s is NULL if the current string part is an f-string. */ Py_XDECREF(s); goto error; } bytesmode = this_bytesmode; if (fstr != NULL) { int result; assert(s == NULL && !bytesmode); /* This is an f-string. Parse and concatenate it. */ result = FstringParser_ConcatFstring(&state, &fstr, fstr+fstrlen, this_rawmode, 0, c, n); if (result < 0) goto error; } else { /* A string or byte string. */ assert(s != NULL && fstr == NULL); assert(bytesmode ? PyBytes_CheckExact(s) : PyUnicode_CheckExact(s)); if (bytesmode) { /* For bytes, concat as we go. */ if (i == 0) { /* First time, just remember this value. */ bytes_str = s; } else { PyBytes_ConcatAndDel(&bytes_str, s); if (!bytes_str) goto error; } } else { /* This is a regular string. Concatenate it. */ if (FstringParser_ConcatAndDel(&state, s) < 0) goto error; } } } if (bytesmode) { /* Just return the bytes object and we're done. */ if (PyArena_AddPyObject(c->c_arena, bytes_str) < 0) goto error; return Bytes(bytes_str, LINENO(n), n->n_col_offset, c->c_arena); } /* We're not a bytes string, bytes_str should never have been set. */ assert(bytes_str == NULL); return FstringParser_Finish(&state, c, n); error: Py_XDECREF(bytes_str); FstringParser_Dealloc(&state); return NULL; }
ast_for_atom(struct compiling *c, const node *n) { /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False' */ node *ch = CHILD(n, 0); switch (TYPE(ch)) { case NAME: { PyObject *name; const char *s = STR(ch); size_t len = strlen(s); if (len >= 4 && len <= 5) { if (!strcmp(s, "None")) return NameConstant(Py_None, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "True")) return NameConstant(Py_True, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "False")) return NameConstant(Py_False, LINENO(n), n->n_col_offset, c->c_arena); } name = new_identifier(s, c); if (!name) return NULL; /* All names start in Load context, but may later be changed. */ return Name(name, Load, LINENO(n), n->n_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { char buf[128]; const char *s = NULL; PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) s = PyUnicode_AsUTF8(errstr); if (s) { PyOS_snprintf(buf, sizeof(buf), "(%s) %s", errtype, s); } else { PyErr_Clear(); PyOS_snprintf(buf, sizeof(buf), "(%s) unknown error", errtype); } Py_XDECREF(errstr); ast_error(c, n, buf); Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum; const char *s = STR(ch); /* Underscores in numeric literals are only allowed in Python 3.6 or greater */ /* Check for underscores here rather than in parse_number so we can report a line number on error */ if (c->c_feature_version < 6 && strchr(s, '_') != NULL) { ast_error(c, ch, "Underscores in numeric literals are only supported in Python 3.6 and greater"); return NULL; } pynum = parsenumber(c, 
s); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Num(pynum, LINENO(n), n->n_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Ellipsis(LINENO(n), n->n_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if ((NCH(ch) > 1) && (TYPE(CHILD(ch, 1)) == comp_for)) return ast_for_genexp(c, ch); return ast_for_testlist(c, ch); case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); } else return ast_for_listcomp(c, ch); case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. 
*/ res = ast_for_dictdisplay(c, ch); } if (res) { res->lineno = LINENO(n); res->col_offset = n->n_col_offset; } return res; } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } }
ast_for_atom(struct compiling *c, const node *n) { /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False' */ node *ch = CHILD(n, 0); switch (TYPE(ch)) { case NAME: { PyObject *name; const char *s = STR(ch); size_t len = strlen(s); if (len >= 4 && len <= 5) { if (!strcmp(s, "None")) return NameConstant(Py_None, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "True")) return NameConstant(Py_True, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "False")) return NameConstant(Py_False, LINENO(n), n->n_col_offset, c->c_arena); } name = new_identifier(s, c); if (!name) return NULL; /* All names start in Load context, but may later be changed. */ return Name(name, Load, LINENO(n), n->n_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { char buf[128]; const char *s = NULL; PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) s = PyUnicode_AsUTF8(errstr); if (s) { PyOS_snprintf(buf, sizeof(buf), "(%s) %s", errtype, s); } else { PyErr_Clear(); PyOS_snprintf(buf, sizeof(buf), "(%s) unknown error", errtype); } Py_XDECREF(errstr); ast_error(c, n, buf); Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum; const char *s = STR(ch); /* Underscores in numeric literals are only allowed in Python 3.6 or greater */ /* Check for underscores here rather than in parse_number so we can report a line number on error */ if (c->c_feature_version < 6 && strchr(s, '_') != NULL) { ast_error(c, ch, "Underscores in numeric literals are only supported in Python 3.6 and greater"); return NULL; } pynum = parsenumber(c, 
STR(ch)); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Num(pynum, LINENO(n), n->n_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Ellipsis(LINENO(n), n->n_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if ((NCH(ch) > 1) && (TYPE(CHILD(ch, 1)) == comp_for)) return ast_for_genexp(c, ch); return ast_for_testlist(c, ch); case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); } else return ast_for_listcomp(c, ch); case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. 
*/ res = ast_for_dictdisplay(c, ch); } if (res) { res->lineno = LINENO(n); res->col_offset = n->n_col_offset; } return res; } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } }
{'added': [(11, '#include "pythonrun.h"'), (15, "// VS 2010 doesn't have <stdbool.h>..."), (16, 'typedef int bool;'), (17, '#define false 0'), (18, '#define true 1'), (19, ''), (20, '#ifndef _PyObject_FastCall'), (21, 'static PyObject *'), (22, '_PyObject_FastCall(PyObject *func, PyObject *const *args, int nargs)'), (23, '{'), (24, ' PyObject *t, *res;'), (25, ' int i;'), (26, ''), (27, ' t = PyTuple_New(nargs);'), (28, ' if (t == NULL) {'), (29, ' return NULL;'), (30, ' }'), (31, ' for (i = 0; i < nargs; i++) {'), (32, ' if (PyTuple_SetItem(t, i, args[i]) < 0) {'), (33, ' Py_DECREF(t);'), (34, ' return NULL;'), (35, ' }'), (36, ' }'), (37, ' res = PyObject_CallObject(func, t);'), (38, ' Py_DECREF(t);'), (39, ' return res;'), (40, '}'), (41, '#endif'), (42, ''), (43, '#if PY_MINOR_VERSION < 6'), (44, '#define _PyUnicode_EqualToASCIIString(a, b) (PyUnicode_CompareWithASCIIString((a), (b)) == 0)'), (45, ''), (46, 'static PyObject *'), (47, '_PyBytes_DecodeEscape(const char *s,'), (48, ' Py_ssize_t len,'), (49, ' const char *errors,'), (50, ' Py_ssize_t unicode,'), (51, ' const char *recode_encoding,'), (52, ' const char **first_invalid_escape)'), (53, '{'), (54, ' *first_invalid_escape = NULL;'), (55, ' return PyBytes_DecodeEscape(s, len, errors, unicode, recode_encoding);'), (56, '}'), (57, ''), (58, 'PyObject *'), (59, '_PyUnicode_DecodeUnicodeEscape(const char *s,'), (60, ' Py_ssize_t size,'), (61, ' const char *errors,'), (62, ' const char **first_invalid_escape)'), (63, '{'), (64, ' *first_invalid_escape = NULL;'), (65, ' return PyUnicode_DecodeUnicodeEscape(s, size, errors);'), (66, '}'), (163, ' abort();'), (657, 'static asdl_seq *ast_for_suite(struct compiling *c, const node *n);'), (663, 'static stmt_ty ast_for_with_stmt(struct compiling *, const node *, bool);'), (664, 'static stmt_ty ast_for_for_stmt(struct compiling *, const node *, bool);'), (667, 'static expr_ty ast_for_call(struct compiling *, const node *, expr_ty, bool);'), (701, ' PyObject *form;'), 
(702, ' PyObject *args[2];'), (703, ' _Py_IDENTIFIER(NFKC);'), (708, ' form = _PyUnicode_FromId(&PyId_NFKC);'), (709, ' if (form == NULL) {'), (710, ' Py_DECREF(id);'), (711, ' return NULL;'), (712, ' }'), (713, ' args[0] = form;'), (714, ' args[1] = id;'), (715, ' id2 = _PyObject_FastCall(c->c_normalize, args, 2);'), (719, ' if (!PyUnicode_Check(id2)) {'), (720, ' PyErr_Format(PyExc_TypeError,'), (721, ' "unicodedata.normalize() must return a string, not "'), (722, ' "%.200s",'), (723, ' Py_TYPE(id2)->tp_name);'), (724, ' Py_DECREF(id2);'), (725, ' return NULL;'), (726, ' }'), (835, ' abort();'), (843, ' PyObject *filename, int feature_version,'), (844, ' PyArena *arena)'), (906, ' goto out;'), (907, ' asdl_seq_SET(type_ignores, i, ti);'), (1009, ' int feature_version, PyArena *arena)'), (1075, ' if (_PyUnicode_EqualToASCIIString(name, "__debug__")) {'), (1082, ' if (_PyUnicode_EqualToASCIIString(name, *p)) {'), (1298, ' /* fall through */'), (1313, ' /* fall through */'), (1440, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1577, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1593, ' if (TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1609, ' i += 2; /* the star and the name */'), (1610, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1611, ' i += 1; /* the comma, if present */'), (1613, ' if (TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1635, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1717, ' d = ast_for_call(c, CHILD(n, 3), name_expr, true);'), (1748, 'ast_for_funcdef_impl(struct compiling *c, const node *n0,'), (1749, ' asdl_seq *decorator_seq, bool is_async)'), (1752, ' const node * const n = is_async ? 
CHILD(n0, 1) : n0;'), (1763, ' "Async functions are only supported in Python 3.5 and greater");'), (1802, ' type_comment, LINENO(n0), n0->n_col_offset, c->c_arena);'), (1805, ' type_comment, LINENO(n), n->n_col_offset, c->c_arena);'), (1811, " /* async_funcdef: 'async' funcdef */"), (1813, ' REQ(CHILD(n, 0), NAME);'), (1814, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (1817, ' return ast_for_funcdef_impl(c, n, decorator_seq,'), (1818, ' true /* is_async */);'), (1826, ' false /* is_async */);'), (1833, " /* async_stmt: 'async' (funcdef | with_stmt | for_stmt) */"), (1835, ' REQ(CHILD(n, 0), NAME);'), (1836, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (1840, ' return ast_for_funcdef_impl(c, n, NULL,'), (1841, ' true /* is_async */);'), (1843, ' return ast_for_with_stmt(c, n,'), (1844, ' true /* is_async */);'), (1847, ' return ast_for_for_stmt(c, n,'), (1848, ' true /* is_async */);'), (1953, ' if (NCH(n) == 2) {'), (1954, ' REQ(CHILD(n, 0), NAME);'), (1955, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (1956, ' n = CHILD(n, 1);'), (1958, ' else if (NCH(n) == 1) {'), (1959, ' n = CHILD(n, 0);'), (1960, ' }'), (1961, ' else {'), (1962, ' goto error;'), (1963, ' }'), (1964, ' if (NCH(n) == (5)) {'), (1965, ' n = CHILD(n, 4);'), (1984, ' error:'), (2033, ' node *sync_n;'), (2038, ' if (NCH(n) == 2) {'), (2040, ' REQ(CHILD(n, 0), NAME);'), (2041, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (2042, ' sync_n = CHILD(n, 1);'), (2044, ' else {'), (2045, ' sync_n = CHILD(n, 0);'), (2046, ' }'), (2047, ' REQ(sync_n, sync_comp_for);'), (2056, ' for_ch = CHILD(sync_n, 1);'), (2060, ' expression = ast_for_expr(c, CHILD(sync_n, 3));'), (2077, ' if (NCH(sync_n) == 5) {'), (2081, ' n = CHILD(sync_n, 4);'), (2352, ' pynum = parsenumber(c, STR(ch));'), (2575, ' return ast_for_call(c, CHILD(n, 1), left_expr, true);'), (2704, " /* there was an 'await' */"), (2769, " atom_expr: ['await'] atom trailer*"), (2917, 'ast_for_call(struct compiling *c, const 
node *n, expr_ty func, bool allowgen)'), (2924, ' int i, nargs, nkeywords;'), (2938, ' else if (TYPE(CHILD(ch, 1)) == comp_for) {'), (2939, ' nargs++;'), (2940, ' if (!allowgen) {'), (2941, ' ast_error(c, ch, "invalid syntax");'), (2942, ' return NULL;'), (2943, ' }'), (2944, ' if (NCH(n) > 1) {'), (2945, ' ast_error(c, ch, "Generator expression must be parenthesized");'), (2946, ' return NULL;'), (2947, ' }'), (2948, ' }'), (2957, ' args = _Ta3_asdl_seq_new(nargs, c->c_arena);'), (3109, ''), (3381, ' /* fall through */'), (3493, ' if (!str)'), (3494, ' return NULL;'), (3926, 'ast_for_for_stmt(struct compiling *c, const node *n0, bool is_async)'), (3928, ' const node * const n = is_async ? CHILD(n0, 1) : n0;'), (3978, ' return AsyncFor(target, expression, suite_seq, seq, type_comment,'), (3979, ' LINENO(n0), n0->n_col_offset,'), (3982, ' return For(target, expression, suite_seq, seq, type_comment,'), (3983, ' LINENO(n), n->n_col_offset,'), (4131, 'ast_for_with_stmt(struct compiling *c, const node *n0, bool is_async)'), (4133, ' const node * const n = is_async ? 
CHILD(n0, 1) : n0;'), (4170, ' return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena);'), (4194, ' return ClassDef(classname, NULL, NULL, s, decorator_seq,'), (4195, ' LINENO(n), n->n_col_offset, c->c_arena);'), (4199, ' s = ast_for_suite(c, CHILD(n, 5));'), (4207, ' return ClassDef(classname, NULL, NULL, s, decorator_seq,'), (4208, ' LINENO(n), n->n_col_offset, c->c_arena);'), (4220, ' call = ast_for_call(c, CHILD(n, 3), dummy, false);'), (4367, ' if (dup == NULL) {'), (4368, ' return PyErr_NoMemory();'), (4369, ' }'), (4393, 'static int'), (4394, 'warn_invalid_escape_sequence(struct compiling *c, const node *n,'), (4395, ' unsigned char first_invalid_escape_char)'), (4396, '{'), (4397, ' PyObject *msg = PyUnicode_FromFormat("invalid escape sequence \\\\%c",'), (4398, ' first_invalid_escape_char);'), (4399, ' if (msg == NULL) {'), (4400, ' return -1;'), (4401, ' }'), (4402, ' if (PyErr_WarnExplicitObject(PyExc_DeprecationWarning, msg,'), (4403, ' c->c_filename, LINENO(n),'), (4404, ' NULL, NULL) < 0)'), (4405, ' {'), (4406, ' if (PyErr_ExceptionMatches(PyExc_DeprecationWarning)) {'), (4407, ' const char *s;'), (4408, ''), (4409, ' /* Replace the DeprecationWarning exception with a SyntaxError'), (4410, ' to get a more accurate error report */'), (4411, ' PyErr_Clear();'), (4412, ''), (4413, ' s = PyUnicode_AsUTF8(msg);'), (4414, ' if (s != NULL) {'), (4415, ' ast_error(c, n, s);'), (4416, ' }'), (4417, ' }'), (4418, ' Py_DECREF(msg);'), (4419, ' return -1;'), (4420, ' }'), (4421, ' Py_DECREF(msg);'), (4422, ' return 0;'), (4423, '}'), (4424, ''), (4429, ' PyObject *v, *u;'), (4433, ' const char *first_invalid_escape;'), (4448, ' if (s >= end || *s & 0x80) {'), (4451, ' if (s >= end)'), (4452, ' break;'), (4474, ' assert(p - buf <= PyBytes_GET_SIZE(u));'), (4483, ' v = _PyUnicode_DecodeUnicodeEscape(s, len, NULL, &first_invalid_escape);'), (4484, ''), (4485, ' if (v != NULL && first_invalid_escape != NULL) {'), (4486, ' if 
(warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) {'), (4487, ' /* We have not decref u before because first_invalid_escape points'), (4488, ' inside u. */'), (4489, ' Py_XDECREF(u);'), (4490, ' Py_DECREF(v);'), (4491, ' return NULL;'), (4492, ' }'), (4493, ' }'), (4494, ' Py_XDECREF(u);'), (4495, ' return v;'), (4502, ' const char *first_invalid_escape;'), (4503, ' PyObject *result = _PyBytes_DecodeEscape(s, len, NULL, 0, NULL,'), (4504, ' &first_invalid_escape);'), (4505, ' if (result == NULL)'), (4506, ' return NULL;'), (4507, ''), (4508, ' if (first_invalid_escape != NULL) {'), (4509, ' if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) {'), (4510, ' Py_DECREF(result);'), (4511, ' return NULL;'), (4512, ' }'), (4513, ' }'), (4514, ' return result;'), (4515, '}'), (4516, ''), (4517, '/* Shift locations for the given node and all its children by adding `lineno`'), (4518, ' and `col_offset` to existing locations. */'), (4519, 'static void fstring_shift_node_locations(node *n, int lineno, int col_offset)'), (4520, '{'), (4521, ' int i;'), (4522, ' n->n_col_offset = n->n_col_offset + col_offset;'), (4523, ' for (i = 0; i < NCH(n); ++i) {'), (4524, ' if (n->n_lineno && n->n_lineno < CHILD(n, i)->n_lineno) {'), (4525, " /* Shifting column offsets unnecessary if there's been newlines. 
*/"), (4526, ' col_offset = 0;'), (4527, ' }'), (4528, ' fstring_shift_node_locations(CHILD(n, i), lineno, col_offset);'), (4529, ' }'), (4530, ' n->n_lineno = n->n_lineno + lineno;'), (4531, '}'), (4532, ''), (4533, '/* Fix locations for the given node and its children.'), (4534, ''), (4535, ' `parent` is the enclosing node.'), (4536, ' `n` is the node which locations are going to be fixed relative to parent.'), (4537, " `expr_str` is the child node's string representation, including braces."), (4538, '*/'), (4539, 'static void'), (4540, 'fstring_fix_node_location(const node *parent, node *n, char *expr_str)'), (4541, '{'), (4542, ' char *substr = NULL;'), (4543, ' char *start;'), (4544, ' int lines = LINENO(parent) - 1;'), (4545, ' int cols = parent->n_col_offset;'), (4546, ' /* Find the full fstring to fix location information in `n`. */'), (4547, ' while (parent && parent->n_type != STRING)'), (4548, ' parent = parent->n_child;'), (4549, ' if (parent && parent->n_str) {'), (4550, ' substr = strstr(parent->n_str, expr_str);'), (4551, ' if (substr) {'), (4552, ' start = substr;'), (4553, ' while (start > parent->n_str) {'), (4554, " if (start[0] == '\\n')"), (4555, ' break;'), (4556, ' start--;'), (4557, ' }'), (4558, ' cols += substr - start;'), (4559, ' /* Fix lineno in mulitline strings. */'), (4560, " while ((substr = strchr(substr + 1, '\\n')))"), (4561, ' lines--;'), (4562, ' }'), (4563, ' }'), (4564, ' fstring_shift_node_locations(n, lines, cols);'), (4575, ' node *mod_n;'), (4579, ' const char *s;'), (4580, ' PyObject *fstring_name;'), (4586, " /* If the substring is all whitespace, it's an error. We need to catch this"), (4587, ' here, and not when we call PyParser_SimpleParseStringFlagsFilename,'), (4588, " because turning the expression '' in to '()' would go from being invalid"), (4589, ' to valid. 
*/'), (4590, ' for (s = expr_start; s != expr_end; s++) {'), (4591, ' char c = *s;'), (4592, ' /* The Python parser ignores only the following whitespace'), (4593, ' characters (\\r already is converted to \\n). */'), (4594, " if (!(c == ' ' || c == '\\t' || c == '\\n' || c == '\\f')) {"), (4598, ' if (s == expr_end) {'), (4606, ' if (str == NULL) {'), (4607, ' PyErr_NoMemory();'), (4609, ' }'), (4617, ' mod_n = PyParser_SimpleParseStringFlagsFilename(str, "<fstring>",'), (4618, ' Py_eval_input, 0);'), (4619, ' if (!mod_n) {'), (4620, ' PyMem_RawFree(str);'), (4621, ' return NULL;'), (4622, ' }'), (4623, ' /* Reuse str to find the correct column offset. */'), (4624, " str[0] = '{';"), (4625, " str[len+1] = '}';"), (4626, ' fstring_fix_node_location(n, mod_n, str);'), (4633, ' Ta3Node_Free(mod_n);'), (4656, ' const char *s = *str;'), (4657, ' const char *literal_start = s;'), (4661, ' while (s < end) {'), (4662, ' char ch = *s++;'), (4663, " if (!raw && ch == '\\\\' && s < end) {"), (4664, ' ch = *s++;'), (4665, " if (ch == 'N') {"), (4666, " if (s < end && *s++ == '{') {"), (4667, " while (s < end && *s++ != '}') {"), (4668, ' }'), (4669, ' continue;'), (4670, ' }'), (4671, ' break;'), (4672, ' }'), (4673, " if (ch == '{' && warn_invalid_escape_sequence(c, n, ch) < 0) {"), (4674, ' return -1;'), (4675, ' }'), (4676, ' }'), (4677, " if (ch == '{' || ch == '}') {"), (4682, ' if (s < end && *s == ch) {'), (4686, ' *str = s + 1;'), (4694, ' *str = s - 1;'), (4702, ' s--;'), (4706, ' *str = s;'), (4707, ' assert(s <= end);'), (4708, " assert(s == end || *s == '{' || *s == '}');"), (4710, ' if (literal_start != s) {'), (4713, ' s - literal_start,'), (4717, ' s - literal_start);'), (5129, ' int fmode;'), (5148, ' state->fmode = 0;'), (5222, ' state->fmode = 1;'), (5244, ' /* Note that the literal can be zero length, if the'), (5245, ' input string is "\\\\\\n" or "\\\\\\r", among others. 
*/'), (5314, ' if (!state->fmode) {'), (5315, ' assert(!state->expr_list.size);')], 'deleted': [(14, '#if PY_MINOR_VERSION < 4'), (15, '#define PyErr_ProgramTextObject PyErr_ProgramText'), (17, '#define PyMem_RawMalloc PyMem_Malloc'), (18, '#define PyMem_RawRealloc PyMem_Realloc'), (19, '#define PyMem_RawFree PyMem_Free'), (115, ' assert(0);'), (116, ' return "(unknown)";'), (604, ' PyObject *c_normalize_args; /* Normalization argument tuple. */'), (611, 'static asdl_seq *ast_for_suite(struct compiling *, const node *);'), (617, 'static stmt_ty ast_for_with_stmt(struct compiling *, const node *, int);'), (618, 'static stmt_ty ast_for_for_stmt(struct compiling *, const node *, int);'), (621, 'static expr_ty ast_for_call(struct compiling *, const node *, expr_ty);'), (640, ' c->c_normalize_args = Py_BuildValue("(sN)", "NFKC", Py_None);'), (641, ' if (!c->c_normalize_args) {'), (642, ' Py_CLEAR(c->c_normalize);'), (643, ' return 0;'), (644, ' }'), (645, ' PyTuple_SET_ITEM(c->c_normalize_args, 1, NULL);'), (665, ' PyTuple_SET_ITEM(c->c_normalize_args, 1, id);'), (666, ' id2 = PyObject_Call(c->c_normalize, c->c_normalize_args, NULL);'), (778, ' assert(0);'), (779, ' return 0;'), (787, ' PyObject *filename, int feature_version,'), (788, ' PyArena *arena)'), (804, ' c.c_normalize_args = NULL;'), (851, ' goto out;'), (852, ' asdl_seq_SET(type_ignores, i, ti);'), (948, ' PyTuple_SET_ITEM(c.c_normalize_args, 1, NULL);'), (949, ' Py_DECREF(c.c_normalize_args);'), (956, ' int feature_version, PyArena *arena)'), (1022, ' if (PyUnicode_CompareWithASCIIString(name, "__debug__") == 0) {'), (1029, ' if (PyUnicode_CompareWithASCIIString(name, *p) == 0) {'), (1385, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1489, ' if (nposargs + nkwonlyargs > 255) {'), (1490, ' ast_error(c, n, "more than 255 arguments");'), (1491, ' return NULL;'), (1492, ' }'), (1493, ''), (1527, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1543, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == 
TYPE_COMMENT) {'), (1559, ' i += 2; /* the star and the name */'), (1560, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1561, ' i += 1; /* the comma, if present */'), (1563, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1585, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1667, ' d = ast_for_call(c, CHILD(n, 3), name_expr);'), (1698, 'ast_for_funcdef_impl(struct compiling *c, const node *n,'), (1699, ' asdl_seq *decorator_seq, int is_async)'), (1712, ' "Async functions are only supported in Python 3.5 and greater");'), (1751, ' type_comment, LINENO(n),'), (1752, ' n->n_col_offset, c->c_arena);'), (1755, ' type_comment, LINENO(n),'), (1756, ' n->n_col_offset, c->c_arena);'), (1762, ' /* async_funcdef: ASYNC funcdef */'), (1764, ' REQ(CHILD(n, 0), ASYNC);'), (1767, ' return ast_for_funcdef_impl(c, CHILD(n, 1), decorator_seq,'), (1768, ' 1 /* is_async */);'), (1776, ' 0 /* is_async */);'), (1783, ' /* async_stmt: ASYNC (funcdef | with_stmt | for_stmt) */'), (1785, ' REQ(CHILD(n, 0), ASYNC);'), (1789, ' return ast_for_funcdef_impl(c, CHILD(n, 1), NULL,'), (1790, ' 1 /* is_async */);'), (1792, ' return ast_for_with_stmt(c, CHILD(n, 1),'), (1793, ' 1 /* is_async */);'), (1796, ' return ast_for_for_stmt(c, CHILD(n, 1),'), (1797, ' 1 /* is_async */);'), (1898, ' int is_async;'), (1901, ' is_async = 0;'), (1904, ' if (TYPE(CHILD(n, 0)) == ASYNC) {'), (1905, ' is_async = 1;'), (1907, ' if (NCH(n) == (5 + is_async)) {'), (1908, ' n = CHILD(n, 4 + is_async);'), (1979, ' if (TYPE(CHILD(n, 0)) == ASYNC) {'), (1990, ' for_ch = CHILD(n, 1 + is_async);'), (1994, ' expression = ast_for_expr(c, CHILD(n, 3 + is_async));'), (2011, ' if (NCH(n) == (5 + is_async)) {'), (2015, ' n = CHILD(n, 4 + is_async);'), (2286, ' pynum = parsenumber(c, s);'), (2509, ' return ast_for_call(c, CHILD(n, 1), left_expr);'), (2638, ' /* there was an AWAIT */'), (2703, ' atom_expr: [AWAIT] atom trailer*'), (2851, 'ast_for_call(struct compiling *c, const node *n, expr_ty func)'), 
(2858, ' int i, nargs, nkeywords, ngens;'), (2867, ' ngens = 0;'), (2873, ' else if (TYPE(CHILD(ch, 1)) == comp_for)'), (2874, ' ngens++;'), (2882, ' if (ngens > 1 || (ngens && (nargs || nkeywords))) {'), (2883, ' ast_error(c, n, "Generator expression must be parenthesized "'), (2884, ' "if not sole argument");'), (2885, ' return NULL;'), (2886, ' }'), (2888, ' if (nargs + nkeywords + ngens > 255) {'), (2889, ' ast_error(c, n, "more than 255 arguments");'), (2890, ' return NULL;'), (2891, ' }'), (2892, ''), (2893, ' args = _Ta3_asdl_seq_new(nargs + ngens, c->c_arena);'), (3858, 'ast_for_for_stmt(struct compiling *c, const node *n, int is_async)'), (3909, ' return AsyncFor(target, expression, suite_seq, seq,'), (3910, ' type_comment, LINENO(n), n->n_col_offset,'), (3913, ' return For(target, expression, suite_seq, seq,'), (3914, ' type_comment, LINENO(n), n->n_col_offset,'), (4062, 'ast_for_with_stmt(struct compiling *c, const node *n, int is_async)'), (4100, ' return AsyncWith(items, body, type_comment, LINENO(n), n->n_col_offset, c->c_arena);'), (4124, ' return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n),'), (4125, ' n->n_col_offset, c->c_arena);'), (4129, ' s = ast_for_suite(c, CHILD(n,5));'), (4137, ' return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n),'), (4138, ' n->n_col_offset, c->c_arena);'), (4150, ' call = ast_for_call(c, CHILD(n, 3), dummy);'), (4324, ' PyObject *u;'), (4342, ' if (*s & 0x80) {'), (4366, ' assert(p - buf <= Py_SIZE(u));'), (4375, ' return PyUnicode_DecodeUnicodeEscape(s, len, NULL);'), (4382, ' return PyBytes_DecodeEscape(s, len, NULL, 0, NULL);'), (4392, ' int all_whitespace = 1;'), (4393, ' int kind;'), (4394, ' void *data;'), (4398, ' PyObject *o, *fstring_name;'), (4400, ' Py_ssize_t i;'), (4406, ' /* We know there are no escapes here, because backslashes are not allowed,'), (4407, " and we know it's utf-8 encoded (per PEP 263). 
But, in order to check"), (4408, ' that each char is not whitespace, we need to decode it to unicode.'), (4409, ' Which is unfortunate, but such is life. */'), (4410, ''), (4411, " /* If the substring is all whitespace, it's an error. We need to catch"), (4412, ' this here, and not when we call PyParser_ASTFromString, because turning'), (4413, " the expression '' in to '()' would go from being invalid to valid. */"), (4414, " /* Note that this code says an empty string is all whitespace. That's"), (4415, " important. There's a test for it: f'{}'. */"), (4416, ' o = PyUnicode_DecodeUTF8(expr_start, expr_end-expr_start, NULL);'), (4417, ' if (o == NULL)'), (4418, ' return NULL;'), (4419, ' len = PyUnicode_GET_LENGTH(o);'), (4420, ' kind = PyUnicode_KIND(o);'), (4421, ' data = PyUnicode_DATA(o);'), (4422, ' for (i = 0; i < len; i++) {'), (4423, ' if (!Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, i))) {'), (4424, ' all_whitespace = 0;'), (4428, ' Py_DECREF(o);'), (4429, ' if (all_whitespace) {'), (4434, ' /* Reuse len to be the length of the utf-8 input string. 
*/'), (4438, ' if (str == NULL)'), (4475, ' const char *literal_start = *str;'), (4476, ' const char *literal_end;'), (4477, ' int in_named_escape = 0;'), (4481, ' for (; *str < end; (*str)++) {'), (4482, ' char ch = **str;'), (4483, " if (!in_named_escape && ch == '{' && (*str)-literal_start >= 2 &&"), (4484, " *(*str-2) == '\\\\' && *(*str-1) == 'N') {"), (4485, ' in_named_escape = 1;'), (4486, " } else if (in_named_escape && ch == '}') {"), (4487, ' in_named_escape = 0;'), (4488, " } else if (ch == '{' || ch == '}') {"), (4493, ' if (*str+1 < end && *(*str+1) == ch) {'), (4497, ' literal_end = *str+1;'), (4498, ' *str += 2;'), (4516, ' literal_end = *str;'), (4517, ' assert(*str <= end);'), (4518, " assert(*str == end || **str == '{' || **str == '}');"), (4520, ' if (literal_start != literal_end) {'), (4523, ' literal_end-literal_start,'), (4527, ' literal_end-literal_start);'), (5060, ' assert(!state->last_str ||'), (5061, ' PyUnicode_GET_LENGTH(state->last_str) != 0);'), (5121, ' if(state->expr_list.size == 0) {'), (5145, " /* If there's only one expression, return it. Otherwise, we need"), (5146, ' to join them together. */'), (5147, ' if (seq->size == 1)'), (5148, ' return seq->elements[0];'), (5149, '')]}
360
171
4,475
28,946
135
994
42
https://github.com/python/typed_ast
CVE-2019-19274
CWE-125
620
SQLCreateDataSource.c
C
_multi_string_alloc_and_expand
/************************************************** * SQLCreateDataSource * * This is a 100% UI so simply pass it on to odbcinst's UI * shadow share. * ************************************************** * This code was created by Peter Harvey @ CodeByDesign. * Released under LGPL 28.JAN.99 * * Contributions from... * ----------------------------------------------- * Peter Harvey - pharvey@codebydesign.com **************************************************/ #include <config.h> #include <odbcinstext.h> /* * Take a wide string consisting of null terminated sections, and copy to a ASCII version */ char* _multi_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc( len + 2 ); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; chr[ len ++ ] = '\0'; return chr; } char* _single_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 ) { len ++; } chr = malloc( len + 1 ); len = 0; while ( in[ len ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; return chr; } SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in ) { SQLWCHAR *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 )); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = in[ len ]; len ++; } chr[ len ++ ] = 0; chr[ len ++ ] = 0; return chr; } SQLWCHAR* _single_string_alloc_and_expand( LPCSTR in ) { SQLWCHAR *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 ) { len ++; } chr = malloc( sizeof( SQLWCHAR ) * ( len + 1 )); len = 0; while ( in[ len ] != 0 ) { chr[ len ] = in[ len ]; len ++; } chr[ len ++ ] = 0; return chr; } void _single_string_copy_to_wide( SQLWCHAR *out, LPCSTR in, int len ) { while ( len > 0 && *in ) { *out = *in; out++; 
in++; len --; } *out = 0; } void _single_copy_to_wide( SQLWCHAR *out, LPCSTR in, int len ) { while ( len >= 0 ) { *out = *in; out++; in++; len --; } } void _single_copy_from_wide( SQLCHAR *out, LPCWSTR in, int len ) { while ( len >= 0 ) { *out = *in; out++; in++; len --; } } void _multi_string_copy_to_wide( SQLWCHAR *out, LPCSTR in, int len ) { while ( len > 0 && ( in[ 0 ] || in[ 1 ] )) { *out = *in; out++; in++; len --; } *out++ = 0; *out++ = 0; } /*! * \brief Invokes a UI (a wizard) to walk User through creating a DSN. * * \param hWnd Input. Parent window handle. This is HWND as per the ODBC * specification but in unixODBC we use a generic window * handle. Caller must cast a HODBCINSTWND to HWND at call. * \param pszDS Input. Data Source Name. This can be a NULL pointer. * * \return BOOL * * \sa ODBCINSTWND */ BOOL SQLCreateDataSource( HWND hWnd, LPCSTR pszDS ) { HODBCINSTWND hODBCInstWnd = (HODBCINSTWND)hWnd; char szName[FILENAME_MAX]; char szNameAndExtension[FILENAME_MAX]; char szPathAndName[FILENAME_MAX]; void * hDLL; BOOL (*pSQLCreateDataSource)(HWND, LPCSTR); inst_logClear(); /* ODBC specification states that hWnd is mandatory. */ if ( !hWnd ) { inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_INVALID_HWND, "" ); return FALSE; } /* initialize libtool */ if ( lt_dlinit() ) { inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, "lt_dlinit() failed" ); return FALSE; } /* get plugin name */ _appendUIPluginExtension( szNameAndExtension, _getUIPluginName( szName, hODBCInstWnd->szUI ) ); /* lets try loading the plugin using an implicit path */ hDLL = lt_dlopen( szNameAndExtension ); if ( hDLL ) { /* change the name, as it avoids it finding it in the calling lib */ pSQLCreateDataSource = (BOOL (*)(HWND, LPCSTR))lt_dlsym( hDLL, "ODBCCreateDataSource" ); if ( pSQLCreateDataSource ) return pSQLCreateDataSource( ( *(hODBCInstWnd->szUI) ? 
hODBCInstWnd->hWnd : NULL ), pszDS ); else inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, (char*)lt_dlerror() ); } else { /* try with explicit path */ _prependUIPluginPath( szPathAndName, szNameAndExtension ); hDLL = lt_dlopen( szPathAndName ); if ( hDLL ) { /* change the name, as it avoids linker finding it in the calling lib */ pSQLCreateDataSource = (BOOL (*)(HWND,LPCSTR))lt_dlsym( hDLL, "ODBCCreateDataSource" ); if ( pSQLCreateDataSource ) return pSQLCreateDataSource( ( *(hODBCInstWnd->szUI) ? hODBCInstWnd->hWnd : NULL ), pszDS ); else inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, (char*)lt_dlerror() ); } } /* report failure to caller */ inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, "" ); return FALSE; } /*! * \brief A wide char version of \sa SQLCreateDataSource. * * \sa SQLCreateDataSource */ BOOL INSTAPI SQLCreateDataSourceW( HWND hwndParent, LPCWSTR lpszDSN ) { BOOL ret; char *ms = _multi_string_alloc_and_copy( lpszDSN ); inst_logClear(); ret = SQLCreateDataSource( hwndParent, ms ); free( ms ); return ret; }
/************************************************** * SQLCreateDataSource * * This is a 100% UI so simply pass it on to odbcinst's UI * shadow share. * ************************************************** * This code was created by Peter Harvey @ CodeByDesign. * Released under LGPL 28.JAN.99 * * Contributions from... * ----------------------------------------------- * Peter Harvey - pharvey@codebydesign.com **************************************************/ #include <config.h> #include <odbcinstext.h> /* * Take a wide string consisting of null terminated sections, and copy to a ASCII version */ char* _multi_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return NULL; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc( len + 2 ); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; chr[ len ++ ] = '\0'; return chr; } char* _single_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return NULL; } while ( in[ len ] != 0 ) { len ++; } chr = malloc( len + 1 ); len = 0; while ( in[ len ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; return chr; } SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in ) { SQLWCHAR *chr; int len = 0; if ( !in ) { return NULL; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 )); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = in[ len ]; len ++; } chr[ len ++ ] = 0; chr[ len ++ ] = 0; return chr; } SQLWCHAR* _single_string_alloc_and_expand( LPCSTR in ) { SQLWCHAR *chr; int len = 0; if ( !in ) { return NULL; } while ( in[ len ] != 0 ) { len ++; } chr = malloc( sizeof( SQLWCHAR ) * ( len + 1 )); len = 0; while ( in[ len ] != 0 ) { chr[ len ] = in[ len ]; len ++; } chr[ len ++ ] = 0; return chr; } void _single_string_copy_to_wide( SQLWCHAR *out, LPCSTR in, int len ) { while ( len > 0 && *in ) { *out = *in; 
out++; in++; len --; } *out = 0; } void _single_copy_to_wide( SQLWCHAR *out, LPCSTR in, int len ) { while ( len >= 0 ) { *out = *in; out++; in++; len --; } } void _single_copy_from_wide( SQLCHAR *out, LPCWSTR in, int len ) { while ( len >= 0 ) { *out = *in; out++; in++; len --; } } void _multi_string_copy_to_wide( SQLWCHAR *out, LPCSTR in, int len ) { while ( len > 0 && ( in[ 0 ] || in[ 1 ] )) { *out = *in; out++; in++; len --; } *out++ = 0; *out++ = 0; } /*! * \brief Invokes a UI (a wizard) to walk User through creating a DSN. * * \param hWnd Input. Parent window handle. This is HWND as per the ODBC * specification but in unixODBC we use a generic window * handle. Caller must cast a HODBCINSTWND to HWND at call. * \param pszDS Input. Data Source Name. This can be a NULL pointer. * * \return BOOL * * \sa ODBCINSTWND */ BOOL SQLCreateDataSource( HWND hWnd, LPCSTR pszDS ) { HODBCINSTWND hODBCInstWnd = (HODBCINSTWND)hWnd; char szName[FILENAME_MAX]; char szNameAndExtension[FILENAME_MAX]; char szPathAndName[FILENAME_MAX]; void * hDLL; BOOL (*pSQLCreateDataSource)(HWND, LPCSTR); inst_logClear(); /* ODBC specification states that hWnd is mandatory. */ if ( !hWnd ) { inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_INVALID_HWND, "" ); return FALSE; } /* initialize libtool */ if ( lt_dlinit() ) { inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, "lt_dlinit() failed" ); return FALSE; } /* get plugin name */ _appendUIPluginExtension( szNameAndExtension, _getUIPluginName( szName, hODBCInstWnd->szUI ) ); /* lets try loading the plugin using an implicit path */ hDLL = lt_dlopen( szNameAndExtension ); if ( hDLL ) { /* change the name, as it avoids it finding it in the calling lib */ pSQLCreateDataSource = (BOOL (*)(HWND, LPCSTR))lt_dlsym( hDLL, "ODBCCreateDataSource" ); if ( pSQLCreateDataSource ) return pSQLCreateDataSource( ( *(hODBCInstWnd->szUI) ? 
hODBCInstWnd->hWnd : NULL ), pszDS ); else inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, (char*)lt_dlerror() ); } else { /* try with explicit path */ _prependUIPluginPath( szPathAndName, szNameAndExtension ); hDLL = lt_dlopen( szPathAndName ); if ( hDLL ) { /* change the name, as it avoids linker finding it in the calling lib */ pSQLCreateDataSource = (BOOL (*)(HWND,LPCSTR))lt_dlsym( hDLL, "ODBCCreateDataSource" ); if ( pSQLCreateDataSource ) return pSQLCreateDataSource( ( *(hODBCInstWnd->szUI) ? hODBCInstWnd->hWnd : NULL ), pszDS ); else inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, (char*)lt_dlerror() ); } } /* report failure to caller */ inst_logPushMsg( __FILE__, __FILE__, __LINE__, LOG_CRITICAL, ODBC_ERROR_GENERAL_ERR, "" ); return FALSE; } /*! * \brief A wide char version of \sa SQLCreateDataSource. * * \sa SQLCreateDataSource */ BOOL INSTAPI SQLCreateDataSourceW( HWND hwndParent, LPCWSTR lpszDSN ) { BOOL ret; char *ms = _multi_string_alloc_and_copy( lpszDSN ); inst_logClear(); ret = SQLCreateDataSource( hwndParent, ms ); free( ms ); return ret; }
SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in ) { SQLWCHAR *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 )); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = in[ len ]; len ++; } chr[ len ++ ] = 0; chr[ len ++ ] = 0; return chr; }
SQLWCHAR* _multi_string_alloc_and_expand( LPCSTR in ) { SQLWCHAR *chr; int len = 0; if ( !in ) { return NULL; } while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { len ++; } chr = malloc(sizeof( SQLWCHAR ) * ( len + 2 )); len = 0; while ( in[ len ] != 0 || in[ len + 1 ] != 0 ) { chr[ len ] = in[ len ]; len ++; } chr[ len ++ ] = 0; chr[ len ++ ] = 0; return chr; }
{'added': [(29, ' return NULL;'), (58, ' return NULL;'), (86, ' return NULL;'), (115, ' return NULL;')], 'deleted': [(29, ' return in;'), (58, ' return in;'), (86, ' return in;'), (115, ' return in;')]}
4
4
189
972
23
121
6
https://github.com/lurcher/unixODBC
CVE-2018-7485
CWE-119
2,178
ldebug.c
C
changedline
/* ** $Id: ldebug.c $ ** Debug Interface ** See Copyright Notice in lua.h */ #define ldebug_c #define LUA_CORE #include "lprefix.h" #include <stdarg.h> #include <stddef.h> #include <string.h> #include "lua.h" #include "lapi.h" #include "lcode.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lobject.h" #include "lopcodes.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lvm.h" #define noLuaClosure(f) ((f) == NULL || (f)->c.tt == LUA_VCCL) /* inverse of 'pcRel' */ #define invpcRel(pc, p) ((p)->code + (pc) + 1) static const char *funcnamefromcode (lua_State *L, CallInfo *ci, const char **name); static int currentpc (CallInfo *ci) { lua_assert(isLua(ci)); return pcRel(ci->u.l.savedpc, ci_func(ci)->p); } /* ** Get a "base line" to find the line corresponding to an instruction. ** For that, search the array of absolute line info for the largest saved ** instruction smaller or equal to the wanted instruction. A special ** case is when there is no absolute info or the instruction is before ** the first absolute one. */ static int getbaseline (const Proto *f, int pc, int *basepc) { if (f->sizeabslineinfo == 0 || pc < f->abslineinfo[0].pc) { *basepc = -1; /* start from the beginning */ return f->linedefined; } else { unsigned int i; if (pc >= f->abslineinfo[f->sizeabslineinfo - 1].pc) i = f->sizeabslineinfo - 1; /* instruction is after last saved one */ else { /* binary search */ unsigned int j = f->sizeabslineinfo - 1; /* pc < anchorlines[j] */ i = 0; /* abslineinfo[i] <= pc */ while (i < j - 1) { unsigned int m = (j + i) / 2; if (pc >= f->abslineinfo[m].pc) i = m; else j = m; } } *basepc = f->abslineinfo[i].pc; return f->abslineinfo[i].line; } } /* ** Get the line corresponding to instruction 'pc' in function 'f'; ** first gets a base line and from there does the increments until ** the desired instruction. */ int luaG_getfuncline (const Proto *f, int pc) { if (f->lineinfo == NULL) /* no debug information? 
*/ return -1; else { int basepc; int baseline = getbaseline(f, pc, &basepc); while (basepc++ < pc) { /* walk until given instruction */ lua_assert(f->lineinfo[basepc] != ABSLINEINFO); baseline += f->lineinfo[basepc]; /* correct line */ } return baseline; } } static int getcurrentline (CallInfo *ci) { return luaG_getfuncline(ci_func(ci)->p, currentpc(ci)); } /* ** Set 'trap' for all active Lua frames. ** This function can be called during a signal, under "reasonable" ** assumptions. A new 'ci' is completely linked in the list before it ** becomes part of the "active" list, and we assume that pointers are ** atomic; see comment in next function. ** (A compiler doing interprocedural optimizations could, theoretically, ** reorder memory writes in such a way that the list could be ** temporarily broken while inserting a new element. We simply assume it ** has no good reasons to do that.) */ static void settraps (CallInfo *ci) { for (; ci != NULL; ci = ci->previous) if (isLua(ci)) ci->u.l.trap = 1; } /* ** This function can be called during a signal, under "reasonable" ** assumptions. ** Fields 'basehookcount' and 'hookcount' (set by 'resethookcount') ** are for debug only, and it is no problem if they get arbitrary ** values (causes at most one wrong hook call). 'hookmask' is an atomic ** value. We assume that pointers are atomic too (e.g., gcc ensures that ** for all platforms where it runs). Moreover, 'hook' is always checked ** before being called (see 'luaD_hook'). */ LUA_API void lua_sethook (lua_State *L, lua_Hook func, int mask, int count) { if (func == NULL || mask == 0) { /* turn off hooks? 
*/ mask = 0; func = NULL; } L->hook = func; L->basehookcount = count; resethookcount(L); L->hookmask = cast_byte(mask); if (mask) settraps(L->ci); /* to trace inside 'luaV_execute' */ } LUA_API lua_Hook lua_gethook (lua_State *L) { return L->hook; } LUA_API int lua_gethookmask (lua_State *L) { return L->hookmask; } LUA_API int lua_gethookcount (lua_State *L) { return L->basehookcount; } LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) { int status; CallInfo *ci; if (level < 0) return 0; /* invalid (negative) level */ lua_lock(L); for (ci = L->ci; level > 0 && ci != &L->base_ci; ci = ci->previous) level--; if (level == 0 && ci != &L->base_ci) { /* level found? */ status = 1; ar->i_ci = ci; } else status = 0; /* no such level */ lua_unlock(L); return status; } static const char *upvalname (const Proto *p, int uv) { TString *s = check_exp(uv < p->sizeupvalues, p->upvalues[uv].name); if (s == NULL) return "?"; else return getstr(s); } static const char *findvararg (CallInfo *ci, int n, StkId *pos) { if (clLvalue(s2v(ci->func))->p->is_vararg) { int nextra = ci->u.l.nextraargs; if (n >= -nextra) { /* 'n' is negative */ *pos = ci->func - nextra - (n + 1); return "(vararg)"; /* generic name for any vararg */ } } return NULL; /* no such vararg */ } const char *luaG_findlocal (lua_State *L, CallInfo *ci, int n, StkId *pos) { StkId base = ci->func + 1; const char *name = NULL; if (isLua(ci)) { if (n < 0) /* access to vararg values? */ return findvararg(ci, n, pos); else name = luaF_getlocalname(ci_func(ci)->p, n, currentpc(ci)); } if (name == NULL) { /* no 'standard' name? */ StkId limit = (ci == L->ci) ? L->top : ci->next->func; if (limit - base >= n && n > 0) { /* is 'n' inside 'ci' stack? */ /* generic name for any valid slot */ name = isLua(ci) ? 
"(temporary)" : "(C temporary)"; } else return NULL; /* no name */ } if (pos) *pos = base + (n - 1); return name; } LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) { const char *name; lua_lock(L); if (ar == NULL) { /* information about non-active function? */ if (!isLfunction(s2v(L->top - 1))) /* not a Lua function? */ name = NULL; else /* consider live variables at function start (parameters) */ name = luaF_getlocalname(clLvalue(s2v(L->top - 1))->p, n, 0); } else { /* active function; get information through 'ar' */ StkId pos = NULL; /* to avoid warnings */ name = luaG_findlocal(L, ar->i_ci, n, &pos); if (name) { setobjs2s(L, L->top, pos); api_incr_top(L); } } lua_unlock(L); return name; } LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) { StkId pos = NULL; /* to avoid warnings */ const char *name; lua_lock(L); name = luaG_findlocal(L, ar->i_ci, n, &pos); if (name) { setobjs2s(L, pos, L->top - 1); L->top--; /* pop value */ } lua_unlock(L); return name; } static void funcinfo (lua_Debug *ar, Closure *cl) { if (noLuaClosure(cl)) { ar->source = "=[C]"; ar->srclen = LL("=[C]"); ar->linedefined = -1; ar->lastlinedefined = -1; ar->what = "C"; } else { const Proto *p = cl->l.p; if (p->source) { ar->source = getstr(p->source); ar->srclen = tsslen(p->source); } else { ar->source = "=?"; ar->srclen = LL("=?"); } ar->linedefined = p->linedefined; ar->lastlinedefined = p->lastlinedefined; ar->what = (ar->linedefined == 0) ? 
"main" : "Lua"; } luaO_chunkid(ar->short_src, ar->source, ar->srclen); } static int nextline (const Proto *p, int currentline, int pc) { if (p->lineinfo[pc] != ABSLINEINFO) return currentline + p->lineinfo[pc]; else return luaG_getfuncline(p, pc); } static void collectvalidlines (lua_State *L, Closure *f) { if (noLuaClosure(f)) { setnilvalue(s2v(L->top)); api_incr_top(L); } else { int i; TValue v; const Proto *p = f->l.p; int currentline = p->linedefined; Table *t = luaH_new(L); /* new table to store active lines */ sethvalue2s(L, L->top, t); /* push it on stack */ api_incr_top(L); setbtvalue(&v); /* boolean 'true' to be the value of all indices */ for (i = 0; i < p->sizelineinfo; i++) { /* for all lines with code */ currentline = nextline(p, currentline, i); luaH_setint(L, t, currentline, &v); /* table[line] = true */ } } } static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) { if (ci == NULL) /* no 'ci'? */ return NULL; /* no info */ else if (ci->callstatus & CIST_FIN) { /* is this a finalizer? */ *name = "__gc"; return "metamethod"; /* report it as such */ } /* calling function is a known Lua function? */ else if (!(ci->callstatus & CIST_TAIL) && isLua(ci->previous)) return funcnamefromcode(L, ci->previous, name); else return NULL; /* no way to find a name */ } static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar, Closure *f, CallInfo *ci) { int status = 1; for (; *what; what++) { switch (*what) { case 'S': { funcinfo(ar, f); break; } case 'l': { ar->currentline = (ci && isLua(ci)) ? getcurrentline(ci) : -1; break; } case 'u': { ar->nups = (f == NULL) ? 0 : f->c.nupvalues; if (noLuaClosure(f)) { ar->isvararg = 1; ar->nparams = 0; } else { ar->isvararg = f->l.p->is_vararg; ar->nparams = f->l.p->numparams; } break; } case 't': { ar->istailcall = (ci) ? 
ci->callstatus & CIST_TAIL : 0; break; } case 'n': { ar->namewhat = getfuncname(L, ci, &ar->name); if (ar->namewhat == NULL) { ar->namewhat = ""; /* not found */ ar->name = NULL; } break; } case 'r': { if (ci == NULL || !(ci->callstatus & CIST_TRAN)) ar->ftransfer = ar->ntransfer = 0; else { ar->ftransfer = ci->u2.transferinfo.ftransfer; ar->ntransfer = ci->u2.transferinfo.ntransfer; } break; } case 'L': case 'f': /* handled by lua_getinfo */ break; default: status = 0; /* invalid option */ } } return status; } LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) { int status; Closure *cl; CallInfo *ci; TValue *func; lua_lock(L); if (*what == '>') { ci = NULL; func = s2v(L->top - 1); api_check(L, ttisfunction(func), "function expected"); what++; /* skip the '>' */ L->top--; /* pop function */ } else { ci = ar->i_ci; func = s2v(ci->func); lua_assert(ttisfunction(func)); } cl = ttisclosure(func) ? clvalue(func) : NULL; status = auxgetinfo(L, what, ar, cl, ci); if (strchr(what, 'f')) { setobj2s(L, L->top, func); api_incr_top(L); } if (strchr(what, 'L')) collectvalidlines(L, cl); lua_unlock(L); return status; } /* ** {====================================================== ** Symbolic Execution ** ======================================================= */ static const char *getobjname (const Proto *p, int lastpc, int reg, const char **name); /* ** Find a "name" for the constant 'c'. */ static void kname (const Proto *p, int c, const char **name) { TValue *kvalue = &p->k[c]; *name = (ttisstring(kvalue)) ? svalue(kvalue) : "?"; } /* ** Find a "name" for the register 'c'. */ static void rname (const Proto *p, int pc, int c, const char **name) { const char *what = getobjname(p, pc, c, name); /* search for 'c' */ if (!(what && *what == 'c')) /* did not find a constant name? */ *name = "?"; } /* ** Find a "name" for a 'C' value in an RK instruction. 
*/ static void rkname (const Proto *p, int pc, Instruction i, const char **name) { int c = GETARG_C(i); /* key index */ if (GETARG_k(i)) /* is 'c' a constant? */ kname(p, c, name); else /* 'c' is a register */ rname(p, pc, c, name); } static int filterpc (int pc, int jmptarget) { if (pc < jmptarget) /* is code conditional (inside a jump)? */ return -1; /* cannot know who sets that register */ else return pc; /* current position sets that register */ } /* ** Try to find last instruction before 'lastpc' that modified register 'reg'. */ static int findsetreg (const Proto *p, int lastpc, int reg) { int pc; int setreg = -1; /* keep last instruction that changed 'reg' */ int jmptarget = 0; /* any code before this address is conditional */ if (testMMMode(GET_OPCODE(p->code[lastpc]))) lastpc--; /* previous instruction was not actually executed */ for (pc = 0; pc < lastpc; pc++) { Instruction i = p->code[pc]; OpCode op = GET_OPCODE(i); int a = GETARG_A(i); int change; /* true if current instruction changed 'reg' */ switch (op) { case OP_LOADNIL: { /* set registers from 'a' to 'a+b' */ int b = GETARG_B(i); change = (a <= reg && reg <= a + b); break; } case OP_TFORCALL: { /* affect all regs above its base */ change = (reg >= a + 2); break; } case OP_CALL: case OP_TAILCALL: { /* affect all registers above base */ change = (reg >= a); break; } case OP_JMP: { /* doesn't change registers, but changes 'jmptarget' */ int b = GETARG_sJ(i); int dest = pc + 1 + b; /* jump does not skip 'lastpc' and is larger than current one? 
*/ if (dest <= lastpc && dest > jmptarget) jmptarget = dest; /* update 'jmptarget' */ change = 0; break; } default: /* any instruction that sets A */ change = (testAMode(op) && reg == a); break; } if (change) setreg = filterpc(pc, jmptarget); } return setreg; } /* ** Check whether table being indexed by instruction 'i' is the ** environment '_ENV' */ static const char *gxf (const Proto *p, int pc, Instruction i, int isup) { int t = GETARG_B(i); /* table index */ const char *name; /* name of indexed variable */ if (isup) /* is an upvalue? */ name = upvalname(p, t); else getobjname(p, pc, t, &name); return (name && strcmp(name, LUA_ENV) == 0) ? "global" : "field"; } static const char *getobjname (const Proto *p, int lastpc, int reg, const char **name) { int pc; *name = luaF_getlocalname(p, reg + 1, lastpc); if (*name) /* is a local? */ return "local"; /* else try symbolic execution */ pc = findsetreg(p, lastpc, reg); if (pc != -1) { /* could find instruction? */ Instruction i = p->code[pc]; OpCode op = GET_OPCODE(i); switch (op) { case OP_MOVE: { int b = GETARG_B(i); /* move from 'b' to 'a' */ if (b < GETARG_A(i)) return getobjname(p, pc, b, name); /* get name for 'b' */ break; } case OP_GETTABUP: { int k = GETARG_C(i); /* key index */ kname(p, k, name); return gxf(p, pc, i, 1); } case OP_GETTABLE: { int k = GETARG_C(i); /* key index */ rname(p, pc, k, name); return gxf(p, pc, i, 0); } case OP_GETI: { *name = "integer index"; return "field"; } case OP_GETFIELD: { int k = GETARG_C(i); /* key index */ kname(p, k, name); return gxf(p, pc, i, 0); } case OP_GETUPVAL: { *name = upvalname(p, GETARG_B(i)); return "upvalue"; } case OP_LOADK: case OP_LOADKX: { int b = (op == OP_LOADK) ? 
GETARG_Bx(i) : GETARG_Ax(p->code[pc + 1]); if (ttisstring(&p->k[b])) { *name = svalue(&p->k[b]); return "constant"; } break; } case OP_SELF: { rkname(p, pc, i, name); return "method"; } default: break; /* go through to return NULL */ } } return NULL; /* could not find reasonable name */ } /* ** Try to find a name for a function based on the code that called it. ** (Only works when function was called by a Lua function.) ** Returns what the name is (e.g., "for iterator", "method", ** "metamethod") and sets '*name' to point to the name. */ static const char *funcnamefromcode (lua_State *L, CallInfo *ci, const char **name) { TMS tm = (TMS)0; /* (initial value avoids warnings) */ const Proto *p = ci_func(ci)->p; /* calling function */ int pc = currentpc(ci); /* calling instruction index */ Instruction i = p->code[pc]; /* calling instruction */ if (ci->callstatus & CIST_HOOKED) { /* was it called inside a hook? */ *name = "?"; return "hook"; } switch (GET_OPCODE(i)) { case OP_CALL: case OP_TAILCALL: return getobjname(p, pc, GETARG_A(i), name); /* get function name */ case OP_TFORCALL: { /* for iterator */ *name = "for iterator"; return "for iterator"; } /* other instructions can do calls through metamethods */ case OP_SELF: case OP_GETTABUP: case OP_GETTABLE: case OP_GETI: case OP_GETFIELD: tm = TM_INDEX; break; case OP_SETTABUP: case OP_SETTABLE: case OP_SETI: case OP_SETFIELD: tm = TM_NEWINDEX; break; case OP_MMBIN: case OP_MMBINI: case OP_MMBINK: { tm = cast(TMS, GETARG_C(i)); break; } case OP_UNM: tm = TM_UNM; break; case OP_BNOT: tm = TM_BNOT; break; case OP_LEN: tm = TM_LEN; break; case OP_CONCAT: tm = TM_CONCAT; break; case OP_EQ: tm = TM_EQ; break; case OP_LT: case OP_LE: case OP_LTI: case OP_LEI: *name = "order"; /* '<=' can call '__lt', etc. 
*/ return "metamethod"; case OP_CLOSE: case OP_RETURN: *name = "close"; return "metamethod"; default: return NULL; /* cannot find a reasonable name */ } *name = getstr(G(L)->tmname[tm]) + 2; return "metamethod"; } /* }====================================================== */ /* ** The subtraction of two potentially unrelated pointers is ** not ISO C, but it should not crash a program; the subsequent ** checks are ISO C and ensure a correct result. */ static int isinstack (CallInfo *ci, const TValue *o) { StkId base = ci->func + 1; ptrdiff_t i = cast(StkId, o) - base; return (0 <= i && i < (ci->top - base) && s2v(base + i) == o); } /* ** Checks whether value 'o' came from an upvalue. (That can only happen ** with instructions OP_GETTABUP/OP_SETTABUP, which operate directly on ** upvalues.) */ static const char *getupvalname (CallInfo *ci, const TValue *o, const char **name) { LClosure *c = ci_func(ci); int i; for (i = 0; i < c->nupvalues; i++) { if (c->upvals[i]->v == o) { *name = upvalname(c->p, i); return "upvalue"; } } return NULL; } static const char *varinfo (lua_State *L, const TValue *o) { const char *name = NULL; /* to avoid warnings */ CallInfo *ci = L->ci; const char *kind = NULL; if (isLua(ci)) { kind = getupvalname(ci, o, &name); /* check whether 'o' is an upvalue */ if (!kind && isinstack(ci, o)) /* no? try a register */ kind = getobjname(ci_func(ci)->p, currentpc(ci), cast_int(cast(StkId, o) - (ci->func + 1)), &name); } return (kind) ? 
luaO_pushfstring(L, " (%s '%s')", kind, name) : ""; } l_noret luaG_typeerror (lua_State *L, const TValue *o, const char *op) { const char *t = luaT_objtypename(L, o); luaG_runerror(L, "attempt to %s a %s value%s", op, t, varinfo(L, o)); } l_noret luaG_forerror (lua_State *L, const TValue *o, const char *what) { luaG_runerror(L, "bad 'for' %s (number expected, got %s)", what, luaT_objtypename(L, o)); } l_noret luaG_concaterror (lua_State *L, const TValue *p1, const TValue *p2) { if (ttisstring(p1) || cvt2str(p1)) p1 = p2; luaG_typeerror(L, p1, "concatenate"); } l_noret luaG_opinterror (lua_State *L, const TValue *p1, const TValue *p2, const char *msg) { if (!ttisnumber(p1)) /* first operand is wrong? */ p2 = p1; /* now second is wrong */ luaG_typeerror(L, p2, msg); } /* ** Error when both values are convertible to numbers, but not to integers */ l_noret luaG_tointerror (lua_State *L, const TValue *p1, const TValue *p2) { lua_Integer temp; if (!tointegerns(p1, &temp)) p2 = p1; luaG_runerror(L, "number%s has no integer representation", varinfo(L, p2)); } l_noret luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) { const char *t1 = luaT_objtypename(L, p1); const char *t2 = luaT_objtypename(L, p2); if (strcmp(t1, t2) == 0) luaG_runerror(L, "attempt to compare two %s values", t1); else luaG_runerror(L, "attempt to compare %s with %s", t1, t2); } /* add src:line information to 'msg' */ const char *luaG_addinfo (lua_State *L, const char *msg, TString *src, int line) { char buff[LUA_IDSIZE]; if (src) luaO_chunkid(buff, getstr(src), tsslen(src)); else { /* no source available; use "?" instead */ buff[0] = '?'; buff[1] = '\0'; } return luaO_pushfstring(L, "%s:%d: %s", buff, line, msg); } l_noret luaG_errormsg (lua_State *L) { if (L->errfunc != 0) { /* is there an error handling function? 
*/ StkId errfunc = restorestack(L, L->errfunc); lua_assert(ttisfunction(s2v(errfunc))); setobjs2s(L, L->top, L->top - 1); /* move argument */ setobjs2s(L, L->top - 1, errfunc); /* push function */ L->top++; /* assume EXTRA_STACK */ luaD_callnoyield(L, L->top - 2, 1); /* call it */ } luaD_throw(L, LUA_ERRRUN); } l_noret luaG_runerror (lua_State *L, const char *fmt, ...) { CallInfo *ci = L->ci; const char *msg; va_list argp; luaC_checkGC(L); /* error message uses memory */ va_start(argp, fmt); msg = luaO_pushvfstring(L, fmt, argp); /* format message */ va_end(argp); if (isLua(ci)) /* if Lua function, add source:line information */ luaG_addinfo(L, msg, ci_func(ci)->p->source, getcurrentline(ci)); luaG_errormsg(L); } /* ** Check whether new instruction 'newpc' is in a different line from ** previous instruction 'oldpc'. */ static int changedline (const Proto *p, int oldpc, int newpc) { while (oldpc++ < newpc) { if (p->lineinfo[oldpc] != 0) return (luaG_getfuncline(p, oldpc - 1) != luaG_getfuncline(p, newpc)); } return 0; /* no line changes in the way */ } /* ** Traces the execution of a Lua function. Called before the execution ** of each opcode, when debug is on. 'L->oldpc' stores the last ** instruction traced, to detect line changes. When entering a new ** function, 'npci' will be zero and will test as a new line without ** the need for 'oldpc'; so, 'oldpc' does not need to be initialized ** before. Some exceptional conditions may return to a function without ** updating 'oldpc'. In that case, 'oldpc' may be invalid; if so, it is ** reset to zero. (A wrong but valid 'oldpc' at most causes an extra ** call to a line hook.) */ int luaG_traceexec (lua_State *L, const Instruction *pc) { CallInfo *ci = L->ci; lu_byte mask = L->hookmask; const Proto *p = ci_func(ci)->p; int counthook; /* 'L->oldpc' may be invalid; reset it in this case */ int oldpc = (L->oldpc < p->sizecode) ? L->oldpc : 0; if (!(mask & (LUA_MASKLINE | LUA_MASKCOUNT))) { /* no hooks? 
*/ ci->u.l.trap = 0; /* don't need to stop again */ return 0; /* turn off 'trap' */ } pc++; /* reference is always next instruction */ ci->u.l.savedpc = pc; /* save 'pc' */ counthook = (--L->hookcount == 0 && (mask & LUA_MASKCOUNT)); if (counthook) resethookcount(L); /* reset count */ else if (!(mask & LUA_MASKLINE)) return 1; /* no line hook and count != 0; nothing to be done now */ if (ci->callstatus & CIST_HOOKYIELD) { /* called hook last time? */ ci->callstatus &= ~CIST_HOOKYIELD; /* erase mark */ return 1; /* do not call hook again (VM yielded, so it did not move) */ } if (!isIT(*(ci->u.l.savedpc - 1))) L->top = ci->top; /* prepare top */ if (counthook) luaD_hook(L, LUA_HOOKCOUNT, -1, 0, 0); /* call count hook */ if (mask & LUA_MASKLINE) { int npci = pcRel(pc, p); if (npci == 0 || /* call linehook when enter a new function, */ pc <= invpcRel(oldpc, p) || /* when jump back (loop), or when */ changedline(p, oldpc, npci)) { /* enter new line */ int newline = luaG_getfuncline(p, npci); luaD_hook(L, LUA_HOOKLINE, newline, 0, 0); /* call line hook */ } L->oldpc = npci; /* 'pc' of last call to line hook */ } if (L->status == LUA_YIELD) { /* did hook yield? */ if (counthook) L->hookcount = 1; /* undo decrement to zero */ ci->u.l.savedpc--; /* undo increment (resume will increment it again) */ ci->callstatus |= CIST_HOOKYIELD; /* mark that it yielded */ luaD_throw(L, LUA_YIELD); } return 1; /* keep 'trap' on */ }
/* ** $Id: ldebug.c $ ** Debug Interface ** See Copyright Notice in lua.h */ #define ldebug_c #define LUA_CORE #include "lprefix.h" #include <stdarg.h> #include <stddef.h> #include <string.h> #include "lua.h" #include "lapi.h" #include "lcode.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lobject.h" #include "lopcodes.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lvm.h" #define noLuaClosure(f) ((f) == NULL || (f)->c.tt == LUA_VCCL) /* inverse of 'pcRel' */ #define invpcRel(pc, p) ((p)->code + (pc) + 1) static const char *funcnamefromcode (lua_State *L, CallInfo *ci, const char **name); static int currentpc (CallInfo *ci) { lua_assert(isLua(ci)); return pcRel(ci->u.l.savedpc, ci_func(ci)->p); } /* ** Get a "base line" to find the line corresponding to an instruction. ** For that, search the array of absolute line info for the largest saved ** instruction smaller or equal to the wanted instruction. A special ** case is when there is no absolute info or the instruction is before ** the first absolute one. */ static int getbaseline (const Proto *f, int pc, int *basepc) { if (f->sizeabslineinfo == 0 || pc < f->abslineinfo[0].pc) { *basepc = -1; /* start from the beginning */ return f->linedefined; } else { unsigned int i; if (pc >= f->abslineinfo[f->sizeabslineinfo - 1].pc) i = f->sizeabslineinfo - 1; /* instruction is after last saved one */ else { /* binary search */ unsigned int j = f->sizeabslineinfo - 1; /* pc < anchorlines[j] */ i = 0; /* abslineinfo[i] <= pc */ while (i < j - 1) { unsigned int m = (j + i) / 2; if (pc >= f->abslineinfo[m].pc) i = m; else j = m; } } *basepc = f->abslineinfo[i].pc; return f->abslineinfo[i].line; } } /* ** Get the line corresponding to instruction 'pc' in function 'f'; ** first gets a base line and from there does the increments until ** the desired instruction. */ int luaG_getfuncline (const Proto *f, int pc) { if (f->lineinfo == NULL) /* no debug information? 
*/ return -1; else { int basepc; int baseline = getbaseline(f, pc, &basepc); while (basepc++ < pc) { /* walk until given instruction */ lua_assert(f->lineinfo[basepc] != ABSLINEINFO); baseline += f->lineinfo[basepc]; /* correct line */ } return baseline; } } static int getcurrentline (CallInfo *ci) { return luaG_getfuncline(ci_func(ci)->p, currentpc(ci)); } /* ** Set 'trap' for all active Lua frames. ** This function can be called during a signal, under "reasonable" ** assumptions. A new 'ci' is completely linked in the list before it ** becomes part of the "active" list, and we assume that pointers are ** atomic; see comment in next function. ** (A compiler doing interprocedural optimizations could, theoretically, ** reorder memory writes in such a way that the list could be ** temporarily broken while inserting a new element. We simply assume it ** has no good reasons to do that.) */ static void settraps (CallInfo *ci) { for (; ci != NULL; ci = ci->previous) if (isLua(ci)) ci->u.l.trap = 1; } /* ** This function can be called during a signal, under "reasonable" ** assumptions. ** Fields 'basehookcount' and 'hookcount' (set by 'resethookcount') ** are for debug only, and it is no problem if they get arbitrary ** values (causes at most one wrong hook call). 'hookmask' is an atomic ** value. We assume that pointers are atomic too (e.g., gcc ensures that ** for all platforms where it runs). Moreover, 'hook' is always checked ** before being called (see 'luaD_hook'). */ LUA_API void lua_sethook (lua_State *L, lua_Hook func, int mask, int count) { if (func == NULL || mask == 0) { /* turn off hooks? 
*/ mask = 0; func = NULL; } L->hook = func; L->basehookcount = count; resethookcount(L); L->hookmask = cast_byte(mask); if (mask) settraps(L->ci); /* to trace inside 'luaV_execute' */ } LUA_API lua_Hook lua_gethook (lua_State *L) { return L->hook; } LUA_API int lua_gethookmask (lua_State *L) { return L->hookmask; } LUA_API int lua_gethookcount (lua_State *L) { return L->basehookcount; } LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) { int status; CallInfo *ci; if (level < 0) return 0; /* invalid (negative) level */ lua_lock(L); for (ci = L->ci; level > 0 && ci != &L->base_ci; ci = ci->previous) level--; if (level == 0 && ci != &L->base_ci) { /* level found? */ status = 1; ar->i_ci = ci; } else status = 0; /* no such level */ lua_unlock(L); return status; } static const char *upvalname (const Proto *p, int uv) { TString *s = check_exp(uv < p->sizeupvalues, p->upvalues[uv].name); if (s == NULL) return "?"; else return getstr(s); } static const char *findvararg (CallInfo *ci, int n, StkId *pos) { if (clLvalue(s2v(ci->func))->p->is_vararg) { int nextra = ci->u.l.nextraargs; if (n >= -nextra) { /* 'n' is negative */ *pos = ci->func - nextra - (n + 1); return "(vararg)"; /* generic name for any vararg */ } } return NULL; /* no such vararg */ } const char *luaG_findlocal (lua_State *L, CallInfo *ci, int n, StkId *pos) { StkId base = ci->func + 1; const char *name = NULL; if (isLua(ci)) { if (n < 0) /* access to vararg values? */ return findvararg(ci, n, pos); else name = luaF_getlocalname(ci_func(ci)->p, n, currentpc(ci)); } if (name == NULL) { /* no 'standard' name? */ StkId limit = (ci == L->ci) ? L->top : ci->next->func; if (limit - base >= n && n > 0) { /* is 'n' inside 'ci' stack? */ /* generic name for any valid slot */ name = isLua(ci) ? 
"(temporary)" : "(C temporary)"; } else return NULL; /* no name */ } if (pos) *pos = base + (n - 1); return name; } LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) { const char *name; lua_lock(L); if (ar == NULL) { /* information about non-active function? */ if (!isLfunction(s2v(L->top - 1))) /* not a Lua function? */ name = NULL; else /* consider live variables at function start (parameters) */ name = luaF_getlocalname(clLvalue(s2v(L->top - 1))->p, n, 0); } else { /* active function; get information through 'ar' */ StkId pos = NULL; /* to avoid warnings */ name = luaG_findlocal(L, ar->i_ci, n, &pos); if (name) { setobjs2s(L, L->top, pos); api_incr_top(L); } } lua_unlock(L); return name; } LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) { StkId pos = NULL; /* to avoid warnings */ const char *name; lua_lock(L); name = luaG_findlocal(L, ar->i_ci, n, &pos); if (name) { setobjs2s(L, pos, L->top - 1); L->top--; /* pop value */ } lua_unlock(L); return name; } static void funcinfo (lua_Debug *ar, Closure *cl) { if (noLuaClosure(cl)) { ar->source = "=[C]"; ar->srclen = LL("=[C]"); ar->linedefined = -1; ar->lastlinedefined = -1; ar->what = "C"; } else { const Proto *p = cl->l.p; if (p->source) { ar->source = getstr(p->source); ar->srclen = tsslen(p->source); } else { ar->source = "=?"; ar->srclen = LL("=?"); } ar->linedefined = p->linedefined; ar->lastlinedefined = p->lastlinedefined; ar->what = (ar->linedefined == 0) ? 
"main" : "Lua"; } luaO_chunkid(ar->short_src, ar->source, ar->srclen); } static int nextline (const Proto *p, int currentline, int pc) { if (p->lineinfo[pc] != ABSLINEINFO) return currentline + p->lineinfo[pc]; else return luaG_getfuncline(p, pc); } static void collectvalidlines (lua_State *L, Closure *f) { if (noLuaClosure(f)) { setnilvalue(s2v(L->top)); api_incr_top(L); } else { int i; TValue v; const Proto *p = f->l.p; int currentline = p->linedefined; Table *t = luaH_new(L); /* new table to store active lines */ sethvalue2s(L, L->top, t); /* push it on stack */ api_incr_top(L); setbtvalue(&v); /* boolean 'true' to be the value of all indices */ for (i = 0; i < p->sizelineinfo; i++) { /* for all lines with code */ currentline = nextline(p, currentline, i); luaH_setint(L, t, currentline, &v); /* table[line] = true */ } } } static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) { if (ci == NULL) /* no 'ci'? */ return NULL; /* no info */ else if (ci->callstatus & CIST_FIN) { /* is this a finalizer? */ *name = "__gc"; return "metamethod"; /* report it as such */ } /* calling function is a known Lua function? */ else if (!(ci->callstatus & CIST_TAIL) && isLua(ci->previous)) return funcnamefromcode(L, ci->previous, name); else return NULL; /* no way to find a name */ } static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar, Closure *f, CallInfo *ci) { int status = 1; for (; *what; what++) { switch (*what) { case 'S': { funcinfo(ar, f); break; } case 'l': { ar->currentline = (ci && isLua(ci)) ? getcurrentline(ci) : -1; break; } case 'u': { ar->nups = (f == NULL) ? 0 : f->c.nupvalues; if (noLuaClosure(f)) { ar->isvararg = 1; ar->nparams = 0; } else { ar->isvararg = f->l.p->is_vararg; ar->nparams = f->l.p->numparams; } break; } case 't': { ar->istailcall = (ci) ? 
ci->callstatus & CIST_TAIL : 0; break; } case 'n': { ar->namewhat = getfuncname(L, ci, &ar->name); if (ar->namewhat == NULL) { ar->namewhat = ""; /* not found */ ar->name = NULL; } break; } case 'r': { if (ci == NULL || !(ci->callstatus & CIST_TRAN)) ar->ftransfer = ar->ntransfer = 0; else { ar->ftransfer = ci->u2.transferinfo.ftransfer; ar->ntransfer = ci->u2.transferinfo.ntransfer; } break; } case 'L': case 'f': /* handled by lua_getinfo */ break; default: status = 0; /* invalid option */ } } return status; } LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) { int status; Closure *cl; CallInfo *ci; TValue *func; lua_lock(L); if (*what == '>') { ci = NULL; func = s2v(L->top - 1); api_check(L, ttisfunction(func), "function expected"); what++; /* skip the '>' */ L->top--; /* pop function */ } else { ci = ar->i_ci; func = s2v(ci->func); lua_assert(ttisfunction(func)); } cl = ttisclosure(func) ? clvalue(func) : NULL; status = auxgetinfo(L, what, ar, cl, ci); if (strchr(what, 'f')) { setobj2s(L, L->top, func); api_incr_top(L); } if (strchr(what, 'L')) collectvalidlines(L, cl); lua_unlock(L); return status; } /* ** {====================================================== ** Symbolic Execution ** ======================================================= */ static const char *getobjname (const Proto *p, int lastpc, int reg, const char **name); /* ** Find a "name" for the constant 'c'. */ static void kname (const Proto *p, int c, const char **name) { TValue *kvalue = &p->k[c]; *name = (ttisstring(kvalue)) ? svalue(kvalue) : "?"; } /* ** Find a "name" for the register 'c'. */ static void rname (const Proto *p, int pc, int c, const char **name) { const char *what = getobjname(p, pc, c, name); /* search for 'c' */ if (!(what && *what == 'c')) /* did not find a constant name? */ *name = "?"; } /* ** Find a "name" for a 'C' value in an RK instruction. 
*/ static void rkname (const Proto *p, int pc, Instruction i, const char **name) { int c = GETARG_C(i); /* key index */ if (GETARG_k(i)) /* is 'c' a constant? */ kname(p, c, name); else /* 'c' is a register */ rname(p, pc, c, name); } static int filterpc (int pc, int jmptarget) { if (pc < jmptarget) /* is code conditional (inside a jump)? */ return -1; /* cannot know who sets that register */ else return pc; /* current position sets that register */ } /* ** Try to find last instruction before 'lastpc' that modified register 'reg'. */ static int findsetreg (const Proto *p, int lastpc, int reg) { int pc; int setreg = -1; /* keep last instruction that changed 'reg' */ int jmptarget = 0; /* any code before this address is conditional */ if (testMMMode(GET_OPCODE(p->code[lastpc]))) lastpc--; /* previous instruction was not actually executed */ for (pc = 0; pc < lastpc; pc++) { Instruction i = p->code[pc]; OpCode op = GET_OPCODE(i); int a = GETARG_A(i); int change; /* true if current instruction changed 'reg' */ switch (op) { case OP_LOADNIL: { /* set registers from 'a' to 'a+b' */ int b = GETARG_B(i); change = (a <= reg && reg <= a + b); break; } case OP_TFORCALL: { /* affect all regs above its base */ change = (reg >= a + 2); break; } case OP_CALL: case OP_TAILCALL: { /* affect all registers above base */ change = (reg >= a); break; } case OP_JMP: { /* doesn't change registers, but changes 'jmptarget' */ int b = GETARG_sJ(i); int dest = pc + 1 + b; /* jump does not skip 'lastpc' and is larger than current one? 
*/ if (dest <= lastpc && dest > jmptarget) jmptarget = dest; /* update 'jmptarget' */ change = 0; break; } default: /* any instruction that sets A */ change = (testAMode(op) && reg == a); break; } if (change) setreg = filterpc(pc, jmptarget); } return setreg; } /* ** Check whether table being indexed by instruction 'i' is the ** environment '_ENV' */ static const char *gxf (const Proto *p, int pc, Instruction i, int isup) { int t = GETARG_B(i); /* table index */ const char *name; /* name of indexed variable */ if (isup) /* is an upvalue? */ name = upvalname(p, t); else getobjname(p, pc, t, &name); return (name && strcmp(name, LUA_ENV) == 0) ? "global" : "field"; } static const char *getobjname (const Proto *p, int lastpc, int reg, const char **name) { int pc; *name = luaF_getlocalname(p, reg + 1, lastpc); if (*name) /* is a local? */ return "local"; /* else try symbolic execution */ pc = findsetreg(p, lastpc, reg); if (pc != -1) { /* could find instruction? */ Instruction i = p->code[pc]; OpCode op = GET_OPCODE(i); switch (op) { case OP_MOVE: { int b = GETARG_B(i); /* move from 'b' to 'a' */ if (b < GETARG_A(i)) return getobjname(p, pc, b, name); /* get name for 'b' */ break; } case OP_GETTABUP: { int k = GETARG_C(i); /* key index */ kname(p, k, name); return gxf(p, pc, i, 1); } case OP_GETTABLE: { int k = GETARG_C(i); /* key index */ rname(p, pc, k, name); return gxf(p, pc, i, 0); } case OP_GETI: { *name = "integer index"; return "field"; } case OP_GETFIELD: { int k = GETARG_C(i); /* key index */ kname(p, k, name); return gxf(p, pc, i, 0); } case OP_GETUPVAL: { *name = upvalname(p, GETARG_B(i)); return "upvalue"; } case OP_LOADK: case OP_LOADKX: { int b = (op == OP_LOADK) ? 
GETARG_Bx(i) : GETARG_Ax(p->code[pc + 1]); if (ttisstring(&p->k[b])) { *name = svalue(&p->k[b]); return "constant"; } break; } case OP_SELF: { rkname(p, pc, i, name); return "method"; } default: break; /* go through to return NULL */ } } return NULL; /* could not find reasonable name */ } /* ** Try to find a name for a function based on the code that called it. ** (Only works when function was called by a Lua function.) ** Returns what the name is (e.g., "for iterator", "method", ** "metamethod") and sets '*name' to point to the name. */ static const char *funcnamefromcode (lua_State *L, CallInfo *ci, const char **name) { TMS tm = (TMS)0; /* (initial value avoids warnings) */ const Proto *p = ci_func(ci)->p; /* calling function */ int pc = currentpc(ci); /* calling instruction index */ Instruction i = p->code[pc]; /* calling instruction */ if (ci->callstatus & CIST_HOOKED) { /* was it called inside a hook? */ *name = "?"; return "hook"; } switch (GET_OPCODE(i)) { case OP_CALL: case OP_TAILCALL: return getobjname(p, pc, GETARG_A(i), name); /* get function name */ case OP_TFORCALL: { /* for iterator */ *name = "for iterator"; return "for iterator"; } /* other instructions can do calls through metamethods */ case OP_SELF: case OP_GETTABUP: case OP_GETTABLE: case OP_GETI: case OP_GETFIELD: tm = TM_INDEX; break; case OP_SETTABUP: case OP_SETTABLE: case OP_SETI: case OP_SETFIELD: tm = TM_NEWINDEX; break; case OP_MMBIN: case OP_MMBINI: case OP_MMBINK: { tm = cast(TMS, GETARG_C(i)); break; } case OP_UNM: tm = TM_UNM; break; case OP_BNOT: tm = TM_BNOT; break; case OP_LEN: tm = TM_LEN; break; case OP_CONCAT: tm = TM_CONCAT; break; case OP_EQ: tm = TM_EQ; break; case OP_LT: case OP_LE: case OP_LTI: case OP_LEI: *name = "order"; /* '<=' can call '__lt', etc. 
*/ return "metamethod"; case OP_CLOSE: case OP_RETURN: *name = "close"; return "metamethod"; default: return NULL; /* cannot find a reasonable name */ } *name = getstr(G(L)->tmname[tm]) + 2; return "metamethod"; } /* }====================================================== */ /* ** The subtraction of two potentially unrelated pointers is ** not ISO C, but it should not crash a program; the subsequent ** checks are ISO C and ensure a correct result. */ static int isinstack (CallInfo *ci, const TValue *o) { StkId base = ci->func + 1; ptrdiff_t i = cast(StkId, o) - base; return (0 <= i && i < (ci->top - base) && s2v(base + i) == o); } /* ** Checks whether value 'o' came from an upvalue. (That can only happen ** with instructions OP_GETTABUP/OP_SETTABUP, which operate directly on ** upvalues.) */ static const char *getupvalname (CallInfo *ci, const TValue *o, const char **name) { LClosure *c = ci_func(ci); int i; for (i = 0; i < c->nupvalues; i++) { if (c->upvals[i]->v == o) { *name = upvalname(c->p, i); return "upvalue"; } } return NULL; } static const char *varinfo (lua_State *L, const TValue *o) { const char *name = NULL; /* to avoid warnings */ CallInfo *ci = L->ci; const char *kind = NULL; if (isLua(ci)) { kind = getupvalname(ci, o, &name); /* check whether 'o' is an upvalue */ if (!kind && isinstack(ci, o)) /* no? try a register */ kind = getobjname(ci_func(ci)->p, currentpc(ci), cast_int(cast(StkId, o) - (ci->func + 1)), &name); } return (kind) ? 
luaO_pushfstring(L, " (%s '%s')", kind, name) : ""; } l_noret luaG_typeerror (lua_State *L, const TValue *o, const char *op) { const char *t = luaT_objtypename(L, o); luaG_runerror(L, "attempt to %s a %s value%s", op, t, varinfo(L, o)); } l_noret luaG_forerror (lua_State *L, const TValue *o, const char *what) { luaG_runerror(L, "bad 'for' %s (number expected, got %s)", what, luaT_objtypename(L, o)); } l_noret luaG_concaterror (lua_State *L, const TValue *p1, const TValue *p2) { if (ttisstring(p1) || cvt2str(p1)) p1 = p2; luaG_typeerror(L, p1, "concatenate"); } l_noret luaG_opinterror (lua_State *L, const TValue *p1, const TValue *p2, const char *msg) { if (!ttisnumber(p1)) /* first operand is wrong? */ p2 = p1; /* now second is wrong */ luaG_typeerror(L, p2, msg); } /* ** Error when both values are convertible to numbers, but not to integers */ l_noret luaG_tointerror (lua_State *L, const TValue *p1, const TValue *p2) { lua_Integer temp; if (!tointegerns(p1, &temp)) p2 = p1; luaG_runerror(L, "number%s has no integer representation", varinfo(L, p2)); } l_noret luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) { const char *t1 = luaT_objtypename(L, p1); const char *t2 = luaT_objtypename(L, p2); if (strcmp(t1, t2) == 0) luaG_runerror(L, "attempt to compare two %s values", t1); else luaG_runerror(L, "attempt to compare %s with %s", t1, t2); } /* add src:line information to 'msg' */ const char *luaG_addinfo (lua_State *L, const char *msg, TString *src, int line) { char buff[LUA_IDSIZE]; if (src) luaO_chunkid(buff, getstr(src), tsslen(src)); else { /* no source available; use "?" instead */ buff[0] = '?'; buff[1] = '\0'; } return luaO_pushfstring(L, "%s:%d: %s", buff, line, msg); } l_noret luaG_errormsg (lua_State *L) { if (L->errfunc != 0) { /* is there an error handling function? 
*/ StkId errfunc = restorestack(L, L->errfunc); lua_assert(ttisfunction(s2v(errfunc))); setobjs2s(L, L->top, L->top - 1); /* move argument */ setobjs2s(L, L->top - 1, errfunc); /* push function */ L->top++; /* assume EXTRA_STACK */ luaD_callnoyield(L, L->top - 2, 1); /* call it */ } luaD_throw(L, LUA_ERRRUN); } l_noret luaG_runerror (lua_State *L, const char *fmt, ...) { CallInfo *ci = L->ci; const char *msg; va_list argp; luaC_checkGC(L); /* error message uses memory */ va_start(argp, fmt); msg = luaO_pushvfstring(L, fmt, argp); /* format message */ va_end(argp); if (isLua(ci)) /* if Lua function, add source:line information */ luaG_addinfo(L, msg, ci_func(ci)->p->source, getcurrentline(ci)); luaG_errormsg(L); } /* ** Check whether new instruction 'newpc' is in a different line from ** previous instruction 'oldpc'. */ static int changedline (const Proto *p, int oldpc, int newpc) { if (p->lineinfo == NULL) /* no debug information? */ return 0; while (oldpc++ < newpc) { if (p->lineinfo[oldpc] != 0) return (luaG_getfuncline(p, oldpc - 1) != luaG_getfuncline(p, newpc)); } return 0; /* no line changes between positions */ } /* ** Traces the execution of a Lua function. Called before the execution ** of each opcode, when debug is on. 'L->oldpc' stores the last ** instruction traced, to detect line changes. When entering a new ** function, 'npci' will be zero and will test as a new line without ** the need for 'oldpc'; so, 'oldpc' does not need to be initialized ** before. Some exceptional conditions may return to a function without ** updating 'oldpc'. In that case, 'oldpc' may be invalid; if so, it is ** reset to zero. (A wrong but valid 'oldpc' at most causes an extra ** call to a line hook.) */ int luaG_traceexec (lua_State *L, const Instruction *pc) { CallInfo *ci = L->ci; lu_byte mask = L->hookmask; const Proto *p = ci_func(ci)->p; int counthook; /* 'L->oldpc' may be invalid; reset it in this case */ int oldpc = (L->oldpc < p->sizecode) ? 
L->oldpc : 0; if (!(mask & (LUA_MASKLINE | LUA_MASKCOUNT))) { /* no hooks? */ ci->u.l.trap = 0; /* don't need to stop again */ return 0; /* turn off 'trap' */ } pc++; /* reference is always next instruction */ ci->u.l.savedpc = pc; /* save 'pc' */ counthook = (--L->hookcount == 0 && (mask & LUA_MASKCOUNT)); if (counthook) resethookcount(L); /* reset count */ else if (!(mask & LUA_MASKLINE)) return 1; /* no line hook and count != 0; nothing to be done now */ if (ci->callstatus & CIST_HOOKYIELD) { /* called hook last time? */ ci->callstatus &= ~CIST_HOOKYIELD; /* erase mark */ return 1; /* do not call hook again (VM yielded, so it did not move) */ } if (!isIT(*(ci->u.l.savedpc - 1))) L->top = ci->top; /* prepare top */ if (counthook) luaD_hook(L, LUA_HOOKCOUNT, -1, 0, 0); /* call count hook */ if (mask & LUA_MASKLINE) { int npci = pcRel(pc, p); if (npci == 0 || /* call linehook when enter a new function, */ pc <= invpcRel(oldpc, p) || /* when jump back (loop), or when */ changedline(p, oldpc, npci)) { /* enter new line */ int newline = luaG_getfuncline(p, npci); luaD_hook(L, LUA_HOOKLINE, newline, 0, 0); /* call line hook */ } L->oldpc = npci; /* 'pc' of last call to line hook */ } if (L->status == LUA_YIELD) { /* did hook yield? */ if (counthook) L->hookcount = 1; /* undo decrement to zero */ ci->u.l.savedpc--; /* undo increment (resume will increment it again) */ ci->callstatus |= CIST_HOOKYIELD; /* mark that it yielded */ luaD_throw(L, LUA_YIELD); } return 1; /* keep 'trap' on */ }
static int changedline (const Proto *p, int oldpc, int newpc) { while (oldpc++ < newpc) { if (p->lineinfo[oldpc] != 0) return (luaG_getfuncline(p, oldpc - 1) != luaG_getfuncline(p, newpc)); } return 0; /* no line changes in the way */ }
static int changedline (const Proto *p, int oldpc, int newpc) { if (p->lineinfo == NULL) /* no debug information? */ return 0; while (oldpc++ < newpc) { if (p->lineinfo[oldpc] != 0) return (luaG_getfuncline(p, oldpc - 1) != luaG_getfuncline(p, newpc)); } return 0; /* no line changes between positions */ }
{'added': [(786, ' if (p->lineinfo == NULL) /* no debug information? */'), (787, ' return 0;'), (792, ' return 0; /* no line changes between positions */')], 'deleted': [(790, ' return 0; /* no line changes in the way */')]}
3
1
643
4,477
7
57
3
https://github.com/lua/lua
CVE-2020-24369
CWE-476
3,069
SDL_pixels.c
C
Map1toN
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 
173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* 
SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 
0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; 
*Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && 
Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 && Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 
&& Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) 
--format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette 
&& palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = 
pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const 
SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_malloc(src->ncolors); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to BitField */ 
static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_malloc(pal->ncolors * bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = 
SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = 
dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 
173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* 
SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 
0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; 
*Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && 
Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 && Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 
&& Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) 
--format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette 
&& palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = 
pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const 
SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_calloc(256, sizeof(Uint8)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to 
BitField */ static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_calloc(256, bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = 
SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = 
dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_malloc(pal->ncolors * bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); }
Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_calloc(256, bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); }
{'added': [(950, ' map = (Uint8 *) SDL_calloc(256, sizeof(Uint8));'), (974, ' map = (Uint8 *) SDL_calloc(256, bpp);')], 'deleted': [(950, ' map = (Uint8 *) SDL_malloc(src->ncolors);'), (974, ' map = (Uint8 *) SDL_malloc(pal->ncolors * bpp);')]}
2
2
1,025
6,535
22
243
4
https://github.com/libsdl-org/SDL
CVE-2021-33657
CWE-787
1,799
ast.c
C
ast_for_atom
/* * This file includes functions to transform a concrete syntax tree (CST) to * an abstract syntax tree (AST). The main function is PyAST_FromNode(). * */ #include "Python.h" #include "Python-ast.h" #include "node.h" #include "ast.h" #include "token.h" #include "pythonrun.h" #include <assert.h> #include <stdbool.h> #define MAXLEVEL 200 /* Max parentheses level */ static int validate_stmts(asdl_seq *); static int validate_exprs(asdl_seq *, expr_context_ty, int); static int validate_nonempty_seq(asdl_seq *, const char *, const char *); static int validate_stmt(stmt_ty); static int validate_expr(expr_ty, expr_context_ty); static int validate_comprehension(asdl_seq *gens) { Py_ssize_t i; if (!asdl_seq_LEN(gens)) { PyErr_SetString(PyExc_ValueError, "comprehension with no generators"); return 0; } for (i = 0; i < asdl_seq_LEN(gens); i++) { comprehension_ty comp = asdl_seq_GET(gens, i); if (!validate_expr(comp->target, Store) || !validate_expr(comp->iter, Load) || !validate_exprs(comp->ifs, Load, 0)) return 0; } return 1; } static int validate_slice(slice_ty slice) { switch (slice->kind) { case Slice_kind: return (!slice->v.Slice.lower || validate_expr(slice->v.Slice.lower, Load)) && (!slice->v.Slice.upper || validate_expr(slice->v.Slice.upper, Load)) && (!slice->v.Slice.step || validate_expr(slice->v.Slice.step, Load)); case ExtSlice_kind: { Py_ssize_t i; if (!validate_nonempty_seq(slice->v.ExtSlice.dims, "dims", "ExtSlice")) return 0; for (i = 0; i < asdl_seq_LEN(slice->v.ExtSlice.dims); i++) if (!validate_slice(asdl_seq_GET(slice->v.ExtSlice.dims, i))) return 0; return 1; } case Index_kind: return validate_expr(slice->v.Index.value, Load); default: PyErr_SetString(PyExc_SystemError, "unknown slice node"); return 0; } } static int validate_keywords(asdl_seq *keywords) { Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(keywords); i++) if (!validate_expr(((keyword_ty)asdl_seq_GET(keywords, i))->value, Load)) return 0; return 1; } static int validate_args(asdl_seq *args) { 
Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(args); i++) { arg_ty arg = asdl_seq_GET(args, i); if (arg->annotation && !validate_expr(arg->annotation, Load)) return 0; } return 1; } static const char * expr_context_name(expr_context_ty ctx) { switch (ctx) { case Load: return "Load"; case Store: return "Store"; case NamedStore: return "NamedStore"; case Del: return "Del"; case AugLoad: return "AugLoad"; case AugStore: return "AugStore"; case Param: return "Param"; default: Py_UNREACHABLE(); } } static int validate_arguments(arguments_ty args) { if (!validate_args(args->args)) return 0; if (args->vararg && args->vararg->annotation && !validate_expr(args->vararg->annotation, Load)) { return 0; } if (!validate_args(args->kwonlyargs)) return 0; if (args->kwarg && args->kwarg->annotation && !validate_expr(args->kwarg->annotation, Load)) { return 0; } if (asdl_seq_LEN(args->defaults) > asdl_seq_LEN(args->args)) { PyErr_SetString(PyExc_ValueError, "more positional defaults than args on arguments"); return 0; } if (asdl_seq_LEN(args->kw_defaults) != asdl_seq_LEN(args->kwonlyargs)) { PyErr_SetString(PyExc_ValueError, "length of kwonlyargs is not the same as " "kw_defaults on arguments"); return 0; } return validate_exprs(args->defaults, Load, 0) && validate_exprs(args->kw_defaults, Load, 1); } static int validate_constant(PyObject *value) { if (value == Py_None || value == Py_Ellipsis) return 1; if (PyLong_CheckExact(value) || PyFloat_CheckExact(value) || PyComplex_CheckExact(value) || PyBool_Check(value) || PyUnicode_CheckExact(value) || PyBytes_CheckExact(value)) return 1; if (PyTuple_CheckExact(value) || PyFrozenSet_CheckExact(value)) { PyObject *it; it = PyObject_GetIter(value); if (it == NULL) return 0; while (1) { PyObject *item = PyIter_Next(it); if (item == NULL) { if (PyErr_Occurred()) { Py_DECREF(it); return 0; } break; } if (!validate_constant(item)) { Py_DECREF(it); Py_DECREF(item); return 0; } Py_DECREF(item); } Py_DECREF(it); return 1; } return 0; } static int 
validate_expr(expr_ty exp, expr_context_ty ctx) { int check_ctx = 1; expr_context_ty actual_ctx; /* First check expression context. */ switch (exp->kind) { case Attribute_kind: actual_ctx = exp->v.Attribute.ctx; break; case Subscript_kind: actual_ctx = exp->v.Subscript.ctx; break; case Starred_kind: actual_ctx = exp->v.Starred.ctx; break; case Name_kind: actual_ctx = exp->v.Name.ctx; break; case List_kind: actual_ctx = exp->v.List.ctx; break; case Tuple_kind: actual_ctx = exp->v.Tuple.ctx; break; default: if (ctx != Load) { PyErr_Format(PyExc_ValueError, "expression which can't be " "assigned to in %s context", expr_context_name(ctx)); return 0; } check_ctx = 0; /* set actual_ctx to prevent gcc warning */ actual_ctx = 0; } if (check_ctx && actual_ctx != ctx) { PyErr_Format(PyExc_ValueError, "expression must have %s context but has %s instead", expr_context_name(ctx), expr_context_name(actual_ctx)); return 0; } /* Now validate expression. */ switch (exp->kind) { case BoolOp_kind: if (asdl_seq_LEN(exp->v.BoolOp.values) < 2) { PyErr_SetString(PyExc_ValueError, "BoolOp with less than 2 values"); return 0; } return validate_exprs(exp->v.BoolOp.values, Load, 0); case BinOp_kind: return validate_expr(exp->v.BinOp.left, Load) && validate_expr(exp->v.BinOp.right, Load); case UnaryOp_kind: return validate_expr(exp->v.UnaryOp.operand, Load); case Lambda_kind: return validate_arguments(exp->v.Lambda.args) && validate_expr(exp->v.Lambda.body, Load); case IfExp_kind: return validate_expr(exp->v.IfExp.test, Load) && validate_expr(exp->v.IfExp.body, Load) && validate_expr(exp->v.IfExp.orelse, Load); case Dict_kind: if (asdl_seq_LEN(exp->v.Dict.keys) != asdl_seq_LEN(exp->v.Dict.values)) { PyErr_SetString(PyExc_ValueError, "Dict doesn't have the same number of keys as values"); return 0; } /* null_ok=1 for keys expressions to allow dict unpacking to work in dict literals, i.e. 
``{**{a:b}}`` */ return validate_exprs(exp->v.Dict.keys, Load, /*null_ok=*/ 1) && validate_exprs(exp->v.Dict.values, Load, /*null_ok=*/ 0); case Set_kind: return validate_exprs(exp->v.Set.elts, Load, 0); #define COMP(NAME) \ case NAME ## _kind: \ return validate_comprehension(exp->v.NAME.generators) && \ validate_expr(exp->v.NAME.elt, Load); COMP(ListComp) COMP(SetComp) COMP(GeneratorExp) #undef COMP case DictComp_kind: return validate_comprehension(exp->v.DictComp.generators) && validate_expr(exp->v.DictComp.key, Load) && validate_expr(exp->v.DictComp.value, Load); case Yield_kind: return !exp->v.Yield.value || validate_expr(exp->v.Yield.value, Load); case YieldFrom_kind: return validate_expr(exp->v.YieldFrom.value, Load); case Await_kind: return validate_expr(exp->v.Await.value, Load); case Compare_kind: if (!asdl_seq_LEN(exp->v.Compare.comparators)) { PyErr_SetString(PyExc_ValueError, "Compare with no comparators"); return 0; } if (asdl_seq_LEN(exp->v.Compare.comparators) != asdl_seq_LEN(exp->v.Compare.ops)) { PyErr_SetString(PyExc_ValueError, "Compare has a different number " "of comparators and operands"); return 0; } return validate_exprs(exp->v.Compare.comparators, Load, 0) && validate_expr(exp->v.Compare.left, Load); case Call_kind: return validate_expr(exp->v.Call.func, Load) && validate_exprs(exp->v.Call.args, Load, 0) && validate_keywords(exp->v.Call.keywords); case Constant_kind: if (!validate_constant(exp->v.Constant.value)) { PyErr_Format(PyExc_TypeError, "got an invalid type in Constant: %s", Py_TYPE(exp->v.Constant.value)->tp_name); return 0; } return 1; case JoinedStr_kind: return validate_exprs(exp->v.JoinedStr.values, Load, 0); case FormattedValue_kind: if (validate_expr(exp->v.FormattedValue.value, Load) == 0) return 0; if (exp->v.FormattedValue.format_spec) return validate_expr(exp->v.FormattedValue.format_spec, Load); return 1; case Attribute_kind: return validate_expr(exp->v.Attribute.value, Load); case Subscript_kind: return 
validate_slice(exp->v.Subscript.slice) && validate_expr(exp->v.Subscript.value, Load); case Starred_kind: return validate_expr(exp->v.Starred.value, ctx); case List_kind: return validate_exprs(exp->v.List.elts, ctx, 0); case Tuple_kind: return validate_exprs(exp->v.Tuple.elts, ctx, 0); /* This last case doesn't have any checking. */ case Name_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected expression"); return 0; } } static int validate_nonempty_seq(asdl_seq *seq, const char *what, const char *owner) { if (asdl_seq_LEN(seq)) return 1; PyErr_Format(PyExc_ValueError, "empty %s on %s", what, owner); return 0; } static int validate_assignlist(asdl_seq *targets, expr_context_ty ctx) { return validate_nonempty_seq(targets, "targets", ctx == Del ? "Delete" : "Assign") && validate_exprs(targets, ctx, 0); } static int validate_body(asdl_seq *body, const char *owner) { return validate_nonempty_seq(body, "body", owner) && validate_stmts(body); } static int validate_stmt(stmt_ty stmt) { Py_ssize_t i; switch (stmt->kind) { case FunctionDef_kind: return validate_body(stmt->v.FunctionDef.body, "FunctionDef") && validate_arguments(stmt->v.FunctionDef.args) && validate_exprs(stmt->v.FunctionDef.decorator_list, Load, 0) && (!stmt->v.FunctionDef.returns || validate_expr(stmt->v.FunctionDef.returns, Load)); case ClassDef_kind: return validate_body(stmt->v.ClassDef.body, "ClassDef") && validate_exprs(stmt->v.ClassDef.bases, Load, 0) && validate_keywords(stmt->v.ClassDef.keywords) && validate_exprs(stmt->v.ClassDef.decorator_list, Load, 0); case Return_kind: return !stmt->v.Return.value || validate_expr(stmt->v.Return.value, Load); case Delete_kind: return validate_assignlist(stmt->v.Delete.targets, Del); case Assign_kind: return validate_assignlist(stmt->v.Assign.targets, Store) && validate_expr(stmt->v.Assign.value, Load); case AugAssign_kind: return validate_expr(stmt->v.AugAssign.target, Store) && validate_expr(stmt->v.AugAssign.value, Load); case 
AnnAssign_kind: if (stmt->v.AnnAssign.target->kind != Name_kind && stmt->v.AnnAssign.simple) { PyErr_SetString(PyExc_TypeError, "AnnAssign with simple non-Name target"); return 0; } return validate_expr(stmt->v.AnnAssign.target, Store) && (!stmt->v.AnnAssign.value || validate_expr(stmt->v.AnnAssign.value, Load)) && validate_expr(stmt->v.AnnAssign.annotation, Load); case For_kind: return validate_expr(stmt->v.For.target, Store) && validate_expr(stmt->v.For.iter, Load) && validate_body(stmt->v.For.body, "For") && validate_stmts(stmt->v.For.orelse); case AsyncFor_kind: return validate_expr(stmt->v.AsyncFor.target, Store) && validate_expr(stmt->v.AsyncFor.iter, Load) && validate_body(stmt->v.AsyncFor.body, "AsyncFor") && validate_stmts(stmt->v.AsyncFor.orelse); case While_kind: return validate_expr(stmt->v.While.test, Load) && validate_body(stmt->v.While.body, "While") && validate_stmts(stmt->v.While.orelse); case If_kind: return validate_expr(stmt->v.If.test, Load) && validate_body(stmt->v.If.body, "If") && validate_stmts(stmt->v.If.orelse); case With_kind: if (!validate_nonempty_seq(stmt->v.With.items, "items", "With")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.With.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.With.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.With.body, "With"); case AsyncWith_kind: if (!validate_nonempty_seq(stmt->v.AsyncWith.items, "items", "AsyncWith")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.AsyncWith.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.AsyncWith.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.AsyncWith.body, "AsyncWith"); case Raise_kind: if (stmt->v.Raise.exc) { return validate_expr(stmt->v.Raise.exc, Load) && (!stmt->v.Raise.cause || 
validate_expr(stmt->v.Raise.cause, Load)); } if (stmt->v.Raise.cause) { PyErr_SetString(PyExc_ValueError, "Raise with cause but no exception"); return 0; } return 1; case Try_kind: if (!validate_body(stmt->v.Try.body, "Try")) return 0; if (!asdl_seq_LEN(stmt->v.Try.handlers) && !asdl_seq_LEN(stmt->v.Try.finalbody)) { PyErr_SetString(PyExc_ValueError, "Try has neither except handlers nor finalbody"); return 0; } if (!asdl_seq_LEN(stmt->v.Try.handlers) && asdl_seq_LEN(stmt->v.Try.orelse)) { PyErr_SetString(PyExc_ValueError, "Try has orelse but no except handlers"); return 0; } for (i = 0; i < asdl_seq_LEN(stmt->v.Try.handlers); i++) { excepthandler_ty handler = asdl_seq_GET(stmt->v.Try.handlers, i); if ((handler->v.ExceptHandler.type && !validate_expr(handler->v.ExceptHandler.type, Load)) || !validate_body(handler->v.ExceptHandler.body, "ExceptHandler")) return 0; } return (!asdl_seq_LEN(stmt->v.Try.finalbody) || validate_stmts(stmt->v.Try.finalbody)) && (!asdl_seq_LEN(stmt->v.Try.orelse) || validate_stmts(stmt->v.Try.orelse)); case Assert_kind: return validate_expr(stmt->v.Assert.test, Load) && (!stmt->v.Assert.msg || validate_expr(stmt->v.Assert.msg, Load)); case Import_kind: return validate_nonempty_seq(stmt->v.Import.names, "names", "Import"); case ImportFrom_kind: if (stmt->v.ImportFrom.level < 0) { PyErr_SetString(PyExc_ValueError, "Negative ImportFrom level"); return 0; } return validate_nonempty_seq(stmt->v.ImportFrom.names, "names", "ImportFrom"); case Global_kind: return validate_nonempty_seq(stmt->v.Global.names, "names", "Global"); case Nonlocal_kind: return validate_nonempty_seq(stmt->v.Nonlocal.names, "names", "Nonlocal"); case Expr_kind: return validate_expr(stmt->v.Expr.value, Load); case AsyncFunctionDef_kind: return validate_body(stmt->v.AsyncFunctionDef.body, "AsyncFunctionDef") && validate_arguments(stmt->v.AsyncFunctionDef.args) && validate_exprs(stmt->v.AsyncFunctionDef.decorator_list, Load, 0) && (!stmt->v.AsyncFunctionDef.returns || 
validate_expr(stmt->v.AsyncFunctionDef.returns, Load)); case Pass_kind: case Break_kind: case Continue_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected statement"); return 0; } } static int validate_stmts(asdl_seq *seq) { Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(seq); i++) { stmt_ty stmt = asdl_seq_GET(seq, i); if (stmt) { if (!validate_stmt(stmt)) return 0; } else { PyErr_SetString(PyExc_ValueError, "None disallowed in statement list"); return 0; } } return 1; } static int validate_exprs(asdl_seq *exprs, expr_context_ty ctx, int null_ok) { Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(exprs); i++) { expr_ty expr = asdl_seq_GET(exprs, i); if (expr) { if (!validate_expr(expr, ctx)) return 0; } else if (!null_ok) { PyErr_SetString(PyExc_ValueError, "None disallowed in expression list"); return 0; } } return 1; } int PyAST_Validate(mod_ty mod) { int res = 0; switch (mod->kind) { case Module_kind: res = validate_stmts(mod->v.Module.body); break; case Interactive_kind: res = validate_stmts(mod->v.Interactive.body); break; case Expression_kind: res = validate_expr(mod->v.Expression.body, Load); break; case Suite_kind: PyErr_SetString(PyExc_ValueError, "Suite is not valid in the CPython compiler"); break; default: PyErr_SetString(PyExc_SystemError, "impossible module node"); res = 0; break; } return res; } /* This is done here, so defines like "test" don't interfere with AST use above. */ #include "grammar.h" #include "parsetok.h" #include "graminit.h" /* Data structure used internally */ struct compiling { PyArena *c_arena; /* Arena for allocating memory. */ PyObject *c_filename; /* filename */ PyObject *c_normalize; /* Normalization function from unicodedata. 
*/ }; static asdl_seq *seq_for_testlist(struct compiling *, const node *); static expr_ty ast_for_expr(struct compiling *, const node *); static stmt_ty ast_for_stmt(struct compiling *, const node *); static asdl_seq *ast_for_suite(struct compiling *c, const node *n); static asdl_seq *ast_for_exprlist(struct compiling *, const node *, expr_context_ty); static expr_ty ast_for_testlist(struct compiling *, const node *); static stmt_ty ast_for_classdef(struct compiling *, const node *, asdl_seq *); static stmt_ty ast_for_with_stmt(struct compiling *, const node *, bool); static stmt_ty ast_for_for_stmt(struct compiling *, const node *, bool); /* Note different signature for ast_for_call */ static expr_ty ast_for_call(struct compiling *, const node *, expr_ty, const node *, const node *); static PyObject *parsenumber(struct compiling *, const char *); static expr_ty parsestrplus(struct compiling *, const node *n); static void get_last_end_pos(asdl_seq *, int *, int *); #define COMP_GENEXP 0 #define COMP_LISTCOMP 1 #define COMP_SETCOMP 2 static int init_normalization(struct compiling *c) { PyObject *m = PyImport_ImportModuleNoBlock("unicodedata"); if (!m) return 0; c->c_normalize = PyObject_GetAttrString(m, "normalize"); Py_DECREF(m); if (!c->c_normalize) return 0; return 1; } static identifier new_identifier(const char *n, struct compiling *c) { PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL); if (!id) return NULL; /* PyUnicode_DecodeUTF8 should always return a ready string. */ assert(PyUnicode_IS_READY(id)); /* Check whether there are non-ASCII characters in the identifier; if so, normalize to NFKC. 
*/ if (!PyUnicode_IS_ASCII(id)) { PyObject *id2; _Py_IDENTIFIER(NFKC); if (!c->c_normalize && !init_normalization(c)) { Py_DECREF(id); return NULL; } PyObject *form = _PyUnicode_FromId(&PyId_NFKC); if (form == NULL) { Py_DECREF(id); return NULL; } PyObject *args[2] = {form, id}; id2 = _PyObject_FastCall(c->c_normalize, args, 2); Py_DECREF(id); if (!id2) return NULL; if (!PyUnicode_Check(id2)) { PyErr_Format(PyExc_TypeError, "unicodedata.normalize() must return a string, not " "%.200s", Py_TYPE(id2)->tp_name); Py_DECREF(id2); return NULL; } id = id2; } PyUnicode_InternInPlace(&id); if (PyArena_AddPyObject(c->c_arena, id) < 0) { Py_DECREF(id); return NULL; } return id; } #define NEW_IDENTIFIER(n) new_identifier(STR(n), c) static int ast_error(struct compiling *c, const node *n, const char *errmsg, ...) { PyObject *value, *errstr, *loc, *tmp; va_list va; va_start(va, errmsg); errstr = PyUnicode_FromFormatV(errmsg, va); va_end(va); if (!errstr) { return 0; } loc = PyErr_ProgramTextObject(c->c_filename, LINENO(n)); if (!loc) { Py_INCREF(Py_None); loc = Py_None; } tmp = Py_BuildValue("(OiiN)", c->c_filename, LINENO(n), n->n_col_offset + 1, loc); if (!tmp) { Py_DECREF(errstr); return 0; } value = PyTuple_Pack(2, errstr, tmp); Py_DECREF(errstr); Py_DECREF(tmp); if (value) { PyErr_SetObject(PyExc_SyntaxError, value); Py_DECREF(value); } return 0; } /* num_stmts() returns number of contained statements. Use this routine to determine how big a sequence is needed for the statements in a parse tree. Its raison d'etre is this bit of grammar: stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE A simple_stmt can contain multiple small_stmt elements joined by semicolons. If the arg is a simple_stmt, the number of small_stmt elements is returned. 
*/ static int num_stmts(const node *n) { int i, l; node *ch; switch (TYPE(n)) { case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) return 0; else return num_stmts(CHILD(n, 0)); case file_input: l = 0; for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == stmt) l += num_stmts(ch); } return l; case stmt: return num_stmts(CHILD(n, 0)); case compound_stmt: return 1; case simple_stmt: return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */ case suite: if (NCH(n) == 1) return num_stmts(CHILD(n, 0)); else { l = 0; for (i = 2; i < (NCH(n) - 1); i++) l += num_stmts(CHILD(n, i)); return l; } default: { char buf[128]; sprintf(buf, "Non-statement found: %d %d", TYPE(n), NCH(n)); Py_FatalError(buf); } } Py_UNREACHABLE(); } /* Transform the CST rooted at node * to the appropriate AST */ mod_ty PyAST_FromNodeObject(const node *n, PyCompilerFlags *flags, PyObject *filename, PyArena *arena) { int i, j, k, num; asdl_seq *stmts = NULL; stmt_ty s; node *ch; struct compiling c; mod_ty res = NULL; c.c_arena = arena; /* borrowed reference */ c.c_filename = filename; c.c_normalize = NULL; if (TYPE(n) == encoding_decl) n = CHILD(n, 0); k = 0; switch (TYPE(n)) { case file_input: stmts = _Py_asdl_seq_new(num_stmts(n), arena); if (!stmts) goto out; for (i = 0; i < NCH(n) - 1; i++) { ch = CHILD(n, i); if (TYPE(ch) == NEWLINE) continue; REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { s = ast_for_stmt(&c, ch); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } else { ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < num; j++) { s = ast_for_stmt(&c, CHILD(ch, j * 2)); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } } } res = Module(stmts, arena); break; case eval_input: { expr_ty testlist_ast; /* XXX Why not comp_for here? 
*/ testlist_ast = ast_for_testlist(&c, CHILD(n, 0)); if (!testlist_ast) goto out; res = Expression(testlist_ast, arena); break; } case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) { stmts = _Py_asdl_seq_new(1, arena); if (!stmts) goto out; asdl_seq_SET(stmts, 0, Pass(n->n_lineno, n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, arena)); if (!asdl_seq_GET(stmts, 0)) goto out; res = Interactive(stmts, arena); } else { n = CHILD(n, 0); num = num_stmts(n); stmts = _Py_asdl_seq_new(num, arena); if (!stmts) goto out; if (num == 1) { s = ast_for_stmt(&c, n); if (!s) goto out; asdl_seq_SET(stmts, 0, s); } else { /* Only a simple_stmt can contain multiple statements. */ REQ(n, simple_stmt); for (i = 0; i < NCH(n); i += 2) { if (TYPE(CHILD(n, i)) == NEWLINE) break; s = ast_for_stmt(&c, CHILD(n, i)); if (!s) goto out; asdl_seq_SET(stmts, i / 2, s); } } res = Interactive(stmts, arena); } break; default: PyErr_Format(PyExc_SystemError, "invalid node %d for PyAST_FromNode", TYPE(n)); goto out; } out: if (c.c_normalize) { Py_DECREF(c.c_normalize); } return res; } mod_ty PyAST_FromNode(const node *n, PyCompilerFlags *flags, const char *filename_str, PyArena *arena) { mod_ty mod; PyObject *filename; filename = PyUnicode_DecodeFSDefault(filename_str); if (filename == NULL) return NULL; mod = PyAST_FromNodeObject(n, flags, filename, arena); Py_DECREF(filename); return mod; } /* Return the AST repr. of the operator represented as syntax (|, ^, etc.) 
*/ static operator_ty get_operator(const node *n) { switch (TYPE(n)) { case VBAR: return BitOr; case CIRCUMFLEX: return BitXor; case AMPER: return BitAnd; case LEFTSHIFT: return LShift; case RIGHTSHIFT: return RShift; case PLUS: return Add; case MINUS: return Sub; case STAR: return Mult; case AT: return MatMult; case SLASH: return Div; case DOUBLESLASH: return FloorDiv; case PERCENT: return Mod; default: return (operator_ty)0; } } static const char * const FORBIDDEN[] = { "None", "True", "False", "__debug__", NULL, }; static int forbidden_name(struct compiling *c, identifier name, const node *n, int full_checks) { assert(PyUnicode_Check(name)); const char * const *p = FORBIDDEN; if (!full_checks) { /* In most cases, the parser will protect True, False, and None from being assign to. */ p += 3; } for (; *p; p++) { if (_PyUnicode_EqualToASCIIString(name, *p)) { ast_error(c, n, "cannot assign to %U", name); return 1; } } return 0; } static expr_ty copy_location(expr_ty e, const node *n) { if (e) { e->lineno = LINENO(n); e->col_offset = n->n_col_offset; e->end_lineno = n->n_end_lineno; e->end_col_offset = n->n_end_col_offset; } return e; } /* Set the context ctx for expr_ty e, recursively traversing e. Only sets context for expr kinds that "can appear in assignment context" (according to ../Parser/Python.asdl). For other expr kinds, it sets an appropriate syntax error and returns false. */ static int set_context(struct compiling *c, expr_ty e, expr_context_ty ctx, const node *n) { asdl_seq *s = NULL; /* If a particular expression type can't be used for assign / delete, set expr_name to its name and an error message will be generated. */ const char* expr_name = NULL; /* The ast defines augmented store and load contexts, but the implementation here doesn't actually use them. The code may be a little more complex than necessary as a result. It also means that expressions in an augmented assignment have a Store context. 
Consider restructuring so that augmented assignment uses set_context(), too. */ assert(ctx != AugStore && ctx != AugLoad); switch (e->kind) { case Attribute_kind: if (ctx == NamedStore) { expr_name = "attribute"; break; } e->v.Attribute.ctx = ctx; if (ctx == Store && forbidden_name(c, e->v.Attribute.attr, n, 1)) return 0; break; case Subscript_kind: if (ctx == NamedStore) { expr_name = "subscript"; break; } e->v.Subscript.ctx = ctx; break; case Starred_kind: if (ctx == NamedStore) { expr_name = "starred"; break; } e->v.Starred.ctx = ctx; if (!set_context(c, e->v.Starred.value, ctx, n)) return 0; break; case Name_kind: if (ctx == Store) { if (forbidden_name(c, e->v.Name.id, n, 0)) return 0; /* forbidden_name() calls ast_error() */ } e->v.Name.ctx = ctx; break; case List_kind: if (ctx == NamedStore) { expr_name = "list"; break; } e->v.List.ctx = ctx; s = e->v.List.elts; break; case Tuple_kind: if (ctx == NamedStore) { expr_name = "tuple"; break; } e->v.Tuple.ctx = ctx; s = e->v.Tuple.elts; break; case Lambda_kind: expr_name = "lambda"; break; case Call_kind: expr_name = "function call"; break; case BoolOp_kind: case BinOp_kind: case UnaryOp_kind: expr_name = "operator"; break; case GeneratorExp_kind: expr_name = "generator expression"; break; case Yield_kind: case YieldFrom_kind: expr_name = "yield expression"; break; case Await_kind: expr_name = "await expression"; break; case ListComp_kind: expr_name = "list comprehension"; break; case SetComp_kind: expr_name = "set comprehension"; break; case DictComp_kind: expr_name = "dict comprehension"; break; case Dict_kind: expr_name = "dict display"; break; case Set_kind: expr_name = "set display"; break; case JoinedStr_kind: case FormattedValue_kind: expr_name = "f-string expression"; break; case Constant_kind: { PyObject *value = e->v.Constant.value; if (value == Py_None || value == Py_False || value == Py_True || value == Py_Ellipsis) { return ast_error(c, n, "cannot %s %R", ctx == Store ? 
"assign to" : "delete", value); } expr_name = "literal"; break; } case Compare_kind: expr_name = "comparison"; break; case IfExp_kind: expr_name = "conditional expression"; break; case NamedExpr_kind: expr_name = "named expression"; break; default: PyErr_Format(PyExc_SystemError, "unexpected expression in %sassignment %d (line %d)", ctx == NamedStore ? "named ": "", e->kind, e->lineno); return 0; } /* Check for error string set by switch */ if (expr_name) { if (ctx == NamedStore) { return ast_error(c, n, "cannot use named assignment with %s", expr_name); } else { return ast_error(c, n, "cannot %s %s", ctx == Store ? "assign to" : "delete", expr_name); } } /* If the LHS is a list or tuple, we need to set the assignment context for all the contained elements. */ if (s) { Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(s); i++) { if (!set_context(c, (expr_ty)asdl_seq_GET(s, i), ctx, n)) return 0; } } return 1; } static operator_ty ast_for_augassign(struct compiling *c, const node *n) { REQ(n, augassign); n = CHILD(n, 0); switch (STR(n)[0]) { case '+': return Add; case '-': return Sub; case '/': if (STR(n)[1] == '/') return FloorDiv; else return Div; case '%': return Mod; case '<': return LShift; case '>': return RShift; case '&': return BitAnd; case '^': return BitXor; case '|': return BitOr; case '*': if (STR(n)[1] == '*') return Pow; else return Mult; case '@': return MatMult; default: PyErr_Format(PyExc_SystemError, "invalid augassign: %s", STR(n)); return (operator_ty)0; } } static cmpop_ty ast_for_comp_op(struct compiling *c, const node *n) { /* comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is' |'is' 'not' */ REQ(n, comp_op); if (NCH(n) == 1) { n = CHILD(n, 0); switch (TYPE(n)) { case LESS: return Lt; case GREATER: return Gt; case EQEQUAL: /* == */ return Eq; case LESSEQUAL: return LtE; case GREATEREQUAL: return GtE; case NOTEQUAL: return NotEq; case NAME: if (strcmp(STR(n), "in") == 0) return In; if (strcmp(STR(n), "is") == 0) return Is; /* fall through */ 
default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s", STR(n)); return (cmpop_ty)0; } } else if (NCH(n) == 2) { /* handle "not in" and "is not" */ switch (TYPE(CHILD(n, 0))) { case NAME: if (strcmp(STR(CHILD(n, 1)), "in") == 0) return NotIn; if (strcmp(STR(CHILD(n, 0)), "is") == 0) return IsNot; /* fall through */ default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s %s", STR(CHILD(n, 0)), STR(CHILD(n, 1))); return (cmpop_ty)0; } } PyErr_Format(PyExc_SystemError, "invalid comp_op: has %d children", NCH(n)); return (cmpop_ty)0; } static asdl_seq * seq_for_testlist(struct compiling *c, const node *n) { /* testlist: test (',' test)* [','] testlist_star_expr: test|star_expr (',' test|star_expr)* [','] */ asdl_seq *seq; expr_ty expression; int i; assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr || TYPE(n) == testlist_comp); seq = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { const node *ch = CHILD(n, i); assert(TYPE(ch) == test || TYPE(ch) == test_nocond || TYPE(ch) == star_expr || TYPE(ch) == namedexpr_test); expression = ast_for_expr(c, ch); if (!expression) return NULL; assert(i / 2 < seq->size); asdl_seq_SET(seq, i / 2, expression); } return seq; } static arg_ty ast_for_arg(struct compiling *c, const node *n) { identifier name; expr_ty annotation = NULL; node *ch; arg_ty ret; assert(TYPE(n) == tfpdef || TYPE(n) == vfpdef); ch = CHILD(n, 0); name = NEW_IDENTIFIER(ch); if (!name) return NULL; if (forbidden_name(c, name, ch, 0)) return NULL; if (NCH(n) == 3 && TYPE(CHILD(n, 1)) == COLON) { annotation = ast_for_expr(c, CHILD(n, 2)); if (!annotation) return NULL; } ret = arg(name, annotation, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!ret) return NULL; return ret; } /* returns -1 if failed to handle keyword only arguments returns new position to keep processing if successful (',' tfpdef ['=' test])* ^^^ start pointing here */ static int 
handle_keywordonly_args(struct compiling *c, const node *n, int start, asdl_seq *kwonlyargs, asdl_seq *kwdefaults) { PyObject *argname; node *ch; expr_ty expression, annotation; arg_ty arg; int i = start; int j = 0; /* index for kwdefaults and kwonlyargs */ if (kwonlyargs == NULL) { ast_error(c, CHILD(n, start), "named arguments must follow bare *"); return -1; } assert(kwdefaults != NULL); while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case vfpdef: case tfpdef: if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) goto error; asdl_seq_SET(kwdefaults, j, expression); i += 2; /* '=' and test */ } else { /* setting NULL if no default value exists */ asdl_seq_SET(kwdefaults, j, NULL); } if (NCH(ch) == 3) { /* ch is NAME ':' test */ annotation = ast_for_expr(c, CHILD(ch, 2)); if (!annotation) goto error; } else { annotation = NULL; } ch = CHILD(ch, 0); argname = NEW_IDENTIFIER(ch); if (!argname) goto error; if (forbidden_name(c, argname, ch, 0)) goto error; arg = arg(argname, annotation, LINENO(ch), ch->n_col_offset, ch->n_end_lineno, ch->n_end_col_offset, c->c_arena); if (!arg) goto error; asdl_seq_SET(kwonlyargs, j++, arg); i += 2; /* the name and the comma */ break; case DOUBLESTAR: return i; default: ast_error(c, ch, "unexpected node"); goto error; } } return i; error: return -1; } /* Create AST for argument list. */ static arguments_ty ast_for_arguments(struct compiling *c, const node *n) { /* This function handles both typedargslist (function definition) and varargslist (lambda definition). 
parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']) tfpdef: NAME [':' test] varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [',']]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [','] ) vfpdef: NAME */ int i, j, k, nposargs = 0, nkwonlyargs = 0; int nposdefaults = 0, found_default = 0; asdl_seq *posargs, *posdefaults, *kwonlyargs, *kwdefaults; arg_ty vararg = NULL, kwarg = NULL; arg_ty arg; node *ch; if (TYPE(n) == parameters) { if (NCH(n) == 2) /* () as argument list */ return arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); n = CHILD(n, 1); } assert(TYPE(n) == typedargslist || TYPE(n) == varargslist); /* First count the number of positional args & defaults. The variable i is the loop index for this for loop and the next. The next loop picks up where the first leaves off. */ for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == STAR) { /* skip star */ i++; if (i < NCH(n) && /* skip argument following star */ (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { i++; } break; } if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == vfpdef || TYPE(ch) == tfpdef) nposargs++; if (TYPE(ch) == EQUAL) nposdefaults++; } /* count the number of keyword only args & defaults for keyword only args */ for ( ; i < NCH(n); ++i) { ch = CHILD(n, i); if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == tfpdef || TYPE(ch) == vfpdef) nkwonlyargs++; } posargs = (nposargs ? _Py_asdl_seq_new(nposargs, c->c_arena) : NULL); if (!posargs && nposargs) return NULL; kwonlyargs = (nkwonlyargs ? _Py_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwonlyargs && nkwonlyargs) return NULL; posdefaults = (nposdefaults ? 
_Py_asdl_seq_new(nposdefaults, c->c_arena) : NULL); if (!posdefaults && nposdefaults) return NULL; /* The length of kwonlyargs and kwdefaults are same since we set NULL as default for keyword only argument w/o default - we have sequence data structure, but no dictionary */ kwdefaults = (nkwonlyargs ? _Py_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwdefaults && nkwonlyargs) return NULL; /* tfpdef: NAME [':' test] vfpdef: NAME */ i = 0; j = 0; /* index for defaults */ k = 0; /* index for args */ while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case tfpdef: case vfpdef: /* XXX Need to worry about checking if TYPE(CHILD(n, i+1)) is anything other than EQUAL or a comma? */ /* XXX Should NCH(n) check be made a separate check? */ if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expr_ty expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) return NULL; assert(posdefaults != NULL); asdl_seq_SET(posdefaults, j++, expression); i += 2; found_default = 1; } else if (found_default) { ast_error(c, n, "non-default argument follows default argument"); return NULL; } arg = ast_for_arg(c, ch); if (!arg) return NULL; asdl_seq_SET(posargs, k++, arg); i += 2; /* the name and the comma */ break; case STAR: if (i+1 >= NCH(n) || (i+2 == NCH(n) && TYPE(CHILD(n, i+1)) == COMMA)) { ast_error(c, CHILD(n, i), "named arguments must follow bare *"); return NULL; } ch = CHILD(n, i+1); /* tfpdef or COMMA */ if (TYPE(ch) == COMMA) { int res = 0; i += 2; /* now follows keyword only arguments */ res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } else { vararg = ast_for_arg(c, ch); if (!vararg) return NULL; i += 3; if (i < NCH(n) && (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { int res = 0; res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } } break; case DOUBLESTAR: ch = 
CHILD(n, i+1); /* tfpdef */ assert(TYPE(ch) == tfpdef || TYPE(ch) == vfpdef); kwarg = ast_for_arg(c, ch); if (!kwarg) return NULL; i += 3; break; default: PyErr_Format(PyExc_SystemError, "unexpected node in varargslist: %d @ %d", TYPE(ch), i); return NULL; } } return arguments(posargs, vararg, kwonlyargs, kwdefaults, kwarg, posdefaults, c->c_arena); } static expr_ty ast_for_dotted_name(struct compiling *c, const node *n) { expr_ty e; identifier id; int lineno, col_offset; int i; node *ch; REQ(n, dotted_name); lineno = LINENO(n); col_offset = n->n_col_offset; ch = CHILD(n, 0); id = NEW_IDENTIFIER(ch); if (!id) return NULL; e = Name(id, Load, lineno, col_offset, ch->n_end_lineno, ch->n_end_col_offset, c->c_arena); if (!e) return NULL; for (i = 2; i < NCH(n); i+=2) { id = NEW_IDENTIFIER(CHILD(n, i)); if (!id) return NULL; e = Attribute(e, id, Load, lineno, col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!e) return NULL; } return e; } static expr_ty ast_for_decorator(struct compiling *c, const node *n) { /* decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE */ expr_ty d = NULL; expr_ty name_expr; REQ(n, decorator); REQ(CHILD(n, 0), AT); REQ(RCHILD(n, -1), NEWLINE); name_expr = ast_for_dotted_name(c, CHILD(n, 1)); if (!name_expr) return NULL; if (NCH(n) == 3) { /* No arguments */ d = name_expr; name_expr = NULL; } else if (NCH(n) == 5) { /* Call with no arguments */ d = Call(name_expr, NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!d) return NULL; name_expr = NULL; } else { d = ast_for_call(c, CHILD(n, 3), name_expr, CHILD(n, 2), CHILD(n, 4)); if (!d) return NULL; name_expr = NULL; } return d; } static asdl_seq* ast_for_decorators(struct compiling *c, const node *n) { asdl_seq* decorator_seq; expr_ty d; int i; REQ(n, decorators); decorator_seq = _Py_asdl_seq_new(NCH(n), c->c_arena); if (!decorator_seq) return NULL; for (i = 0; i < NCH(n); i++) { d = ast_for_decorator(c, CHILD(n, i)); if (!d) 
return NULL; asdl_seq_SET(decorator_seq, i, d); } return decorator_seq; } static stmt_ty ast_for_funcdef_impl(struct compiling *c, const node *n0, asdl_seq *decorator_seq, bool is_async) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ const node * const n = is_async ? CHILD(n0, 1) : n0; identifier name; arguments_ty args; asdl_seq *body; expr_ty returns = NULL; int name_i = 1; int end_lineno, end_col_offset; REQ(n, funcdef); name = NEW_IDENTIFIER(CHILD(n, name_i)); if (!name) return NULL; if (forbidden_name(c, name, CHILD(n, name_i), 0)) return NULL; args = ast_for_arguments(c, CHILD(n, name_i + 1)); if (!args) return NULL; if (TYPE(CHILD(n, name_i+2)) == RARROW) { returns = ast_for_expr(c, CHILD(n, name_i + 3)); if (!returns) return NULL; name_i += 2; } body = ast_for_suite(c, CHILD(n, name_i + 3)); if (!body) return NULL; get_last_end_pos(body, &end_lineno, &end_col_offset); if (is_async) return AsyncFunctionDef(name, args, body, decorator_seq, returns, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return FunctionDef(name, args, body, decorator_seq, returns, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static stmt_ty ast_for_async_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* async_funcdef: 'async' funcdef */ REQ(n, async_funcdef); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); REQ(CHILD(n, 1), funcdef); return ast_for_funcdef_impl(c, n, decorator_seq, true /* is_async */); } static stmt_ty ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ return ast_for_funcdef_impl(c, n, decorator_seq, false /* is_async */); } static stmt_ty ast_for_async_stmt(struct compiling *c, const node *n) { /* async_stmt: 'async' (funcdef | with_stmt | for_stmt) */ REQ(n, async_stmt); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); switch (TYPE(CHILD(n, 1))) 
{ case funcdef: return ast_for_funcdef_impl(c, n, NULL, true /* is_async */); case with_stmt: return ast_for_with_stmt(c, n, true /* is_async */); case for_stmt: return ast_for_for_stmt(c, n, true /* is_async */); default: PyErr_Format(PyExc_SystemError, "invalid async stament: %s", STR(CHILD(n, 1))); return NULL; } } static stmt_ty ast_for_decorated(struct compiling *c, const node *n) { /* decorated: decorators (classdef | funcdef | async_funcdef) */ stmt_ty thing = NULL; asdl_seq *decorator_seq = NULL; REQ(n, decorated); decorator_seq = ast_for_decorators(c, CHILD(n, 0)); if (!decorator_seq) return NULL; assert(TYPE(CHILD(n, 1)) == funcdef || TYPE(CHILD(n, 1)) == async_funcdef || TYPE(CHILD(n, 1)) == classdef); if (TYPE(CHILD(n, 1)) == funcdef) { thing = ast_for_funcdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == classdef) { thing = ast_for_classdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == async_funcdef) { thing = ast_for_async_funcdef(c, CHILD(n, 1), decorator_seq); } return thing; } static expr_ty ast_for_namedexpr(struct compiling *c, const node *n) { /* if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] namedexpr_test: test [':=' test] argument: ( test [comp_for] | test ':=' test | test '=' test | '**' test | '*' test ) */ expr_ty target, value; target = ast_for_expr(c, CHILD(n, 0)); if (!target) return NULL; value = ast_for_expr(c, CHILD(n, 2)); if (!value) return NULL; if (!set_context(c, target, NamedStore, n)) return NULL; return NamedExpr(target, value, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static expr_ty ast_for_lambdef(struct compiling *c, const node *n) { /* lambdef: 'lambda' [varargslist] ':' test lambdef_nocond: 'lambda' [varargslist] ':' test_nocond */ arguments_ty args; expr_ty expression; if (NCH(n) == 3) { args = arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); if (!args) return NULL; expression = 
ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; } else { args = ast_for_arguments(c, CHILD(n, 1)); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 3)); if (!expression) return NULL; } return Lambda(args, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static expr_ty ast_for_ifexpr(struct compiling *c, const node *n) { /* test: or_test 'if' or_test 'else' test */ expr_ty expression, body, orelse; assert(NCH(n) == 5); body = ast_for_expr(c, CHILD(n, 0)); if (!body) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; orelse = ast_for_expr(c, CHILD(n, 4)); if (!orelse) return NULL; return IfExp(expression, body, orelse, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* Count the number of 'for' loops in a comprehension. Helper for ast_for_comprehension(). */ static int count_comp_fors(struct compiling *c, const node *n) { int n_fors = 0; count_comp_for: n_fors++; REQ(n, comp_for); if (NCH(n) == 2) { REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); n = CHILD(n, 1); } else if (NCH(n) == 1) { n = CHILD(n, 0); } else { goto error; } if (NCH(n) == (5)) { n = CHILD(n, 4); } else { return n_fors; } count_comp_iter: REQ(n, comp_iter); n = CHILD(n, 0); if (TYPE(n) == comp_for) goto count_comp_for; else if (TYPE(n) == comp_if) { if (NCH(n) == 3) { n = CHILD(n, 2); goto count_comp_iter; } else return n_fors; } error: /* Should never be reached */ PyErr_SetString(PyExc_SystemError, "logic error in count_comp_fors"); return -1; } /* Count the number of 'if' statements in a comprehension. Helper for ast_for_comprehension(). 
*/ static int count_comp_ifs(struct compiling *c, const node *n) { int n_ifs = 0; while (1) { REQ(n, comp_iter); if (TYPE(CHILD(n, 0)) == comp_for) return n_ifs; n = CHILD(n, 0); REQ(n, comp_if); n_ifs++; if (NCH(n) == 2) return n_ifs; n = CHILD(n, 2); } } static asdl_seq * ast_for_comprehension(struct compiling *c, const node *n) { int i, n_fors; asdl_seq *comps; n_fors = count_comp_fors(c, n); if (n_fors == -1) return NULL; comps = _Py_asdl_seq_new(n_fors, c->c_arena); if (!comps) return NULL; for (i = 0; i < n_fors; i++) { comprehension_ty comp; asdl_seq *t; expr_ty expression, first; node *for_ch; node *sync_n; int is_async = 0; REQ(n, comp_for); if (NCH(n) == 2) { is_async = 1; REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); sync_n = CHILD(n, 1); } else { sync_n = CHILD(n, 0); } REQ(sync_n, sync_comp_for); for_ch = CHILD(sync_n, 1); t = ast_for_exprlist(c, for_ch, Store); if (!t) return NULL; expression = ast_for_expr(c, CHILD(sync_n, 3)); if (!expression) return NULL; /* Check the # of children rather than the length of t, since (x for x, in ...) has 1 element in t, but still requires a Tuple. 
*/ first = (expr_ty)asdl_seq_GET(t, 0); if (NCH(for_ch) == 1) comp = comprehension(first, expression, NULL, is_async, c->c_arena); else comp = comprehension(Tuple(t, Store, first->lineno, first->col_offset, for_ch->n_end_lineno, for_ch->n_end_col_offset, c->c_arena), expression, NULL, is_async, c->c_arena); if (!comp) return NULL; if (NCH(sync_n) == 5) { int j, n_ifs; asdl_seq *ifs; n = CHILD(sync_n, 4); n_ifs = count_comp_ifs(c, n); if (n_ifs == -1) return NULL; ifs = _Py_asdl_seq_new(n_ifs, c->c_arena); if (!ifs) return NULL; for (j = 0; j < n_ifs; j++) { REQ(n, comp_iter); n = CHILD(n, 0); REQ(n, comp_if); expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; asdl_seq_SET(ifs, j, expression); if (NCH(n) == 3) n = CHILD(n, 2); } /* on exit, must guarantee that n is a comp_for */ if (TYPE(n) == comp_iter) n = CHILD(n, 0); comp->ifs = ifs; } asdl_seq_SET(comps, i, comp); } return comps; } static expr_ty ast_for_itercomp(struct compiling *c, const node *n, int type) { /* testlist_comp: (test|star_expr) * ( comp_for | (',' (test|star_expr))* [','] ) */ expr_ty elt; asdl_seq *comps; node *ch; assert(NCH(n) > 1); ch = CHILD(n, 0); elt = ast_for_expr(c, ch); if (!elt) return NULL; if (elt->kind == Starred_kind) { ast_error(c, ch, "iterable unpacking cannot be used in comprehension"); return NULL; } comps = ast_for_comprehension(c, CHILD(n, 1)); if (!comps) return NULL; if (type == COMP_GENEXP) return GeneratorExp(elt, comps, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else if (type == COMP_LISTCOMP) return ListComp(elt, comps, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else if (type == COMP_SETCOMP) return SetComp(elt, comps, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else /* Should never happen */ return NULL; } /* Fills in the key, value pair corresponding to the dict element. In case * of an unpacking, key is NULL. 
*i is advanced by the number of ast * elements. Iff successful, nonzero is returned. */ static int ast_for_dictelement(struct compiling *c, const node *n, int *i, expr_ty *key, expr_ty *value) { expr_ty expression; if (TYPE(CHILD(n, *i)) == DOUBLESTAR) { assert(NCH(n) - *i >= 2); expression = ast_for_expr(c, CHILD(n, *i + 1)); if (!expression) return 0; *key = NULL; *value = expression; *i += 2; } else { assert(NCH(n) - *i >= 3); expression = ast_for_expr(c, CHILD(n, *i)); if (!expression) return 0; *key = expression; REQ(CHILD(n, *i + 1), COLON); expression = ast_for_expr(c, CHILD(n, *i + 2)); if (!expression) return 0; *value = expression; *i += 3; } return 1; } static expr_ty ast_for_dictcomp(struct compiling *c, const node *n) { expr_ty key, value; asdl_seq *comps; int i = 0; if (!ast_for_dictelement(c, n, &i, &key, &value)) return NULL; assert(key); assert(NCH(n) - i >= 1); comps = ast_for_comprehension(c, CHILD(n, i)); if (!comps) return NULL; return DictComp(key, value, comps, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static expr_ty ast_for_dictdisplay(struct compiling *c, const node *n) { int i; int j; int size; asdl_seq *keys, *values; size = (NCH(n) + 1) / 3; /* +1 in case no trailing comma */ keys = _Py_asdl_seq_new(size, c->c_arena); if (!keys) return NULL; values = _Py_asdl_seq_new(size, c->c_arena); if (!values) return NULL; j = 0; for (i = 0; i < NCH(n); i++) { expr_ty key, value; if (!ast_for_dictelement(c, n, &i, &key, &value)) return NULL; asdl_seq_SET(keys, j, key); asdl_seq_SET(values, j, value); j++; } keys->size = j; values->size = j; return Dict(keys, values, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static expr_ty ast_for_genexp(struct compiling *c, const node *n) { assert(TYPE(n) == (testlist_comp) || TYPE(n) == (argument)); return ast_for_itercomp(c, n, COMP_GENEXP); } static expr_ty ast_for_listcomp(struct compiling *c, const node *n) { assert(TYPE(n) == 
(testlist_comp)); return ast_for_itercomp(c, n, COMP_LISTCOMP); } static expr_ty ast_for_setcomp(struct compiling *c, const node *n) { assert(TYPE(n) == (dictorsetmaker)); return ast_for_itercomp(c, n, COMP_SETCOMP); } static expr_ty ast_for_setdisplay(struct compiling *c, const node *n) { int i; int size; asdl_seq *elts; assert(TYPE(n) == (dictorsetmaker)); size = (NCH(n) + 1) / 2; /* +1 in case no trailing comma */ elts = _Py_asdl_seq_new(size, c->c_arena); if (!elts) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, i)); if (!expression) return NULL; asdl_seq_SET(elts, i / 2, expression); } return Set(elts, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static expr_ty ast_for_atom(struct compiling *c, const node *n) { /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False' */ node *ch = CHILD(n, 0); switch (TYPE(ch)) { case NAME: { PyObject *name; const char *s = STR(ch); size_t len = strlen(s); if (len >= 4 && len <= 5) { if (!strcmp(s, "None")) return Constant(Py_None, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!strcmp(s, "True")) return Constant(Py_True, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!strcmp(s, "False")) return Constant(Py_False, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } name = new_identifier(s, c); if (!name) return NULL; /* All names start in Load context, but may later be changed. 
*/ return Name(name, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) { ast_error(c, n, "(%s) %U", errtype, errstr); Py_DECREF(errstr); } else { PyErr_Clear(); ast_error(c, n, "(%s) unknown error", errtype); } Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum = parsenumber(c, STR(ch)); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Constant(pynum, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Constant(Py_Ellipsis, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if (NCH(ch) == 1) { return ast_for_testlist(c, ch); } if (TYPE(CHILD(ch, 1)) == comp_for) { return copy_location(ast_for_genexp(c, ch), n); } else { return copy_location(ast_for_testlist(c, ch), n); } case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), 
n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { return copy_location(ast_for_listcomp(c, ch), n); } case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. */ res = ast_for_dictdisplay(c, ch); } return copy_location(res, n); } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } } static slice_ty ast_for_slice(struct compiling *c, const node *n) { node *ch; expr_ty lower = NULL, upper = NULL, step = NULL; REQ(n, subscript); /* subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] */ ch = CHILD(n, 0); if (NCH(n) == 1 && TYPE(ch) == test) { /* 'step' variable hold no significance in terms of being used over other vars */ step = ast_for_expr(c, ch); if (!step) return NULL; return Index(step, c->c_arena); } if (TYPE(ch) == test) { lower = ast_for_expr(c, ch); if (!lower) return NULL; } /* If there's an upper bound it's in the second or third position. 
*/ if (TYPE(ch) == COLON) { if (NCH(n) > 1) { node *n2 = CHILD(n, 1); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } } else if (NCH(n) > 2) { node *n2 = CHILD(n, 2); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } ch = CHILD(n, NCH(n) - 1); if (TYPE(ch) == sliceop) { if (NCH(ch) != 1) { ch = CHILD(ch, 1); if (TYPE(ch) == test) { step = ast_for_expr(c, ch); if (!step) return NULL; } } } return Slice(lower, upper, step, c->c_arena); } static expr_ty ast_for_binop(struct compiling *c, const node *n) { /* Must account for a sequence of expressions. How should A op B op C by represented? BinOp(BinOp(A, op, B), op, C). */ int i, nops; expr_ty expr1, expr2, result; operator_ty newoperator; expr1 = ast_for_expr(c, CHILD(n, 0)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 2)); if (!expr2) return NULL; newoperator = get_operator(CHILD(n, 1)); if (!newoperator) return NULL; result = BinOp(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, CHILD(n, 2)->n_end_lineno, CHILD(n, 2)->n_end_col_offset, c->c_arena); if (!result) return NULL; nops = (NCH(n) - 1) / 2; for (i = 1; i < nops; i++) { expr_ty tmp_result, tmp; const node* next_oper = CHILD(n, i * 2 + 1); newoperator = get_operator(next_oper); if (!newoperator) return NULL; tmp = ast_for_expr(c, CHILD(n, i * 2 + 2)); if (!tmp) return NULL; tmp_result = BinOp(result, newoperator, tmp, LINENO(next_oper), next_oper->n_col_offset, CHILD(n, i * 2 + 2)->n_end_lineno, CHILD(n, i * 2 + 2)->n_end_col_offset, c->c_arena); if (!tmp_result) return NULL; result = tmp_result; } return result; } static expr_ty ast_for_trailer(struct compiling *c, const node *n, expr_ty left_expr) { /* trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: '.' '.' '.' 
| test | [test] ':' [test] [sliceop] */ const node *n_copy = n; REQ(n, trailer); if (TYPE(CHILD(n, 0)) == LPAR) { if (NCH(n) == 2) return Call(left_expr, NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else return ast_for_call(c, CHILD(n, 1), left_expr, CHILD(n, 0), CHILD(n, 2)); } else if (TYPE(CHILD(n, 0)) == DOT) { PyObject *attr_id = NEW_IDENTIFIER(CHILD(n, 1)); if (!attr_id) return NULL; return Attribute(left_expr, attr_id, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { REQ(CHILD(n, 0), LSQB); REQ(CHILD(n, 2), RSQB); n = CHILD(n, 1); if (NCH(n) == 1) { slice_ty slc = ast_for_slice(c, CHILD(n, 0)); if (!slc) return NULL; return Subscript(left_expr, slc, Load, LINENO(n), n->n_col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } else { /* The grammar is ambiguous here. The ambiguity is resolved by treating the sequence as a tuple literal if there are no slice features. */ Py_ssize_t j; slice_ty slc; expr_ty e; int simple = 1; asdl_seq *slices, *elts; slices = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!slices) return NULL; for (j = 0; j < NCH(n); j += 2) { slc = ast_for_slice(c, CHILD(n, j)); if (!slc) return NULL; if (slc->kind != Index_kind) simple = 0; asdl_seq_SET(slices, j / 2, slc); } if (!simple) { return Subscript(left_expr, ExtSlice(slices, c->c_arena), Load, LINENO(n), n->n_col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } /* extract Index values and put them in a Tuple */ elts = _Py_asdl_seq_new(asdl_seq_LEN(slices), c->c_arena); if (!elts) return NULL; for (j = 0; j < asdl_seq_LEN(slices); ++j) { slc = (slice_ty)asdl_seq_GET(slices, j); assert(slc->kind == Index_kind && slc->v.Index.value); asdl_seq_SET(elts, j, slc->v.Index.value); } e = Tuple(elts, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!e) return NULL; return Subscript(left_expr, Index(e, c->c_arena), 
Load, LINENO(n), n->n_col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } } } static expr_ty ast_for_factor(struct compiling *c, const node *n) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; switch (TYPE(CHILD(n, 0))) { case PLUS: return UnaryOp(UAdd, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case MINUS: return UnaryOp(USub, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case TILDE: return UnaryOp(Invert, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unhandled factor: %d", TYPE(CHILD(n, 0))); return NULL; } static expr_ty ast_for_atom_expr(struct compiling *c, const node *n) { int i, nch, start = 0; expr_ty e, tmp; REQ(n, atom_expr); nch = NCH(n); if (TYPE(CHILD(n, 0)) == NAME && strcmp(STR(CHILD(n, 0)), "await") == 0) { start = 1; assert(nch > 1); } e = ast_for_atom(c, CHILD(n, start)); if (!e) return NULL; if (nch == 1) return e; if (start && nch == 2) { return Await(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } for (i = start + 1; i < nch; i++) { node *ch = CHILD(n, i); if (TYPE(ch) != trailer) break; tmp = ast_for_trailer(c, ch, e); if (!tmp) return NULL; tmp->lineno = e->lineno; tmp->col_offset = e->col_offset; e = tmp; } if (start) { /* there was an 'await' */ return Await(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { return e; } } static expr_ty ast_for_power(struct compiling *c, const node *n) { /* power: atom trailer* ('**' factor)* */ expr_ty e; REQ(n, power); e = ast_for_atom_expr(c, CHILD(n, 0)); if (!e) return NULL; if (NCH(n) == 1) return e; if (TYPE(CHILD(n, NCH(n) - 1)) == factor) { expr_ty f = ast_for_expr(c, CHILD(n, NCH(n) - 1)); if (!f) return NULL; e = BinOp(e, Pow, f, LINENO(n), n->n_col_offset, n->n_end_lineno, 
n->n_end_col_offset, c->c_arena); } return e; } static expr_ty ast_for_starred(struct compiling *c, const node *n) { expr_ty tmp; REQ(n, star_expr); tmp = ast_for_expr(c, CHILD(n, 1)); if (!tmp) return NULL; /* The Load context is changed later. */ return Starred(tmp, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* Do not name a variable 'expr'! Will cause a compile error. */ static expr_ty ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions namedexpr_test: test [':=' test] test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: ['await'] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case namedexpr_test: if (NCH(n) == 3) return ast_for_namedexpr(c, n); /* Fallthrough */ case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, 
seq, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Py_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. 
*/ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; } static expr_ty ast_for_call(struct compiling *c, const node *n, expr_ty func, const node *maybegenbeg, const node *closepar) { /* arglist: argument (',' argument)* [','] argument: ( test [comp_for] | '*' test | test '=' test | '**' test ) */ int i, nargs, nkeywords; int ndoublestars; asdl_seq *args; asdl_seq *keywords; REQ(n, arglist); nargs = 0; nkeywords = 0; for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { if (NCH(ch) == 1) nargs++; else if (TYPE(CHILD(ch, 1)) == comp_for) { nargs++; if (!maybegenbeg) { ast_error(c, ch, "invalid syntax"); return NULL; } if (NCH(n) > 1) { ast_error(c, ch, "Generator expression must be parenthesized"); return NULL; } } else if (TYPE(CHILD(ch, 0)) == STAR) nargs++; else if (TYPE(CHILD(ch, 1)) == COLONEQUAL) { nargs++; } else /* TYPE(CHILD(ch, 0)) == DOUBLESTAR or keyword argument */ nkeywords++; } } args = _Py_asdl_seq_new(nargs, c->c_arena); if (!args) return NULL; keywords = _Py_asdl_seq_new(nkeywords, c->c_arena); if (!keywords) return NULL; nargs 
= 0; /* positional arguments + iterable argument unpackings */ nkeywords = 0; /* keyword arguments + keyword argument unpackings */ ndoublestars = 0; /* just keyword argument unpackings */ for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { expr_ty e; node *chch = CHILD(ch, 0); if (NCH(ch) == 1) { /* a positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_expr(c, chch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(chch) == STAR) { /* an iterable argument unpacking */ expr_ty starred; if (ndoublestars) { ast_error(c, chch, "iterable argument unpacking follows " "keyword argument unpacking"); return NULL; } e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; starred = Starred(e, Load, LINENO(chch), chch->n_col_offset, chch->n_end_lineno, chch->n_end_col_offset, c->c_arena); if (!starred) return NULL; asdl_seq_SET(args, nargs++, starred); } else if (TYPE(chch) == DOUBLESTAR) { /* a keyword argument unpacking */ keyword_ty kw; i++; e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; kw = keyword(NULL, e, c->c_arena); asdl_seq_SET(keywords, nkeywords++, kw); ndoublestars++; } else if (TYPE(CHILD(ch, 1)) == comp_for) { /* the lone generator expression */ e = copy_location(ast_for_genexp(c, ch), maybegenbeg); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(CHILD(ch, 1)) == COLONEQUAL) { /* treat colon equal as positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_namedexpr(c, ch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else { /* a keyword argument */ keyword_ty kw; identifier key, tmp; int 
k; // To remain LL(1), the grammar accepts any test (basically, any // expression) in the keyword slot of a call site. So, we need // to manually enforce that the keyword is a NAME here. static const int name_tree[] = { test, or_test, and_test, not_test, comparison, expr, xor_expr, and_expr, shift_expr, arith_expr, term, factor, power, atom_expr, atom, 0, }; node *expr_node = chch; for (int i = 0; name_tree[i]; i++) { if (TYPE(expr_node) != name_tree[i]) break; if (NCH(expr_node) != 1) break; expr_node = CHILD(expr_node, 0); } if (TYPE(expr_node) != NAME) { ast_error(c, chch, "expression cannot contain assignment, " "perhaps you meant \"==\"?"); return NULL; } key = new_identifier(STR(expr_node), c); if (key == NULL) { return NULL; } if (forbidden_name(c, key, chch, 1)) { return NULL; } for (k = 0; k < nkeywords; k++) { tmp = ((keyword_ty)asdl_seq_GET(keywords, k))->arg; if (tmp && !PyUnicode_Compare(tmp, key)) { ast_error(c, chch, "keyword argument repeated"); return NULL; } } e = ast_for_expr(c, CHILD(ch, 2)); if (!e) return NULL; kw = keyword(key, e, c->c_arena); if (!kw) return NULL; asdl_seq_SET(keywords, nkeywords++, kw); } } } return Call(func, args, keywords, func->lineno, func->col_offset, closepar->n_end_lineno, closepar->n_end_col_offset, c->c_arena); } static expr_ty ast_for_testlist(struct compiling *c, const node* n) { /* testlist_comp: test (comp_for | (',' test)* [',']) */ /* testlist: test (',' test)* [','] */ assert(NCH(n) > 0); if (TYPE(n) == testlist_comp) { if (NCH(n) > 1) assert(TYPE(CHILD(n, 1)) != comp_for); } else { assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr); } if (NCH(n) == 1) return ast_for_expr(c, CHILD(n, 0)); else { asdl_seq *tmp = seq_for_testlist(c, n); if (!tmp) return NULL; return Tuple(tmp, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } static stmt_ty ast_for_expr_stmt(struct compiling *c, const node *n) { REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | 
augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*) annassign: ':' test ['=' test] testlist_star_expr: (test|star_expr) (',' test|star_expr)* [','] augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=' test: ... here starts the operator precedence dance */ if (NCH(n) == 1) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) { return NULL; } expr1->v.Name.ctx = Store; break; 
case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); if (TYPE(ch) == testlist) { expr3 = ast_for_testlist(c, ch); } else { expr3 = ast_for_expr(c, ch); } if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } else { int i; asdl_seq *targets; node *value; expr_ty expression; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); targets = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!targets) return NULL; for (i = 0; i < NCH(n) - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, NCH(n) - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; return Assign(targets, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } static asdl_seq * ast_for_exprlist(struct compiling *c, const node *n, expr_context_ty context) { asdl_seq *seq; int i; expr_ty e; 
REQ(n, exprlist); seq = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); if (context && !set_context(c, e, context, CHILD(n, i))) return NULL; } return seq; } static stmt_ty ast_for_del_stmt(struct compiling *c, const node *n) { asdl_seq *expr_list; /* del_stmt: 'del' exprlist */ REQ(n, del_stmt); expr_list = ast_for_exprlist(c, CHILD(n, 1), Del); if (!expr_list) return NULL; return Delete(expr_list, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static stmt_ty ast_for_flow_stmt(struct compiling *c, const node *n) { /* flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt break_stmt: 'break' continue_stmt: 'continue' return_stmt: 'return' [testlist] yield_stmt: yield_expr yield_expr: 'yield' testlist | 'yield' 'from' test raise_stmt: 'raise' [test [',' test [',' test]]] */ node *ch; REQ(n, flow_stmt); ch = CHILD(n, 0); switch (TYPE(ch)) { case break_stmt: return Break(LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case continue_stmt: return Continue(LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case yield_stmt: { /* will reduce to yield_expr */ expr_ty exp = ast_for_expr(c, CHILD(ch, 0)); if (!exp) return NULL; return Expr(exp, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case return_stmt: if (NCH(ch) == 1) return Return(NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else { expr_ty expression = ast_for_testlist(c, CHILD(ch, 1)); if (!expression) return NULL; return Return(expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case raise_stmt: if (NCH(ch) == 1) return Raise(NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else if (NCH(ch) >= 2) { expr_ty 
cause = NULL; expr_ty expression = ast_for_expr(c, CHILD(ch, 1)); if (!expression) return NULL; if (NCH(ch) == 4) { cause = ast_for_expr(c, CHILD(ch, 3)); if (!cause) return NULL; } return Raise(expression, cause, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* fall through */ default: PyErr_Format(PyExc_SystemError, "unexpected flow_stmt: %d", TYPE(ch)); return NULL; } } static alias_ty alias_for_import_name(struct compiling *c, const node *n, int store) { /* import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] dotted_name: NAME ('.' NAME)* */ identifier str, name; loop: switch (TYPE(n)) { case import_as_name: { node *name_node = CHILD(n, 0); str = NULL; name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (NCH(n) == 3) { node *str_node = CHILD(n, 2); str = NEW_IDENTIFIER(str_node); if (!str) return NULL; if (store && forbidden_name(c, str, str_node, 0)) return NULL; } else { if (forbidden_name(c, name, name_node, 0)) return NULL; } return alias(name, str, c->c_arena); } case dotted_as_name: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { node *asname_node = CHILD(n, 2); alias_ty a = alias_for_import_name(c, CHILD(n, 0), 0); if (!a) return NULL; assert(!a->asname); a->asname = NEW_IDENTIFIER(asname_node); if (!a->asname) return NULL; if (forbidden_name(c, a->asname, asname_node, 0)) return NULL; return a; } break; case dotted_name: if (NCH(n) == 1) { node *name_node = CHILD(n, 0); name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (store && forbidden_name(c, name, name_node, 0)) return NULL; return alias(name, NULL, c->c_arena); } else { /* Create a string of the form "a.b.c" */ int i; size_t len; char *s; PyObject *uni; len = 0; for (i = 0; i < NCH(n); i += 2) /* length of string plus one for the dot */ len += strlen(STR(CHILD(n, i))) + 1; len--; /* the last name doesn't have a dot */ str = PyBytes_FromStringAndSize(NULL, len); if (!str) return NULL; s = PyBytes_AS_STRING(str); if 
(!s) return NULL; for (i = 0; i < NCH(n); i += 2) { char *sch = STR(CHILD(n, i)); strcpy(s, STR(CHILD(n, i))); s += strlen(sch); *s++ = '.'; } --s; *s = '\0'; uni = PyUnicode_DecodeUTF8(PyBytes_AS_STRING(str), PyBytes_GET_SIZE(str), NULL); Py_DECREF(str); if (!uni) return NULL; str = uni; PyUnicode_InternInPlace(&str); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); } break; case STAR: str = PyUnicode_InternFromString("*"); if (!str) return NULL; if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); default: PyErr_Format(PyExc_SystemError, "unexpected import name: %d", TYPE(n)); return NULL; } PyErr_SetString(PyExc_SystemError, "unhandled import name condition"); return NULL; } static stmt_ty ast_for_import_stmt(struct compiling *c, const node *n) { /* import_stmt: import_name | import_from import_name: 'import' dotted_as_names import_from: 'from' (('.' | '...')* dotted_name | ('.' 
| '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names) */ int lineno; int col_offset; int i; asdl_seq *aliases; REQ(n, import_stmt); lineno = LINENO(n); col_offset = n->n_col_offset; n = CHILD(n, 0); if (TYPE(n) == import_name) { n = CHILD(n, 1); REQ(n, dotted_as_names); aliases = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!aliases) return NULL; for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } // Even though n is modified above, the end position is not changed return Import(aliases, lineno, col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(n) == import_from) { int n_children; int idx, ndots = 0; const node *n_copy = n; alias_ty mod = NULL; identifier modname = NULL; /* Count the number of dots (for relative imports) and check for the optional module name */ for (idx = 1; idx < NCH(n); idx++) { if (TYPE(CHILD(n, idx)) == dotted_name) { mod = alias_for_import_name(c, CHILD(n, idx), 0); if (!mod) return NULL; idx++; break; } else if (TYPE(CHILD(n, idx)) == ELLIPSIS) { /* three consecutive dots are tokenized as one ELLIPSIS */ ndots += 3; continue; } else if (TYPE(CHILD(n, idx)) != DOT) { break; } ndots++; } idx++; /* skip over the 'import' keyword */ switch (TYPE(CHILD(n, idx))) { case STAR: /* from ... import * */ n = CHILD(n, idx); n_children = 1; break; case LPAR: /* from ... import (x, y, z) */ n = CHILD(n, idx + 1); n_children = NCH(n); break; case import_as_names: /* from ... import x, y, z */ n = CHILD(n, idx); n_children = NCH(n); if (n_children % 2 == 0) { ast_error(c, n, "trailing comma not allowed without" " surrounding parentheses"); return NULL; } break; default: ast_error(c, n, "Unexpected node-type in from-import"); return NULL; } aliases = _Py_asdl_seq_new((n_children + 1) / 2, c->c_arena); if (!aliases) return NULL; /* handle "from ... 
import *" special b/c there's no children */ if (TYPE(n) == STAR) { alias_ty import_alias = alias_for_import_name(c, n, 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, 0, import_alias); } else { for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } } if (mod != NULL) modname = mod->name; return ImportFrom(modname, aliases, ndots, lineno, col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unknown import statement: starts with command '%s'", STR(CHILD(n, 0))); return NULL; } static stmt_ty ast_for_global_stmt(struct compiling *c, const node *n) { /* global_stmt: 'global' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, global_stmt); s = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Global(s, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static stmt_ty ast_for_nonlocal_stmt(struct compiling *c, const node *n) { /* nonlocal_stmt: 'nonlocal' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, nonlocal_stmt); s = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Nonlocal(s, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static stmt_ty ast_for_assert_stmt(struct compiling *c, const node *n) { /* assert_stmt: 'assert' test [',' test] */ REQ(n, assert_stmt); if (NCH(n) == 2) { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return Assert(expression, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (NCH(n) == 4) { expr_ty 
expr1, expr2;

        /* 'assert' test ',' test — assertion with a message expression */
        expr1 = ast_for_expr(c, CHILD(n, 1));
        if (!expr1)
            return NULL;
        expr2 = ast_for_expr(c, CHILD(n, 3));
        if (!expr2)
            return NULL;

        return Assert(expr1, expr2, LINENO(n), n->n_col_offset,
                      n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    }
    PyErr_Format(PyExc_SystemError,
                 "improper number of parts to 'assert' statement: %d",
                 NCH(n));
    return NULL;
}

/* Convert a 'suite' CST node into a flat asdl sequence of stmt_ty.
   suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
   Returns NULL with an exception set on error. */
static asdl_seq *
ast_for_suite(struct compiling *c, const node *n)
{
    /* suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT */
    asdl_seq *seq;
    stmt_ty s;
    int i, total, num, end, pos = 0;
    node *ch;

    REQ(n, suite);

    /* num_stmts counts statements recursively, so the result sequence
       can be allocated up front at its final size. */
    total = num_stmts(n);
    seq = _Py_asdl_seq_new(total, c->c_arena);
    if (!seq)
        return NULL;
    if (TYPE(CHILD(n, 0)) == simple_stmt) {
        n = CHILD(n, 0);
        /* simple_stmt always ends with a NEWLINE,
           and may have a trailing SEMI
        */
        end = NCH(n) - 1;
        if (TYPE(CHILD(n, end - 1)) == SEMI)
            end--;
        /* loop by 2 to skip semi-colons */
        for (i = 0; i < end; i += 2) {
            ch = CHILD(n, i);
            s = ast_for_stmt(c, ch);
            if (!s)
                return NULL;
            asdl_seq_SET(seq, pos++, s);
        }
    }
    else {
        /* Skip NEWLINE (0) and INDENT (1); stop before DEDENT (last). */
        for (i = 2; i < (NCH(n) - 1); i++) {
            ch = CHILD(n, i);
            REQ(ch, stmt);
            num = num_stmts(ch);
            if (num == 1) {
                /* small_stmt or compound_stmt with only one child */
                s = ast_for_stmt(c, ch);
                if (!s)
                    return NULL;
                asdl_seq_SET(seq, pos++, s);
            }
            else {
                int j;
                ch = CHILD(ch, 0);
                REQ(ch, simple_stmt);
                for (j = 0; j < NCH(ch); j += 2) {
                    /* statement terminates with a semi-colon ';' */
                    if (NCH(CHILD(ch, j)) == 0) {
                        assert((j + 1) == NCH(ch));
                        break;
                    }
                    s = ast_for_stmt(c, CHILD(ch, j));
                    if (!s)
                        return NULL;
                    asdl_seq_SET(seq, pos++, s);
                }
            }
        }
    }
    assert(pos == seq->size);
    return seq;
}

/* Copy the end position (line / column) of the last statement in `s`
   into *end_lineno and *end_col_offset; used to give compound
   statements an accurate end location. */
static void
get_last_end_pos(asdl_seq *s, int *end_lineno, int *end_col_offset)
{
    int tot = asdl_seq_LEN(s);
    // Suite should not be empty, but it is safe to just ignore it
    // if it will ever occur.
    if (!tot) {
        return;
    }
    stmt_ty last = asdl_seq_GET(s, tot - 1);
    *end_lineno = last->end_lineno;
    *end_col_offset = last->end_col_offset;
}

/* Build an If node (folding elif chains into nested orelse lists).
   if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
*/
static stmt_ty
ast_for_if_stmt(struct compiling *c, const node *n)
{
    /* if_stmt: 'if' test ':' suite ('elif' test ':' suite)*
       ['else' ':' suite]
    */
    char *s;
    int end_lineno, end_col_offset;

    REQ(n, if_stmt);

    if (NCH(n) == 4) {
        /* plain 'if' with no elif/else */
        expr_ty expression;
        asdl_seq *suite_seq;

        expression = ast_for_expr(c, CHILD(n, 1));
        if (!expression)
            return NULL;
        suite_seq = ast_for_suite(c, CHILD(n, 3));
        if (!suite_seq)
            return NULL;
        get_last_end_pos(suite_seq, &end_lineno, &end_col_offset);

        return If(expression, suite_seq, NULL, LINENO(n), n->n_col_offset,
                  end_lineno, end_col_offset, c->c_arena);
    }

    s = STR(CHILD(n, 4));
    /* s[2], the third character in the string, will be
       's' for el_s_e, or
       'i' for el_i_f
    */
    if (s[2] == 's') {
        expr_ty expression;
        asdl_seq *seq1, *seq2;

        expression = ast_for_expr(c, CHILD(n, 1));
        if (!expression)
            return NULL;
        seq1 = ast_for_suite(c, CHILD(n, 3));
        if (!seq1)
            return NULL;
        seq2 = ast_for_suite(c, CHILD(n, 6));
        if (!seq2)
            return NULL;
        get_last_end_pos(seq2, &end_lineno, &end_col_offset);

        return If(expression, seq1, seq2, LINENO(n), n->n_col_offset,
                  end_lineno, end_col_offset, c->c_arena);
    }
    else if (s[2] == 'i') {
        int i, n_elif, has_else = 0;
        expr_ty expression;
        asdl_seq *suite_seq;
        asdl_seq *orelse = NULL;
        n_elif = NCH(n) - 4;
        /* must reference the child n_elif+1 since 'else' token is third,
           not fourth, child from the end.
*/ if (TYPE(CHILD(n, (n_elif + 1))) == NAME && STR(CHILD(n, (n_elif + 1)))[2] == 's') { has_else = 1; n_elif -= 3; } n_elif /= 4; if (has_else) { asdl_seq *suite_seq2; orelse = _Py_asdl_seq_new(1, c->c_arena); if (!orelse) return NULL; expression = ast_for_expr(c, CHILD(n, NCH(n) - 6)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, NCH(n) - 4)); if (!suite_seq) return NULL; suite_seq2 = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!suite_seq2) return NULL; get_last_end_pos(suite_seq2, &end_lineno, &end_col_offset); asdl_seq_SET(orelse, 0, If(expression, suite_seq, suite_seq2, LINENO(CHILD(n, NCH(n) - 6)), CHILD(n, NCH(n) - 6)->n_col_offset, end_lineno, end_col_offset, c->c_arena)); /* the just-created orelse handled the last elif */ n_elif--; } for (i = 0; i < n_elif; i++) { int off = 5 + (n_elif - i - 1) * 4; asdl_seq *newobj = _Py_asdl_seq_new(1, c->c_arena); if (!newobj) return NULL; expression = ast_for_expr(c, CHILD(n, off)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, off + 2)); if (!suite_seq) return NULL; if (orelse != NULL) { get_last_end_pos(orelse, &end_lineno, &end_col_offset); } else { get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); } asdl_seq_SET(newobj, 0, If(expression, suite_seq, orelse, LINENO(CHILD(n, off)), CHILD(n, off)->n_col_offset, end_lineno, end_col_offset, c->c_arena)); orelse = newobj; } expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; get_last_end_pos(orelse, &end_lineno, &end_col_offset); return If(expression, suite_seq, orelse, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unexpected token in 'if' statement: %s", s); return NULL; } static stmt_ty ast_for_while_stmt(struct compiling *c, const node *n) { /* while_stmt: 'while' test ':' suite ['else' ':' suite] */ REQ(n, while_stmt); int end_lineno, end_col_offset; if (NCH(n) == 4) 
{ expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return While(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } else if (NCH(n) == 7) { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; get_last_end_pos(seq2, &end_lineno, &end_col_offset); return While(expression, seq1, seq2, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of tokens for 'while' statement: %d", NCH(n)); return NULL; } static stmt_ty ast_for_for_stmt(struct compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; asdl_seq *_target, *seq = NULL, *suite_seq; expr_ty expression; expr_ty target, first; const node *node_target; int end_lineno, end_col_offset; /* for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] */ REQ(n, for_stmt); if (NCH(n) == 9) { seq = ast_for_suite(c, CHILD(n, 8)); if (!seq) return NULL; } node_target = CHILD(n, 1); _target = ast_for_exprlist(c, node_target, Store); if (!_target) return NULL; /* Check the # of children rather than the length of _target, since for x, in ... has 1 element in _target, but still requires a Tuple. 
*/ first = (expr_ty)asdl_seq_GET(_target, 0); if (NCH(node_target) == 1) target = first; else target = Tuple(_target, Store, first->lineno, first->col_offset, node_target->n_end_lineno, node_target->n_end_col_offset, c->c_arena); expression = ast_for_testlist(c, CHILD(n, 3)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 5)); if (!suite_seq) return NULL; if (seq != NULL) { get_last_end_pos(seq, &end_lineno, &end_col_offset); } else { get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); } if (is_async) return AsyncFor(target, expression, suite_seq, seq, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return For(target, expression, suite_seq, seq, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static excepthandler_ty ast_for_except_clause(struct compiling *c, const node *exc, node *body) { /* except_clause: 'except' [test ['as' test]] */ int end_lineno, end_col_offset; REQ(exc, except_clause); REQ(body, suite); if (NCH(exc) == 1) { asdl_seq *suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return ExceptHandler(NULL, NULL, suite_seq, LINENO(exc), exc->n_col_offset, end_lineno, end_col_offset, c->c_arena); } else if (NCH(exc) == 2) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return ExceptHandler(expression, NULL, suite_seq, LINENO(exc), exc->n_col_offset, end_lineno, end_col_offset, c->c_arena); } else if (NCH(exc) == 4) { asdl_seq *suite_seq; expr_ty expression; identifier e = NEW_IDENTIFIER(CHILD(exc, 3)); if (!e) return NULL; if (forbidden_name(c, e, CHILD(exc, 3), 0)) return NULL; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; 
get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return ExceptHandler(expression, e, suite_seq, LINENO(exc), exc->n_col_offset, end_lineno, end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of children for 'except' clause: %d", NCH(exc)); return NULL; } static stmt_ty ast_for_try_stmt(struct compiling *c, const node *n) { const int nch = NCH(n); int end_lineno, end_col_offset, n_except = (nch - 3)/3; asdl_seq *body, *handlers = NULL, *orelse = NULL, *finally = NULL; excepthandler_ty last_handler; REQ(n, try_stmt); body = ast_for_suite(c, CHILD(n, 2)); if (body == NULL) return NULL; if (TYPE(CHILD(n, nch - 3)) == NAME) { if (strcmp(STR(CHILD(n, nch - 3)), "finally") == 0) { if (nch >= 9 && TYPE(CHILD(n, nch - 6)) == NAME) { /* we can assume it's an "else", because nch >= 9 for try-else-finally and it would otherwise have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 4)); if (orelse == NULL) return NULL; n_except--; } finally = ast_for_suite(c, CHILD(n, nch - 1)); if (finally == NULL) return NULL; n_except--; } else { /* we can assume it's an "else", otherwise it would have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 1)); if (orelse == NULL) return NULL; n_except--; } } else if (TYPE(CHILD(n, nch - 3)) != except_clause) { ast_error(c, n, "malformed 'try' statement"); return NULL; } if (n_except > 0) { int i; /* process except statements to create a try ... 
except */
        handlers = _Py_asdl_seq_new(n_except, c->c_arena);
        if (handlers == NULL)
            return NULL;

        /* except clauses occur every 3 children starting at index 3;
           the matching suite is 2 children after each clause. */
        for (i = 0; i < n_except; i++) {
            excepthandler_ty e = ast_for_except_clause(c, CHILD(n, 3 + i * 3),
                                                       CHILD(n, 5 + i * 3));
            if (!e)
                return NULL;
            asdl_seq_SET(handlers, i, e);
        }
    }

    assert(finally != NULL || asdl_seq_LEN(handlers));
    if (finally != NULL) {
        // finally is always last
        get_last_end_pos(finally, &end_lineno, &end_col_offset);
    } else if (orelse != NULL) {
        // otherwise else is last
        get_last_end_pos(orelse, &end_lineno, &end_col_offset);
    } else {
        // inline the get_last_end_pos logic due to layout mismatch
        last_handler = (excepthandler_ty) asdl_seq_GET(handlers, n_except - 1);
        end_lineno = last_handler->end_lineno;
        end_col_offset = last_handler->end_col_offset;
    }
    return Try(body, handlers, orelse, finally, LINENO(n), n->n_col_offset,
               end_lineno, end_col_offset, c->c_arena);
}

/* with_item: test ['as' expr]
   Build a withitem for one context-manager entry; the optional 'as'
   target gets Store context.  Returns NULL with an exception set on
   error. */
static withitem_ty
ast_for_with_item(struct compiling *c, const node *n)
{
    expr_ty context_expr, optional_vars = NULL;

    REQ(n, with_item);
    context_expr = ast_for_expr(c, CHILD(n, 0));
    if (!context_expr)
        return NULL;
    if (NCH(n) == 3) {
        optional_vars = ast_for_expr(c, CHILD(n, 2));

        if (!optional_vars) {
            return NULL;
        }
        if (!set_context(c, optional_vars, Store, n)) {
            return NULL;
        }
    }

    return withitem(context_expr, optional_vars, c->c_arena);
}

/* with_stmt: 'with' with_item (',' with_item)* ':' suite
   Build a With or AsyncWith node; when is_async is true, n0 is the
   enclosing async_stmt node and the real with_stmt is its child 1. */
static stmt_ty
ast_for_with_stmt(struct compiling *c, const node *n0, bool is_async)
{
    const node * const n = is_async ?
CHILD(n0, 1) : n0; int i, n_items, end_lineno, end_col_offset; asdl_seq *items, *body; REQ(n, with_stmt); n_items = (NCH(n) - 2) / 2; items = _Py_asdl_seq_new(n_items, c->c_arena); if (!items) return NULL; for (i = 1; i < NCH(n) - 2; i += 2) { withitem_ty item = ast_for_with_item(c, CHILD(n, i)); if (!item) return NULL; asdl_seq_SET(items, (i - 1) / 2, item); } body = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!body) return NULL; get_last_end_pos(body, &end_lineno, &end_col_offset); if (is_async) return AsyncWith(items, body, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return With(items, body, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static stmt_ty ast_for_classdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* classdef: 'class' NAME ['(' arglist ')'] ':' suite */ PyObject *classname; asdl_seq *s; expr_ty call; int end_lineno, end_col_offset; REQ(n, classdef); if (NCH(n) == 4) { /* class NAME ':' suite */ s = ast_for_suite(c, CHILD(n, 3)); if (!s) return NULL; get_last_end_pos(s, &end_lineno, &end_col_offset); classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } if (TYPE(CHILD(n, 3)) == RPAR) { /* class NAME '(' ')' ':' suite */ s = ast_for_suite(c, CHILD(n, 5)); if (!s) return NULL; get_last_end_pos(s, &end_lineno, &end_col_offset); classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } /* class NAME '(' arglist ')' ':' suite */ /* build up a fake Call node so we can extract its pieces */ { PyObject *dummy_name; expr_ty dummy; dummy_name = NEW_IDENTIFIER(CHILD(n, 1)); if 
(!dummy_name) return NULL; dummy = Name(dummy_name, Load, LINENO(n), n->n_col_offset, CHILD(n, 1)->n_end_lineno, CHILD(n, 1)->n_end_col_offset, c->c_arena); call = ast_for_call(c, CHILD(n, 3), dummy, NULL, CHILD(n, 4)); if (!call) return NULL; } s = ast_for_suite(c, CHILD(n, 6)); if (!s) return NULL; get_last_end_pos(s, &end_lineno, &end_col_offset); classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 1), 0)) return NULL; return ClassDef(classname, call->v.Call.args, call->v.Call.keywords, s, decorator_seq, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static stmt_ty ast_for_stmt(struct compiling *c, const node *n) { if (TYPE(n) == stmt) { assert(NCH(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == simple_stmt) { assert(num_stmts(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == small_stmt) { n = CHILD(n, 0); /* small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt */ switch (TYPE(n)) { case expr_stmt: return ast_for_expr_stmt(c, n); case del_stmt: return ast_for_del_stmt(c, n); case pass_stmt: return Pass(LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case flow_stmt: return ast_for_flow_stmt(c, n); case import_stmt: return ast_for_import_stmt(c, n); case global_stmt: return ast_for_global_stmt(c, n); case nonlocal_stmt: return ast_for_nonlocal_stmt(c, n); case assert_stmt: return ast_for_assert_stmt(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled small_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } else { /* compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef | decorated | async_stmt */ node *ch = CHILD(n, 0); REQ(n, compound_stmt); switch (TYPE(ch)) { case if_stmt: return ast_for_if_stmt(c, ch); case while_stmt: return ast_for_while_stmt(c, ch); case for_stmt: return ast_for_for_stmt(c, ch, 0); case try_stmt: return ast_for_try_stmt(c, ch); case 
with_stmt:
            return ast_for_with_stmt(c, ch, 0);
        case funcdef:
            return ast_for_funcdef(c, ch, NULL);
        case classdef:
            return ast_for_classdef(c, ch, NULL);
        case decorated:
            return ast_for_decorated(c, ch);
        case async_stmt:
            return ast_for_async_stmt(c, ch);
        default:
            PyErr_Format(PyExc_SystemError,
                         "unhandled compound_stmt: TYPE=%d NCH=%d\n",
                         TYPE(n), NCH(n));
            return NULL;
        }
    }
}

/* Parse a numeric literal string (with no '_' separators) into a
   Python int, float or complex object.  Returns a new reference, or
   NULL with an exception set on failure. */
static PyObject *
parsenumber_raw(struct compiling *c, const char *s)
{
    const char *end;
    long x;
    double dx;
    Py_complex compl;
    int imflag;

    assert(s != NULL);
    errno = 0;
    end = s + strlen(s) - 1;
    /* a trailing 'j'/'J' marks an imaginary literal */
    imflag = *end == 'j' || *end == 'J';
    if (s[0] == '0') {
        x = (long) PyOS_strtoul(s, (char **)&end, 0);
        if (x < 0 && errno == 0) {
            /* unsigned value overflowed a long: re-parse as an
               arbitrary-precision Python int */
            return PyLong_FromString(s, (char **)0, 0);
        }
    }
    else
        x = PyOS_strtol(s, (char **)&end, 0);
    if (*end == '\0') {
        if (errno != 0)
            return PyLong_FromString(s, (char **)0, 0);
        return PyLong_FromLong(x);
    }
    /* XXX Huge floats may silently fail */
    if (imflag) {
        compl.real = 0.;
        compl.imag = PyOS_string_to_double(s, (char **)&end, NULL);
        if (compl.imag == -1.0 && PyErr_Occurred())
            return NULL;
        return PyComplex_FromCComplex(compl);
    }
    else {
        dx = PyOS_string_to_double(s, NULL, NULL);
        if (dx == -1.0 && PyErr_Occurred())
            return NULL;
        return PyFloat_FromDouble(dx);
    }
}

/* Parse a numeric literal, first stripping any PEP 515 '_' digit
   separators, then deferring to parsenumber_raw().  Returns a new
   reference or NULL on error. */
static PyObject *
parsenumber(struct compiling *c, const char *s)
{
    char *dup, *end;
    PyObject *res = NULL;

    assert(s != NULL);

    if (strchr(s, '_') == NULL) {
        return parsenumber_raw(c, s);
    }
    /* Create a duplicate without underscores. */
    dup = PyMem_Malloc(strlen(s) + 1);
    if (dup == NULL) {
        return PyErr_NoMemory();
    }
    end = dup;
    for (; *s; s++) {
        if (*s != '_') {
            *end++ = *s;
        }
    }
    *end = '\0';
    res = parsenumber_raw(c, dup);
    PyMem_Free(dup);
    return res;
}

/* Decode the run of non-ASCII (high-bit) bytes starting at *sPtr, up
   to `end`, as UTF-8, advancing *sPtr past the run.  Returns a new
   unicode object or NULL on error. */
static PyObject *
decode_utf8(struct compiling *c, const char **sPtr, const char *end)
{
    const char *s, *t;
    t = s = *sPtr;
    /* while (s < end && *s != '\\') s++; */ /* inefficient for u".."
*/
    while (s < end && (*s & 0x80))
        s++;
    *sPtr = s;
    return PyUnicode_DecodeUTF8(t, s - t, NULL);
}

/* Emit a SyntaxWarning for a deprecated invalid escape sequence such
   as "\d"; if warnings are configured as errors, convert it to a
   SyntaxError at the node's location.  Returns 0 on success, -1 with
   an exception set on failure. */
static int
warn_invalid_escape_sequence(struct compiling *c, const node *n,
                             unsigned char first_invalid_escape_char)
{
    PyObject *msg = PyUnicode_FromFormat("invalid escape sequence \\%c",
                                         first_invalid_escape_char);
    if (msg == NULL) {
        return -1;
    }
    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg,
                                 c->c_filename, LINENO(n),
                                 NULL, NULL) < 0)
    {
        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
            /* Replace the SyntaxWarning exception with a SyntaxError
               to get a more accurate error report */
            PyErr_Clear();
            ast_error(c, n, "%U", msg);
        }
        Py_DECREF(msg);
        return -1;
    }
    Py_DECREF(msg);
    return 0;
}

/* Decode a unicode string literal body that may contain both
   backslash escapes and raw non-ASCII UTF-8 bytes.  Non-ASCII input
   is first rewritten as \U00XXXXXX escapes into a scratch buffer so
   the unicode-escape codec can process the whole thing.  Returns a
   new unicode object or NULL on error. */
static PyObject *
decode_unicode_with_escapes(struct compiling *c, const node *n, const char *s,
                            size_t len)
{
    PyObject *v, *u;
    char *buf;
    char *p;
    const char *end;

    /* check for integer overflow */
    if (len > SIZE_MAX / 6)
        return NULL;
    /* "ä" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5
       "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), or ~1:6 */
    u = PyBytes_FromStringAndSize((char *)NULL, len * 6);
    if (u == NULL)
        return NULL;
    p = buf = PyBytes_AsString(u);
    end = s + len;
    while (s < end) {
        if (*s == '\\') {
            *p++ = *s++;
            if (s >= end || *s & 0x80) {
                /* escape the backslash itself so a following \U escape
                   (inserted below for the non-ASCII byte) is literal */
                strcpy(p, "u005c");
                p += 5;
                if (s >= end)
                    break;
            }
        }
        if (*s & 0x80) { /* XXX inefficient */
            PyObject *w;
            int kind;
            void *data;
            Py_ssize_t len, i;
            w = decode_utf8(c, &s, end);
            if (w == NULL) {
                Py_DECREF(u);
                return NULL;
            }
            kind = PyUnicode_KIND(w);
            data = PyUnicode_DATA(w);
            len = PyUnicode_GET_LENGTH(w);
            for (i = 0; i < len; i++) {
                Py_UCS4 chr = PyUnicode_READ(kind, data, i);
                sprintf(p, "\\U%08x", chr);
                p += 10;
            }
            /* Should be impossible to overflow */
            assert(p - buf <= PyBytes_GET_SIZE(u));
            Py_DECREF(w);
        }
        else {
            *p++ = *s++;
        }
    }
    len = p - buf;
    s = buf;

    const char *first_invalid_escape;
    v = _PyUnicode_DecodeUnicodeEscape(s, len, NULL, &first_invalid_escape);

    if (v != NULL && first_invalid_escape != NULL) {
        if
(warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { /* We have not decref u before because first_invalid_escape points inside u. */ Py_XDECREF(u); Py_DECREF(v); return NULL; } } Py_XDECREF(u); return v; } static PyObject * decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { const char *first_invalid_escape; PyObject *result = _PyBytes_DecodeEscape(s, len, NULL, 0, NULL, &first_invalid_escape); if (result == NULL) return NULL; if (first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { Py_DECREF(result); return NULL; } } return result; } /* Shift locations for the given node and all its children by adding `lineno` and `col_offset` to existing locations. */ static void fstring_shift_node_locations(node *n, int lineno, int col_offset) { n->n_col_offset = n->n_col_offset + col_offset; n->n_end_col_offset = n->n_end_col_offset + col_offset; for (int i = 0; i < NCH(n); ++i) { if (n->n_lineno && n->n_lineno < CHILD(n, i)->n_lineno) { /* Shifting column offsets unnecessary if there's been newlines. */ col_offset = 0; } fstring_shift_node_locations(CHILD(n, i), lineno, col_offset); } n->n_lineno = n->n_lineno + lineno; n->n_end_lineno = n->n_end_lineno + lineno; } /* Fix locations for the given node and its children. `parent` is the enclosing node. `n` is the node which locations are going to be fixed relative to parent. `expr_str` is the child node's string representation, including braces. */ static void fstring_fix_node_location(const node *parent, node *n, char *expr_str) { char *substr = NULL; char *start; int lines = LINENO(parent) - 1; int cols = parent->n_col_offset; /* Find the full fstring to fix location information in `n`. 
*/ while (parent && parent->n_type != STRING) parent = parent->n_child; if (parent && parent->n_str) { substr = strstr(parent->n_str, expr_str); if (substr) { start = substr; while (start > parent->n_str) { if (start[0] == '\n') break; start--; } cols += (int)(substr - start); /* adjust the start based on the number of newlines encountered before the f-string expression */ for (char* p = parent->n_str; p < substr; p++) { if (*p == '\n') { lines++; } } } } fstring_shift_node_locations(n, lines, cols); } /* Compile this expression in to an expr_ty. Add parens around the expression, in order to allow leading spaces in the expression. */ static expr_ty fstring_compile_expr(const char *expr_start, const char *expr_end, struct compiling *c, const node *n) { PyCompilerFlags cf; node *mod_n; mod_ty mod; char *str; Py_ssize_t len; const char *s; assert(expr_end >= expr_start); assert(*(expr_start-1) == '{'); assert(*expr_end == '}' || *expr_end == '!' || *expr_end == ':'); /* If the substring is all whitespace, it's an error. We need to catch this here, and not when we call PyParser_SimpleParseStringFlagsFilename, because turning the expression '' in to '()' would go from being invalid to valid. */ for (s = expr_start; s != expr_end; s++) { char c = *s; /* The Python parser ignores only the following whitespace characters (\r already is converted to \n). */ if (!(c == ' ' || c == '\t' || c == '\n' || c == '\f')) { break; } } if (s == expr_end) { ast_error(c, n, "f-string: empty expression not allowed"); return NULL; } len = expr_end - expr_start; /* Allocate 3 extra bytes: open paren, close paren, null byte. 
*/ str = PyMem_RawMalloc(len + 3); if (str == NULL) { PyErr_NoMemory(); return NULL; } str[0] = '('; memcpy(str+1, expr_start, len); str[len+1] = ')'; str[len+2] = 0; cf.cf_flags = PyCF_ONLY_AST; mod_n = PyParser_SimpleParseStringFlagsFilename(str, "<fstring>", Py_eval_input, 0); if (!mod_n) { PyMem_RawFree(str); return NULL; } /* Reuse str to find the correct column offset. */ str[0] = '{'; str[len+1] = '}'; fstring_fix_node_location(n, mod_n, str); mod = PyAST_FromNode(mod_n, &cf, "<fstring>", c->c_arena); PyMem_RawFree(str); PyNode_Free(mod_n); if (!mod) return NULL; return mod->v.Expression.body; } /* Return -1 on error. Return 0 if we reached the end of the literal. Return 1 if we haven't reached the end of the literal, but we want the caller to process the literal up to this point. Used for doubled braces. */ static int fstring_find_literal(const char **str, const char *end, int raw, PyObject **literal, int recurse_lvl, struct compiling *c, const node *n) { /* Get any literal string. It ends when we hit an un-doubled left brace (which isn't part of a unicode name escape such as "\N{EULER CONSTANT}"), or the end of the string. */ const char *s = *str; const char *literal_start = s; int result = 0; assert(*literal == NULL); while (s < end) { char ch = *s++; if (!raw && ch == '\\' && s < end) { ch = *s++; if (ch == 'N') { if (s < end && *s++ == '{') { while (s < end && *s++ != '}') { } continue; } break; } if (ch == '{' && warn_invalid_escape_sequence(c, n, ch) < 0) { return -1; } } if (ch == '{' || ch == '}') { /* Check for doubled braces, but only at the top level. If we checked at every level, then f'{0:{3}}' would fail with the two closing braces. */ if (recurse_lvl == 0) { if (s < end && *s == ch) { /* We're going to tell the caller that the literal ends here, but that they should continue scanning. But also skip over the second brace when we resume scanning. 
*/ *str = s + 1; result = 1; goto done; } /* Where a single '{' is the start of a new expression, a single '}' is not allowed. */ if (ch == '}') { *str = s - 1; ast_error(c, n, "f-string: single '}' is not allowed"); return -1; } } /* We're either at a '{', which means we're starting another expression; or a '}', which means we're at the end of this f-string (for a nested format_spec). */ s--; break; } } *str = s; assert(s <= end); assert(s == end || *s == '{' || *s == '}'); done: if (literal_start != s) { if (raw) *literal = PyUnicode_DecodeUTF8Stateful(literal_start, s - literal_start, NULL, NULL); else *literal = decode_unicode_with_escapes(c, n, literal_start, s - literal_start); if (!*literal) return -1; } return result; } /* Forward declaration because parsing is recursive. */ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n); /* Parse the f-string at *str, ending at end. We know *str starts an expression (so it must be a '{'). Returns the FormattedValue node, which includes the expression, conversion character, and format_spec expression. Note that I don't do a perfect job here: I don't make sure that a closing brace doesn't match an opening paren, for example. It doesn't need to error on all invalid expressions, just correctly find the end of all valid ones. Any errors inside the expression will be caught when we parse it later. */ static int fstring_find_expr(const char **str, const char *end, int raw, int recurse_lvl, expr_ty *expression, struct compiling *c, const node *n) { /* Return -1 on error, else 0. */ const char *expr_start; const char *expr_end; expr_ty simple_expression; expr_ty format_spec = NULL; /* Optional format specifier. */ int conversion = -1; /* The conversion char. -1 if not specified. */ /* 0 if we're not in a string, else the quote char we're trying to match (single or double quote). 
*/ char quote_char = 0; /* If we're inside a string, 1=normal, 3=triple-quoted. */ int string_type = 0; /* Keep track of nesting level for braces/parens/brackets in expressions. */ Py_ssize_t nested_depth = 0; char parenstack[MAXLEVEL]; /* Can only nest one level deep. */ if (recurse_lvl >= 2) { ast_error(c, n, "f-string: expressions nested too deeply"); return -1; } /* The first char must be a left brace, or we wouldn't have gotten here. Skip over it. */ assert(**str == '{'); *str += 1; expr_start = *str; for (; *str < end; (*str)++) { char ch; /* Loop invariants. */ assert(nested_depth >= 0); assert(*str >= expr_start && *str < end); if (quote_char) assert(string_type == 1 || string_type == 3); else assert(string_type == 0); ch = **str; /* Nowhere inside an expression is a backslash allowed. */ if (ch == '\\') { /* Error: can't include a backslash character, inside parens or strings or not. */ ast_error(c, n, "f-string expression part " "cannot include a backslash"); return -1; } if (quote_char) { /* We're inside a string. See if we're at the end. */ /* This code needs to implement the same non-error logic as tok_get from tokenizer.c, at the letter_quote label. To actually share that code would be a nightmare. But, it's unlikely to change and is small, so duplicate it here. Note we don't need to catch all of the errors, since they'll be caught when parsing the expression. We just need to match the non-error cases. Thus we can ignore \n in single-quoted strings, for example. Or non-terminated strings. */ if (ch == quote_char) { /* Does this match the string_type (single or triple quoted)? */ if (string_type == 3) { if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { /* We're at the end of a triple quoted string. */ *str += 2; string_type = 0; quote_char = 0; continue; } } else { /* We're at the end of a normal string. */ quote_char = 0; string_type = 0; continue; } } } else if (ch == '\'' || ch == '"') { /* Is this a triple quoted string? 
*/ if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { string_type = 3; *str += 2; } else { /* Start of a normal string. */ string_type = 1; } /* Start looking for the end of the string. */ quote_char = ch; } else if (ch == '[' || ch == '{' || ch == '(') { if (nested_depth >= MAXLEVEL) { ast_error(c, n, "f-string: too many nested parenthesis"); return -1; } parenstack[nested_depth] = ch; nested_depth++; } else if (ch == '#') { /* Error: can't include a comment character, inside parens or not. */ ast_error(c, n, "f-string expression part cannot include '#'"); return -1; } else if (nested_depth == 0 && (ch == '!' || ch == ':' || ch == '}')) { /* First, test for the special case of "!=". Since '=' is not an allowed conversion character, nothing is lost in this test. */ if (ch == '!' && *str+1 < end && *(*str+1) == '=') { /* This isn't a conversion character, just continue. */ continue; } /* Normal way out of this loop. */ break; } else if (ch == ']' || ch == '}' || ch == ')') { if (!nested_depth) { ast_error(c, n, "f-string: unmatched '%c'", ch); return -1; } nested_depth--; int opening = parenstack[nested_depth]; if (!((opening == '(' && ch == ')') || (opening == '[' && ch == ']') || (opening == '{' && ch == '}'))) { ast_error(c, n, "f-string: closing parenthesis '%c' " "does not match opening parenthesis '%c'", ch, opening); return -1; } } else { /* Just consume this char and loop around. */ } } expr_end = *str; /* If we leave this loop in a string or with mismatched parens, we don't care. We'll get a syntax error when compiling the expression. 
But, we can produce a better error message, so let's just do that.*/ if (quote_char) { ast_error(c, n, "f-string: unterminated string"); return -1; } if (nested_depth) { int opening = parenstack[nested_depth - 1]; ast_error(c, n, "f-string: unmatched '%c'", opening); return -1; } if (*str >= end) goto unexpected_end_of_string; /* Compile the expression as soon as possible, so we show errors related to the expression before errors related to the conversion or format_spec. */ simple_expression = fstring_compile_expr(expr_start, expr_end, c, n); if (!simple_expression) return -1; /* Check for a conversion char, if present. */ if (**str == '!') { *str += 1; if (*str >= end) goto unexpected_end_of_string; conversion = **str; *str += 1; /* Validate the conversion. */ if (!(conversion == 's' || conversion == 'r' || conversion == 'a')) { ast_error(c, n, "f-string: invalid conversion character: " "expected 's', 'r', or 'a'"); return -1; } } /* Check for the format spec, if present. */ if (*str >= end) goto unexpected_end_of_string; if (**str == ':') { *str += 1; if (*str >= end) goto unexpected_end_of_string; /* Parse the format spec. */ format_spec = fstring_parse(str, end, raw, recurse_lvl+1, c, n); if (!format_spec) return -1; } if (*str >= end || **str != '}') goto unexpected_end_of_string; /* We're at a right brace. Consume it. */ assert(*str < end); assert(**str == '}'); *str += 1; /* And now create the FormattedValue node that represents this entire expression with the conversion and format spec. */ *expression = FormattedValue(simple_expression, conversion, format_spec, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!*expression) return -1; return 0; unexpected_end_of_string: ast_error(c, n, "f-string: expecting '}'"); return -1; } /* Return -1 on error. Return 0 if we have a literal (possible zero length) and an expression (zero length if at the end of the string. 
Return 1 if we have a literal, but no expression, and we want the
   caller to call us again. This is used to deal with doubled
   braces.

   When called multiple times on the string 'a{{b{0}c', this function
   will return:

   1. the literal 'a{' with no expression, and a return value
      of 1. Despite the fact that there's no expression, the return
      value of 1 means we're not finished yet.

   2. the literal 'b' and the expression '0', with a return value of
      0. The fact that there's an expression means we're not finished.

   3. literal 'c' with no expression and a return value of 0. The
      combination of the return value of 0 with no expression means
      we're finished.
*/
static int
fstring_find_literal_and_expr(const char **str, const char *end, int raw,
                              int recurse_lvl, PyObject **literal,
                              expr_ty *expression,
                              struct compiling *c, const node *n)
{
    int result;

    /* Caller must pass in empty output slots; both are filled here. */
    assert(*literal == NULL && *expression == NULL);

    /* Get any literal string. */
    result = fstring_find_literal(str, end, raw, literal, recurse_lvl, c, n);
    if (result < 0)
        goto error;

    assert(result == 0 || result == 1);

    if (result == 1)
        /* We have a literal, but don't look at the expression. */
        return 1;

    if (*str >= end || **str == '}')
        /* We're at the end of the string or the end of a nested
           f-string: no expression. The top-level error case where we
           expect to be at the end of the string but we're at a '}' is
           handled later. */
        return 0;

    /* We must now be the start of an expression, on a '{'. */
    assert(**str == '{');

    /* On success *expression is a FormattedValue node; on failure the
       partially-built literal is released below. */
    if (fstring_find_expr(str, end, raw, recurse_lvl, expression, c, n) < 0)
        goto error;

    return 0;

error:
    Py_CLEAR(*literal);
    return -1;
}

/* Number of expr_ty slots kept inline (stack/struct storage) before
   ExprList falls back to heap allocation. */
#define EXPRLIST_N_CACHED  64

typedef struct {
    /* Incrementally build an array of expr_ty, so be used in an
       asdl_seq. Cache some small but reasonably sized number of
       expr_ty's, and then after that start dynamically allocating,
       doubling the number allocated each time. Note that the f-string
       f'{0}a{1}' contains 3 expr_ty's: 2 FormattedValue's, and one
       Constant for the literal 'a'.
 So you add expr_ty's about twice as fast as you add expressions in an
   f-string. */

    Py_ssize_t allocated;  /* Number we've allocated. */
    Py_ssize_t size;       /* Number we've used. */
    expr_ty    *p;         /* Pointer to the memory we're actually
                              using. Will point to 'data' until we
                              start dynamically allocating. */
    expr_ty    data[EXPRLIST_N_CACHED];
} ExprList;

#ifdef NDEBUG
#define ExprList_check_invariants(l)
#else
static void
ExprList_check_invariants(ExprList *l)
{
    /* Check our invariants. Make sure this object is "live", and
       hasn't been deallocated. */
    assert(l->size >= 0);
    assert(l->p != NULL);
    if (l->size <= EXPRLIST_N_CACHED)
        assert(l->data == l->p);
}
#endif

/* Initialize an ExprList to use its inline cache; must be paired with
   ExprList_Dealloc (or ExprList_Finish, which deallocs). */
static void
ExprList_Init(ExprList *l)
{
    l->allocated = EXPRLIST_N_CACHED;
    l->size = 0;

    /* Until we start allocating dynamically, p points to data. */
    l->p = l->data;

    ExprList_check_invariants(l);
}

/* Append exp to the list, growing the backing store by doubling when
   full. Returns 0 on success, -1 on allocation failure.
   NOTE(review): on realloc failure l->p is freed and set to NULL, but
   ExprList_check_invariants asserts l->p != NULL, so a later
   ExprList_Dealloc on the failed list would trip that assert in debug
   builds — confirm all callers bail out immediately on -1. */
static int
ExprList_Append(ExprList *l, expr_ty exp)
{
    ExprList_check_invariants(l);
    if (l->size >= l->allocated) {
        /* We need to alloc (or realloc) the memory. */
        Py_ssize_t new_size = l->allocated * 2;

        /* See if we've ever allocated anything dynamically. */
        if (l->p == l->data) {
            Py_ssize_t i;
            /* We're still using the cached data. Switch to
               alloc-ing. */
            l->p = PyMem_RawMalloc(sizeof(expr_ty) * new_size);
            if (!l->p)
                return -1;
            /* Copy the cached data into the new buffer. */
            for (i = 0; i < l->size; i++)
                l->p[i] = l->data[i];
        } else {
            /* Just realloc. */
            expr_ty *tmp = PyMem_RawRealloc(l->p, sizeof(expr_ty) * new_size);
            if (!tmp) {
                /* The old block is still valid on realloc failure; free
                   it here so the list owns no memory afterwards. */
                PyMem_RawFree(l->p);
                l->p = NULL;
                return -1;
            }
            l->p = tmp;
        }

        l->allocated = new_size;
        /* We only grow when size == allocated, so doubling keeps this. */
        assert(l->allocated == 2 * l->size);
    }

    l->p[l->size++] = exp;

    ExprList_check_invariants(l);
    return 0;
}

/* Release any heap storage owned by the list and mark it dead
   (size = -1) so reuse is caught by the invariant checks. */
static void
ExprList_Dealloc(ExprList *l)
{
    ExprList_check_invariants(l);

    /* If there's been an error, or we've never dynamically allocated,
       do nothing. */
    if (!l->p || l->p == l->data) {
        /* Do nothing. */
    } else {
        /* We have dynamically allocated. Free the memory.
*/ PyMem_RawFree(l->p); } l->p = NULL; l->size = -1; } static asdl_seq * ExprList_Finish(ExprList *l, PyArena *arena) { asdl_seq *seq; ExprList_check_invariants(l); /* Allocate the asdl_seq and copy the expressions in to it. */ seq = _Py_asdl_seq_new(l->size, arena); if (seq) { Py_ssize_t i; for (i = 0; i < l->size; i++) asdl_seq_SET(seq, i, l->p[i]); } ExprList_Dealloc(l); return seq; } /* The FstringParser is designed to add a mix of strings and f-strings, and concat them together as needed. Ultimately, it generates an expr_ty. */ typedef struct { PyObject *last_str; ExprList expr_list; int fmode; } FstringParser; #ifdef NDEBUG #define FstringParser_check_invariants(state) #else static void FstringParser_check_invariants(FstringParser *state) { if (state->last_str) assert(PyUnicode_CheckExact(state->last_str)); ExprList_check_invariants(&state->expr_list); } #endif static void FstringParser_Init(FstringParser *state) { state->last_str = NULL; state->fmode = 0; ExprList_Init(&state->expr_list); FstringParser_check_invariants(state); } static void FstringParser_Dealloc(FstringParser *state) { FstringParser_check_invariants(state); Py_XDECREF(state->last_str); ExprList_Dealloc(&state->expr_list); } /* Make a Constant node, but decref the PyUnicode object being added. */ static expr_ty make_str_node_and_del(PyObject **str, struct compiling *c, const node* n) { PyObject *s = *str; *str = NULL; assert(PyUnicode_CheckExact(s)); if (PyArena_AddPyObject(c->c_arena, s) < 0) { Py_DECREF(s); return NULL; } return Constant(s, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* Add a non-f-string (that is, a regular literal string). str is decref'd. 
 */
/* Concatenate str onto the parser's pending literal. Always consumes
   (decrefs) str, even on the zero-length fast path. Returns 0 on
   success, -1 on error. */
static int
FstringParser_ConcatAndDel(FstringParser *state, PyObject *str)
{
    FstringParser_check_invariants(state);
    assert(PyUnicode_CheckExact(str));

    /* Zero-length strings contribute nothing; drop them early. */
    if (PyUnicode_GET_LENGTH(str) == 0) {
        Py_DECREF(str);
        return 0;
    }

    if (!state->last_str) {
        /* We didn't have a string before, so just remember this one. */
        state->last_str = str;
    } else {
        /* Concatenate this with the previous string. */
        /* PyUnicode_AppendAndDel steals str and replaces last_str;
           last_str becomes NULL on failure. */
        PyUnicode_AppendAndDel(&state->last_str, str);
        if (!state->last_str)
            return -1;
    }
    FstringParser_check_invariants(state);
    return 0;
}

/* Parse an f-string. The f-string is in *str to end, with no
   'f' or quotes. */
static int
FstringParser_ConcatFstring(FstringParser *state, const char **str,
                            const char *end, int raw, int recurse_lvl,
                            struct compiling *c, const node *n)
{
    FstringParser_check_invariants(state);
    /* Once any f-string is concatenated, the final result must be a
       JoinedStr (see FstringParser_Finish). */
    state->fmode = 1;

    /* Parse the f-string. */
    while (1) {
        PyObject *literal = NULL;
        expr_ty expression = NULL;

        /* If there's a zero length literal in front of the
           expression, literal will be NULL. If we're at the end of
           the f-string, expression will be NULL (unless result == 1,
           see below). */
        int result = fstring_find_literal_and_expr(str, end, raw, recurse_lvl,
                                                   &literal, &expression,
                                                   c, n);
        if (result < 0)
            return -1;

        /* Add the literal, if any. */
        if (!literal) {
            /* Do nothing. Just leave last_str alone (and possibly
               NULL). */
        } else if (!state->last_str) {
            /*  Note that the literal can be zero length, if the
                input string is "\\\n" or "\\\r", among others. */
            state->last_str = literal;
            literal = NULL;
        } else {
            /* We have a literal, concatenate it. */
            assert(PyUnicode_GET_LENGTH(literal) != 0);
            if (FstringParser_ConcatAndDel(state, literal) < 0)
                return -1;
            literal = NULL;
        }

        /* We've dealt with the literal now. It can't be leaked on further
           errors. */
        assert(literal == NULL);

        /* See if we should just loop around to get the next literal
           and expression, while ignoring the expression this
           time. This is used for un-doubling braces, as an
           optimization.
*/ if (result == 1) continue; if (!expression) /* We're done with this f-string. */ break; /* We know we have an expression. Convert any existing string to a Constant node. */ if (!state->last_str) { /* Do nothing. No previous literal. */ } else { /* Convert the existing last_str literal to a Constant node. */ expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) return -1; } if (ExprList_Append(&state->expr_list, expression) < 0) return -1; } /* If recurse_lvl is zero, then we must be at the end of the string. Otherwise, we must be at a right brace. */ if (recurse_lvl == 0 && *str < end-1) { ast_error(c, n, "f-string: unexpected end of string"); return -1; } if (recurse_lvl != 0 && **str != '}') { ast_error(c, n, "f-string: expecting '}'"); return -1; } FstringParser_check_invariants(state); return 0; } /* Convert the partial state reflected in last_str and expr_list to an expr_ty. The expr_ty can be a Constant, or a JoinedStr. */ static expr_ty FstringParser_Finish(FstringParser *state, struct compiling *c, const node *n) { asdl_seq *seq; FstringParser_check_invariants(state); /* If we're just a constant string with no expressions, return that. */ if (!state->fmode) { assert(!state->expr_list.size); if (!state->last_str) { /* Create a zero length string. */ state->last_str = PyUnicode_FromStringAndSize(NULL, 0); if (!state->last_str) goto error; } return make_str_node_and_del(&state->last_str, c, n); } /* Create a Constant node out of last_str, if needed. It will be the last node in our expression list. */ if (state->last_str) { expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) goto error; } /* This has already been freed. 
*/ assert(state->last_str == NULL); seq = ExprList_Finish(&state->expr_list, c->c_arena); if (!seq) goto error; return JoinedStr(seq, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); error: FstringParser_Dealloc(state); return NULL; } /* Given an f-string (with no 'f' or quotes) that's in *str and ends at end, parse it into an expr_ty. Return NULL on error. Adjust str to point past the parsed portion. */ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n) { FstringParser state; FstringParser_Init(&state); if (FstringParser_ConcatFstring(&state, str, end, raw, recurse_lvl, c, n) < 0) { FstringParser_Dealloc(&state); return NULL; } return FstringParser_Finish(&state, c, n); } /* n is a Python string literal, including the bracketing quote characters, and r, b, u, &/or f prefixes (if any), and embedded escape sequences (if any). parsestr parses it, and sets *result to decoded Python string object. If the string is an f-string, set *fstr and *fstrlen to the unparsed string object. Return 0 if no errors occurred. */ static int parsestr(struct compiling *c, const node *n, int *bytesmode, int *rawmode, PyObject **result, const char **fstr, Py_ssize_t *fstrlen) { size_t len; const char *s = STR(n); int quote = Py_CHARMASK(*s); int fmode = 0; *bytesmode = 0; *rawmode = 0; *result = NULL; *fstr = NULL; if (Py_ISALPHA(quote)) { while (!*bytesmode || !*rawmode) { if (quote == 'b' || quote == 'B') { quote = *++s; *bytesmode = 1; } else if (quote == 'u' || quote == 'U') { quote = *++s; } else if (quote == 'r' || quote == 'R') { quote = *++s; *rawmode = 1; } else if (quote == 'f' || quote == 'F') { quote = *++s; fmode = 1; } else { break; } } } if (fmode && *bytesmode) { PyErr_BadInternalCall(); return -1; } if (quote != '\'' && quote != '\"') { PyErr_BadInternalCall(); return -1; } /* Skip the leading quote char. 
*/ s++; len = strlen(s); if (len > INT_MAX) { PyErr_SetString(PyExc_OverflowError, "string to parse is too long"); return -1; } if (s[--len] != quote) { /* Last quote char must match the first. */ PyErr_BadInternalCall(); return -1; } if (len >= 4 && s[0] == quote && s[1] == quote) { /* A triple quoted string. We've already skipped one quote at the start and one at the end of the string. Now skip the two at the start. */ s += 2; len -= 2; /* And check that the last two match. */ if (s[--len] != quote || s[--len] != quote) { PyErr_BadInternalCall(); return -1; } } if (fmode) { /* Just return the bytes. The caller will parse the resulting string. */ *fstr = s; *fstrlen = len; return 0; } /* Not an f-string. */ /* Avoid invoking escape decoding routines if possible. */ *rawmode = *rawmode || strchr(s, '\\') == NULL; if (*bytesmode) { /* Disallow non-ASCII characters. */ const char *ch; for (ch = s; *ch; ch++) { if (Py_CHARMASK(*ch) >= 0x80) { ast_error(c, n, "bytes can only contain ASCII " "literal characters."); return -1; } } if (*rawmode) *result = PyBytes_FromStringAndSize(s, len); else *result = decode_bytes_with_escapes(c, n, s, len); } else { if (*rawmode) *result = PyUnicode_DecodeUTF8Stateful(s, len, NULL, NULL); else *result = decode_unicode_with_escapes(c, n, s, len); } return *result == NULL ? -1 : 0; } /* Accepts a STRING+ atom, and produces an expr_ty node. Run through each STRING atom, and process it as needed. For bytes, just concatenate them together, and the result will be a Constant node. For normal strings and f-strings, concatenate them together. The result will be a Constant node if there were no f-strings; a FormattedValue node if there's just an f-string (with no leading or trailing literals), or a JoinedStr node if there are multiple f-strings or any literals involved. 
*/ static expr_ty parsestrplus(struct compiling *c, const node *n) { int bytesmode = 0; PyObject *bytes_str = NULL; int i; FstringParser state; FstringParser_Init(&state); for (i = 0; i < NCH(n); i++) { int this_bytesmode; int this_rawmode; PyObject *s; const char *fstr; Py_ssize_t fstrlen = -1; /* Silence a compiler warning. */ REQ(CHILD(n, i), STRING); if (parsestr(c, CHILD(n, i), &this_bytesmode, &this_rawmode, &s, &fstr, &fstrlen) != 0) goto error; /* Check that we're not mixing bytes with unicode. */ if (i != 0 && bytesmode != this_bytesmode) { ast_error(c, n, "cannot mix bytes and nonbytes literals"); /* s is NULL if the current string part is an f-string. */ Py_XDECREF(s); goto error; } bytesmode = this_bytesmode; if (fstr != NULL) { int result; assert(s == NULL && !bytesmode); /* This is an f-string. Parse and concatenate it. */ result = FstringParser_ConcatFstring(&state, &fstr, fstr+fstrlen, this_rawmode, 0, c, n); if (result < 0) goto error; } else { /* A string or byte string. */ assert(s != NULL && fstr == NULL); assert(bytesmode ? PyBytes_CheckExact(s) : PyUnicode_CheckExact(s)); if (bytesmode) { /* For bytes, concat as we go. */ if (i == 0) { /* First time, just remember this value. */ bytes_str = s; } else { PyBytes_ConcatAndDel(&bytes_str, s); if (!bytes_str) goto error; } } else { /* This is a regular string. Concatenate it. */ if (FstringParser_ConcatAndDel(&state, s) < 0) goto error; } } } if (bytesmode) { /* Just return the bytes object and we're done. */ if (PyArena_AddPyObject(c->c_arena, bytes_str) < 0) goto error; return Constant(bytes_str, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* We're not a bytes string, bytes_str should never have been set. 
 */
    assert(bytes_str == NULL);
    return FstringParser_Finish(&state, c, n);

error:
    Py_XDECREF(bytes_str);
    FstringParser_Dealloc(&state);
    return NULL;
}

/* Return the docstring of a statement body: the value of a leading
   Expr statement whose value is an exact str Constant, else NULL.
   Returns a borrowed reference (the value stays owned by the AST);
   never sets an exception. */
PyObject *
_PyAST_GetDocString(asdl_seq *body)
{
    if (!asdl_seq_LEN(body)) {
        return NULL;
    }
    stmt_ty st = (stmt_ty)asdl_seq_GET(body, 0);
    if (st->kind != Expr_kind) {
        return NULL;
    }
    expr_ty e = st->v.Expr.value;
    /* Only an exact unicode Constant counts: bytes, numbers and
       str subclasses are not docstrings. */
    if (e->kind == Constant_kind && PyUnicode_CheckExact(e->v.Constant.value)) {
        return e->v.Constant.value;
    }
    return NULL;
}
/*
 * This file includes functions to transform a concrete syntax tree (CST) to
 * an abstract syntax tree (AST). The main function is PyAST_FromNode().
 *
 */
#include "Python.h"
#include "Python-ast.h"
#include "node.h"
#include "ast.h"
#include "token.h"
#include "pythonrun.h"

#include <assert.h>
#include <stdbool.h>

#define MAXLEVEL 200 /* Max parentheses level */

/* Forward declarations for the mutually recursive AST validators.
   Each returns 1 if valid, 0 (with an exception set) otherwise. */
static int validate_stmts(asdl_seq *);
static int validate_exprs(asdl_seq *, expr_context_ty, int);
static int validate_nonempty_seq(asdl_seq *, const char *, const char *);
static int validate_stmt(stmt_ty);
static int validate_expr(expr_ty, expr_context_ty);

/* Validate a (non-empty) sequence of comprehension clauses: each
   target is a Store, each iterable and every 'if' condition a Load. */
static int
validate_comprehension(asdl_seq *gens)
{
    Py_ssize_t i;
    if (!asdl_seq_LEN(gens)) {
        PyErr_SetString(PyExc_ValueError, "comprehension with no generators");
        return 0;
    }
    for (i = 0; i < asdl_seq_LEN(gens); i++) {
        comprehension_ty comp = asdl_seq_GET(gens, i);
        if (!validate_expr(comp->target, Store) ||
            !validate_expr(comp->iter, Load) ||
            !validate_exprs(comp->ifs, Load, 0))
            return 0;
    }
    return 1;
}

/* Validate a subscript slice node. Slice bounds are optional;
   ExtSlice dims are validated recursively and must be non-empty. */
static int
validate_slice(slice_ty slice)
{
    switch (slice->kind) {
    case Slice_kind:
        return (!slice->v.Slice.lower || validate_expr(slice->v.Slice.lower, Load)) &&
            (!slice->v.Slice.upper || validate_expr(slice->v.Slice.upper, Load)) &&
            (!slice->v.Slice.step || validate_expr(slice->v.Slice.step, Load));
    case ExtSlice_kind: {
        Py_ssize_t i;
        if (!validate_nonempty_seq(slice->v.ExtSlice.dims, "dims", "ExtSlice"))
            return 0;
        for (i = 0; i < asdl_seq_LEN(slice->v.ExtSlice.dims); i++)
            if (!validate_slice(asdl_seq_GET(slice->v.ExtSlice.dims, i)))
                return 0;
        return 1;
    }
    case Index_kind:
        return validate_expr(slice->v.Index.value, Load);
    default:
        PyErr_SetString(PyExc_SystemError, "unknown slice node");
        return 0;
    }
}

/* Validate every keyword argument's value expression (names are not
   checked here). */
static int
validate_keywords(asdl_seq *keywords)
{
    Py_ssize_t i;
    for (i = 0; i < asdl_seq_LEN(keywords); i++)
        if (!validate_expr(((keyword_ty)asdl_seq_GET(keywords, i))->value, Load))
            return 0;
    return 1;
}

/* Validate the annotations of a plain argument list. */
static int
validate_args(asdl_seq *args)
{
    Py_ssize_t i;
    for (i = 0; i < asdl_seq_LEN(args); i++) {
        arg_ty arg = asdl_seq_GET(args, i);
        /* Only the (optional) annotation needs validation. */
        if (arg->annotation && !validate_expr(arg->annotation, Load))
            return 0;
    }
    return 1;
}

/* Human-readable name of an expression context, for error messages. */
static const char *
expr_context_name(expr_context_ty ctx)
{
    switch (ctx) {
    case Load:
        return "Load";
    case Store:
        return "Store";
    case NamedStore:
        return "NamedStore";
    case Del:
        return "Del";
    case AugLoad:
        return "AugLoad";
    case AugStore:
        return "AugStore";
    case Param:
        return "Param";
    default:
        Py_UNREACHABLE();
    }
}

/* Validate a full arguments node: positional/keyword-only args and
   their annotations, vararg/kwarg annotations, and that defaults line
   up with the argument lists. */
static int
validate_arguments(arguments_ty args)
{
    if (!validate_args(args->args))
        return 0;
    if (args->vararg && args->vararg->annotation
        && !validate_expr(args->vararg->annotation, Load)) {
        return 0;
    }
    if (!validate_args(args->kwonlyargs))
        return 0;
    if (args->kwarg && args->kwarg->annotation
        && !validate_expr(args->kwarg->annotation, Load)) {
        return 0;
    }
    /* Positional defaults apply to a suffix of args, so there can't be
       more defaults than args. */
    if (asdl_seq_LEN(args->defaults) > asdl_seq_LEN(args->args)) {
        PyErr_SetString(PyExc_ValueError, "more positional defaults than args on arguments");
        return 0;
    }
    /* kw_defaults is parallel to kwonlyargs (entries may be NULL). */
    if (asdl_seq_LEN(args->kw_defaults) != asdl_seq_LEN(args->kwonlyargs)) {
        PyErr_SetString(PyExc_ValueError, "length of kwonlyargs is not the same as "
                        "kw_defaults on arguments");
        return 0;
    }
    return validate_exprs(args->defaults, Load, 0) &&
        validate_exprs(args->kw_defaults, Load, 1);
}

/* Return 1 if value is a legal Constant payload: None/Ellipsis, an
   exact int/float/complex/bool/str/bytes, or a tuple/frozenset whose
   elements are (recursively) legal constants. Returns 0 otherwise;
   an exception is set only if iteration itself fails. */
static int
validate_constant(PyObject *value)
{
    if (value == Py_None || value == Py_Ellipsis)
        return 1;

    if (PyLong_CheckExact(value)
            || PyFloat_CheckExact(value)
            || PyComplex_CheckExact(value)
            || PyBool_Check(value)
            || PyUnicode_CheckExact(value)
            || PyBytes_CheckExact(value))
        return 1;

    if (PyTuple_CheckExact(value) || PyFrozenSet_CheckExact(value)) {
        PyObject *it;

        it = PyObject_GetIter(value);
        if (it == NULL)
            return 0;

        while (1) {
            PyObject *item = PyIter_Next(it);
            if (item == NULL) {
                /* NULL from PyIter_Next means either exhaustion or an
                   error; distinguish via PyErr_Occurred. */
                if (PyErr_Occurred()) {
                    Py_DECREF(it);
                    return 0;
                }
                break;
            }

            if (!validate_constant(item)) {
                Py_DECREF(it);
                Py_DECREF(item);
                return 0;
            }
            Py_DECREF(item);
        }

        Py_DECREF(it);
        return 1;
    }

    return 0;
}

static int
validate_expr(expr_ty exp, expr_context_ty ctx) { int check_ctx = 1; expr_context_ty actual_ctx; /* First check expression context. */ switch (exp->kind) { case Attribute_kind: actual_ctx = exp->v.Attribute.ctx; break; case Subscript_kind: actual_ctx = exp->v.Subscript.ctx; break; case Starred_kind: actual_ctx = exp->v.Starred.ctx; break; case Name_kind: actual_ctx = exp->v.Name.ctx; break; case List_kind: actual_ctx = exp->v.List.ctx; break; case Tuple_kind: actual_ctx = exp->v.Tuple.ctx; break; default: if (ctx != Load) { PyErr_Format(PyExc_ValueError, "expression which can't be " "assigned to in %s context", expr_context_name(ctx)); return 0; } check_ctx = 0; /* set actual_ctx to prevent gcc warning */ actual_ctx = 0; } if (check_ctx && actual_ctx != ctx) { PyErr_Format(PyExc_ValueError, "expression must have %s context but has %s instead", expr_context_name(ctx), expr_context_name(actual_ctx)); return 0; } /* Now validate expression. */ switch (exp->kind) { case BoolOp_kind: if (asdl_seq_LEN(exp->v.BoolOp.values) < 2) { PyErr_SetString(PyExc_ValueError, "BoolOp with less than 2 values"); return 0; } return validate_exprs(exp->v.BoolOp.values, Load, 0); case BinOp_kind: return validate_expr(exp->v.BinOp.left, Load) && validate_expr(exp->v.BinOp.right, Load); case UnaryOp_kind: return validate_expr(exp->v.UnaryOp.operand, Load); case Lambda_kind: return validate_arguments(exp->v.Lambda.args) && validate_expr(exp->v.Lambda.body, Load); case IfExp_kind: return validate_expr(exp->v.IfExp.test, Load) && validate_expr(exp->v.IfExp.body, Load) && validate_expr(exp->v.IfExp.orelse, Load); case Dict_kind: if (asdl_seq_LEN(exp->v.Dict.keys) != asdl_seq_LEN(exp->v.Dict.values)) { PyErr_SetString(PyExc_ValueError, "Dict doesn't have the same number of keys as values"); return 0; } /* null_ok=1 for keys expressions to allow dict unpacking to work in dict literals, i.e. 
``{**{a:b}}`` */ return validate_exprs(exp->v.Dict.keys, Load, /*null_ok=*/ 1) && validate_exprs(exp->v.Dict.values, Load, /*null_ok=*/ 0); case Set_kind: return validate_exprs(exp->v.Set.elts, Load, 0); #define COMP(NAME) \ case NAME ## _kind: \ return validate_comprehension(exp->v.NAME.generators) && \ validate_expr(exp->v.NAME.elt, Load); COMP(ListComp) COMP(SetComp) COMP(GeneratorExp) #undef COMP case DictComp_kind: return validate_comprehension(exp->v.DictComp.generators) && validate_expr(exp->v.DictComp.key, Load) && validate_expr(exp->v.DictComp.value, Load); case Yield_kind: return !exp->v.Yield.value || validate_expr(exp->v.Yield.value, Load); case YieldFrom_kind: return validate_expr(exp->v.YieldFrom.value, Load); case Await_kind: return validate_expr(exp->v.Await.value, Load); case Compare_kind: if (!asdl_seq_LEN(exp->v.Compare.comparators)) { PyErr_SetString(PyExc_ValueError, "Compare with no comparators"); return 0; } if (asdl_seq_LEN(exp->v.Compare.comparators) != asdl_seq_LEN(exp->v.Compare.ops)) { PyErr_SetString(PyExc_ValueError, "Compare has a different number " "of comparators and operands"); return 0; } return validate_exprs(exp->v.Compare.comparators, Load, 0) && validate_expr(exp->v.Compare.left, Load); case Call_kind: return validate_expr(exp->v.Call.func, Load) && validate_exprs(exp->v.Call.args, Load, 0) && validate_keywords(exp->v.Call.keywords); case Constant_kind: if (!validate_constant(exp->v.Constant.value)) { PyErr_Format(PyExc_TypeError, "got an invalid type in Constant: %s", Py_TYPE(exp->v.Constant.value)->tp_name); return 0; } return 1; case JoinedStr_kind: return validate_exprs(exp->v.JoinedStr.values, Load, 0); case FormattedValue_kind: if (validate_expr(exp->v.FormattedValue.value, Load) == 0) return 0; if (exp->v.FormattedValue.format_spec) return validate_expr(exp->v.FormattedValue.format_spec, Load); return 1; case Attribute_kind: return validate_expr(exp->v.Attribute.value, Load); case Subscript_kind: return 
validate_slice(exp->v.Subscript.slice) && validate_expr(exp->v.Subscript.value, Load); case Starred_kind: return validate_expr(exp->v.Starred.value, ctx); case List_kind: return validate_exprs(exp->v.List.elts, ctx, 0); case Tuple_kind: return validate_exprs(exp->v.Tuple.elts, ctx, 0); /* This last case doesn't have any checking. */ case Name_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected expression"); return 0; } } static int validate_nonempty_seq(asdl_seq *seq, const char *what, const char *owner) { if (asdl_seq_LEN(seq)) return 1; PyErr_Format(PyExc_ValueError, "empty %s on %s", what, owner); return 0; } static int validate_assignlist(asdl_seq *targets, expr_context_ty ctx) { return validate_nonempty_seq(targets, "targets", ctx == Del ? "Delete" : "Assign") && validate_exprs(targets, ctx, 0); } static int validate_body(asdl_seq *body, const char *owner) { return validate_nonempty_seq(body, "body", owner) && validate_stmts(body); } static int validate_stmt(stmt_ty stmt) { Py_ssize_t i; switch (stmt->kind) { case FunctionDef_kind: return validate_body(stmt->v.FunctionDef.body, "FunctionDef") && validate_arguments(stmt->v.FunctionDef.args) && validate_exprs(stmt->v.FunctionDef.decorator_list, Load, 0) && (!stmt->v.FunctionDef.returns || validate_expr(stmt->v.FunctionDef.returns, Load)); case ClassDef_kind: return validate_body(stmt->v.ClassDef.body, "ClassDef") && validate_exprs(stmt->v.ClassDef.bases, Load, 0) && validate_keywords(stmt->v.ClassDef.keywords) && validate_exprs(stmt->v.ClassDef.decorator_list, Load, 0); case Return_kind: return !stmt->v.Return.value || validate_expr(stmt->v.Return.value, Load); case Delete_kind: return validate_assignlist(stmt->v.Delete.targets, Del); case Assign_kind: return validate_assignlist(stmt->v.Assign.targets, Store) && validate_expr(stmt->v.Assign.value, Load); case AugAssign_kind: return validate_expr(stmt->v.AugAssign.target, Store) && validate_expr(stmt->v.AugAssign.value, Load); case 
AnnAssign_kind: if (stmt->v.AnnAssign.target->kind != Name_kind && stmt->v.AnnAssign.simple) { PyErr_SetString(PyExc_TypeError, "AnnAssign with simple non-Name target"); return 0; } return validate_expr(stmt->v.AnnAssign.target, Store) && (!stmt->v.AnnAssign.value || validate_expr(stmt->v.AnnAssign.value, Load)) && validate_expr(stmt->v.AnnAssign.annotation, Load); case For_kind: return validate_expr(stmt->v.For.target, Store) && validate_expr(stmt->v.For.iter, Load) && validate_body(stmt->v.For.body, "For") && validate_stmts(stmt->v.For.orelse); case AsyncFor_kind: return validate_expr(stmt->v.AsyncFor.target, Store) && validate_expr(stmt->v.AsyncFor.iter, Load) && validate_body(stmt->v.AsyncFor.body, "AsyncFor") && validate_stmts(stmt->v.AsyncFor.orelse); case While_kind: return validate_expr(stmt->v.While.test, Load) && validate_body(stmt->v.While.body, "While") && validate_stmts(stmt->v.While.orelse); case If_kind: return validate_expr(stmt->v.If.test, Load) && validate_body(stmt->v.If.body, "If") && validate_stmts(stmt->v.If.orelse); case With_kind: if (!validate_nonempty_seq(stmt->v.With.items, "items", "With")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.With.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.With.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.With.body, "With"); case AsyncWith_kind: if (!validate_nonempty_seq(stmt->v.AsyncWith.items, "items", "AsyncWith")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.AsyncWith.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.AsyncWith.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.AsyncWith.body, "AsyncWith"); case Raise_kind: if (stmt->v.Raise.exc) { return validate_expr(stmt->v.Raise.exc, Load) && (!stmt->v.Raise.cause || 
validate_expr(stmt->v.Raise.cause, Load)); } if (stmt->v.Raise.cause) { PyErr_SetString(PyExc_ValueError, "Raise with cause but no exception"); return 0; } return 1; case Try_kind: if (!validate_body(stmt->v.Try.body, "Try")) return 0; if (!asdl_seq_LEN(stmt->v.Try.handlers) && !asdl_seq_LEN(stmt->v.Try.finalbody)) { PyErr_SetString(PyExc_ValueError, "Try has neither except handlers nor finalbody"); return 0; } if (!asdl_seq_LEN(stmt->v.Try.handlers) && asdl_seq_LEN(stmt->v.Try.orelse)) { PyErr_SetString(PyExc_ValueError, "Try has orelse but no except handlers"); return 0; } for (i = 0; i < asdl_seq_LEN(stmt->v.Try.handlers); i++) { excepthandler_ty handler = asdl_seq_GET(stmt->v.Try.handlers, i); if ((handler->v.ExceptHandler.type && !validate_expr(handler->v.ExceptHandler.type, Load)) || !validate_body(handler->v.ExceptHandler.body, "ExceptHandler")) return 0; } return (!asdl_seq_LEN(stmt->v.Try.finalbody) || validate_stmts(stmt->v.Try.finalbody)) && (!asdl_seq_LEN(stmt->v.Try.orelse) || validate_stmts(stmt->v.Try.orelse)); case Assert_kind: return validate_expr(stmt->v.Assert.test, Load) && (!stmt->v.Assert.msg || validate_expr(stmt->v.Assert.msg, Load)); case Import_kind: return validate_nonempty_seq(stmt->v.Import.names, "names", "Import"); case ImportFrom_kind: if (stmt->v.ImportFrom.level < 0) { PyErr_SetString(PyExc_ValueError, "Negative ImportFrom level"); return 0; } return validate_nonempty_seq(stmt->v.ImportFrom.names, "names", "ImportFrom"); case Global_kind: return validate_nonempty_seq(stmt->v.Global.names, "names", "Global"); case Nonlocal_kind: return validate_nonempty_seq(stmt->v.Nonlocal.names, "names", "Nonlocal"); case Expr_kind: return validate_expr(stmt->v.Expr.value, Load); case AsyncFunctionDef_kind: return validate_body(stmt->v.AsyncFunctionDef.body, "AsyncFunctionDef") && validate_arguments(stmt->v.AsyncFunctionDef.args) && validate_exprs(stmt->v.AsyncFunctionDef.decorator_list, Load, 0) && (!stmt->v.AsyncFunctionDef.returns || 
validate_expr(stmt->v.AsyncFunctionDef.returns, Load)); case Pass_kind: case Break_kind: case Continue_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected statement"); return 0; } } static int validate_stmts(asdl_seq *seq) { Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(seq); i++) { stmt_ty stmt = asdl_seq_GET(seq, i); if (stmt) { if (!validate_stmt(stmt)) return 0; } else { PyErr_SetString(PyExc_ValueError, "None disallowed in statement list"); return 0; } } return 1; } static int validate_exprs(asdl_seq *exprs, expr_context_ty ctx, int null_ok) { Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(exprs); i++) { expr_ty expr = asdl_seq_GET(exprs, i); if (expr) { if (!validate_expr(expr, ctx)) return 0; } else if (!null_ok) { PyErr_SetString(PyExc_ValueError, "None disallowed in expression list"); return 0; } } return 1; } int PyAST_Validate(mod_ty mod) { int res = 0; switch (mod->kind) { case Module_kind: res = validate_stmts(mod->v.Module.body); break; case Interactive_kind: res = validate_stmts(mod->v.Interactive.body); break; case Expression_kind: res = validate_expr(mod->v.Expression.body, Load); break; case Suite_kind: PyErr_SetString(PyExc_ValueError, "Suite is not valid in the CPython compiler"); break; default: PyErr_SetString(PyExc_SystemError, "impossible module node"); res = 0; break; } return res; } /* This is done here, so defines like "test" don't interfere with AST use above. */ #include "grammar.h" #include "parsetok.h" #include "graminit.h" /* Data structure used internally */ struct compiling { PyArena *c_arena; /* Arena for allocating memory. */ PyObject *c_filename; /* filename */ PyObject *c_normalize; /* Normalization function from unicodedata. 
*/ }; static asdl_seq *seq_for_testlist(struct compiling *, const node *); static expr_ty ast_for_expr(struct compiling *, const node *); static stmt_ty ast_for_stmt(struct compiling *, const node *); static asdl_seq *ast_for_suite(struct compiling *c, const node *n); static asdl_seq *ast_for_exprlist(struct compiling *, const node *, expr_context_ty); static expr_ty ast_for_testlist(struct compiling *, const node *); static stmt_ty ast_for_classdef(struct compiling *, const node *, asdl_seq *); static stmt_ty ast_for_with_stmt(struct compiling *, const node *, bool); static stmt_ty ast_for_for_stmt(struct compiling *, const node *, bool); /* Note different signature for ast_for_call */ static expr_ty ast_for_call(struct compiling *, const node *, expr_ty, const node *, const node *); static PyObject *parsenumber(struct compiling *, const char *); static expr_ty parsestrplus(struct compiling *, const node *n); static void get_last_end_pos(asdl_seq *, int *, int *); #define COMP_GENEXP 0 #define COMP_LISTCOMP 1 #define COMP_SETCOMP 2 static int init_normalization(struct compiling *c) { PyObject *m = PyImport_ImportModuleNoBlock("unicodedata"); if (!m) return 0; c->c_normalize = PyObject_GetAttrString(m, "normalize"); Py_DECREF(m); if (!c->c_normalize) return 0; return 1; } static identifier new_identifier(const char *n, struct compiling *c) { PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL); if (!id) return NULL; /* PyUnicode_DecodeUTF8 should always return a ready string. */ assert(PyUnicode_IS_READY(id)); /* Check whether there are non-ASCII characters in the identifier; if so, normalize to NFKC. 
*/ if (!PyUnicode_IS_ASCII(id)) { PyObject *id2; _Py_IDENTIFIER(NFKC); if (!c->c_normalize && !init_normalization(c)) { Py_DECREF(id); return NULL; } PyObject *form = _PyUnicode_FromId(&PyId_NFKC); if (form == NULL) { Py_DECREF(id); return NULL; } PyObject *args[2] = {form, id}; id2 = _PyObject_FastCall(c->c_normalize, args, 2); Py_DECREF(id); if (!id2) return NULL; if (!PyUnicode_Check(id2)) { PyErr_Format(PyExc_TypeError, "unicodedata.normalize() must return a string, not " "%.200s", Py_TYPE(id2)->tp_name); Py_DECREF(id2); return NULL; } id = id2; } PyUnicode_InternInPlace(&id); if (PyArena_AddPyObject(c->c_arena, id) < 0) { Py_DECREF(id); return NULL; } return id; } #define NEW_IDENTIFIER(n) new_identifier(STR(n), c) static int ast_error(struct compiling *c, const node *n, const char *errmsg, ...) { PyObject *value, *errstr, *loc, *tmp; va_list va; va_start(va, errmsg); errstr = PyUnicode_FromFormatV(errmsg, va); va_end(va); if (!errstr) { return 0; } loc = PyErr_ProgramTextObject(c->c_filename, LINENO(n)); if (!loc) { Py_INCREF(Py_None); loc = Py_None; } tmp = Py_BuildValue("(OiiN)", c->c_filename, LINENO(n), n->n_col_offset + 1, loc); if (!tmp) { Py_DECREF(errstr); return 0; } value = PyTuple_Pack(2, errstr, tmp); Py_DECREF(errstr); Py_DECREF(tmp); if (value) { PyErr_SetObject(PyExc_SyntaxError, value); Py_DECREF(value); } return 0; } /* num_stmts() returns number of contained statements. Use this routine to determine how big a sequence is needed for the statements in a parse tree. Its raison d'etre is this bit of grammar: stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE A simple_stmt can contain multiple small_stmt elements joined by semicolons. If the arg is a simple_stmt, the number of small_stmt elements is returned. 
*/ static string new_type_comment(const char *s) { return PyUnicode_DecodeUTF8(s, strlen(s), NULL); } #define NEW_TYPE_COMMENT(n) new_type_comment(STR(n)) static int num_stmts(const node *n) { int i, l; node *ch; switch (TYPE(n)) { case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) return 0; else return num_stmts(CHILD(n, 0)); case file_input: l = 0; for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == stmt) l += num_stmts(ch); } return l; case stmt: return num_stmts(CHILD(n, 0)); case compound_stmt: return 1; case simple_stmt: return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */ case suite: case func_body_suite: /* func_body_suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ /* suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT */ if (NCH(n) == 1) return num_stmts(CHILD(n, 0)); else { i = 2; l = 0; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) l += num_stmts(CHILD(n, i)); return l; } default: { char buf[128]; sprintf(buf, "Non-statement found: %d %d", TYPE(n), NCH(n)); Py_FatalError(buf); } } Py_UNREACHABLE(); } /* Transform the CST rooted at node * to the appropriate AST */ mod_ty PyAST_FromNodeObject(const node *n, PyCompilerFlags *flags, PyObject *filename, PyArena *arena) { int i, j, k, num; asdl_seq *stmts = NULL; asdl_seq *type_ignores = NULL; stmt_ty s; node *ch; struct compiling c; mod_ty res = NULL; asdl_seq *argtypes = NULL; expr_ty ret, arg; c.c_arena = arena; /* borrowed reference */ c.c_filename = filename; c.c_normalize = NULL; if (TYPE(n) == encoding_decl) n = CHILD(n, 0); k = 0; switch (TYPE(n)) { case file_input: stmts = _Py_asdl_seq_new(num_stmts(n), arena); if (!stmts) goto out; for (i = 0; i < NCH(n) - 1; i++) { ch = CHILD(n, i); if (TYPE(ch) == NEWLINE) continue; REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { s = ast_for_stmt(&c, ch); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } else { ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < num; j++) { 
s = ast_for_stmt(&c, CHILD(ch, j * 2)); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } } } /* Type ignores are stored under the ENDMARKER in file_input. */ ch = CHILD(n, NCH(n) - 1); REQ(ch, ENDMARKER); num = NCH(ch); type_ignores = _Py_asdl_seq_new(num, arena); if (!type_ignores) goto out; for (i = 0; i < num; i++) { type_ignore_ty ti = TypeIgnore(LINENO(CHILD(ch, i)), arena); if (!ti) goto out; asdl_seq_SET(type_ignores, i, ti); } res = Module(stmts, type_ignores, arena); break; case eval_input: { expr_ty testlist_ast; /* XXX Why not comp_for here? */ testlist_ast = ast_for_testlist(&c, CHILD(n, 0)); if (!testlist_ast) goto out; res = Expression(testlist_ast, arena); break; } case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) { stmts = _Py_asdl_seq_new(1, arena); if (!stmts) goto out; asdl_seq_SET(stmts, 0, Pass(n->n_lineno, n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, arena)); if (!asdl_seq_GET(stmts, 0)) goto out; res = Interactive(stmts, arena); } else { n = CHILD(n, 0); num = num_stmts(n); stmts = _Py_asdl_seq_new(num, arena); if (!stmts) goto out; if (num == 1) { s = ast_for_stmt(&c, n); if (!s) goto out; asdl_seq_SET(stmts, 0, s); } else { /* Only a simple_stmt can contain multiple statements. 
*/ REQ(n, simple_stmt); for (i = 0; i < NCH(n); i += 2) { if (TYPE(CHILD(n, i)) == NEWLINE) break; s = ast_for_stmt(&c, CHILD(n, i)); if (!s) goto out; asdl_seq_SET(stmts, i / 2, s); } } res = Interactive(stmts, arena); } break; case func_type_input: n = CHILD(n, 0); REQ(n, func_type); if (TYPE(CHILD(n, 1)) == typelist) { ch = CHILD(n, 1); /* this is overly permissive -- we don't pay any attention to * stars on the args -- just parse them into an ordered list */ num = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) { num++; } } argtypes = _Py_asdl_seq_new(num, arena); if (!argtypes) goto out; j = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) { arg = ast_for_expr(&c, CHILD(ch, i)); if (!arg) goto out; asdl_seq_SET(argtypes, j++, arg); } } } else { argtypes = _Py_asdl_seq_new(0, arena); if (!argtypes) goto out; } ret = ast_for_expr(&c, CHILD(n, NCH(n) - 1)); if (!ret) goto out; res = FunctionType(argtypes, ret, arena); break; default: PyErr_Format(PyExc_SystemError, "invalid node %d for PyAST_FromNode", TYPE(n)); goto out; } out: if (c.c_normalize) { Py_DECREF(c.c_normalize); } return res; } mod_ty PyAST_FromNode(const node *n, PyCompilerFlags *flags, const char *filename_str, PyArena *arena) { mod_ty mod; PyObject *filename; filename = PyUnicode_DecodeFSDefault(filename_str); if (filename == NULL) return NULL; mod = PyAST_FromNodeObject(n, flags, filename, arena); Py_DECREF(filename); return mod; } /* Return the AST repr. of the operator represented as syntax (|, ^, etc.) 
*/ static operator_ty get_operator(const node *n) { switch (TYPE(n)) { case VBAR: return BitOr; case CIRCUMFLEX: return BitXor; case AMPER: return BitAnd; case LEFTSHIFT: return LShift; case RIGHTSHIFT: return RShift; case PLUS: return Add; case MINUS: return Sub; case STAR: return Mult; case AT: return MatMult; case SLASH: return Div; case DOUBLESLASH: return FloorDiv; case PERCENT: return Mod; default: return (operator_ty)0; } } static const char * const FORBIDDEN[] = { "None", "True", "False", "__debug__", NULL, }; static int forbidden_name(struct compiling *c, identifier name, const node *n, int full_checks) { assert(PyUnicode_Check(name)); const char * const *p = FORBIDDEN; if (!full_checks) { /* In most cases, the parser will protect True, False, and None from being assign to. */ p += 3; } for (; *p; p++) { if (_PyUnicode_EqualToASCIIString(name, *p)) { ast_error(c, n, "cannot assign to %U", name); return 1; } } return 0; } static expr_ty copy_location(expr_ty e, const node *n) { if (e) { e->lineno = LINENO(n); e->col_offset = n->n_col_offset; e->end_lineno = n->n_end_lineno; e->end_col_offset = n->n_end_col_offset; } return e; } /* Set the context ctx for expr_ty e, recursively traversing e. Only sets context for expr kinds that "can appear in assignment context" (according to ../Parser/Python.asdl). For other expr kinds, it sets an appropriate syntax error and returns false. */ static int set_context(struct compiling *c, expr_ty e, expr_context_ty ctx, const node *n) { asdl_seq *s = NULL; /* If a particular expression type can't be used for assign / delete, set expr_name to its name and an error message will be generated. */ const char* expr_name = NULL; /* The ast defines augmented store and load contexts, but the implementation here doesn't actually use them. The code may be a little more complex than necessary as a result. It also means that expressions in an augmented assignment have a Store context. 
Consider restructuring so that augmented assignment uses set_context(), too. */ assert(ctx != AugStore && ctx != AugLoad); switch (e->kind) { case Attribute_kind: if (ctx == NamedStore) { expr_name = "attribute"; break; } e->v.Attribute.ctx = ctx; if (ctx == Store && forbidden_name(c, e->v.Attribute.attr, n, 1)) return 0; break; case Subscript_kind: if (ctx == NamedStore) { expr_name = "subscript"; break; } e->v.Subscript.ctx = ctx; break; case Starred_kind: if (ctx == NamedStore) { expr_name = "starred"; break; } e->v.Starred.ctx = ctx; if (!set_context(c, e->v.Starred.value, ctx, n)) return 0; break; case Name_kind: if (ctx == Store) { if (forbidden_name(c, e->v.Name.id, n, 0)) return 0; /* forbidden_name() calls ast_error() */ } e->v.Name.ctx = ctx; break; case List_kind: if (ctx == NamedStore) { expr_name = "list"; break; } e->v.List.ctx = ctx; s = e->v.List.elts; break; case Tuple_kind: if (ctx == NamedStore) { expr_name = "tuple"; break; } e->v.Tuple.ctx = ctx; s = e->v.Tuple.elts; break; case Lambda_kind: expr_name = "lambda"; break; case Call_kind: expr_name = "function call"; break; case BoolOp_kind: case BinOp_kind: case UnaryOp_kind: expr_name = "operator"; break; case GeneratorExp_kind: expr_name = "generator expression"; break; case Yield_kind: case YieldFrom_kind: expr_name = "yield expression"; break; case Await_kind: expr_name = "await expression"; break; case ListComp_kind: expr_name = "list comprehension"; break; case SetComp_kind: expr_name = "set comprehension"; break; case DictComp_kind: expr_name = "dict comprehension"; break; case Dict_kind: expr_name = "dict display"; break; case Set_kind: expr_name = "set display"; break; case JoinedStr_kind: case FormattedValue_kind: expr_name = "f-string expression"; break; case Constant_kind: { PyObject *value = e->v.Constant.value; if (value == Py_None || value == Py_False || value == Py_True || value == Py_Ellipsis) { return ast_error(c, n, "cannot %s %R", ctx == Store ? 
"assign to" : "delete", value); } expr_name = "literal"; break; } case Compare_kind: expr_name = "comparison"; break; case IfExp_kind: expr_name = "conditional expression"; break; case NamedExpr_kind: expr_name = "named expression"; break; default: PyErr_Format(PyExc_SystemError, "unexpected expression in %sassignment %d (line %d)", ctx == NamedStore ? "named ": "", e->kind, e->lineno); return 0; } /* Check for error string set by switch */ if (expr_name) { if (ctx == NamedStore) { return ast_error(c, n, "cannot use named assignment with %s", expr_name); } else { return ast_error(c, n, "cannot %s %s", ctx == Store ? "assign to" : "delete", expr_name); } } /* If the LHS is a list or tuple, we need to set the assignment context for all the contained elements. */ if (s) { Py_ssize_t i; for (i = 0; i < asdl_seq_LEN(s); i++) { if (!set_context(c, (expr_ty)asdl_seq_GET(s, i), ctx, n)) return 0; } } return 1; } static operator_ty ast_for_augassign(struct compiling *c, const node *n) { REQ(n, augassign); n = CHILD(n, 0); switch (STR(n)[0]) { case '+': return Add; case '-': return Sub; case '/': if (STR(n)[1] == '/') return FloorDiv; else return Div; case '%': return Mod; case '<': return LShift; case '>': return RShift; case '&': return BitAnd; case '^': return BitXor; case '|': return BitOr; case '*': if (STR(n)[1] == '*') return Pow; else return Mult; case '@': return MatMult; default: PyErr_Format(PyExc_SystemError, "invalid augassign: %s", STR(n)); return (operator_ty)0; } } static cmpop_ty ast_for_comp_op(struct compiling *c, const node *n) { /* comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is' |'is' 'not' */ REQ(n, comp_op); if (NCH(n) == 1) { n = CHILD(n, 0); switch (TYPE(n)) { case LESS: return Lt; case GREATER: return Gt; case EQEQUAL: /* == */ return Eq; case LESSEQUAL: return LtE; case GREATEREQUAL: return GtE; case NOTEQUAL: return NotEq; case NAME: if (strcmp(STR(n), "in") == 0) return In; if (strcmp(STR(n), "is") == 0) return Is; /* fall through */ 
default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s", STR(n)); return (cmpop_ty)0; } } else if (NCH(n) == 2) { /* handle "not in" and "is not" */ switch (TYPE(CHILD(n, 0))) { case NAME: if (strcmp(STR(CHILD(n, 1)), "in") == 0) return NotIn; if (strcmp(STR(CHILD(n, 0)), "is") == 0) return IsNot; /* fall through */ default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s %s", STR(CHILD(n, 0)), STR(CHILD(n, 1))); return (cmpop_ty)0; } } PyErr_Format(PyExc_SystemError, "invalid comp_op: has %d children", NCH(n)); return (cmpop_ty)0; } static asdl_seq * seq_for_testlist(struct compiling *c, const node *n) { /* testlist: test (',' test)* [','] testlist_star_expr: test|star_expr (',' test|star_expr)* [','] */ asdl_seq *seq; expr_ty expression; int i; assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr || TYPE(n) == testlist_comp); seq = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { const node *ch = CHILD(n, i); assert(TYPE(ch) == test || TYPE(ch) == test_nocond || TYPE(ch) == star_expr || TYPE(ch) == namedexpr_test); expression = ast_for_expr(c, ch); if (!expression) return NULL; assert(i / 2 < seq->size); asdl_seq_SET(seq, i / 2, expression); } return seq; } static arg_ty ast_for_arg(struct compiling *c, const node *n) { identifier name; expr_ty annotation = NULL; node *ch; arg_ty ret; assert(TYPE(n) == tfpdef || TYPE(n) == vfpdef); ch = CHILD(n, 0); name = NEW_IDENTIFIER(ch); if (!name) return NULL; if (forbidden_name(c, name, ch, 0)) return NULL; if (NCH(n) == 3 && TYPE(CHILD(n, 1)) == COLON) { annotation = ast_for_expr(c, CHILD(n, 2)); if (!annotation) return NULL; } ret = arg(name, annotation, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!ret) return NULL; return ret; } /* returns -1 if failed to handle keyword only arguments returns new position to keep processing if successful (',' tfpdef ['=' test])* ^^^ start pointing here */ static int 
handle_keywordonly_args(struct compiling *c, const node *n, int start, asdl_seq *kwonlyargs, asdl_seq *kwdefaults) { PyObject *argname; node *ch; expr_ty expression, annotation; arg_ty arg; int i = start; int j = 0; /* index for kwdefaults and kwonlyargs */ if (kwonlyargs == NULL) { ast_error(c, CHILD(n, start), "named arguments must follow bare *"); return -1; } assert(kwdefaults != NULL); while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case vfpdef: case tfpdef: if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) goto error; asdl_seq_SET(kwdefaults, j, expression); i += 2; /* '=' and test */ } else { /* setting NULL if no default value exists */ asdl_seq_SET(kwdefaults, j, NULL); } if (NCH(ch) == 3) { /* ch is NAME ':' test */ annotation = ast_for_expr(c, CHILD(ch, 2)); if (!annotation) goto error; } else { annotation = NULL; } ch = CHILD(ch, 0); argname = NEW_IDENTIFIER(ch); if (!argname) goto error; if (forbidden_name(c, argname, ch, 0)) goto error; arg = arg(argname, annotation, NULL, LINENO(ch), ch->n_col_offset, ch->n_end_lineno, ch->n_end_col_offset, c->c_arena); if (!arg) goto error; asdl_seq_SET(kwonlyargs, j++, arg); i += 1; /* the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); if (!arg->type_comment) goto error; i += 1; break; case DOUBLESTAR: return i; default: ast_error(c, ch, "unexpected node"); goto error; } } return i; error: return -1; } /* Create AST for argument list. */ static arguments_ty ast_for_arguments(struct compiling *c, const node *n) { /* This function handles both typedargslist (function definition) and varargslist (lambda definition). 
parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']) tfpdef: NAME [':' test] varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [',']]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [','] ) vfpdef: NAME */ int i, j, k, nposargs = 0, nkwonlyargs = 0; int nposdefaults = 0, found_default = 0; asdl_seq *posargs, *posdefaults, *kwonlyargs, *kwdefaults; arg_ty vararg = NULL, kwarg = NULL; arg_ty arg; node *ch; if (TYPE(n) == parameters) { if (NCH(n) == 2) /* () as argument list */ return arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); n = CHILD(n, 1); } assert(TYPE(n) == typedargslist || TYPE(n) == varargslist); /* First count the number of positional args & defaults. The variable i is the loop index for this for loop and the next. The next loop picks up where the first leaves off. */ for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == STAR) { /* skip star */ i++; if (i < NCH(n) && /* skip argument following star */ (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { i++; } break; } if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == vfpdef || TYPE(ch) == tfpdef) nposargs++; if (TYPE(ch) == EQUAL) nposdefaults++; } /* count the number of keyword only args & defaults for keyword only args */ for ( ; i < NCH(n); ++i) { ch = CHILD(n, i); if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == tfpdef || TYPE(ch) == vfpdef) nkwonlyargs++; } posargs = (nposargs ? _Py_asdl_seq_new(nposargs, c->c_arena) : NULL); if (!posargs && nposargs) return NULL; kwonlyargs = (nkwonlyargs ? _Py_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwonlyargs && nkwonlyargs) return NULL; posdefaults = (nposdefaults ? 
_Py_asdl_seq_new(nposdefaults, c->c_arena) : NULL); if (!posdefaults && nposdefaults) return NULL; /* The length of kwonlyargs and kwdefaults are same since we set NULL as default for keyword only argument w/o default - we have sequence data structure, but no dictionary */ kwdefaults = (nkwonlyargs ? _Py_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwdefaults && nkwonlyargs) return NULL; /* tfpdef: NAME [':' test] vfpdef: NAME */ i = 0; j = 0; /* index for defaults */ k = 0; /* index for args */ while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case tfpdef: case vfpdef: /* XXX Need to worry about checking if TYPE(CHILD(n, i+1)) is anything other than EQUAL or a comma? */ /* XXX Should NCH(n) check be made a separate check? */ if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expr_ty expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) return NULL; assert(posdefaults != NULL); asdl_seq_SET(posdefaults, j++, expression); i += 2; found_default = 1; } else if (found_default) { ast_error(c, n, "non-default argument follows default argument"); return NULL; } arg = ast_for_arg(c, ch); if (!arg) return NULL; asdl_seq_SET(posargs, k++, arg); i += 1; /* the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case STAR: if (i+1 >= NCH(n) || (i+2 == NCH(n) && (TYPE(CHILD(n, i+1)) == COMMA || TYPE(CHILD(n, i+1)) == TYPE_COMMENT))) { ast_error(c, CHILD(n, i), "named arguments must follow bare *"); return NULL; } ch = CHILD(n, i+1); /* tfpdef or COMMA */ if (TYPE(ch) == COMMA) { int res = 0; i += 2; /* now follows keyword only arguments */ if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) { ast_error(c, CHILD(n, i), "bare * has associated type comment"); return NULL; } res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } else { vararg = ast_for_arg(c, ch); if (!vararg) return NULL; i += 2; /* the star and the name 
*/ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) { vararg->type_comment = NEW_TYPE_COMMENT(CHILD(n, i)); if (!vararg->type_comment) return NULL; i += 1; } if (i < NCH(n) && (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { int res = 0; res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } } break; case DOUBLESTAR: ch = CHILD(n, i+1); /* tfpdef */ assert(TYPE(ch) == tfpdef || TYPE(ch) == vfpdef); kwarg = ast_for_arg(c, ch); if (!kwarg) return NULL; i += 2; /* the double star and the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: assert(i); if (kwarg) arg = kwarg; /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); if (!arg->type_comment) return NULL; i += 1; break; default: PyErr_Format(PyExc_SystemError, "unexpected node in varargslist: %d @ %d", TYPE(ch), i); return NULL; } } return arguments(posargs, vararg, kwonlyargs, kwdefaults, kwarg, posdefaults, c->c_arena); } static expr_ty ast_for_dotted_name(struct compiling *c, const node *n) { expr_ty e; identifier id; int lineno, col_offset; int i; node *ch; REQ(n, dotted_name); lineno = LINENO(n); col_offset = n->n_col_offset; ch = CHILD(n, 0); id = NEW_IDENTIFIER(ch); if (!id) return NULL; e = Name(id, Load, lineno, col_offset, ch->n_end_lineno, ch->n_end_col_offset, c->c_arena); if (!e) return NULL; for (i = 2; i < NCH(n); i+=2) { id = NEW_IDENTIFIER(CHILD(n, i)); if (!id) return NULL; e = Attribute(e, id, Load, lineno, col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!e) return NULL; } return e; } static expr_ty ast_for_decorator(struct compiling *c, const node *n) { /* decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE */ expr_ty d = NULL; expr_ty name_expr; REQ(n, decorator); REQ(CHILD(n, 0), AT); 
REQ(RCHILD(n, -1), NEWLINE); name_expr = ast_for_dotted_name(c, CHILD(n, 1)); if (!name_expr) return NULL; if (NCH(n) == 3) { /* No arguments */ d = name_expr; name_expr = NULL; } else if (NCH(n) == 5) { /* Call with no arguments */ d = Call(name_expr, NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!d) return NULL; name_expr = NULL; } else { d = ast_for_call(c, CHILD(n, 3), name_expr, CHILD(n, 2), CHILD(n, 4)); if (!d) return NULL; name_expr = NULL; } return d; } static asdl_seq* ast_for_decorators(struct compiling *c, const node *n) { asdl_seq* decorator_seq; expr_ty d; int i; REQ(n, decorators); decorator_seq = _Py_asdl_seq_new(NCH(n), c->c_arena); if (!decorator_seq) return NULL; for (i = 0; i < NCH(n); i++) { d = ast_for_decorator(c, CHILD(n, i)); if (!d) return NULL; asdl_seq_SET(decorator_seq, i, d); } return decorator_seq; } static stmt_ty ast_for_funcdef_impl(struct compiling *c, const node *n0, asdl_seq *decorator_seq, bool is_async) { /* funcdef: 'def' NAME parameters ['->' test] ':' [TYPE_COMMENT] suite */ const node * const n = is_async ? 
CHILD(n0, 1) : n0; identifier name; arguments_ty args; asdl_seq *body; expr_ty returns = NULL; int name_i = 1; int end_lineno, end_col_offset; node *tc; string type_comment = NULL; REQ(n, funcdef); name = NEW_IDENTIFIER(CHILD(n, name_i)); if (!name) return NULL; if (forbidden_name(c, name, CHILD(n, name_i), 0)) return NULL; args = ast_for_arguments(c, CHILD(n, name_i + 1)); if (!args) return NULL; if (TYPE(CHILD(n, name_i+2)) == RARROW) { returns = ast_for_expr(c, CHILD(n, name_i + 3)); if (!returns) return NULL; name_i += 2; } if (TYPE(CHILD(n, name_i + 3)) == TYPE_COMMENT) { type_comment = NEW_TYPE_COMMENT(CHILD(n, name_i + 3)); if (!type_comment) return NULL; name_i += 1; } body = ast_for_suite(c, CHILD(n, name_i + 3)); if (!body) return NULL; get_last_end_pos(body, &end_lineno, &end_col_offset); if (NCH(CHILD(n, name_i + 3)) > 1) { /* Check if the suite has a type comment in it. */ tc = CHILD(CHILD(n, name_i + 3), 1); if (TYPE(tc) == TYPE_COMMENT) { if (type_comment != NULL) { ast_error(c, n, "Cannot have two type comments on def"); return NULL; } type_comment = NEW_TYPE_COMMENT(tc); if (!type_comment) return NULL; } } if (is_async) return AsyncFunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return FunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static stmt_ty ast_for_async_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* async_funcdef: 'async' funcdef */ REQ(n, async_funcdef); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); REQ(CHILD(n, 1), funcdef); return ast_for_funcdef_impl(c, n, decorator_seq, true /* is_async */); } static stmt_ty ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ return ast_for_funcdef_impl(c, n, decorator_seq, false /* 
is_async */); } static stmt_ty ast_for_async_stmt(struct compiling *c, const node *n) { /* async_stmt: 'async' (funcdef | with_stmt | for_stmt) */ REQ(n, async_stmt); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); switch (TYPE(CHILD(n, 1))) { case funcdef: return ast_for_funcdef_impl(c, n, NULL, true /* is_async */); case with_stmt: return ast_for_with_stmt(c, n, true /* is_async */); case for_stmt: return ast_for_for_stmt(c, n, true /* is_async */); default: PyErr_Format(PyExc_SystemError, "invalid async stament: %s", STR(CHILD(n, 1))); return NULL; } } static stmt_ty ast_for_decorated(struct compiling *c, const node *n) { /* decorated: decorators (classdef | funcdef | async_funcdef) */ stmt_ty thing = NULL; asdl_seq *decorator_seq = NULL; REQ(n, decorated); decorator_seq = ast_for_decorators(c, CHILD(n, 0)); if (!decorator_seq) return NULL; assert(TYPE(CHILD(n, 1)) == funcdef || TYPE(CHILD(n, 1)) == async_funcdef || TYPE(CHILD(n, 1)) == classdef); if (TYPE(CHILD(n, 1)) == funcdef) { thing = ast_for_funcdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == classdef) { thing = ast_for_classdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == async_funcdef) { thing = ast_for_async_funcdef(c, CHILD(n, 1), decorator_seq); } return thing; } static expr_ty ast_for_namedexpr(struct compiling *c, const node *n) { /* if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] namedexpr_test: test [':=' test] argument: ( test [comp_for] | test ':=' test | test '=' test | '**' test | '*' test ) */ expr_ty target, value; target = ast_for_expr(c, CHILD(n, 0)); if (!target) return NULL; value = ast_for_expr(c, CHILD(n, 2)); if (!value) return NULL; if (!set_context(c, target, NamedStore, n)) return NULL; return NamedExpr(target, value, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static expr_ty ast_for_lambdef(struct compiling *c, const node *n) { /* 
lambdef: 'lambda' [varargslist] ':' test lambdef_nocond: 'lambda' [varargslist] ':' test_nocond */ arguments_ty args; expr_ty expression; if (NCH(n) == 3) { args = arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; } else { args = ast_for_arguments(c, CHILD(n, 1)); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 3)); if (!expression) return NULL; } return Lambda(args, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static expr_ty ast_for_ifexpr(struct compiling *c, const node *n) { /* test: or_test 'if' or_test 'else' test */ expr_ty expression, body, orelse; assert(NCH(n) == 5); body = ast_for_expr(c, CHILD(n, 0)); if (!body) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; orelse = ast_for_expr(c, CHILD(n, 4)); if (!orelse) return NULL; return IfExp(expression, body, orelse, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* Count the number of 'for' loops in a comprehension. Helper for ast_for_comprehension(). */ static int count_comp_fors(struct compiling *c, const node *n) { int n_fors = 0; count_comp_for: n_fors++; REQ(n, comp_for); if (NCH(n) == 2) { REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); n = CHILD(n, 1); } else if (NCH(n) == 1) { n = CHILD(n, 0); } else { goto error; } if (NCH(n) == (5)) { n = CHILD(n, 4); } else { return n_fors; } count_comp_iter: REQ(n, comp_iter); n = CHILD(n, 0); if (TYPE(n) == comp_for) goto count_comp_for; else if (TYPE(n) == comp_if) { if (NCH(n) == 3) { n = CHILD(n, 2); goto count_comp_iter; } else return n_fors; } error: /* Should never be reached */ PyErr_SetString(PyExc_SystemError, "logic error in count_comp_fors"); return -1; } /* Count the number of 'if' statements in a comprehension. Helper for ast_for_comprehension(). 
*/
/* Returns the number of 'if' clauses before the next comp_for (or the end
   of the comp_iter chain); never fails for a well-formed tree. */
static int
count_comp_ifs(struct compiling *c, const node *n)
{
    int n_ifs = 0;

    while (1) {
        REQ(n, comp_iter);
        if (TYPE(CHILD(n, 0)) == comp_for)
            return n_ifs;
        n = CHILD(n, 0);
        REQ(n, comp_if);
        n_ifs++;
        if (NCH(n) == 2)
            return n_ifs;
        n = CHILD(n, 2);
    }
}

/* Build the asdl_seq of comprehension_ty clauses for a comprehension.

   n is a comp_for node.  One comprehension is produced per 'for' clause,
   carrying its target (Store context), iterable, attached 'if' conditions
   and an is_async flag.  Returns NULL (with an exception set) on failure. */
static asdl_seq *
ast_for_comprehension(struct compiling *c, const node *n)
{
    int i, n_fors;
    asdl_seq *comps;

    n_fors = count_comp_fors(c, n);
    if (n_fors == -1)
        return NULL;

    comps = _Py_asdl_seq_new(n_fors, c->c_arena);
    if (!comps)
        return NULL;

    for (i = 0; i < n_fors; i++) {
        comprehension_ty comp;
        asdl_seq *t;
        expr_ty expression, first;
        node *for_ch;
        node *sync_n;
        int is_async = 0;

        REQ(n, comp_for);

        if (NCH(n) == 2) {
            /* 'async' comp_for: mark the clause async, skip the NAME. */
            is_async = 1;
            REQ(CHILD(n, 0), NAME);
            assert(strcmp(STR(CHILD(n, 0)), "async") == 0);
            sync_n = CHILD(n, 1);
        }
        else {
            sync_n = CHILD(n, 0);
        }
        REQ(sync_n, sync_comp_for);

        for_ch = CHILD(sync_n, 1);
        t = ast_for_exprlist(c, for_ch, Store);
        if (!t)
            return NULL;
        expression = ast_for_expr(c, CHILD(sync_n, 3));
        if (!expression)
            return NULL;

        /* Check the # of children rather than the length of t, since
           (x for x, in ...) has 1 element in t, but still requires a Tuple.
        */
        first = (expr_ty)asdl_seq_GET(t, 0);
        if (NCH(for_ch) == 1)
            comp = comprehension(first, expression, NULL,
                                 is_async, c->c_arena);
        else
            comp = comprehension(Tuple(t, Store, first->lineno,
                                       first->col_offset,
                                       for_ch->n_end_lineno,
                                       for_ch->n_end_col_offset,
                                       c->c_arena),
                                 expression, NULL, is_async, c->c_arena);
        if (!comp)
            return NULL;

        if (NCH(sync_n) == 5) {
            /* Trailing comp_iter: collect this clause's 'if' conditions. */
            int j, n_ifs;
            asdl_seq *ifs;

            n = CHILD(sync_n, 4);
            n_ifs = count_comp_ifs(c, n);
            if (n_ifs == -1)
                return NULL;

            ifs = _Py_asdl_seq_new(n_ifs, c->c_arena);
            if (!ifs)
                return NULL;

            for (j = 0; j < n_ifs; j++) {
                REQ(n, comp_iter);
                n = CHILD(n, 0);
                REQ(n, comp_if);

                expression = ast_for_expr(c, CHILD(n, 1));
                if (!expression)
                    return NULL;
                asdl_seq_SET(ifs, j, expression);
                if (NCH(n) == 3)
                    n = CHILD(n, 2);
            }
            /* on exit, must guarantee that n is a comp_for */
            if (TYPE(n) == comp_iter)
                n = CHILD(n, 0);
            comp->ifs = ifs;
        }
        asdl_seq_SET(comps, i, comp);
    }
    return comps;
}

/* Shared builder for generator / list / set comprehensions.

   type selects the AST node kind: COMP_GENEXP, COMP_LISTCOMP or
   COMP_SETCOMP.  Returns NULL (with an exception set) on failure. */
static expr_ty
ast_for_itercomp(struct compiling *c, const node *n, int type)
{
    /* testlist_comp: (test|star_expr)
     *                ( comp_for | (',' (test|star_expr))* [','] ) */
    expr_ty elt;
    asdl_seq *comps;
    node *ch;

    assert(NCH(n) > 1);

    ch = CHILD(n, 0);
    elt = ast_for_expr(c, ch);
    if (!elt)
        return NULL;
    if (elt->kind == Starred_kind) {
        ast_error(c, ch, "iterable unpacking cannot be used in comprehension");
        return NULL;
    }

    comps = ast_for_comprehension(c, CHILD(n, 1));
    if (!comps)
        return NULL;

    if (type == COMP_GENEXP)
        return GeneratorExp(elt, comps, LINENO(n), n->n_col_offset,
                            n->n_end_lineno, n->n_end_col_offset,
                            c->c_arena);
    else if (type == COMP_LISTCOMP)
        return ListComp(elt, comps, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    else if (type == COMP_SETCOMP)
        return SetComp(elt, comps, LINENO(n), n->n_col_offset,
                       n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    else
        /* Should never happen */
        return NULL;
}

/* Fills in the key, value pair corresponding to the dict element.  In case
 * of an unpacking, key is NULL.
 *i is advanced by the number of ast
 * elements. Iff successful, nonzero is returned. */
static int
ast_for_dictelement(struct compiling *c, const node *n, int *i,
                    expr_ty *key, expr_ty *value)
{
    expr_ty expression;
    if (TYPE(CHILD(n, *i)) == DOUBLESTAR) {
        /* '**' test: a dict unpacking; key stays NULL. */
        assert(NCH(n) - *i >= 2);

        expression = ast_for_expr(c, CHILD(n, *i + 1));
        if (!expression)
            return 0;
        *key = NULL;
        *value = expression;

        *i += 2;
    }
    else {
        /* test ':' test: a regular key/value pair. */
        assert(NCH(n) - *i >= 3);

        expression = ast_for_expr(c, CHILD(n, *i));
        if (!expression)
            return 0;
        *key = expression;

        REQ(CHILD(n, *i + 1), COLON);

        expression = ast_for_expr(c, CHILD(n, *i + 2));
        if (!expression)
            return 0;
        *value = expression;

        *i += 3;
    }
    return 1;
}

/* Build a DictComp node from a dictorsetmaker node.
   The first element must be a real key/value pair (the caller rules out
   '**' unpacking here).  Returns NULL (with an exception set) on failure. */
static expr_ty
ast_for_dictcomp(struct compiling *c, const node *n)
{
    expr_ty key, value;
    asdl_seq *comps;
    int i = 0;

    if (!ast_for_dictelement(c, n, &i, &key, &value))
        return NULL;
    assert(key);
    assert(NCH(n) - i >= 1);

    comps = ast_for_comprehension(c, CHILD(n, i));
    if (!comps)
        return NULL;

    return DictComp(key, value, comps, LINENO(n), n->n_col_offset,
                    n->n_end_lineno, n->n_end_col_offset, c->c_arena);
}

/* Build a Dict node from a dict display, which may mix key/value pairs
   and '**' unpackings (NULL keys).  Returns NULL on failure. */
static expr_ty
ast_for_dictdisplay(struct compiling *c, const node *n)
{
    int i;
    int j;
    int size;
    asdl_seq *keys, *values;

    size = (NCH(n) + 1) / 3; /* +1 in case no trailing comma */
    keys = _Py_asdl_seq_new(size, c->c_arena);
    if (!keys)
        return NULL;

    values = _Py_asdl_seq_new(size, c->c_arena);
    if (!values)
        return NULL;

    j = 0;
    for (i = 0; i < NCH(n); i++) {
        expr_ty key, value;

        /* ast_for_dictelement advances i past the element it consumed;
           the loop's i++ then skips the separating comma. */
        if (!ast_for_dictelement(c, n, &i, &key, &value))
            return NULL;
        asdl_seq_SET(keys, j, key);
        asdl_seq_SET(values, j, value);

        j++;
    }
    /* Shrink the sequences to the number of elements actually parsed. */
    keys->size = j;
    values->size = j;
    return Dict(keys, values, LINENO(n), n->n_col_offset,
                n->n_end_lineno, n->n_end_col_offset, c->c_arena);
}

/* Build a GeneratorExp from a testlist_comp or argument node. */
static expr_ty
ast_for_genexp(struct compiling *c, const node *n)
{
    assert(TYPE(n) == (testlist_comp) || TYPE(n) == (argument));
    return ast_for_itercomp(c, n, COMP_GENEXP);
}

/* Build a ListComp node from a testlist_comp node. */
static expr_ty
ast_for_listcomp(struct compiling *c, const node *n)
{
    assert(TYPE(n) == (testlist_comp));
    return ast_for_itercomp(c, n, COMP_LISTCOMP);
}

/* Build a SetComp node from a dictorsetmaker node. */
static expr_ty
ast_for_setcomp(struct compiling *c, const node *n)
{
    assert(TYPE(n) == (dictorsetmaker));
    return ast_for_itercomp(c, n, COMP_SETCOMP);
}

/* Build a Set node from a set display in a dictorsetmaker node.
   Returns NULL (with an exception set) on failure. */
static expr_ty
ast_for_setdisplay(struct compiling *c, const node *n)
{
    int i;
    int size;
    asdl_seq *elts;

    assert(TYPE(n) == (dictorsetmaker));
    size = (NCH(n) + 1) / 2; /* +1 in case no trailing comma */
    elts = _Py_asdl_seq_new(size, c->c_arena);
    if (!elts)
        return NULL;
    for (i = 0; i < NCH(n); i += 2) {
        expr_ty expression;

        expression = ast_for_expr(c, CHILD(n, i));
        if (!expression)
            return NULL;
        asdl_seq_SET(elts, i / 2, expression);
    }
    return Set(elts, LINENO(n), n->n_col_offset,
               n->n_end_lineno, n->n_end_col_offset, c->c_arena);
}

/* Build an expr node for an 'atom' nonterminal; dispatches on the type
   of the first child.  Returns NULL (with an exception set) on failure. */
static expr_ty
ast_for_atom(struct compiling *c, const node *n)
{
    /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']'
       | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+
       | '...' | 'None' | 'True' | 'False'
    */
    node *ch = CHILD(n, 0);

    switch (TYPE(ch)) {
    case NAME: {
        PyObject *name;
        const char *s = STR(ch);
        size_t len = strlen(s);
        if (len >= 4 && len <= 5) {
            /* Fast length check before comparing against the singleton
               constants None/True/False. */
            if (!strcmp(s, "None"))
                return Constant(Py_None, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
            if (!strcmp(s, "True"))
                return Constant(Py_True, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
            if (!strcmp(s, "False"))
                return Constant(Py_False, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
        }
        name = new_identifier(s, c);
        if (!name)
            return NULL;
        /* All names start in Load context, but may later be changed.
*/ return Name(name, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) { ast_error(c, n, "(%s) %U", errtype, errstr); Py_DECREF(errstr); } else { PyErr_Clear(); ast_error(c, n, "(%s) unknown error", errtype); } Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum = parsenumber(c, STR(ch)); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Constant(pynum, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Constant(Py_Ellipsis, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if (NCH(ch) == 1) { return ast_for_testlist(c, ch); } if (TYPE(CHILD(ch, 1)) == comp_for) { return copy_location(ast_for_genexp(c, ch), n); } else { return copy_location(ast_for_testlist(c, ch), n); } case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), 
n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { return copy_location(ast_for_listcomp(c, ch), n); } case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. */ res = ast_for_dictdisplay(c, ch); } return copy_location(res, n); } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } } static slice_ty ast_for_slice(struct compiling *c, const node *n) { node *ch; expr_ty lower = NULL, upper = NULL, step = NULL; REQ(n, subscript); /* subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] */ ch = CHILD(n, 0); if (NCH(n) == 1 && TYPE(ch) == test) { /* 'step' variable hold no significance in terms of being used over other vars */ step = ast_for_expr(c, ch); if (!step) return NULL; return Index(step, c->c_arena); } if (TYPE(ch) == test) { lower = ast_for_expr(c, ch); if (!lower) return NULL; } /* If there's an upper bound it's in the second or third position. 
*/ if (TYPE(ch) == COLON) { if (NCH(n) > 1) { node *n2 = CHILD(n, 1); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } } else if (NCH(n) > 2) { node *n2 = CHILD(n, 2); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } ch = CHILD(n, NCH(n) - 1); if (TYPE(ch) == sliceop) { if (NCH(ch) != 1) { ch = CHILD(ch, 1); if (TYPE(ch) == test) { step = ast_for_expr(c, ch); if (!step) return NULL; } } } return Slice(lower, upper, step, c->c_arena); } static expr_ty ast_for_binop(struct compiling *c, const node *n) { /* Must account for a sequence of expressions. How should A op B op C by represented? BinOp(BinOp(A, op, B), op, C). */ int i, nops; expr_ty expr1, expr2, result; operator_ty newoperator; expr1 = ast_for_expr(c, CHILD(n, 0)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 2)); if (!expr2) return NULL; newoperator = get_operator(CHILD(n, 1)); if (!newoperator) return NULL; result = BinOp(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, CHILD(n, 2)->n_end_lineno, CHILD(n, 2)->n_end_col_offset, c->c_arena); if (!result) return NULL; nops = (NCH(n) - 1) / 2; for (i = 1; i < nops; i++) { expr_ty tmp_result, tmp; const node* next_oper = CHILD(n, i * 2 + 1); newoperator = get_operator(next_oper); if (!newoperator) return NULL; tmp = ast_for_expr(c, CHILD(n, i * 2 + 2)); if (!tmp) return NULL; tmp_result = BinOp(result, newoperator, tmp, LINENO(next_oper), next_oper->n_col_offset, CHILD(n, i * 2 + 2)->n_end_lineno, CHILD(n, i * 2 + 2)->n_end_col_offset, c->c_arena); if (!tmp_result) return NULL; result = tmp_result; } return result; } static expr_ty ast_for_trailer(struct compiling *c, const node *n, expr_ty left_expr) { /* trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: '.' '.' '.' 
| test | [test] ':' [test] [sliceop] */ const node *n_copy = n; REQ(n, trailer); if (TYPE(CHILD(n, 0)) == LPAR) { if (NCH(n) == 2) return Call(left_expr, NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else return ast_for_call(c, CHILD(n, 1), left_expr, CHILD(n, 0), CHILD(n, 2)); } else if (TYPE(CHILD(n, 0)) == DOT) { PyObject *attr_id = NEW_IDENTIFIER(CHILD(n, 1)); if (!attr_id) return NULL; return Attribute(left_expr, attr_id, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { REQ(CHILD(n, 0), LSQB); REQ(CHILD(n, 2), RSQB); n = CHILD(n, 1); if (NCH(n) == 1) { slice_ty slc = ast_for_slice(c, CHILD(n, 0)); if (!slc) return NULL; return Subscript(left_expr, slc, Load, LINENO(n), n->n_col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } else { /* The grammar is ambiguous here. The ambiguity is resolved by treating the sequence as a tuple literal if there are no slice features. */ Py_ssize_t j; slice_ty slc; expr_ty e; int simple = 1; asdl_seq *slices, *elts; slices = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!slices) return NULL; for (j = 0; j < NCH(n); j += 2) { slc = ast_for_slice(c, CHILD(n, j)); if (!slc) return NULL; if (slc->kind != Index_kind) simple = 0; asdl_seq_SET(slices, j / 2, slc); } if (!simple) { return Subscript(left_expr, ExtSlice(slices, c->c_arena), Load, LINENO(n), n->n_col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } /* extract Index values and put them in a Tuple */ elts = _Py_asdl_seq_new(asdl_seq_LEN(slices), c->c_arena); if (!elts) return NULL; for (j = 0; j < asdl_seq_LEN(slices); ++j) { slc = (slice_ty)asdl_seq_GET(slices, j); assert(slc->kind == Index_kind && slc->v.Index.value); asdl_seq_SET(elts, j, slc->v.Index.value); } e = Tuple(elts, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!e) return NULL; return Subscript(left_expr, Index(e, c->c_arena), 
Load, LINENO(n), n->n_col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } } } static expr_ty ast_for_factor(struct compiling *c, const node *n) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; switch (TYPE(CHILD(n, 0))) { case PLUS: return UnaryOp(UAdd, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case MINUS: return UnaryOp(USub, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case TILDE: return UnaryOp(Invert, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unhandled factor: %d", TYPE(CHILD(n, 0))); return NULL; } static expr_ty ast_for_atom_expr(struct compiling *c, const node *n) { int i, nch, start = 0; expr_ty e, tmp; REQ(n, atom_expr); nch = NCH(n); if (TYPE(CHILD(n, 0)) == NAME && strcmp(STR(CHILD(n, 0)), "await") == 0) { start = 1; assert(nch > 1); } e = ast_for_atom(c, CHILD(n, start)); if (!e) return NULL; if (nch == 1) return e; if (start && nch == 2) { return Await(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } for (i = start + 1; i < nch; i++) { node *ch = CHILD(n, i); if (TYPE(ch) != trailer) break; tmp = ast_for_trailer(c, ch, e); if (!tmp) return NULL; tmp->lineno = e->lineno; tmp->col_offset = e->col_offset; e = tmp; } if (start) { /* there was an 'await' */ return Await(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { return e; } } static expr_ty ast_for_power(struct compiling *c, const node *n) { /* power: atom trailer* ('**' factor)* */ expr_ty e; REQ(n, power); e = ast_for_atom_expr(c, CHILD(n, 0)); if (!e) return NULL; if (NCH(n) == 1) return e; if (TYPE(CHILD(n, NCH(n) - 1)) == factor) { expr_ty f = ast_for_expr(c, CHILD(n, NCH(n) - 1)); if (!f) return NULL; e = BinOp(e, Pow, f, LINENO(n), n->n_col_offset, n->n_end_lineno, 
n->n_end_col_offset, c->c_arena); } return e; } static expr_ty ast_for_starred(struct compiling *c, const node *n) { expr_ty tmp; REQ(n, star_expr); tmp = ast_for_expr(c, CHILD(n, 1)); if (!tmp) return NULL; /* The Load context is changed later. */ return Starred(tmp, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* Do not name a variable 'expr'! Will cause a compile error. */ static expr_ty ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions namedexpr_test: test [':=' test] test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: ['await'] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case namedexpr_test: if (NCH(n) == 3) return ast_for_namedexpr(c, n); /* Fallthrough */ case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, 
seq, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Py_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. 
*/ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; } static expr_ty ast_for_call(struct compiling *c, const node *n, expr_ty func, const node *maybegenbeg, const node *closepar) { /* arglist: argument (',' argument)* [','] argument: ( test [comp_for] | '*' test | test '=' test | '**' test ) */ int i, nargs, nkeywords; int ndoublestars; asdl_seq *args; asdl_seq *keywords; REQ(n, arglist); nargs = 0; nkeywords = 0; for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { if (NCH(ch) == 1) nargs++; else if (TYPE(CHILD(ch, 1)) == comp_for) { nargs++; if (!maybegenbeg) { ast_error(c, ch, "invalid syntax"); return NULL; } if (NCH(n) > 1) { ast_error(c, ch, "Generator expression must be parenthesized"); return NULL; } } else if (TYPE(CHILD(ch, 0)) == STAR) nargs++; else if (TYPE(CHILD(ch, 1)) == COLONEQUAL) { nargs++; } else /* TYPE(CHILD(ch, 0)) == DOUBLESTAR or keyword argument */ nkeywords++; } } args = _Py_asdl_seq_new(nargs, c->c_arena); if (!args) return NULL; keywords = _Py_asdl_seq_new(nkeywords, c->c_arena); if (!keywords) return NULL; nargs 
= 0; /* positional arguments + iterable argument unpackings */ nkeywords = 0; /* keyword arguments + keyword argument unpackings */ ndoublestars = 0; /* just keyword argument unpackings */ for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { expr_ty e; node *chch = CHILD(ch, 0); if (NCH(ch) == 1) { /* a positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_expr(c, chch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(chch) == STAR) { /* an iterable argument unpacking */ expr_ty starred; if (ndoublestars) { ast_error(c, chch, "iterable argument unpacking follows " "keyword argument unpacking"); return NULL; } e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; starred = Starred(e, Load, LINENO(chch), chch->n_col_offset, chch->n_end_lineno, chch->n_end_col_offset, c->c_arena); if (!starred) return NULL; asdl_seq_SET(args, nargs++, starred); } else if (TYPE(chch) == DOUBLESTAR) { /* a keyword argument unpacking */ keyword_ty kw; i++; e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; kw = keyword(NULL, e, c->c_arena); asdl_seq_SET(keywords, nkeywords++, kw); ndoublestars++; } else if (TYPE(CHILD(ch, 1)) == comp_for) { /* the lone generator expression */ e = copy_location(ast_for_genexp(c, ch), maybegenbeg); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(CHILD(ch, 1)) == COLONEQUAL) { /* treat colon equal as positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_namedexpr(c, ch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else { /* a keyword argument */ keyword_ty kw; identifier key, tmp; int 
k; // To remain LL(1), the grammar accepts any test (basically, any // expression) in the keyword slot of a call site. So, we need // to manually enforce that the keyword is a NAME here. static const int name_tree[] = { test, or_test, and_test, not_test, comparison, expr, xor_expr, and_expr, shift_expr, arith_expr, term, factor, power, atom_expr, atom, 0, }; node *expr_node = chch; for (int i = 0; name_tree[i]; i++) { if (TYPE(expr_node) != name_tree[i]) break; if (NCH(expr_node) != 1) break; expr_node = CHILD(expr_node, 0); } if (TYPE(expr_node) != NAME) { ast_error(c, chch, "expression cannot contain assignment, " "perhaps you meant \"==\"?"); return NULL; } key = new_identifier(STR(expr_node), c); if (key == NULL) { return NULL; } if (forbidden_name(c, key, chch, 1)) { return NULL; } for (k = 0; k < nkeywords; k++) { tmp = ((keyword_ty)asdl_seq_GET(keywords, k))->arg; if (tmp && !PyUnicode_Compare(tmp, key)) { ast_error(c, chch, "keyword argument repeated"); return NULL; } } e = ast_for_expr(c, CHILD(ch, 2)); if (!e) return NULL; kw = keyword(key, e, c->c_arena); if (!kw) return NULL; asdl_seq_SET(keywords, nkeywords++, kw); } } } return Call(func, args, keywords, func->lineno, func->col_offset, closepar->n_end_lineno, closepar->n_end_col_offset, c->c_arena); } static expr_ty ast_for_testlist(struct compiling *c, const node* n) { /* testlist_comp: test (comp_for | (',' test)* [',']) */ /* testlist: test (',' test)* [','] */ assert(NCH(n) > 0); if (TYPE(n) == testlist_comp) { if (NCH(n) > 1) assert(TYPE(CHILD(n, 1)) != comp_for); } else { assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr); } if (NCH(n) == 1) return ast_for_expr(c, CHILD(n, 0)); else { asdl_seq *tmp = seq_for_testlist(c, n); if (!tmp) return NULL; return Tuple(tmp, Load, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } static stmt_ty ast_for_expr_stmt(struct compiling *c, const node *n) { REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | 
augassign (yield_expr|testlist) | [('=' (yield_expr|testlist_star_expr))+ [TYPE_COMMENT]] ) annassign: ':' test ['=' (yield_expr|testlist)] testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=') test: ... here starts the operator precedence dance */ int num = NCH(n); if (num == 1) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) 
{ return NULL; } expr1->v.Name.ctx = Store; break; case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); if (TYPE(ch) == testlist) { expr3 = ast_for_testlist(c, ch); } else { expr3 = ast_for_expr(c, ch); } if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } else { int i, nch_minus_type, has_type_comment; asdl_seq *targets; node *value; expr_ty expression; string type_comment; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); has_type_comment = TYPE(CHILD(n, num - 1)) == TYPE_COMMENT; nch_minus_type = num - has_type_comment; targets = _Py_asdl_seq_new(nch_minus_type / 2, c->c_arena); if (!targets) return NULL; for (i = 0; i < nch_minus_type - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, nch_minus_type - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; if 
(has_type_comment) { type_comment = NEW_TYPE_COMMENT(CHILD(n, nch_minus_type)); if (!type_comment) return NULL; } else type_comment = NULL; return Assign(targets, expression, type_comment, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } } static asdl_seq * ast_for_exprlist(struct compiling *c, const node *n, expr_context_ty context) { asdl_seq *seq; int i; expr_ty e; REQ(n, exprlist); seq = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); if (context && !set_context(c, e, context, CHILD(n, i))) return NULL; } return seq; } static stmt_ty ast_for_del_stmt(struct compiling *c, const node *n) { asdl_seq *expr_list; /* del_stmt: 'del' exprlist */ REQ(n, del_stmt); expr_list = ast_for_exprlist(c, CHILD(n, 1), Del); if (!expr_list) return NULL; return Delete(expr_list, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static stmt_ty ast_for_flow_stmt(struct compiling *c, const node *n) { /* flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt break_stmt: 'break' continue_stmt: 'continue' return_stmt: 'return' [testlist] yield_stmt: yield_expr yield_expr: 'yield' testlist | 'yield' 'from' test raise_stmt: 'raise' [test [',' test [',' test]]] */ node *ch; REQ(n, flow_stmt); ch = CHILD(n, 0); switch (TYPE(ch)) { case break_stmt: return Break(LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case continue_stmt: return Continue(LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case yield_stmt: { /* will reduce to yield_expr */ expr_ty exp = ast_for_expr(c, CHILD(ch, 0)); if (!exp) return NULL; return Expr(exp, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case return_stmt: if (NCH(ch) == 1) return Return(NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, 
n->n_end_col_offset, c->c_arena); else { expr_ty expression = ast_for_testlist(c, CHILD(ch, 1)); if (!expression) return NULL; return Return(expression, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } case raise_stmt: if (NCH(ch) == 1) return Raise(NULL, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); else if (NCH(ch) >= 2) { expr_ty cause = NULL; expr_ty expression = ast_for_expr(c, CHILD(ch, 1)); if (!expression) return NULL; if (NCH(ch) == 4) { cause = ast_for_expr(c, CHILD(ch, 3)); if (!cause) return NULL; } return Raise(expression, cause, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* fall through */ default: PyErr_Format(PyExc_SystemError, "unexpected flow_stmt: %d", TYPE(ch)); return NULL; } } static alias_ty alias_for_import_name(struct compiling *c, const node *n, int store) { /* import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] dotted_name: NAME ('.' 
NAME)* */ identifier str, name; loop: switch (TYPE(n)) { case import_as_name: { node *name_node = CHILD(n, 0); str = NULL; name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (NCH(n) == 3) { node *str_node = CHILD(n, 2); str = NEW_IDENTIFIER(str_node); if (!str) return NULL; if (store && forbidden_name(c, str, str_node, 0)) return NULL; } else { if (forbidden_name(c, name, name_node, 0)) return NULL; } return alias(name, str, c->c_arena); } case dotted_as_name: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { node *asname_node = CHILD(n, 2); alias_ty a = alias_for_import_name(c, CHILD(n, 0), 0); if (!a) return NULL; assert(!a->asname); a->asname = NEW_IDENTIFIER(asname_node); if (!a->asname) return NULL; if (forbidden_name(c, a->asname, asname_node, 0)) return NULL; return a; } break; case dotted_name: if (NCH(n) == 1) { node *name_node = CHILD(n, 0); name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (store && forbidden_name(c, name, name_node, 0)) return NULL; return alias(name, NULL, c->c_arena); } else { /* Create a string of the form "a.b.c" */ int i; size_t len; char *s; PyObject *uni; len = 0; for (i = 0; i < NCH(n); i += 2) /* length of string plus one for the dot */ len += strlen(STR(CHILD(n, i))) + 1; len--; /* the last name doesn't have a dot */ str = PyBytes_FromStringAndSize(NULL, len); if (!str) return NULL; s = PyBytes_AS_STRING(str); if (!s) return NULL; for (i = 0; i < NCH(n); i += 2) { char *sch = STR(CHILD(n, i)); strcpy(s, STR(CHILD(n, i))); s += strlen(sch); *s++ = '.'; } --s; *s = '\0'; uni = PyUnicode_DecodeUTF8(PyBytes_AS_STRING(str), PyBytes_GET_SIZE(str), NULL); Py_DECREF(str); if (!uni) return NULL; str = uni; PyUnicode_InternInPlace(&str); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); } break; case STAR: str = PyUnicode_InternFromString("*"); if (!str) return NULL; if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } 
return alias(str, NULL, c->c_arena); default: PyErr_Format(PyExc_SystemError, "unexpected import name: %d", TYPE(n)); return NULL; } PyErr_SetString(PyExc_SystemError, "unhandled import name condition"); return NULL; } static stmt_ty ast_for_import_stmt(struct compiling *c, const node *n) { /* import_stmt: import_name | import_from import_name: 'import' dotted_as_names import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names) */ int lineno; int col_offset; int i; asdl_seq *aliases; REQ(n, import_stmt); lineno = LINENO(n); col_offset = n->n_col_offset; n = CHILD(n, 0); if (TYPE(n) == import_name) { n = CHILD(n, 1); REQ(n, dotted_as_names); aliases = _Py_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!aliases) return NULL; for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } // Even though n is modified above, the end position is not changed return Import(aliases, lineno, col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (TYPE(n) == import_from) { int n_children; int idx, ndots = 0; const node *n_copy = n; alias_ty mod = NULL; identifier modname = NULL; /* Count the number of dots (for relative imports) and check for the optional module name */ for (idx = 1; idx < NCH(n); idx++) { if (TYPE(CHILD(n, idx)) == dotted_name) { mod = alias_for_import_name(c, CHILD(n, idx), 0); if (!mod) return NULL; idx++; break; } else if (TYPE(CHILD(n, idx)) == ELLIPSIS) { /* three consecutive dots are tokenized as one ELLIPSIS */ ndots += 3; continue; } else if (TYPE(CHILD(n, idx)) != DOT) { break; } ndots++; } idx++; /* skip over the 'import' keyword */ switch (TYPE(CHILD(n, idx))) { case STAR: /* from ... import * */ n = CHILD(n, idx); n_children = 1; break; case LPAR: /* from ... 
import (x, y, z) */ n = CHILD(n, idx + 1); n_children = NCH(n); break; case import_as_names: /* from ... import x, y, z */ n = CHILD(n, idx); n_children = NCH(n); if (n_children % 2 == 0) { ast_error(c, n, "trailing comma not allowed without" " surrounding parentheses"); return NULL; } break; default: ast_error(c, n, "Unexpected node-type in from-import"); return NULL; } aliases = _Py_asdl_seq_new((n_children + 1) / 2, c->c_arena); if (!aliases) return NULL; /* handle "from ... import *" special b/c there's no children */ if (TYPE(n) == STAR) { alias_ty import_alias = alias_for_import_name(c, n, 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, 0, import_alias); } else { for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } } if (mod != NULL) modname = mod->name; return ImportFrom(modname, aliases, ndots, lineno, col_offset, n_copy->n_end_lineno, n_copy->n_end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unknown import statement: starts with command '%s'", STR(CHILD(n, 0))); return NULL; } static stmt_ty ast_for_global_stmt(struct compiling *c, const node *n) { /* global_stmt: 'global' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, global_stmt); s = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Global(s, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static stmt_ty ast_for_nonlocal_stmt(struct compiling *c, const node *n) { /* nonlocal_stmt: 'nonlocal' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, nonlocal_stmt); s = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } 
return Nonlocal(s, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } static stmt_ty ast_for_assert_stmt(struct compiling *c, const node *n) { /* assert_stmt: 'assert' test [',' test] */ REQ(n, assert_stmt); if (NCH(n) == 2) { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return Assert(expression, NULL, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } else if (NCH(n) == 4) { expr_ty expr1, expr2; expr1 = ast_for_expr(c, CHILD(n, 1)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 3)); if (!expr2) return NULL; return Assert(expr1, expr2, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "improper number of parts to 'assert' statement: %d", NCH(n)); return NULL; } static asdl_seq * ast_for_suite(struct compiling *c, const node *n) { /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ asdl_seq *seq; stmt_ty s; int i, total, num, end, pos = 0; node *ch; if (TYPE(n) != func_body_suite) { REQ(n, suite); } total = num_stmts(n); seq = _Py_asdl_seq_new(total, c->c_arena); if (!seq) return NULL; if (TYPE(CHILD(n, 0)) == simple_stmt) { n = CHILD(n, 0); /* simple_stmt always ends with a NEWLINE, and may have a trailing SEMI */ end = NCH(n) - 1; if (TYPE(CHILD(n, end - 1)) == SEMI) end--; /* loop by 2 to skip semi-colons */ for (i = 0; i < end; i += 2) { ch = CHILD(n, i); s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } } else { i = 2; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) { i += 2; REQ(CHILD(n, 2), NEWLINE); } for (; i < (NCH(n) - 1); i++) { ch = CHILD(n, i); REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { /* small_stmt or compound_stmt with only one child */ s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } else { int j; ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < NCH(ch); j += 2) { /* statement terminates with 
a semi-colon ';' */
                    if (NCH(CHILD(ch, j)) == 0) {
                        /* An empty small_stmt slot can only be the trailing
                           semicolon; it must be the last child. */
                        assert((j + 1) == NCH(ch));
                        break;
                    }
                    s = ast_for_stmt(c, CHILD(ch, j));
                    if (!s)
                        return NULL;
                    asdl_seq_SET(seq, pos++, s);
                }
            }
        }
    }
    /* num_stmts() predicted the count; every slot must now be filled. */
    assert(pos == seq->size);
    return seq;
}

/* Store the end position (line/column) of the last statement of suite `s`
   into *end_lineno / *end_col_offset, so a compound statement can record
   where its body ends. Leaves the outputs untouched if the suite is empty. */
static void
get_last_end_pos(asdl_seq *s, int *end_lineno, int *end_col_offset)
{
    int tot = asdl_seq_LEN(s);
    // Suite should not be empty, but it is safe to just ignore it
    // if it will ever occur.
    if (!tot) {
        return;
    }
    stmt_ty last = asdl_seq_GET(s, tot - 1);
    *end_lineno = last->end_lineno;
    *end_col_offset = last->end_col_offset;
}

/* Build an If AST node (with nested Ifs for elif clauses) from an if_stmt
   CST node. Returns NULL on error. */
static stmt_ty
ast_for_if_stmt(struct compiling *c, const node *n)
{
    /* if_stmt: 'if' test ':' suite ('elif' test ':' suite)*
       ['else' ':' suite]
    */
    char *s;
    int end_lineno, end_col_offset;

    REQ(n, if_stmt);

    if (NCH(n) == 4) {
        /* Plain 'if' test ':' suite — no elif and no else. */
        expr_ty expression;
        asdl_seq *suite_seq;

        expression = ast_for_expr(c, CHILD(n, 1));
        if (!expression)
            return NULL;
        suite_seq = ast_for_suite(c, CHILD(n, 3));
        if (!suite_seq)
            return NULL;
        get_last_end_pos(suite_seq, &end_lineno, &end_col_offset);

        return If(expression, suite_seq, NULL, LINENO(n), n->n_col_offset,
                  end_lineno, end_col_offset, c->c_arena);
    }

    s = STR(CHILD(n, 4));
    /* s[2], the third character in the string, will be
       's' for el_s_e, or
       'i' for el_i_f
    */
    if (s[2] == 's') {
        /* 'if' ... 'else' with no elif clauses. */
        expr_ty expression;
        asdl_seq *seq1, *seq2;

        expression = ast_for_expr(c, CHILD(n, 1));
        if (!expression)
            return NULL;
        seq1 = ast_for_suite(c, CHILD(n, 3));
        if (!seq1)
            return NULL;
        seq2 = ast_for_suite(c, CHILD(n, 6));
        if (!seq2)
            return NULL;
        get_last_end_pos(seq2, &end_lineno, &end_col_offset);

        return If(expression, seq1, seq2, LINENO(n), n->n_col_offset,
                  end_lineno, end_col_offset, c->c_arena);
    }
    else if (s[2] == 'i') {
        /* At least one elif clause; elifs become nested If nodes built
           from the innermost (last) clause outward. */
        int i, n_elif, has_else = 0;
        expr_ty expression;
        asdl_seq *suite_seq;
        asdl_seq *orelse = NULL;
        n_elif = NCH(n) - 4;
        /* must reference the child n_elif+1 since 'else' token is third,
           not fourth, child from the end.
*/ if (TYPE(CHILD(n, (n_elif + 1))) == NAME && STR(CHILD(n, (n_elif + 1)))[2] == 's') { has_else = 1; n_elif -= 3; } n_elif /= 4; if (has_else) { asdl_seq *suite_seq2; orelse = _Py_asdl_seq_new(1, c->c_arena); if (!orelse) return NULL; expression = ast_for_expr(c, CHILD(n, NCH(n) - 6)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, NCH(n) - 4)); if (!suite_seq) return NULL; suite_seq2 = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!suite_seq2) return NULL; get_last_end_pos(suite_seq2, &end_lineno, &end_col_offset); asdl_seq_SET(orelse, 0, If(expression, suite_seq, suite_seq2, LINENO(CHILD(n, NCH(n) - 6)), CHILD(n, NCH(n) - 6)->n_col_offset, end_lineno, end_col_offset, c->c_arena)); /* the just-created orelse handled the last elif */ n_elif--; } for (i = 0; i < n_elif; i++) { int off = 5 + (n_elif - i - 1) * 4; asdl_seq *newobj = _Py_asdl_seq_new(1, c->c_arena); if (!newobj) return NULL; expression = ast_for_expr(c, CHILD(n, off)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, off + 2)); if (!suite_seq) return NULL; if (orelse != NULL) { get_last_end_pos(orelse, &end_lineno, &end_col_offset); } else { get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); } asdl_seq_SET(newobj, 0, If(expression, suite_seq, orelse, LINENO(CHILD(n, off)), CHILD(n, off)->n_col_offset, end_lineno, end_col_offset, c->c_arena)); orelse = newobj; } expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; get_last_end_pos(orelse, &end_lineno, &end_col_offset); return If(expression, suite_seq, orelse, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unexpected token in 'if' statement: %s", s); return NULL; } static stmt_ty ast_for_while_stmt(struct compiling *c, const node *n) { /* while_stmt: 'while' test ':' suite ['else' ':' suite] */ REQ(n, while_stmt); int end_lineno, end_col_offset; if (NCH(n) == 4) 
{ expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return While(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } else if (NCH(n) == 7) { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; get_last_end_pos(seq2, &end_lineno, &end_col_offset); return While(expression, seq1, seq2, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of tokens for 'while' statement: %d", NCH(n)); return NULL; } static stmt_ty ast_for_for_stmt(struct compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; asdl_seq *_target, *seq = NULL, *suite_seq; expr_ty expression; expr_ty target, first; const node *node_target; int end_lineno, end_col_offset; int has_type_comment; string type_comment; /* for_stmt: 'for' exprlist 'in' testlist ':' [TYPE_COMMENT] suite ['else' ':' suite] */ REQ(n, for_stmt); has_type_comment = TYPE(CHILD(n, 5)) == TYPE_COMMENT; if (NCH(n) == 9 + has_type_comment) { seq = ast_for_suite(c, CHILD(n, 8 + has_type_comment)); if (!seq) return NULL; } node_target = CHILD(n, 1); _target = ast_for_exprlist(c, node_target, Store); if (!_target) return NULL; /* Check the # of children rather than the length of _target, since for x, in ... has 1 element in _target, but still requires a Tuple. 
*/ first = (expr_ty)asdl_seq_GET(_target, 0); if (NCH(node_target) == 1) target = first; else target = Tuple(_target, Store, first->lineno, first->col_offset, node_target->n_end_lineno, node_target->n_end_col_offset, c->c_arena); expression = ast_for_testlist(c, CHILD(n, 3)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 5 + has_type_comment)); if (!suite_seq) return NULL; if (seq != NULL) { get_last_end_pos(seq, &end_lineno, &end_col_offset); } else { get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); } if (has_type_comment) { type_comment = NEW_TYPE_COMMENT(CHILD(n, 5)); if (!type_comment) return NULL; } else type_comment = NULL; if (is_async) return AsyncFor(target, expression, suite_seq, seq, type_comment, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return For(target, expression, suite_seq, seq, type_comment, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static excepthandler_ty ast_for_except_clause(struct compiling *c, const node *exc, node *body) { /* except_clause: 'except' [test ['as' test]] */ int end_lineno, end_col_offset; REQ(exc, except_clause); REQ(body, suite); if (NCH(exc) == 1) { asdl_seq *suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return ExceptHandler(NULL, NULL, suite_seq, LINENO(exc), exc->n_col_offset, end_lineno, end_col_offset, c->c_arena); } else if (NCH(exc) == 2) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return ExceptHandler(expression, NULL, suite_seq, LINENO(exc), exc->n_col_offset, end_lineno, end_col_offset, c->c_arena); } else if (NCH(exc) == 4) { asdl_seq *suite_seq; expr_ty expression; identifier e = NEW_IDENTIFIER(CHILD(exc, 3)); if (!e) return NULL; if 
(forbidden_name(c, e, CHILD(exc, 3), 0)) return NULL; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; get_last_end_pos(suite_seq, &end_lineno, &end_col_offset); return ExceptHandler(expression, e, suite_seq, LINENO(exc), exc->n_col_offset, end_lineno, end_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of children for 'except' clause: %d", NCH(exc)); return NULL; } static stmt_ty ast_for_try_stmt(struct compiling *c, const node *n) { const int nch = NCH(n); int end_lineno, end_col_offset, n_except = (nch - 3)/3; asdl_seq *body, *handlers = NULL, *orelse = NULL, *finally = NULL; excepthandler_ty last_handler; REQ(n, try_stmt); body = ast_for_suite(c, CHILD(n, 2)); if (body == NULL) return NULL; if (TYPE(CHILD(n, nch - 3)) == NAME) { if (strcmp(STR(CHILD(n, nch - 3)), "finally") == 0) { if (nch >= 9 && TYPE(CHILD(n, nch - 6)) == NAME) { /* we can assume it's an "else", because nch >= 9 for try-else-finally and it would otherwise have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 4)); if (orelse == NULL) return NULL; n_except--; } finally = ast_for_suite(c, CHILD(n, nch - 1)); if (finally == NULL) return NULL; n_except--; } else { /* we can assume it's an "else", otherwise it would have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 1)); if (orelse == NULL) return NULL; n_except--; } } else if (TYPE(CHILD(n, nch - 3)) != except_clause) { ast_error(c, n, "malformed 'try' statement"); return NULL; } if (n_except > 0) { int i; /* process except statements to create a try ... 
except */
        handlers = _Py_asdl_seq_new(n_except, c->c_arena);
        if (handlers == NULL)
            return NULL;

        /* Each except clause occupies three CST children:
           except_clause ':' suite. */
        for (i = 0; i < n_except; i++) {
            excepthandler_ty e = ast_for_except_clause(c, CHILD(n, 3 + i * 3),
                                                       CHILD(n, 5 + i * 3));
            if (!e)
                return NULL;
            asdl_seq_SET(handlers, i, e);
        }
    }

    /* Grammar guarantees at least a finally block or one except handler. */
    assert(finally != NULL || asdl_seq_LEN(handlers));
    if (finally != NULL) {
        // finally is always last
        get_last_end_pos(finally, &end_lineno, &end_col_offset);
    } else if (orelse != NULL) {
        // otherwise else is last
        get_last_end_pos(orelse, &end_lineno, &end_col_offset);
    } else {
        // inline the get_last_end_pos logic due to layout mismatch
        last_handler = (excepthandler_ty) asdl_seq_GET(handlers, n_except - 1);
        end_lineno = last_handler->end_lineno;
        end_col_offset = last_handler->end_col_offset;
    }
    return Try(body, handlers, orelse, finally, LINENO(n), n->n_col_offset,
               end_lineno, end_col_offset, c->c_arena);
}

/* with_item: test ['as' expr] */
/* Build a withitem (context expression plus optional 'as' target) from a
   with_item CST node. The target is converted to a Store context. Returns
   NULL on error. */
static withitem_ty
ast_for_with_item(struct compiling *c, const node *n)
{
    expr_ty context_expr, optional_vars = NULL;

    REQ(n, with_item);
    context_expr = ast_for_expr(c, CHILD(n, 0));
    if (!context_expr)
        return NULL;
    if (NCH(n) == 3) {
        /* 'as' target present: children are test 'as' expr. */
        optional_vars = ast_for_expr(c, CHILD(n, 2));

        if (!optional_vars) {
            return NULL;
        }
        if (!set_context(c, optional_vars, Store, n)) {
            return NULL;
        }
    }

    return withitem(context_expr, optional_vars, c->c_arena);
}

/* with_stmt: 'with' with_item (',' with_item)* ':' [TYPE_COMMENT] suite */
/* Build a With or AsyncWith node; for async, n0 is the async_stmt wrapper
   and the real with_stmt is its second child. */
static stmt_ty
ast_for_with_stmt(struct compiling *c, const node *n0, bool is_async)
{
    const node * const n = is_async ?
CHILD(n0, 1) : n0; int i, n_items, nch_minus_type, has_type_comment, end_lineno, end_col_offset; asdl_seq *items, *body; string type_comment; REQ(n, with_stmt); has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT; nch_minus_type = NCH(n) - has_type_comment; n_items = (nch_minus_type - 2) / 2; items = _Py_asdl_seq_new(n_items, c->c_arena); if (!items) return NULL; for (i = 1; i < nch_minus_type - 2; i += 2) { withitem_ty item = ast_for_with_item(c, CHILD(n, i)); if (!item) return NULL; asdl_seq_SET(items, (i - 1) / 2, item); } body = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!body) return NULL; get_last_end_pos(body, &end_lineno, &end_col_offset); if (has_type_comment) { type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2)); if (!type_comment) return NULL; } else type_comment = NULL; if (is_async) return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset, end_lineno, end_col_offset, c->c_arena); else return With(items, body, type_comment, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static stmt_ty ast_for_classdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* classdef: 'class' NAME ['(' arglist ')'] ':' suite */ PyObject *classname; asdl_seq *s; expr_ty call; int end_lineno, end_col_offset; REQ(n, classdef); if (NCH(n) == 4) { /* class NAME ':' suite */ s = ast_for_suite(c, CHILD(n, 3)); if (!s) return NULL; get_last_end_pos(s, &end_lineno, &end_col_offset); classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } if (TYPE(CHILD(n, 3)) == RPAR) { /* class NAME '(' ')' ':' suite */ s = ast_for_suite(c, CHILD(n, 5)); if (!s) return NULL; get_last_end_pos(s, &end_lineno, &end_col_offset); classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, 
CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } /* class NAME '(' arglist ')' ':' suite */ /* build up a fake Call node so we can extract its pieces */ { PyObject *dummy_name; expr_ty dummy; dummy_name = NEW_IDENTIFIER(CHILD(n, 1)); if (!dummy_name) return NULL; dummy = Name(dummy_name, Load, LINENO(n), n->n_col_offset, CHILD(n, 1)->n_end_lineno, CHILD(n, 1)->n_end_col_offset, c->c_arena); call = ast_for_call(c, CHILD(n, 3), dummy, NULL, CHILD(n, 4)); if (!call) return NULL; } s = ast_for_suite(c, CHILD(n, 6)); if (!s) return NULL; get_last_end_pos(s, &end_lineno, &end_col_offset); classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 1), 0)) return NULL; return ClassDef(classname, call->v.Call.args, call->v.Call.keywords, s, decorator_seq, LINENO(n), n->n_col_offset, end_lineno, end_col_offset, c->c_arena); } static stmt_ty ast_for_stmt(struct compiling *c, const node *n) { if (TYPE(n) == stmt) { assert(NCH(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == simple_stmt) { assert(num_stmts(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == small_stmt) { n = CHILD(n, 0); /* small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt */ switch (TYPE(n)) { case expr_stmt: return ast_for_expr_stmt(c, n); case del_stmt: return ast_for_del_stmt(c, n); case pass_stmt: return Pass(LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); case flow_stmt: return ast_for_flow_stmt(c, n); case import_stmt: return ast_for_import_stmt(c, n); case global_stmt: return ast_for_global_stmt(c, n); case nonlocal_stmt: return ast_for_nonlocal_stmt(c, n); case assert_stmt: return ast_for_assert_stmt(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled small_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } else { /* compound_stmt: if_stmt | 
while_stmt | for_stmt | try_stmt | funcdef | classdef | decorated | async_stmt */ node *ch = CHILD(n, 0); REQ(n, compound_stmt); switch (TYPE(ch)) { case if_stmt: return ast_for_if_stmt(c, ch); case while_stmt: return ast_for_while_stmt(c, ch); case for_stmt: return ast_for_for_stmt(c, ch, 0); case try_stmt: return ast_for_try_stmt(c, ch); case with_stmt: return ast_for_with_stmt(c, ch, 0); case funcdef: return ast_for_funcdef(c, ch, NULL); case classdef: return ast_for_classdef(c, ch, NULL); case decorated: return ast_for_decorated(c, ch); case async_stmt: return ast_for_async_stmt(c, ch); default: PyErr_Format(PyExc_SystemError, "unhandled compound_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } } static PyObject * parsenumber_raw(struct compiling *c, const char *s) { const char *end; long x; double dx; Py_complex compl; int imflag; assert(s != NULL); errno = 0; end = s + strlen(s) - 1; imflag = *end == 'j' || *end == 'J'; if (s[0] == '0') { x = (long) PyOS_strtoul(s, (char **)&end, 0); if (x < 0 && errno == 0) { return PyLong_FromString(s, (char **)0, 0); } } else x = PyOS_strtol(s, (char **)&end, 0); if (*end == '\0') { if (errno != 0) return PyLong_FromString(s, (char **)0, 0); return PyLong_FromLong(x); } /* XXX Huge floats may silently fail */ if (imflag) { compl.real = 0.; compl.imag = PyOS_string_to_double(s, (char **)&end, NULL); if (compl.imag == -1.0 && PyErr_Occurred()) return NULL; return PyComplex_FromCComplex(compl); } else { dx = PyOS_string_to_double(s, NULL, NULL); if (dx == -1.0 && PyErr_Occurred()) return NULL; return PyFloat_FromDouble(dx); } } static PyObject * parsenumber(struct compiling *c, const char *s) { char *dup, *end; PyObject *res = NULL; assert(s != NULL); if (strchr(s, '_') == NULL) { return parsenumber_raw(c, s); } /* Create a duplicate without underscores. 
*/
    dup = PyMem_Malloc(strlen(s) + 1);
    if (dup == NULL) {
        return PyErr_NoMemory();
    }
    end = dup;
    /* Copy every character except '_' (PEP 515 digit separators). */
    for (; *s; s++) {
        if (*s != '_') {
            *end++ = *s;
        }
    }
    *end = '\0';
    res = parsenumber_raw(c, dup);
    PyMem_Free(dup);
    return res;
}

/* Decode the maximal run of non-ASCII (high-bit) bytes starting at *sPtr as
   UTF-8, advance *sPtr past it, and return the resulting unicode object
   (NULL on decode error). */
static PyObject *
decode_utf8(struct compiling *c, const char **sPtr, const char *end)
{
    const char *s, *t;
    t = s = *sPtr;
    /* while (s < end && *s != '\\') s++; */ /* inefficient for u".." */
    while (s < end && (*s & 0x80))
        s++;
    *sPtr = s;
    return PyUnicode_DecodeUTF8(t, s - t, NULL);
}

/* Issue a SyntaxWarning for an invalid string escape sequence such as "\d".
   If the warning is turned into an error (e.g. -W error), replace it with a
   SyntaxError carrying accurate location info. Returns 0 on success, -1 if
   an error was set. */
static int
warn_invalid_escape_sequence(struct compiling *c, const node *n,
                             unsigned char first_invalid_escape_char)
{
    PyObject *msg = PyUnicode_FromFormat("invalid escape sequence \\%c",
                                         first_invalid_escape_char);
    if (msg == NULL) {
        return -1;
    }
    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg,
                                 c->c_filename, LINENO(n),
                                 NULL, NULL) < 0)
    {
        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
            /* Replace the SyntaxWarning exception with a SyntaxError
               to get a more accurate error report */
            PyErr_Clear();
            ast_error(c, n, "%U", msg);
        }
        Py_DECREF(msg);
        return -1;
    }
    Py_DECREF(msg);
    return 0;
}

/* Decode a unicode string literal body of length `len`, processing backslash
   escapes. Non-ASCII bytes are first re-encoded as \UXXXXXXXX escapes into a
   temporary ASCII buffer so the unicode-escape codec can handle them. */
static PyObject *
decode_unicode_with_escapes(struct compiling *c, const node *n, const char *s,
                            size_t len)
{
    PyObject *v, *u;
    char *buf;
    char *p;
    const char *end;

    /* check for integer overflow */
    if (len > SIZE_MAX / 6)
        return NULL;
    /* "ä" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5
       "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), or ~1:6 */
    u = PyBytes_FromStringAndSize((char *)NULL, len * 6);
    if (u == NULL)
        return NULL;
    p = buf = PyBytes_AsString(u);
    end = s + len;
    while (s < end) {
        if (*s == '\\') {
            *p++ = *s++;
            if (s >= end || *s & 0x80) {
                /* Keep a literal backslash (as \u005c) so a following
                   non-ASCII char is not misread as part of an escape. */
                strcpy(p, "u005c");
                p += 5;
                if (s >= end)
                    break;
            }
        }
        if (*s & 0x80) { /* XXX inefficient */
            PyObject *w;
            int kind;
            void *data;
            Py_ssize_t len, i;
            w = decode_utf8(c, &s, end);
            if (w == NULL) {
                Py_DECREF(u);
                return NULL;
            }
            kind = PyUnicode_KIND(w);
            data = PyUnicode_DATA(w);
            len = PyUnicode_GET_LENGTH(w);
            for (i = 0; i < len;
i++) { Py_UCS4 chr = PyUnicode_READ(kind, data, i); sprintf(p, "\\U%08x", chr); p += 10; } /* Should be impossible to overflow */ assert(p - buf <= PyBytes_GET_SIZE(u)); Py_DECREF(w); } else { *p++ = *s++; } } len = p - buf; s = buf; const char *first_invalid_escape; v = _PyUnicode_DecodeUnicodeEscape(s, len, NULL, &first_invalid_escape); if (v != NULL && first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { /* We have not decref u before because first_invalid_escape points inside u. */ Py_XDECREF(u); Py_DECREF(v); return NULL; } } Py_XDECREF(u); return v; } static PyObject * decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { const char *first_invalid_escape; PyObject *result = _PyBytes_DecodeEscape(s, len, NULL, 0, NULL, &first_invalid_escape); if (result == NULL) return NULL; if (first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { Py_DECREF(result); return NULL; } } return result; } /* Shift locations for the given node and all its children by adding `lineno` and `col_offset` to existing locations. */ static void fstring_shift_node_locations(node *n, int lineno, int col_offset) { n->n_col_offset = n->n_col_offset + col_offset; n->n_end_col_offset = n->n_end_col_offset + col_offset; for (int i = 0; i < NCH(n); ++i) { if (n->n_lineno && n->n_lineno < CHILD(n, i)->n_lineno) { /* Shifting column offsets unnecessary if there's been newlines. */ col_offset = 0; } fstring_shift_node_locations(CHILD(n, i), lineno, col_offset); } n->n_lineno = n->n_lineno + lineno; n->n_end_lineno = n->n_end_lineno + lineno; } /* Fix locations for the given node and its children. `parent` is the enclosing node. `n` is the node which locations are going to be fixed relative to parent. `expr_str` is the child node's string representation, including braces. 
*/ static void fstring_fix_node_location(const node *parent, node *n, char *expr_str) { char *substr = NULL; char *start; int lines = LINENO(parent) - 1; int cols = parent->n_col_offset; /* Find the full fstring to fix location information in `n`. */ while (parent && parent->n_type != STRING) parent = parent->n_child; if (parent && parent->n_str) { substr = strstr(parent->n_str, expr_str); if (substr) { start = substr; while (start > parent->n_str) { if (start[0] == '\n') break; start--; } cols += (int)(substr - start); /* adjust the start based on the number of newlines encountered before the f-string expression */ for (char* p = parent->n_str; p < substr; p++) { if (*p == '\n') { lines++; } } } } fstring_shift_node_locations(n, lines, cols); } /* Compile this expression in to an expr_ty. Add parens around the expression, in order to allow leading spaces in the expression. */ static expr_ty fstring_compile_expr(const char *expr_start, const char *expr_end, struct compiling *c, const node *n) { PyCompilerFlags cf; node *mod_n; mod_ty mod; char *str; Py_ssize_t len; const char *s; assert(expr_end >= expr_start); assert(*(expr_start-1) == '{'); assert(*expr_end == '}' || *expr_end == '!' || *expr_end == ':'); /* If the substring is all whitespace, it's an error. We need to catch this here, and not when we call PyParser_SimpleParseStringFlagsFilename, because turning the expression '' in to '()' would go from being invalid to valid. */ for (s = expr_start; s != expr_end; s++) { char c = *s; /* The Python parser ignores only the following whitespace characters (\r already is converted to \n). */ if (!(c == ' ' || c == '\t' || c == '\n' || c == '\f')) { break; } } if (s == expr_end) { ast_error(c, n, "f-string: empty expression not allowed"); return NULL; } len = expr_end - expr_start; /* Allocate 3 extra bytes: open paren, close paren, null byte. 
*/ str = PyMem_RawMalloc(len + 3); if (str == NULL) { PyErr_NoMemory(); return NULL; } str[0] = '('; memcpy(str+1, expr_start, len); str[len+1] = ')'; str[len+2] = 0; cf.cf_flags = PyCF_ONLY_AST; mod_n = PyParser_SimpleParseStringFlagsFilename(str, "<fstring>", Py_eval_input, 0); if (!mod_n) { PyMem_RawFree(str); return NULL; } /* Reuse str to find the correct column offset. */ str[0] = '{'; str[len+1] = '}'; fstring_fix_node_location(n, mod_n, str); mod = PyAST_FromNode(mod_n, &cf, "<fstring>", c->c_arena); PyMem_RawFree(str); PyNode_Free(mod_n); if (!mod) return NULL; return mod->v.Expression.body; } /* Return -1 on error. Return 0 if we reached the end of the literal. Return 1 if we haven't reached the end of the literal, but we want the caller to process the literal up to this point. Used for doubled braces. */ static int fstring_find_literal(const char **str, const char *end, int raw, PyObject **literal, int recurse_lvl, struct compiling *c, const node *n) { /* Get any literal string. It ends when we hit an un-doubled left brace (which isn't part of a unicode name escape such as "\N{EULER CONSTANT}"), or the end of the string. */ const char *s = *str; const char *literal_start = s; int result = 0; assert(*literal == NULL); while (s < end) { char ch = *s++; if (!raw && ch == '\\' && s < end) { ch = *s++; if (ch == 'N') { if (s < end && *s++ == '{') { while (s < end && *s++ != '}') { } continue; } break; } if (ch == '{' && warn_invalid_escape_sequence(c, n, ch) < 0) { return -1; } } if (ch == '{' || ch == '}') { /* Check for doubled braces, but only at the top level. If we checked at every level, then f'{0:{3}}' would fail with the two closing braces. */ if (recurse_lvl == 0) { if (s < end && *s == ch) { /* We're going to tell the caller that the literal ends here, but that they should continue scanning. But also skip over the second brace when we resume scanning. 
*/ *str = s + 1; result = 1; goto done; } /* Where a single '{' is the start of a new expression, a single '}' is not allowed. */ if (ch == '}') { *str = s - 1; ast_error(c, n, "f-string: single '}' is not allowed"); return -1; } } /* We're either at a '{', which means we're starting another expression; or a '}', which means we're at the end of this f-string (for a nested format_spec). */ s--; break; } } *str = s; assert(s <= end); assert(s == end || *s == '{' || *s == '}'); done: if (literal_start != s) { if (raw) *literal = PyUnicode_DecodeUTF8Stateful(literal_start, s - literal_start, NULL, NULL); else *literal = decode_unicode_with_escapes(c, n, literal_start, s - literal_start); if (!*literal) return -1; } return result; } /* Forward declaration because parsing is recursive. */ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n); /* Parse the f-string at *str, ending at end. We know *str starts an expression (so it must be a '{'). Returns the FormattedValue node, which includes the expression, conversion character, and format_spec expression. Note that I don't do a perfect job here: I don't make sure that a closing brace doesn't match an opening paren, for example. It doesn't need to error on all invalid expressions, just correctly find the end of all valid ones. Any errors inside the expression will be caught when we parse it later. */ static int fstring_find_expr(const char **str, const char *end, int raw, int recurse_lvl, expr_ty *expression, struct compiling *c, const node *n) { /* Return -1 on error, else 0. */ const char *expr_start; const char *expr_end; expr_ty simple_expression; expr_ty format_spec = NULL; /* Optional format specifier. */ int conversion = -1; /* The conversion char. -1 if not specified. */ /* 0 if we're not in a string, else the quote char we're trying to match (single or double quote). 
*/ char quote_char = 0; /* If we're inside a string, 1=normal, 3=triple-quoted. */ int string_type = 0; /* Keep track of nesting level for braces/parens/brackets in expressions. */ Py_ssize_t nested_depth = 0; char parenstack[MAXLEVEL]; /* Can only nest one level deep. */ if (recurse_lvl >= 2) { ast_error(c, n, "f-string: expressions nested too deeply"); return -1; } /* The first char must be a left brace, or we wouldn't have gotten here. Skip over it. */ assert(**str == '{'); *str += 1; expr_start = *str; for (; *str < end; (*str)++) { char ch; /* Loop invariants. */ assert(nested_depth >= 0); assert(*str >= expr_start && *str < end); if (quote_char) assert(string_type == 1 || string_type == 3); else assert(string_type == 0); ch = **str; /* Nowhere inside an expression is a backslash allowed. */ if (ch == '\\') { /* Error: can't include a backslash character, inside parens or strings or not. */ ast_error(c, n, "f-string expression part " "cannot include a backslash"); return -1; } if (quote_char) { /* We're inside a string. See if we're at the end. */ /* This code needs to implement the same non-error logic as tok_get from tokenizer.c, at the letter_quote label. To actually share that code would be a nightmare. But, it's unlikely to change and is small, so duplicate it here. Note we don't need to catch all of the errors, since they'll be caught when parsing the expression. We just need to match the non-error cases. Thus we can ignore \n in single-quoted strings, for example. Or non-terminated strings. */ if (ch == quote_char) { /* Does this match the string_type (single or triple quoted)? */ if (string_type == 3) { if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { /* We're at the end of a triple quoted string. */ *str += 2; string_type = 0; quote_char = 0; continue; } } else { /* We're at the end of a normal string. */ quote_char = 0; string_type = 0; continue; } } } else if (ch == '\'' || ch == '"') { /* Is this a triple quoted string? 
*/ if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { string_type = 3; *str += 2; } else { /* Start of a normal string. */ string_type = 1; } /* Start looking for the end of the string. */ quote_char = ch; } else if (ch == '[' || ch == '{' || ch == '(') { if (nested_depth >= MAXLEVEL) { ast_error(c, n, "f-string: too many nested parenthesis"); return -1; } parenstack[nested_depth] = ch; nested_depth++; } else if (ch == '#') { /* Error: can't include a comment character, inside parens or not. */ ast_error(c, n, "f-string expression part cannot include '#'"); return -1; } else if (nested_depth == 0 && (ch == '!' || ch == ':' || ch == '}')) { /* First, test for the special case of "!=". Since '=' is not an allowed conversion character, nothing is lost in this test. */ if (ch == '!' && *str+1 < end && *(*str+1) == '=') { /* This isn't a conversion character, just continue. */ continue; } /* Normal way out of this loop. */ break; } else if (ch == ']' || ch == '}' || ch == ')') { if (!nested_depth) { ast_error(c, n, "f-string: unmatched '%c'", ch); return -1; } nested_depth--; int opening = parenstack[nested_depth]; if (!((opening == '(' && ch == ')') || (opening == '[' && ch == ']') || (opening == '{' && ch == '}'))) { ast_error(c, n, "f-string: closing parenthesis '%c' " "does not match opening parenthesis '%c'", ch, opening); return -1; } } else { /* Just consume this char and loop around. */ } } expr_end = *str; /* If we leave this loop in a string or with mismatched parens, we don't care. We'll get a syntax error when compiling the expression. 
But, we can produce a better error message, so let's just do that.*/ if (quote_char) { ast_error(c, n, "f-string: unterminated string"); return -1; } if (nested_depth) { int opening = parenstack[nested_depth - 1]; ast_error(c, n, "f-string: unmatched '%c'", opening); return -1; } if (*str >= end) goto unexpected_end_of_string; /* Compile the expression as soon as possible, so we show errors related to the expression before errors related to the conversion or format_spec. */ simple_expression = fstring_compile_expr(expr_start, expr_end, c, n); if (!simple_expression) return -1; /* Check for a conversion char, if present. */ if (**str == '!') { *str += 1; if (*str >= end) goto unexpected_end_of_string; conversion = **str; *str += 1; /* Validate the conversion. */ if (!(conversion == 's' || conversion == 'r' || conversion == 'a')) { ast_error(c, n, "f-string: invalid conversion character: " "expected 's', 'r', or 'a'"); return -1; } } /* Check for the format spec, if present. */ if (*str >= end) goto unexpected_end_of_string; if (**str == ':') { *str += 1; if (*str >= end) goto unexpected_end_of_string; /* Parse the format spec. */ format_spec = fstring_parse(str, end, raw, recurse_lvl+1, c, n); if (!format_spec) return -1; } if (*str >= end || **str != '}') goto unexpected_end_of_string; /* We're at a right brace. Consume it. */ assert(*str < end); assert(**str == '}'); *str += 1; /* And now create the FormattedValue node that represents this entire expression with the conversion and format spec. */ *expression = FormattedValue(simple_expression, conversion, format_spec, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); if (!*expression) return -1; return 0; unexpected_end_of_string: ast_error(c, n, "f-string: expecting '}'"); return -1; } /* Return -1 on error. Return 0 if we have a literal (possible zero length) and an expression (zero length if at the end of the string. 
Return 1 if we have a literal, but no expression, and we want the caller to call us again. This is used to deal with doubled braces. When called multiple times on the string 'a{{b{0}c', this function will return: 1. the literal 'a{' with no expression, and a return value of 1. Despite the fact that there's no expression, the return value of 1 means we're not finished yet. 2. the literal 'b' and the expression '0', with a return value of 0. The fact that there's an expression means we're not finished. 3. literal 'c' with no expression and a return value of 0. The combination of the return value of 0 with no expression means we're finished. */ static int fstring_find_literal_and_expr(const char **str, const char *end, int raw, int recurse_lvl, PyObject **literal, expr_ty *expression, struct compiling *c, const node *n) { int result; assert(*literal == NULL && *expression == NULL); /* Get any literal string. */ result = fstring_find_literal(str, end, raw, literal, recurse_lvl, c, n); if (result < 0) goto error; assert(result == 0 || result == 1); if (result == 1) /* We have a literal, but don't look at the expression. */ return 1; if (*str >= end || **str == '}') /* We're at the end of the string or the end of a nested f-string: no expression. The top-level error case where we expect to be at the end of the string but we're at a '}' is handled later. */ return 0; /* We must now be the start of an expression, on a '{'. */ assert(**str == '{'); if (fstring_find_expr(str, end, raw, recurse_lvl, expression, c, n) < 0) goto error; return 0; error: Py_CLEAR(*literal); return -1; } #define EXPRLIST_N_CACHED 64 typedef struct { /* Incrementally build an array of expr_ty, so be used in an asdl_seq. Cache some small but reasonably sized number of expr_ty's, and then after that start dynamically allocating, doubling the number allocated each time. Note that the f-string f'{0}a{1}' contains 3 expr_ty's: 2 FormattedValue's, and one Constant for the literal 'a'. 
So you add expr_ty's about twice as fast as you add exressions in an f-string. */ Py_ssize_t allocated; /* Number we've allocated. */ Py_ssize_t size; /* Number we've used. */ expr_ty *p; /* Pointer to the memory we're actually using. Will point to 'data' until we start dynamically allocating. */ expr_ty data[EXPRLIST_N_CACHED]; } ExprList; #ifdef NDEBUG #define ExprList_check_invariants(l) #else static void ExprList_check_invariants(ExprList *l) { /* Check our invariants. Make sure this object is "live", and hasn't been deallocated. */ assert(l->size >= 0); assert(l->p != NULL); if (l->size <= EXPRLIST_N_CACHED) assert(l->data == l->p); } #endif static void ExprList_Init(ExprList *l) { l->allocated = EXPRLIST_N_CACHED; l->size = 0; /* Until we start allocating dynamically, p points to data. */ l->p = l->data; ExprList_check_invariants(l); } static int ExprList_Append(ExprList *l, expr_ty exp) { ExprList_check_invariants(l); if (l->size >= l->allocated) { /* We need to alloc (or realloc) the memory. */ Py_ssize_t new_size = l->allocated * 2; /* See if we've ever allocated anything dynamically. */ if (l->p == l->data) { Py_ssize_t i; /* We're still using the cached data. Switch to alloc-ing. */ l->p = PyMem_RawMalloc(sizeof(expr_ty) * new_size); if (!l->p) return -1; /* Copy the cached data into the new buffer. */ for (i = 0; i < l->size; i++) l->p[i] = l->data[i]; } else { /* Just realloc. */ expr_ty *tmp = PyMem_RawRealloc(l->p, sizeof(expr_ty) * new_size); if (!tmp) { PyMem_RawFree(l->p); l->p = NULL; return -1; } l->p = tmp; } l->allocated = new_size; assert(l->allocated == 2 * l->size); } l->p[l->size++] = exp; ExprList_check_invariants(l); return 0; } static void ExprList_Dealloc(ExprList *l) { ExprList_check_invariants(l); /* If there's been an error, or we've never dynamically allocated, do nothing. */ if (!l->p || l->p == l->data) { /* Do nothing. */ } else { /* We have dynamically allocated. Free the memory. 
*/ PyMem_RawFree(l->p); } l->p = NULL; l->size = -1; } static asdl_seq * ExprList_Finish(ExprList *l, PyArena *arena) { asdl_seq *seq; ExprList_check_invariants(l); /* Allocate the asdl_seq and copy the expressions in to it. */ seq = _Py_asdl_seq_new(l->size, arena); if (seq) { Py_ssize_t i; for (i = 0; i < l->size; i++) asdl_seq_SET(seq, i, l->p[i]); } ExprList_Dealloc(l); return seq; } /* The FstringParser is designed to add a mix of strings and f-strings, and concat them together as needed. Ultimately, it generates an expr_ty. */ typedef struct { PyObject *last_str; ExprList expr_list; int fmode; } FstringParser; #ifdef NDEBUG #define FstringParser_check_invariants(state) #else static void FstringParser_check_invariants(FstringParser *state) { if (state->last_str) assert(PyUnicode_CheckExact(state->last_str)); ExprList_check_invariants(&state->expr_list); } #endif static void FstringParser_Init(FstringParser *state) { state->last_str = NULL; state->fmode = 0; ExprList_Init(&state->expr_list); FstringParser_check_invariants(state); } static void FstringParser_Dealloc(FstringParser *state) { FstringParser_check_invariants(state); Py_XDECREF(state->last_str); ExprList_Dealloc(&state->expr_list); } /* Make a Constant node, but decref the PyUnicode object being added. */ static expr_ty make_str_node_and_del(PyObject **str, struct compiling *c, const node* n) { PyObject *s = *str; *str = NULL; assert(PyUnicode_CheckExact(s)); if (PyArena_AddPyObject(c->c_arena, s) < 0) { Py_DECREF(s); return NULL; } return Constant(s, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); } /* Add a non-f-string (that is, a regular literal string). str is decref'd. 
*/ static int FstringParser_ConcatAndDel(FstringParser *state, PyObject *str) { FstringParser_check_invariants(state); assert(PyUnicode_CheckExact(str)); if (PyUnicode_GET_LENGTH(str) == 0) { Py_DECREF(str); return 0; } if (!state->last_str) { /* We didn't have a string before, so just remember this one. */ state->last_str = str; } else { /* Concatenate this with the previous string. */ PyUnicode_AppendAndDel(&state->last_str, str); if (!state->last_str) return -1; } FstringParser_check_invariants(state); return 0; } /* Parse an f-string. The f-string is in *str to end, with no 'f' or quotes. */ static int FstringParser_ConcatFstring(FstringParser *state, const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n) { FstringParser_check_invariants(state); state->fmode = 1; /* Parse the f-string. */ while (1) { PyObject *literal = NULL; expr_ty expression = NULL; /* If there's a zero length literal in front of the expression, literal will be NULL. If we're at the end of the f-string, expression will be NULL (unless result == 1, see below). */ int result = fstring_find_literal_and_expr(str, end, raw, recurse_lvl, &literal, &expression, c, n); if (result < 0) return -1; /* Add the literal, if any. */ if (!literal) { /* Do nothing. Just leave last_str alone (and possibly NULL). */ } else if (!state->last_str) { /* Note that the literal can be zero length, if the input string is "\\\n" or "\\\r", among others. */ state->last_str = literal; literal = NULL; } else { /* We have a literal, concatenate it. */ assert(PyUnicode_GET_LENGTH(literal) != 0); if (FstringParser_ConcatAndDel(state, literal) < 0) return -1; literal = NULL; } /* We've dealt with the literal now. It can't be leaked on further errors. */ assert(literal == NULL); /* See if we should just loop around to get the next literal and expression, while ignoring the expression this time. This is used for un-doubling braces, as an optimization. 
*/ if (result == 1) continue; if (!expression) /* We're done with this f-string. */ break; /* We know we have an expression. Convert any existing string to a Constant node. */ if (!state->last_str) { /* Do nothing. No previous literal. */ } else { /* Convert the existing last_str literal to a Constant node. */ expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) return -1; } if (ExprList_Append(&state->expr_list, expression) < 0) return -1; } /* If recurse_lvl is zero, then we must be at the end of the string. Otherwise, we must be at a right brace. */ if (recurse_lvl == 0 && *str < end-1) { ast_error(c, n, "f-string: unexpected end of string"); return -1; } if (recurse_lvl != 0 && **str != '}') { ast_error(c, n, "f-string: expecting '}'"); return -1; } FstringParser_check_invariants(state); return 0; } /* Convert the partial state reflected in last_str and expr_list to an expr_ty. The expr_ty can be a Constant, or a JoinedStr. */ static expr_ty FstringParser_Finish(FstringParser *state, struct compiling *c, const node *n) { asdl_seq *seq; FstringParser_check_invariants(state); /* If we're just a constant string with no expressions, return that. */ if (!state->fmode) { assert(!state->expr_list.size); if (!state->last_str) { /* Create a zero length string. */ state->last_str = PyUnicode_FromStringAndSize(NULL, 0); if (!state->last_str) goto error; } return make_str_node_and_del(&state->last_str, c, n); } /* Create a Constant node out of last_str, if needed. It will be the last node in our expression list. */ if (state->last_str) { expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) goto error; } /* This has already been freed. 
*/ assert(state->last_str == NULL); seq = ExprList_Finish(&state->expr_list, c->c_arena); if (!seq) goto error; return JoinedStr(seq, LINENO(n), n->n_col_offset, n->n_end_lineno, n->n_end_col_offset, c->c_arena); error: FstringParser_Dealloc(state); return NULL; } /* Given an f-string (with no 'f' or quotes) that's in *str and ends at end, parse it into an expr_ty. Return NULL on error. Adjust str to point past the parsed portion. */ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n) { FstringParser state; FstringParser_Init(&state); if (FstringParser_ConcatFstring(&state, str, end, raw, recurse_lvl, c, n) < 0) { FstringParser_Dealloc(&state); return NULL; } return FstringParser_Finish(&state, c, n); } /* n is a Python string literal, including the bracketing quote characters, and r, b, u, &/or f prefixes (if any), and embedded escape sequences (if any). parsestr parses it, and sets *result to decoded Python string object. If the string is an f-string, set *fstr and *fstrlen to the unparsed string object. Return 0 if no errors occurred. */ static int parsestr(struct compiling *c, const node *n, int *bytesmode, int *rawmode, PyObject **result, const char **fstr, Py_ssize_t *fstrlen) { size_t len; const char *s = STR(n); int quote = Py_CHARMASK(*s); int fmode = 0; *bytesmode = 0; *rawmode = 0; *result = NULL; *fstr = NULL; if (Py_ISALPHA(quote)) { while (!*bytesmode || !*rawmode) { if (quote == 'b' || quote == 'B') { quote = *++s; *bytesmode = 1; } else if (quote == 'u' || quote == 'U') { quote = *++s; } else if (quote == 'r' || quote == 'R') { quote = *++s; *rawmode = 1; } else if (quote == 'f' || quote == 'F') { quote = *++s; fmode = 1; } else { break; } } } if (fmode && *bytesmode) { PyErr_BadInternalCall(); return -1; } if (quote != '\'' && quote != '\"') { PyErr_BadInternalCall(); return -1; } /* Skip the leading quote char. 
*/ s++; len = strlen(s); if (len > INT_MAX) { PyErr_SetString(PyExc_OverflowError, "string to parse is too long"); return -1; } if (s[--len] != quote) { /* Last quote char must match the first. */ PyErr_BadInternalCall(); return -1; } if (len >= 4 && s[0] == quote && s[1] == quote) { /* A triple quoted string. We've already skipped one quote at the start and one at the end of the string. Now skip the two at the start. */ s += 2; len -= 2; /* And check that the last two match. */ if (s[--len] != quote || s[--len] != quote) { PyErr_BadInternalCall(); return -1; } } if (fmode) { /* Just return the bytes. The caller will parse the resulting string. */ *fstr = s; *fstrlen = len; return 0; } /* Not an f-string. */ /* Avoid invoking escape decoding routines if possible. */ *rawmode = *rawmode || strchr(s, '\\') == NULL; if (*bytesmode) { /* Disallow non-ASCII characters. */ const char *ch; for (ch = s; *ch; ch++) { if (Py_CHARMASK(*ch) >= 0x80) { ast_error(c, n, "bytes can only contain ASCII " "literal characters."); return -1; } } if (*rawmode) *result = PyBytes_FromStringAndSize(s, len); else *result = decode_bytes_with_escapes(c, n, s, len); } else { if (*rawmode) *result = PyUnicode_DecodeUTF8Stateful(s, len, NULL, NULL); else *result = decode_unicode_with_escapes(c, n, s, len); } return *result == NULL ? -1 : 0; } /* Accepts a STRING+ atom, and produces an expr_ty node. Run through each STRING atom, and process it as needed. For bytes, just concatenate them together, and the result will be a Constant node. For normal strings and f-strings, concatenate them together. The result will be a Constant node if there were no f-strings; a FormattedValue node if there's just an f-string (with no leading or trailing literals), or a JoinedStr node if there are multiple f-strings or any literals involved. 
*/
static expr_ty
parsestrplus(struct compiling *c, const node *n)
{
    /* Fold all STRING children of n into a single expr_ty.
       - all-bytes parts: concatenated into one bytes Constant;
       - plain strings / f-strings: accumulated in an FstringParser and
         finished into a Constant or JoinedStr by FstringParser_Finish.
       Mixing bytes and non-bytes parts is an error. */
    int bytesmode = 0;
    PyObject *bytes_str = NULL;   /* running bytes concatenation (bytes mode only) */
    int i;

    FstringParser state;
    FstringParser_Init(&state);

    for (i = 0; i < NCH(n); i++) {
        int this_bytesmode;
        int this_rawmode;
        PyObject *s;
        const char *fstr;
        Py_ssize_t fstrlen = -1;  /* Silence a compiler warning. */

        REQ(CHILD(n, i), STRING);
        /* parsestr sets either s (plain/bytes literal, new reference) or
           fstr/fstrlen (unparsed f-string body), never both. */
        if (parsestr(c, CHILD(n, i), &this_bytesmode, &this_rawmode, &s,
                     &fstr, &fstrlen) != 0)
            goto error;

        /* Check that we're not mixing bytes with unicode. */
        if (i != 0 && bytesmode != this_bytesmode) {
            ast_error(c, n, "cannot mix bytes and nonbytes literals");
            /* s is NULL if the current string part is an f-string. */
            Py_XDECREF(s);
            goto error;
        }
        bytesmode = this_bytesmode;

        if (fstr != NULL) {
            int result;
            assert(s == NULL && !bytesmode);
            /* This is an f-string. Parse and concatenate it. */
            result = FstringParser_ConcatFstring(&state, &fstr, fstr+fstrlen,
                                                 this_rawmode, 0, c, n);
            if (result < 0)
                goto error;
        }
        else {
            /* A string or byte string. */
            assert(s != NULL && fstr == NULL);

            assert(bytesmode ? PyBytes_CheckExact(s) :
                   PyUnicode_CheckExact(s));

            if (bytesmode) {
                /* For bytes, concat as we go. */
                if (i == 0) {
                    /* First time, just remember this value. */
                    bytes_str = s;
                }
                else {
                    /* Steals the reference to s even on failure. */
                    PyBytes_ConcatAndDel(&bytes_str, s);
                    if (!bytes_str)
                        goto error;
                }
            }
            else {
                /* This is a regular string. Concatenate it. */
                if (FstringParser_ConcatAndDel(&state, s) < 0)
                    goto error;
            }
        }
    }
    if (bytesmode) {
        /* Just return the bytes object and we're done. */
        if (PyArena_AddPyObject(c->c_arena, bytes_str) < 0)
            goto error;
        return Constant(bytes_str, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    }

    /* We're not a bytes string, bytes_str should never have been set. */
    assert(bytes_str == NULL);
    return FstringParser_Finish(&state, c, n);

  error:
    Py_XDECREF(bytes_str);
    FstringParser_Dealloc(&state);
    return NULL;
}

/* Return the docstring of a suite body, or NULL if there is none.
   The docstring is the value of the first statement when that statement
   is an expression statement whose value is a unicode Constant.
   Returns a borrowed reference (the Constant's stored value). */
PyObject *
_PyAST_GetDocString(asdl_seq *body)
{
    if (!asdl_seq_LEN(body)) {
        return NULL;
    }
    stmt_ty st = (stmt_ty)asdl_seq_GET(body, 0);
    if (st->kind != Expr_kind) {
        return NULL;
    }
    expr_ty e = st->v.Expr.value;
    if (e->kind == Constant_kind && PyUnicode_CheckExact(e->v.Constant.value)) {
        return e->v.Constant.value;
    }
    return NULL;
}
ast_for_atom(struct compiling *c, const node *n)
{
    /* Build the AST expression for an 'atom' grammar node.

       atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']'
       | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+
       | '...' | 'None' | 'True' | 'False'

       Returns a new expr_ty allocated in c->c_arena, or NULL with an
       error set. */
    node *ch = CHILD(n, 0);

    switch (TYPE(ch)) {
    case NAME: {
        PyObject *name;
        const char *s = STR(ch);
        size_t len = strlen(s);
        /* Fast-path the singleton keywords (both are 4-5 chars long). */
        if (len >= 4 && len <= 5) {
            if (!strcmp(s, "None"))
                return Constant(Py_None, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
            if (!strcmp(s, "True"))
                return Constant(Py_True, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
            if (!strcmp(s, "False"))
                return Constant(Py_False, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
        }
        name = new_identifier(s, c);
        if (!name)
            return NULL;
        /* All names start in Load context, but may later be changed. */
        return Name(name, Load, LINENO(n), n->n_col_offset,
                    n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    }
    case STRING: {
        expr_ty str = parsestrplus(c, n);
        if (!str) {
            /* Re-raise decode failures as syntax errors with location info. */
            const char *errtype = NULL;
            if (PyErr_ExceptionMatches(PyExc_UnicodeError))
                errtype = "unicode error";
            else if (PyErr_ExceptionMatches(PyExc_ValueError))
                errtype = "value error";
            if (errtype) {
                PyObject *type, *value, *tback, *errstr;
                PyErr_Fetch(&type, &value, &tback);
                errstr = PyObject_Str(value);
                if (errstr) {
                    ast_error(c, n, "(%s) %U", errtype, errstr);
                    Py_DECREF(errstr);
                }
                else {
                    /* Failed to stringify the error; report it generically. */
                    PyErr_Clear();
                    ast_error(c, n, "(%s) unknown error", errtype);
                }
                Py_DECREF(type);
                Py_XDECREF(value);
                Py_XDECREF(tback);
            }
            return NULL;
        }
        return str;
    }
    case NUMBER: {
        PyObject *pynum = parsenumber(c, STR(ch));
        if (!pynum)
            return NULL;

        if (PyArena_AddPyObject(c->c_arena, pynum) < 0) {
            Py_DECREF(pynum);
            return NULL;
        }
        return Constant(pynum, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    }
    case ELLIPSIS: /* Ellipsis */
        return Constant(Py_Ellipsis, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    case LPAR: /* some parenthesized expressions */
        ch = CHILD(n, 1);

        if (TYPE(ch) == RPAR)
            /* '()' is an empty tuple. */
            return Tuple(NULL, Load, LINENO(n), n->n_col_offset,
                         n->n_end_lineno, n->n_end_col_offset, c->c_arena);

        if (TYPE(ch) == yield_expr)
            return ast_for_expr(c, ch);

        /* testlist_comp: test ( comp_for | (',' test)* [','] ) */
        if (NCH(ch) == 1) {
            return ast_for_testlist(c, ch);
        }

        if (TYPE(CHILD(ch, 1)) == comp_for) {
            /* copy_location presumably stamps n's position onto the
               result node -- TODO confirm against its definition. */
            return copy_location(ast_for_genexp(c, ch), n);
        }
        else {
            return copy_location(ast_for_testlist(c, ch), n);
        }
    case LSQB: /* list (or list comprehension) */
        ch = CHILD(n, 1);

        if (TYPE(ch) == RSQB)
            /* '[]' is an empty list. */
            return List(NULL, Load, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);

        REQ(ch, testlist_comp);
        if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) {
            /* Plain list display (possibly with trailing comma). */
            asdl_seq *elts = seq_for_testlist(c, ch);
            if (!elts)
                return NULL;

            return List(elts, Load, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
        }
        else {
            return copy_location(ast_for_listcomp(c, ch), n);
        }
    case LBRACE: {
        /* dictorsetmaker: ( ((test ':' test | '**' test)
         *                   (comp_for | (',' (test ':' test | '**' test))* [','])) |
         *                  ((test | '*' test)
         *                   (comp_for | (',' (test | '*' test))* [','])) ) */
        expr_ty res;
        ch = CHILD(n, 1);
        if (TYPE(ch) == RBRACE) {
            /* It's an empty dict. */
            return Dict(NULL, NULL, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
        }
        else {
            /* '**' as the first token marks dict (not set) syntax. */
            int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR);
            if (NCH(ch) == 1 ||
                    (NCH(ch) > 1 &&
                     TYPE(CHILD(ch, 1)) == COMMA)) {
                /* It's a set display. */
                res = ast_for_setdisplay(c, ch);
            }
            else if (NCH(ch) > 1 &&
                    TYPE(CHILD(ch, 1)) == comp_for) {
                /* It's a set comprehension. */
                res = ast_for_setcomp(c, ch);
            }
            else if (NCH(ch) > 3 - is_dict &&
                    TYPE(CHILD(ch, 3 - is_dict)) == comp_for) {
                /* It's a dictionary comprehension. */
                if (is_dict) {
                    ast_error(c, n, "dict unpacking cannot be used in "
                              "dict comprehension");
                    return NULL;
                }
                res = ast_for_dictcomp(c, ch);
            }
            else {
                /* It's a dictionary display. */
                res = ast_for_dictdisplay(c, ch);
            }
            return copy_location(res, n);
        }
    }
    default:
        PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch));
        return NULL;
    }
}
ast_for_atom(struct compiling *c, const node *n)
{
    /* NOTE(review): this chunk of the file contains two token-identical
       copies of ast_for_atom; in C this second definition would be a
       redefinition error, so it is almost certainly an extraction
       artifact -- verify against upstream Python/ast.c.

       Build the AST expression for an 'atom' grammar node.

       atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']'
       | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+
       | '...' | 'None' | 'True' | 'False'

       Returns a new expr_ty allocated in c->c_arena, or NULL with an
       error set. */
    node *ch = CHILD(n, 0);

    switch (TYPE(ch)) {
    case NAME: {
        PyObject *name;
        const char *s = STR(ch);
        size_t len = strlen(s);
        /* Fast-path the singleton keywords (both are 4-5 chars long). */
        if (len >= 4 && len <= 5) {
            if (!strcmp(s, "None"))
                return Constant(Py_None, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
            if (!strcmp(s, "True"))
                return Constant(Py_True, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
            if (!strcmp(s, "False"))
                return Constant(Py_False, LINENO(n), n->n_col_offset,
                                n->n_end_lineno, n->n_end_col_offset,
                                c->c_arena);
        }
        name = new_identifier(s, c);
        if (!name)
            return NULL;
        /* All names start in Load context, but may later be changed. */
        return Name(name, Load, LINENO(n), n->n_col_offset,
                    n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    }
    case STRING: {
        expr_ty str = parsestrplus(c, n);
        if (!str) {
            /* Re-raise decode failures as syntax errors with location info. */
            const char *errtype = NULL;
            if (PyErr_ExceptionMatches(PyExc_UnicodeError))
                errtype = "unicode error";
            else if (PyErr_ExceptionMatches(PyExc_ValueError))
                errtype = "value error";
            if (errtype) {
                PyObject *type, *value, *tback, *errstr;
                PyErr_Fetch(&type, &value, &tback);
                errstr = PyObject_Str(value);
                if (errstr) {
                    ast_error(c, n, "(%s) %U", errtype, errstr);
                    Py_DECREF(errstr);
                }
                else {
                    /* Failed to stringify the error; report it generically. */
                    PyErr_Clear();
                    ast_error(c, n, "(%s) unknown error", errtype);
                }
                Py_DECREF(type);
                Py_XDECREF(value);
                Py_XDECREF(tback);
            }
            return NULL;
        }
        return str;
    }
    case NUMBER: {
        PyObject *pynum = parsenumber(c, STR(ch));
        if (!pynum)
            return NULL;

        if (PyArena_AddPyObject(c->c_arena, pynum) < 0) {
            Py_DECREF(pynum);
            return NULL;
        }
        return Constant(pynum, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    }
    case ELLIPSIS: /* Ellipsis */
        return Constant(Py_Ellipsis, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
    case LPAR: /* some parenthesized expressions */
        ch = CHILD(n, 1);

        if (TYPE(ch) == RPAR)
            /* '()' is an empty tuple. */
            return Tuple(NULL, Load, LINENO(n), n->n_col_offset,
                         n->n_end_lineno, n->n_end_col_offset, c->c_arena);

        if (TYPE(ch) == yield_expr)
            return ast_for_expr(c, ch);

        /* testlist_comp: test ( comp_for | (',' test)* [','] ) */
        if (NCH(ch) == 1) {
            return ast_for_testlist(c, ch);
        }

        if (TYPE(CHILD(ch, 1)) == comp_for) {
            /* copy_location presumably stamps n's position onto the
               result node -- TODO confirm against its definition. */
            return copy_location(ast_for_genexp(c, ch), n);
        }
        else {
            return copy_location(ast_for_testlist(c, ch), n);
        }
    case LSQB: /* list (or list comprehension) */
        ch = CHILD(n, 1);

        if (TYPE(ch) == RSQB)
            /* '[]' is an empty list. */
            return List(NULL, Load, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);

        REQ(ch, testlist_comp);
        if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) {
            /* Plain list display (possibly with trailing comma). */
            asdl_seq *elts = seq_for_testlist(c, ch);
            if (!elts)
                return NULL;

            return List(elts, Load, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
        }
        else {
            return copy_location(ast_for_listcomp(c, ch), n);
        }
    case LBRACE: {
        /* dictorsetmaker: ( ((test ':' test | '**' test)
         *                   (comp_for | (',' (test ':' test | '**' test))* [','])) |
         *                  ((test | '*' test)
         *                   (comp_for | (',' (test | '*' test))* [','])) ) */
        expr_ty res;
        ch = CHILD(n, 1);
        if (TYPE(ch) == RBRACE) {
            /* It's an empty dict. */
            return Dict(NULL, NULL, LINENO(n), n->n_col_offset,
                        n->n_end_lineno, n->n_end_col_offset, c->c_arena);
        }
        else {
            /* '**' as the first token marks dict (not set) syntax. */
            int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR);
            if (NCH(ch) == 1 ||
                    (NCH(ch) > 1 &&
                     TYPE(CHILD(ch, 1)) == COMMA)) {
                /* It's a set display. */
                res = ast_for_setdisplay(c, ch);
            }
            else if (NCH(ch) > 1 &&
                    TYPE(CHILD(ch, 1)) == comp_for) {
                /* It's a set comprehension. */
                res = ast_for_setcomp(c, ch);
            }
            else if (NCH(ch) > 3 - is_dict &&
                    TYPE(CHILD(ch, 3 - is_dict)) == comp_for) {
                /* It's a dictionary comprehension. */
                if (is_dict) {
                    ast_error(c, n, "dict unpacking cannot be used in "
                              "dict comprehension");
                    return NULL;
                }
                res = ast_for_dictcomp(c, ch);
            }
            else {
                /* It's a dictionary display. */
                res = ast_for_dictdisplay(c, ch);
            }
            return copy_location(res, n);
        }
    }
    default:
        PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch));
        return NULL;
    }
}
{'added': [(701, 'static string'), (702, 'new_type_comment(const char *s)'), (703, '{'), (704, ' return PyUnicode_DecodeUTF8(s, strlen(s), NULL);'), (705, '}'), (706, '#define NEW_TYPE_COMMENT(n) new_type_comment(STR(n))'), (707, ''), (735, ' case func_body_suite:'), (736, ' /* func_body_suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */'), (737, ' /* suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT */'), (741, ' i = 2;'), (743, ' if (TYPE(CHILD(n, 1)) == TYPE_COMMENT)'), (744, ' i += 2;'), (745, ' for (; i < (NCH(n) - 1); i++)'), (769, ' asdl_seq *type_ignores = NULL;'), (774, ' asdl_seq *argtypes = NULL;'), (775, ' expr_ty ret, arg;'), (814, ''), (815, ' /* Type ignores are stored under the ENDMARKER in file_input. */'), (816, ' ch = CHILD(n, NCH(n) - 1);'), (817, ' REQ(ch, ENDMARKER);'), (818, ' num = NCH(ch);'), (819, ' type_ignores = _Py_asdl_seq_new(num, arena);'), (820, ' if (!type_ignores)'), (821, ' goto out;'), (822, ''), (823, ' for (i = 0; i < num; i++) {'), (824, ' type_ignore_ty ti = TypeIgnore(LINENO(CHILD(ch, i)), arena);'), (825, ' if (!ti)'), (826, ' goto out;'), (827, ' asdl_seq_SET(type_ignores, i, ti);'), (828, ' }'), (829, ''), (830, ' res = Module(stmts, type_ignores, arena);'), (882, ' case func_type_input:'), (883, ' n = CHILD(n, 0);'), (884, ' REQ(n, func_type);'), (885, ''), (886, ' if (TYPE(CHILD(n, 1)) == typelist) {'), (887, ' ch = CHILD(n, 1);'), (888, " /* this is overly permissive -- we don't pay any attention to"), (889, ' * stars on the args -- just parse them into an ordered list */'), (890, ' num = 0;'), (891, ' for (i = 0; i < NCH(ch); i++) {'), (892, ' if (TYPE(CHILD(ch, i)) == test) {'), (893, ' num++;'), (894, ' }'), (895, ' }'), (896, ''), (897, ' argtypes = _Py_asdl_seq_new(num, arena);'), (898, ' if (!argtypes)'), (899, ' goto out;'), (900, ''), (901, ' j = 0;'), (902, ' for (i = 0; i < NCH(ch); i++) {'), (903, ' if (TYPE(CHILD(ch, i)) == test) {'), (904, ' arg = ast_for_expr(&c, CHILD(ch, i));'), 
(905, ' if (!arg)'), (906, ' goto out;'), (907, ' asdl_seq_SET(argtypes, j++, arg);'), (908, ' }'), (909, ' }'), (910, ' }'), (911, ' else {'), (912, ' argtypes = _Py_asdl_seq_new(0, arena);'), (913, ' if (!argtypes)'), (914, ' goto out;'), (915, ' }'), (916, ''), (917, ' ret = ast_for_expr(&c, CHILD(n, NCH(n) - 1));'), (918, ' if (!ret)'), (919, ' goto out;'), (920, ' res = FunctionType(argtypes, ret, arena);'), (921, ' break;'), (1344, ' ret = arg(name, annotation, NULL, LINENO(n), n->n_col_offset,'), (1403, ' arg = arg(argname, annotation, NULL, LINENO(ch), ch->n_col_offset,'), (1409, ' i += 1; /* the name */'), (1410, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1411, ' i += 1; /* the comma, if present */'), (1412, ' break;'), (1413, ' case TYPE_COMMENT:'), (1414, ' /* arg will be equal to the last argument processed */'), (1415, ' arg->type_comment = NEW_TYPE_COMMENT(ch);'), (1416, ' if (!arg->type_comment)'), (1417, ' goto error;'), (1418, ' i += 1;'), (1548, ' i += 1; /* the name */'), (1549, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1550, ' i += 1; /* the comma, if present */'), (1554, ' (i+2 == NCH(n) && (TYPE(CHILD(n, i+1)) == COMMA'), (1555, ' || TYPE(CHILD(n, i+1)) == TYPE_COMMENT))) {'), (1557, ' "named arguments must follow bare *");'), (1564, ''), (1565, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1566, ' ast_error(c, CHILD(n, i),'), (1567, ' "bare * has associated type comment");'), (1568, ' return NULL;'), (1569, ' }'), (1570, ''), (1581, ' i += 2; /* the star and the name */'), (1582, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1583, ' i += 1; /* the comma, if present */'), (1584, ''), (1585, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1586, ' vararg->type_comment = NEW_TYPE_COMMENT(CHILD(n, i));'), (1587, ' if (!vararg->type_comment)'), (1588, ' return NULL;'), (1589, ' i += 1;'), (1590, ' }'), (1591, ''), (1608, ' i += 2; /* the double star and the name */'), (1609, ' if (TYPE(CHILD(n, i)) == 
COMMA)'), (1610, ' i += 1; /* the comma, if present */'), (1611, ' break;'), (1612, ' case TYPE_COMMENT:'), (1613, ' assert(i);'), (1614, ''), (1615, ' if (kwarg)'), (1616, ' arg = kwarg;'), (1617, ''), (1618, ' /* arg will be equal to the last argument processed */'), (1619, ' arg->type_comment = NEW_TYPE_COMMENT(ch);'), (1620, ' if (!arg->type_comment)'), (1621, ' return NULL;'), (1622, ' i += 1;'), (1731, " /* funcdef: 'def' NAME parameters ['->' test] ':' [TYPE_COMMENT] suite */"), (1739, ' node *tc;'), (1740, ' string type_comment = NULL;'), (1758, ' if (TYPE(CHILD(n, name_i + 3)) == TYPE_COMMENT) {'), (1759, ' type_comment = NEW_TYPE_COMMENT(CHILD(n, name_i + 3));'), (1760, ' if (!type_comment)'), (1761, ' return NULL;'), (1762, ' name_i += 1;'), (1763, ' }'), (1769, ' if (NCH(CHILD(n, name_i + 3)) > 1) {'), (1770, ' /* Check if the suite has a type comment in it. */'), (1771, ' tc = CHILD(CHILD(n, name_i + 3), 1);'), (1772, ''), (1773, ' if (TYPE(tc) == TYPE_COMMENT) {'), (1774, ' if (type_comment != NULL) {'), (1775, ' ast_error(c, n, "Cannot have two type comments on def");'), (1776, ' return NULL;'), (1777, ' }'), (1778, ' type_comment = NEW_TYPE_COMMENT(tc);'), (1779, ' if (!type_comment)'), (1780, ' return NULL;'), (1781, ' }'), (1782, ' }'), (1783, ''), (1785, ' return AsyncFunctionDef(name, args, body, decorator_seq, returns, type_comment,'), (1788, ' return FunctionDef(name, args, body, decorator_seq, returns, type_comment,'), (2436, ' "dict comprehension");'), (3011, ' "positional argument follows "'), (3012, ' "keyword argument unpacking");'), (3016, ' "positional argument follows "'), (3017, ' "keyword argument");'), (3031, ' "iterable argument unpacking follows "'), (3032, ' "keyword argument unpacking");'), (3070, ' "positional argument follows "'), (3071, ' "keyword argument unpacking");'), (3075, ' "positional argument follows "'), (3076, ' "keyword argument");'), (3137, ' "keyword argument repeated");'), (3186, " [('=' 
(yield_expr|testlist_star_expr))+ [TYPE_COMMENT]] )"), (3187, " annassign: ':' test ['=' (yield_expr|testlist)]"), (3188, " testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']"), (3189, " augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |"), (3190, " '<<=' | '>>=' | '**=' | '//=')"), (3193, ' int num = NCH(n);'), (3195, ' if (num == 1) {'), (3320, ' int i, nch_minus_type, has_type_comment;'), (3324, ' string type_comment;'), (3328, ''), (3329, ' has_type_comment = TYPE(CHILD(n, num - 1)) == TYPE_COMMENT;'), (3330, ' nch_minus_type = num - has_type_comment;'), (3331, ''), (3332, ' targets = _Py_asdl_seq_new(nch_minus_type / 2, c->c_arena);'), (3335, ' for (i = 0; i < nch_minus_type - 2; i += 2) {'), (3352, ' value = CHILD(n, nch_minus_type - 1);'), (3359, ' if (has_type_comment) {'), (3360, ' type_comment = NEW_TYPE_COMMENT(CHILD(n, nch_minus_type));'), (3361, ' if (!type_comment)'), (3362, ' return NULL;'), (3363, ' }'), (3364, ' else'), (3365, ' type_comment = NULL;'), (3366, ' return Assign(targets, expression, type_comment, LINENO(n), n->n_col_offset,'), (3674, ' ast_error(c, n,'), (3675, ' "trailing comma not allowed without"'), (3676, ' " surrounding parentheses");'), (3794, ' /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */'), (3800, ' if (TYPE(n) != func_body_suite) {'), (3801, ' REQ(n, suite);'), (3802, ' }'), (3826, ' i = 2;'), (3827, ' if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) {'), (3828, ' i += 2;'), (3829, ' REQ(CHILD(n, 2), NEWLINE);'), (3830, ' }'), (3831, ''), (3832, ' for (; i < (NCH(n) - 1); i++) {'), (4066, ' int has_type_comment;'), (4067, ' string type_comment;'), (4068, " /* for_stmt: 'for' exprlist 'in' testlist ':' [TYPE_COMMENT] suite ['else' ':' suite] */"), (4071, ' has_type_comment = TYPE(CHILD(n, 5)) == TYPE_COMMENT;'), (4072, ''), (4073, ' if (NCH(n) == 9 + has_type_comment) {'), (4074, ' seq = ast_for_suite(c, CHILD(n, 8 + has_type_comment));'), (4096, ' suite_seq = 
ast_for_suite(c, CHILD(n, 5 + has_type_comment));'), (4105, ''), (4106, ' if (has_type_comment) {'), (4107, ' type_comment = NEW_TYPE_COMMENT(CHILD(n, 5));'), (4108, ' if (!type_comment)'), (4109, ' return NULL;'), (4110, ' }'), (4111, ' else'), (4112, ' type_comment = NULL;'), (4113, ''), (4115, ' return AsyncFor(target, expression, suite_seq, seq, type_comment,'), (4119, ' return For(target, expression, suite_seq, seq, type_comment,'), (4287, "/* with_stmt: 'with' with_item (',' with_item)* ':' [TYPE_COMMENT] suite */"), (4292, ' int i, n_items, nch_minus_type, has_type_comment, end_lineno, end_col_offset;'), (4294, ' string type_comment;'), (4298, ' has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT;'), (4299, ' nch_minus_type = NCH(n) - has_type_comment;'), (4300, ''), (4301, ' n_items = (nch_minus_type - 2) / 2;'), (4305, ' for (i = 1; i < nch_minus_type - 2; i += 2) {'), (4317, ' if (has_type_comment) {'), (4318, ' type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2));'), (4319, ' if (!type_comment)'), (4320, ' return NULL;'), (4321, ' }'), (4322, ' else'), (4323, ' type_comment = NULL;'), (4324, ''), (4326, ' return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset,'), (4329, ' return With(items, body, type_comment, LINENO(n), n->n_col_offset,'), (4956, ' ast_error(c, n,'), (4957, ' "f-string expression part "'), (4958, ' "cannot include a backslash");'), (5082, ' ast_error(c, n,'), (5083, ' "f-string: invalid conversion character: "'), (5084, ' "expected \'s\', \'r\', or \'a\'");'), (5636, ' ast_error(c, n,'), (5637, ' "bytes can only contain ASCII "')], 'deleted': [(732, ' for (i = 2; i < (NCH(n) - 1); i++)'), (798, ' res = Module(stmts, arena);'), (1272, ' ret = arg(name, annotation, LINENO(n), n->n_col_offset,'), (1331, ' arg = arg(argname, annotation, LINENO(ch), ch->n_col_offset,'), (1337, ' i += 2; /* the name and the comma */'), (1467, ' i += 2; /* the name and the comma */'), (1471, ' (i+2 == NCH(n) && TYPE(CHILD(n, i+1)) 
== COMMA)) {'), (1473, ' "named arguments must follow bare *");'), (1490, ' i += 3;'), (1507, ' i += 3;'), (1616, " /* funcdef: 'def' NAME parameters ['->' test] ':' suite */"), (1647, ' return AsyncFunctionDef(name, args, body, decorator_seq, returns,'), (1650, ' return FunctionDef(name, args, body, decorator_seq, returns,'), (2298, ' "dict comprehension");'), (2873, ' "positional argument follows "'), (2874, ' "keyword argument unpacking");'), (2878, ' "positional argument follows "'), (2879, ' "keyword argument");'), (2893, ' "iterable argument unpacking follows "'), (2894, ' "keyword argument unpacking");'), (2932, ' "positional argument follows "'), (2933, ' "keyword argument unpacking");'), (2937, ' "positional argument follows "'), (2938, ' "keyword argument");'), (2999, ' "keyword argument repeated");'), (3048, " ('=' (yield_expr|testlist_star_expr))*)"), (3049, " annassign: ':' test ['=' test]"), (3050, " testlist_star_expr: (test|star_expr) (',' test|star_expr)* [',']"), (3051, " augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^='"), (3052, " | '<<=' | '>>=' | '**=' | '//='"), (3056, ' if (NCH(n) == 1) {'), (3181, ' int i;'), (3188, ' targets = _Py_asdl_seq_new(NCH(n) / 2, c->c_arena);'), (3191, ' for (i = 0; i < NCH(n) - 2; i += 2) {'), (3208, ' value = CHILD(n, NCH(n) - 1);'), (3215, ' return Assign(targets, expression, LINENO(n), n->n_col_offset,'), (3523, ' ast_error(c, n, "trailing comma not allowed without"'), (3524, ' " surrounding parentheses");'), (3642, ' /* suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT */'), (3648, ' REQ(n, suite);'), (3672, ' for (i = 2; i < (NCH(n) - 1); i++) {'), (3906, " /* for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] */"), (3909, ' if (NCH(n) == 9) {'), (3910, ' seq = ast_for_suite(c, CHILD(n, 8));'), (3932, ' suite_seq = ast_for_suite(c, CHILD(n, 5));'), (3942, ' return AsyncFor(target, expression, suite_seq, seq,'), (3946, ' return For(target, expression, suite_seq, seq,'), 
(4114, "/* with_stmt: 'with' with_item (',' with_item)* ':' suite */"), (4119, ' int i, n_items, end_lineno, end_col_offset;'), (4124, ' n_items = (NCH(n) - 2) / 2;'), (4128, ' for (i = 1; i < NCH(n) - 2; i += 2) {'), (4141, ' return AsyncWith(items, body, LINENO(n0), n0->n_col_offset,'), (4144, ' return With(items, body, LINENO(n), n->n_col_offset,'), (4771, ' ast_error(c, n, "f-string expression part "'), (4772, ' "cannot include a backslash");'), (4896, ' ast_error(c, n, "f-string: invalid conversion character: "'), (4897, ' "expected \'s\', \'r\', or \'a\'");'), (5449, ' ast_error(c, n, "bytes can only contain ASCII "')]}
246
58
4,665
30,267
137
1,009
38
https://github.com/python/cpython
CVE-2019-19274
CWE-125
1,886
print-eap.c
C
eap_print
/* * Copyright (c) 2004 - Michael Richardson <mcr@xelerance.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
*/ /* \summary: Extensible Authentication Protocol (EAP) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #define EAP_FRAME_TYPE_PACKET 0 #define EAP_FRAME_TYPE_START 1 #define EAP_FRAME_TYPE_LOGOFF 2 #define EAP_FRAME_TYPE_KEY 3 #define EAP_FRAME_TYPE_ENCAP_ASF_ALERT 4 struct eap_frame_t { unsigned char version; unsigned char type; unsigned char length[2]; }; static const struct tok eap_frame_type_values[] = { { EAP_FRAME_TYPE_PACKET, "EAP packet" }, { EAP_FRAME_TYPE_START, "EAPOL start" }, { EAP_FRAME_TYPE_LOGOFF, "EAPOL logoff" }, { EAP_FRAME_TYPE_KEY, "EAPOL key" }, { EAP_FRAME_TYPE_ENCAP_ASF_ALERT, "Encapsulated ASF alert" }, { 0, NULL} }; /* RFC 3748 */ struct eap_packet_t { unsigned char code; unsigned char id; unsigned char length[2]; }; #define EAP_REQUEST 1 #define EAP_RESPONSE 2 #define EAP_SUCCESS 3 #define EAP_FAILURE 4 static const struct tok eap_code_values[] = { { EAP_REQUEST, "Request" }, { EAP_RESPONSE, "Response" }, { EAP_SUCCESS, "Success" }, { EAP_FAILURE, "Failure" }, { 0, NULL} }; #define EAP_TYPE_NO_PROPOSED 0 #define EAP_TYPE_IDENTITY 1 #define EAP_TYPE_NOTIFICATION 2 #define EAP_TYPE_NAK 3 #define EAP_TYPE_MD5_CHALLENGE 4 #define EAP_TYPE_OTP 5 #define EAP_TYPE_GTC 6 #define EAP_TYPE_TLS 13 /* RFC 2716 */ #define EAP_TYPE_SIM 18 /* RFC 4186 */ #define EAP_TYPE_TTLS 21 /* draft-funk-eap-ttls-v0-01.txt */ #define EAP_TYPE_AKA 23 /* RFC 4187 */ #define EAP_TYPE_FAST 43 /* RFC 4851 */ #define EAP_TYPE_EXPANDED_TYPES 254 #define EAP_TYPE_EXPERIMENTAL 255 static const struct tok eap_type_values[] = { { EAP_TYPE_NO_PROPOSED, "No proposed" }, { EAP_TYPE_IDENTITY, "Identity" }, { EAP_TYPE_NOTIFICATION, "Notification" }, { EAP_TYPE_NAK, "Nak" }, { EAP_TYPE_MD5_CHALLENGE, "MD5-challenge" }, { EAP_TYPE_OTP, "OTP" }, { EAP_TYPE_GTC, "GTC" }, { EAP_TYPE_TLS, "TLS" }, { EAP_TYPE_SIM, "SIM" }, { EAP_TYPE_TTLS, "TTLS" }, { EAP_TYPE_AKA, "AKA" }, { EAP_TYPE_FAST, "FAST" }, 
{ EAP_TYPE_EXPANDED_TYPES, "Expanded types" }, { EAP_TYPE_EXPERIMENTAL, "Experimental" }, { 0, NULL} }; #define EAP_TLS_EXTRACT_BIT_L(x) (((x)&0x80)>>7) /* RFC 2716 - EAP TLS bits */ #define EAP_TLS_FLAGS_LEN_INCLUDED (1 << 7) #define EAP_TLS_FLAGS_MORE_FRAGMENTS (1 << 6) #define EAP_TLS_FLAGS_START (1 << 5) static const struct tok eap_tls_flags_values[] = { { EAP_TLS_FLAGS_LEN_INCLUDED, "L bit" }, { EAP_TLS_FLAGS_MORE_FRAGMENTS, "More fragments bit"}, { EAP_TLS_FLAGS_START, "Start bit"}, { 0, NULL} }; #define EAP_TTLS_VERSION(x) ((x)&0x07) /* EAP-AKA and EAP-SIM - RFC 4187 */ #define EAP_AKA_CHALLENGE 1 #define EAP_AKA_AUTH_REJECT 2 #define EAP_AKA_SYNC_FAILURE 4 #define EAP_AKA_IDENTITY 5 #define EAP_SIM_START 10 #define EAP_SIM_CHALLENGE 11 #define EAP_AKA_NOTIFICATION 12 #define EAP_AKA_REAUTH 13 #define EAP_AKA_CLIENT_ERROR 14 static const struct tok eap_aka_subtype_values[] = { { EAP_AKA_CHALLENGE, "Challenge" }, { EAP_AKA_AUTH_REJECT, "Auth reject" }, { EAP_AKA_SYNC_FAILURE, "Sync failure" }, { EAP_AKA_IDENTITY, "Identity" }, { EAP_SIM_START, "Start" }, { EAP_SIM_CHALLENGE, "Challenge" }, { EAP_AKA_NOTIFICATION, "Notification" }, { EAP_AKA_REAUTH, "Reauth" }, { EAP_AKA_CLIENT_ERROR, "Client error" }, { 0, NULL} }; /* * Print EAP requests / responses */ void eap_print(netdissect_options *ndo, register const u_char *cp, u_int length) { const struct eap_frame_t *eap; const u_char *tptr; u_int tlen, type, subtype; int count=0, len; tptr = cp; tlen = length; eap = (const struct eap_frame_t *)cp; ND_TCHECK(*eap); /* in non-verbose mode just lets print the basic info */ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); return; } ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); tptr += sizeof(const struct eap_frame_t); tlen -= sizeof(const struct 
eap_frame_t); switch (eap->type) { case EAP_FRAME_TYPE_PACKET: type = *(tptr); len = EXTRACT_16BITS(tptr+2); ND_PRINT((ndo, ", %s (%u), id %u, len %u", tok2str(eap_code_values, "unknown", type), type, *(tptr+1), len)); ND_TCHECK2(*tptr, len); if (type <= 2) { /* For EAP_REQUEST and EAP_RESPONSE only */ subtype = *(tptr+4); ND_PRINT((ndo, "\n\t\t Type %s (%u)", tok2str(eap_type_values, "unknown", *(tptr+4)), *(tptr + 4))); switch (subtype) { case EAP_TYPE_IDENTITY: if (len - 5 > 0) { ND_PRINT((ndo, ", Identity: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NOTIFICATION: if (len - 5 > 0) { ND_PRINT((ndo, ", Notification: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NAK: count = 5; /* * one or more octets indicating * the desired authentication * type one octet per type */ while (count < len) { ND_PRINT((ndo, " %s (%u),", tok2str(eap_type_values, "unknown", *(tptr+count)), *(tptr + count))); count++; } break; case EAP_TYPE_TTLS: ND_PRINT((ndo, " TTLSv%u", EAP_TTLS_VERSION(*(tptr + 5)))); /* fall through */ case EAP_TYPE_TLS: ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } break; case EAP_TYPE_FAST: ND_PRINT((ndo, " FASTv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } /* FIXME - TLV attributes follow */ break; case EAP_TYPE_AKA: case EAP_TYPE_SIM: ND_PRINT((ndo, " subtype [%s] 0x%02x,", tok2str(eap_aka_subtype_values, "unknown", *(tptr+5)), *(tptr + 5))); /* FIXME - TLV attributes follow */ break; case EAP_TYPE_MD5_CHALLENGE: case EAP_TYPE_OTP: case EAP_TYPE_GTC: case EAP_TYPE_EXPANDED_TYPES: case EAP_TYPE_EXPERIMENTAL: default: break; } } break; case EAP_FRAME_TYPE_LOGOFF: case 
EAP_FRAME_TYPE_ENCAP_ASF_ALERT: default: break; } return; trunc: ND_PRINT((ndo, "\n\t[|EAP]")); } /* * Local Variables: * c-basic-offset: 4 * End: */
/* * Copyright (c) 2004 - Michael Richardson <mcr@xelerance.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
*/ /* \summary: Extensible Authentication Protocol (EAP) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #define EAP_FRAME_TYPE_PACKET 0 #define EAP_FRAME_TYPE_START 1 #define EAP_FRAME_TYPE_LOGOFF 2 #define EAP_FRAME_TYPE_KEY 3 #define EAP_FRAME_TYPE_ENCAP_ASF_ALERT 4 struct eap_frame_t { unsigned char version; unsigned char type; unsigned char length[2]; }; static const struct tok eap_frame_type_values[] = { { EAP_FRAME_TYPE_PACKET, "EAP packet" }, { EAP_FRAME_TYPE_START, "EAPOL start" }, { EAP_FRAME_TYPE_LOGOFF, "EAPOL logoff" }, { EAP_FRAME_TYPE_KEY, "EAPOL key" }, { EAP_FRAME_TYPE_ENCAP_ASF_ALERT, "Encapsulated ASF alert" }, { 0, NULL} }; /* RFC 3748 */ struct eap_packet_t { unsigned char code; unsigned char id; unsigned char length[2]; }; #define EAP_REQUEST 1 #define EAP_RESPONSE 2 #define EAP_SUCCESS 3 #define EAP_FAILURE 4 static const struct tok eap_code_values[] = { { EAP_REQUEST, "Request" }, { EAP_RESPONSE, "Response" }, { EAP_SUCCESS, "Success" }, { EAP_FAILURE, "Failure" }, { 0, NULL} }; #define EAP_TYPE_NO_PROPOSED 0 #define EAP_TYPE_IDENTITY 1 #define EAP_TYPE_NOTIFICATION 2 #define EAP_TYPE_NAK 3 #define EAP_TYPE_MD5_CHALLENGE 4 #define EAP_TYPE_OTP 5 #define EAP_TYPE_GTC 6 #define EAP_TYPE_TLS 13 /* RFC 2716 */ #define EAP_TYPE_SIM 18 /* RFC 4186 */ #define EAP_TYPE_TTLS 21 /* draft-funk-eap-ttls-v0-01.txt */ #define EAP_TYPE_AKA 23 /* RFC 4187 */ #define EAP_TYPE_FAST 43 /* RFC 4851 */ #define EAP_TYPE_EXPANDED_TYPES 254 #define EAP_TYPE_EXPERIMENTAL 255 static const struct tok eap_type_values[] = { { EAP_TYPE_NO_PROPOSED, "No proposed" }, { EAP_TYPE_IDENTITY, "Identity" }, { EAP_TYPE_NOTIFICATION, "Notification" }, { EAP_TYPE_NAK, "Nak" }, { EAP_TYPE_MD5_CHALLENGE, "MD5-challenge" }, { EAP_TYPE_OTP, "OTP" }, { EAP_TYPE_GTC, "GTC" }, { EAP_TYPE_TLS, "TLS" }, { EAP_TYPE_SIM, "SIM" }, { EAP_TYPE_TTLS, "TTLS" }, { EAP_TYPE_AKA, "AKA" }, { EAP_TYPE_FAST, "FAST" }, 
{ EAP_TYPE_EXPANDED_TYPES, "Expanded types" }, { EAP_TYPE_EXPERIMENTAL, "Experimental" }, { 0, NULL} }; #define EAP_TLS_EXTRACT_BIT_L(x) (((x)&0x80)>>7) /* RFC 2716 - EAP TLS bits */ #define EAP_TLS_FLAGS_LEN_INCLUDED (1 << 7) #define EAP_TLS_FLAGS_MORE_FRAGMENTS (1 << 6) #define EAP_TLS_FLAGS_START (1 << 5) static const struct tok eap_tls_flags_values[] = { { EAP_TLS_FLAGS_LEN_INCLUDED, "L bit" }, { EAP_TLS_FLAGS_MORE_FRAGMENTS, "More fragments bit"}, { EAP_TLS_FLAGS_START, "Start bit"}, { 0, NULL} }; #define EAP_TTLS_VERSION(x) ((x)&0x07) /* EAP-AKA and EAP-SIM - RFC 4187 */ #define EAP_AKA_CHALLENGE 1 #define EAP_AKA_AUTH_REJECT 2 #define EAP_AKA_SYNC_FAILURE 4 #define EAP_AKA_IDENTITY 5 #define EAP_SIM_START 10 #define EAP_SIM_CHALLENGE 11 #define EAP_AKA_NOTIFICATION 12 #define EAP_AKA_REAUTH 13 #define EAP_AKA_CLIENT_ERROR 14 static const struct tok eap_aka_subtype_values[] = { { EAP_AKA_CHALLENGE, "Challenge" }, { EAP_AKA_AUTH_REJECT, "Auth reject" }, { EAP_AKA_SYNC_FAILURE, "Sync failure" }, { EAP_AKA_IDENTITY, "Identity" }, { EAP_SIM_START, "Start" }, { EAP_SIM_CHALLENGE, "Challenge" }, { EAP_AKA_NOTIFICATION, "Notification" }, { EAP_AKA_REAUTH, "Reauth" }, { EAP_AKA_CLIENT_ERROR, "Client error" }, { 0, NULL} }; /* * Print EAP requests / responses */ void eap_print(netdissect_options *ndo, register const u_char *cp, u_int length) { const struct eap_frame_t *eap; const u_char *tptr; u_int tlen, type, subtype; int count=0, len; tptr = cp; tlen = length; eap = (const struct eap_frame_t *)cp; ND_TCHECK(*eap); /* in non-verbose mode just lets print the basic info */ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); return; } ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); tptr += sizeof(const struct eap_frame_t); tlen -= sizeof(const struct 
eap_frame_t); switch (eap->type) { case EAP_FRAME_TYPE_PACKET: ND_TCHECK_8BITS(tptr); type = *(tptr); ND_TCHECK_16BITS(tptr+2); len = EXTRACT_16BITS(tptr+2); ND_PRINT((ndo, ", %s (%u), id %u, len %u", tok2str(eap_code_values, "unknown", type), type, *(tptr+1), len)); ND_TCHECK2(*tptr, len); if (type <= 2) { /* For EAP_REQUEST and EAP_RESPONSE only */ ND_TCHECK_8BITS(tptr+4); subtype = *(tptr+4); ND_PRINT((ndo, "\n\t\t Type %s (%u)", tok2str(eap_type_values, "unknown", subtype), subtype)); switch (subtype) { case EAP_TYPE_IDENTITY: if (len - 5 > 0) { ND_PRINT((ndo, ", Identity: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NOTIFICATION: if (len - 5 > 0) { ND_PRINT((ndo, ", Notification: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NAK: count = 5; /* * one or more octets indicating * the desired authentication * type one octet per type */ while (count < len) { ND_TCHECK_8BITS(tptr+count); ND_PRINT((ndo, " %s (%u),", tok2str(eap_type_values, "unknown", *(tptr+count)), *(tptr + count))); count++; } break; case EAP_TYPE_TTLS: case EAP_TYPE_TLS: ND_TCHECK_8BITS(tptr + 5); if (subtype == EAP_TYPE_TTLS) ND_PRINT((ndo, " TTLSv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_TCHECK_32BITS(tptr + 6); ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } break; case EAP_TYPE_FAST: ND_TCHECK_8BITS(tptr + 5); ND_PRINT((ndo, " FASTv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_TCHECK_32BITS(tptr + 6); ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } /* FIXME - TLV attributes follow */ break; case EAP_TYPE_AKA: case EAP_TYPE_SIM: ND_TCHECK_8BITS(tptr + 5); ND_PRINT((ndo, " subtype [%s] 0x%02x,", tok2str(eap_aka_subtype_values, "unknown", *(tptr+5)), *(tptr + 5))); 
/* FIXME - TLV attributes follow */ break; case EAP_TYPE_MD5_CHALLENGE: case EAP_TYPE_OTP: case EAP_TYPE_GTC: case EAP_TYPE_EXPANDED_TYPES: case EAP_TYPE_EXPERIMENTAL: default: break; } } break; case EAP_FRAME_TYPE_LOGOFF: case EAP_FRAME_TYPE_ENCAP_ASF_ALERT: default: break; } return; trunc: ND_PRINT((ndo, "\n\t[|EAP]")); } /* * Local Variables: * c-basic-offset: 4 * End: */
eap_print(netdissect_options *ndo, register const u_char *cp, u_int length) { const struct eap_frame_t *eap; const u_char *tptr; u_int tlen, type, subtype; int count=0, len; tptr = cp; tlen = length; eap = (const struct eap_frame_t *)cp; ND_TCHECK(*eap); /* in non-verbose mode just lets print the basic info */ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); return; } ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); tptr += sizeof(const struct eap_frame_t); tlen -= sizeof(const struct eap_frame_t); switch (eap->type) { case EAP_FRAME_TYPE_PACKET: type = *(tptr); len = EXTRACT_16BITS(tptr+2); ND_PRINT((ndo, ", %s (%u), id %u, len %u", tok2str(eap_code_values, "unknown", type), type, *(tptr+1), len)); ND_TCHECK2(*tptr, len); if (type <= 2) { /* For EAP_REQUEST and EAP_RESPONSE only */ subtype = *(tptr+4); ND_PRINT((ndo, "\n\t\t Type %s (%u)", tok2str(eap_type_values, "unknown", *(tptr+4)), *(tptr + 4))); switch (subtype) { case EAP_TYPE_IDENTITY: if (len - 5 > 0) { ND_PRINT((ndo, ", Identity: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NOTIFICATION: if (len - 5 > 0) { ND_PRINT((ndo, ", Notification: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NAK: count = 5; /* * one or more octets indicating * the desired authentication * type one octet per type */ while (count < len) { ND_PRINT((ndo, " %s (%u),", tok2str(eap_type_values, "unknown", *(tptr+count)), *(tptr + count))); count++; } break; case EAP_TYPE_TTLS: ND_PRINT((ndo, " TTLSv%u", EAP_TTLS_VERSION(*(tptr + 5)))); /* fall through */ case EAP_TYPE_TLS: ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } break; case 
EAP_TYPE_FAST: ND_PRINT((ndo, " FASTv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } /* FIXME - TLV attributes follow */ break; case EAP_TYPE_AKA: case EAP_TYPE_SIM: ND_PRINT((ndo, " subtype [%s] 0x%02x,", tok2str(eap_aka_subtype_values, "unknown", *(tptr+5)), *(tptr + 5))); /* FIXME - TLV attributes follow */ break; case EAP_TYPE_MD5_CHALLENGE: case EAP_TYPE_OTP: case EAP_TYPE_GTC: case EAP_TYPE_EXPANDED_TYPES: case EAP_TYPE_EXPERIMENTAL: default: break; } } break; case EAP_FRAME_TYPE_LOGOFF: case EAP_FRAME_TYPE_ENCAP_ASF_ALERT: default: break; } return; trunc: ND_PRINT((ndo, "\n\t[|EAP]")); }
eap_print(netdissect_options *ndo, register const u_char *cp, u_int length) { const struct eap_frame_t *eap; const u_char *tptr; u_int tlen, type, subtype; int count=0, len; tptr = cp; tlen = length; eap = (const struct eap_frame_t *)cp; ND_TCHECK(*eap); /* in non-verbose mode just lets print the basic info */ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); return; } ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); tptr += sizeof(const struct eap_frame_t); tlen -= sizeof(const struct eap_frame_t); switch (eap->type) { case EAP_FRAME_TYPE_PACKET: ND_TCHECK_8BITS(tptr); type = *(tptr); ND_TCHECK_16BITS(tptr+2); len = EXTRACT_16BITS(tptr+2); ND_PRINT((ndo, ", %s (%u), id %u, len %u", tok2str(eap_code_values, "unknown", type), type, *(tptr+1), len)); ND_TCHECK2(*tptr, len); if (type <= 2) { /* For EAP_REQUEST and EAP_RESPONSE only */ ND_TCHECK_8BITS(tptr+4); subtype = *(tptr+4); ND_PRINT((ndo, "\n\t\t Type %s (%u)", tok2str(eap_type_values, "unknown", subtype), subtype)); switch (subtype) { case EAP_TYPE_IDENTITY: if (len - 5 > 0) { ND_PRINT((ndo, ", Identity: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NOTIFICATION: if (len - 5 > 0) { ND_PRINT((ndo, ", Notification: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NAK: count = 5; /* * one or more octets indicating * the desired authentication * type one octet per type */ while (count < len) { ND_TCHECK_8BITS(tptr+count); ND_PRINT((ndo, " %s (%u),", tok2str(eap_type_values, "unknown", *(tptr+count)), *(tptr + count))); count++; } break; case EAP_TYPE_TTLS: case EAP_TYPE_TLS: ND_TCHECK_8BITS(tptr + 5); if (subtype == EAP_TYPE_TTLS) ND_PRINT((ndo, " TTLSv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", 
*(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_TCHECK_32BITS(tptr + 6); ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } break; case EAP_TYPE_FAST: ND_TCHECK_8BITS(tptr + 5); ND_PRINT((ndo, " FASTv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_TCHECK_32BITS(tptr + 6); ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } /* FIXME - TLV attributes follow */ break; case EAP_TYPE_AKA: case EAP_TYPE_SIM: ND_TCHECK_8BITS(tptr + 5); ND_PRINT((ndo, " subtype [%s] 0x%02x,", tok2str(eap_aka_subtype_values, "unknown", *(tptr+5)), *(tptr + 5))); /* FIXME - TLV attributes follow */ break; case EAP_TYPE_MD5_CHALLENGE: case EAP_TYPE_OTP: case EAP_TYPE_GTC: case EAP_TYPE_EXPANDED_TYPES: case EAP_TYPE_EXPERIMENTAL: default: break; } } break; case EAP_FRAME_TYPE_LOGOFF: case EAP_FRAME_TYPE_ENCAP_ASF_ALERT: default: break; } return; trunc: ND_PRINT((ndo, "\n\t[|EAP]")); }
{'added': [(185, ' ND_TCHECK_8BITS(tptr);'), (187, ' ND_TCHECK_16BITS(tptr+2);'), (198, ' ND_TCHECK_8BITS(tptr+4);'), (201, ' tok2str(eap_type_values, "unknown", subtype),'), (202, ' subtype));'), (228, ' ND_TCHECK_8BITS(tptr+count);'), (238, ' ND_TCHECK_8BITS(tptr + 5);'), (239, ' if (subtype == EAP_TYPE_TTLS)'), (240, ' ND_PRINT((ndo, " TTLSv%u",'), (241, ' EAP_TTLS_VERSION(*(tptr + 5))));'), (247, ' ND_TCHECK_32BITS(tptr + 6);'), (253, ' ND_TCHECK_8BITS(tptr + 5);'), (261, ' ND_TCHECK_32BITS(tptr + 6);'), (270, ' ND_TCHECK_8BITS(tptr + 5);')], 'deleted': [(198, ' tok2str(eap_type_values, "unknown", *(tptr+4)),'), (199, ' *(tptr + 4)));'), (233, ' ND_PRINT((ndo, " TTLSv%u",'), (234, ' EAP_TTLS_VERSION(*(tptr + 5)))); /* fall through */')]}
14
4
185
1,056
110
660
24
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13015
CWE-125
2,112
box_dump.c
C
nalm_dump
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2012 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #include <gpac/utf.h> #include <gpac/network.h> #include <gpac/color.h> #include <gpac/avparse.h> #include <time.h> #ifndef GPAC_DISABLE_ISOM_DUMP static void dump_data(FILE *trace, char *data, u32 dataLength) { u32 i; fprintf(trace, "data:application/octet-string,"); for (i=0; i<dataLength; i++) { fprintf(trace, "%02X", (unsigned char) data[i]); } } static void dump_data_hex(FILE *trace, char *data, u32 dataLength) { u32 i; fprintf(trace, "0x"); for (i=0; i<dataLength; i++) { fprintf(trace, "%02X", (unsigned char) data[i]); } } static void dump_data_attribute(FILE *trace, char *name, char *data, u32 data_size) { u32 i; if (!data || !data_size) { fprintf(trace, "%s=\"\"", name); return; } fprintf(trace, "%s=\"0x", name); for (i=0; i<data_size; i++) fprintf(trace, "%02X", (unsigned char) data[i]); fprintf(trace, "\" "); } static void dump_data_string(FILE *trace, char *data, u32 dataLength) { u32 i; for (i=0; i<dataLength; i++) { switch ((unsigned char) data[i]) { case '\'': fprintf(trace, "&apos;"); break; case '\"': fprintf(trace, "&quot;"); 
break; case '&': fprintf(trace, "&amp;"); break; case '>': fprintf(trace, "&gt;"); break; case '<': fprintf(trace, "&lt;"); break; default: fprintf(trace, "%c", (u8) data[i]); break; } } } GF_Err gf_isom_box_dump(void *ptr, FILE * trace) { return gf_isom_box_dump_ex(ptr, trace, 0); } GF_Err gf_isom_box_array_dump(GF_List *list, FILE * trace) { u32 i; GF_Box *a; if (!list) return GF_OK; i=0; while ((a = (GF_Box *)gf_list_enum(list, &i))) { gf_isom_box_dump(a, trace); } return GF_OK; } extern Bool use_dump_mode; GF_EXPORT GF_Err gf_isom_dump(GF_ISOFile *mov, FILE * trace) { u32 i; GF_Box *box; if (!mov || !trace) return GF_BAD_PARAM; use_dump_mode = mov->dump_mode_alloc; fprintf(trace, "<!--MP4Box dump trace-->\n"); fprintf(trace, "<IsoMediaFile xmlns=\"urn:mpeg:isobmff:schema:file:2016\" Name=\"%s\">\n", mov->fileName); i=0; while ((box = (GF_Box *)gf_list_enum(mov->TopBoxes, &i))) { if (box->type==GF_ISOM_BOX_TYPE_UNKNOWN) { fprintf(trace, "<!--WARNING: Unknown Top-level Box Found -->\n"); } else if (box->type==GF_ISOM_BOX_TYPE_UUID) { } else if (!gf_isom_box_is_file_level(box)) { fprintf(trace, "<!--ERROR: Invalid Top-level Box Found (\"%s\")-->\n", gf_4cc_to_str(box->type)); } gf_isom_box_dump(box, trace); } fprintf(trace, "</IsoMediaFile>\n"); return GF_OK; } GF_Err reftype_dump(GF_Box *a, FILE * trace) { u32 i; GF_TrackReferenceTypeBox *p = (GF_TrackReferenceTypeBox *)a; if (!p->reference_type) return GF_OK; p->type = p->reference_type; gf_isom_box_dump_start(a, "TrackReferenceTypeBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->trackIDCount; i++) { fprintf(trace, "<TrackReferenceEntry TrackID=\"%d\"/>\n", p->trackIDs[i]); } if (!p->size) fprintf(trace, "<TrackReferenceEntry TrackID=\"\"/>\n"); gf_isom_box_dump_done("TrackReferenceTypeBox", a, trace); p->type = GF_ISOM_BOX_TYPE_REFT; return GF_OK; } GF_Err ireftype_dump(GF_Box *a, FILE * trace) { u32 i; GF_ItemReferenceTypeBox *p = (GF_ItemReferenceTypeBox *)a; if (!p->reference_type) return GF_OK; p->type = 
/* continuation of ireftype_dump from the previous chunk */
p->reference_type;
	gf_isom_box_dump_start(a, "ItemReferenceBox", trace);
	fprintf(trace, "from_item_id=\"%d\">\n", p->from_item_id);
	for (i = 0; i < p->reference_count; i++) {
		fprintf(trace, "<ItemReferenceBoxEntry ItemID=\"%d\"/>\n", p->to_item_IDs[i]);
	}
	/* !p->size: template dump, emit empty example entry */
	if (!p->size)
		fprintf(trace, "<ItemReferenceBoxEntry ItemID=\"\"/>\n");
	gf_isom_box_dump_done("ItemReferenceBox", a, trace);
	p->type = GF_ISOM_BOX_TYPE_REFI;
	return GF_OK;
}

/* dumps a 'free' or 'skip' box (only the payload size is shown) */
GF_Err free_dump(GF_Box *a, FILE * trace)
{
	GF_FreeSpaceBox *p = (GF_FreeSpaceBox *)a;
	gf_isom_box_dump_start(a, (a->type==GF_ISOM_BOX_TYPE_FREE) ? "FreeSpaceBox" : "SkipBox", trace);
	fprintf(trace, "dataSize=\"%d\">\n", p->dataSize);
	gf_isom_box_dump_done( (a->type==GF_ISOM_BOX_TYPE_FREE) ? "FreeSpaceBox" : "SkipBox", a, trace);
	return GF_OK;
}

/* dumps an 'mdat' or 'idat' box header (payload bytes are not dumped) */
GF_Err mdat_dump(GF_Box *a, FILE * trace)
{
	GF_MediaDataBox *p;
	const char *name = (a->type==GF_ISOM_BOX_TYPE_IDAT ? "ItemDataBox" : "MediaDataBox");
	p = (GF_MediaDataBox *)a;
	gf_isom_box_dump_start(a, name, trace);
	fprintf(trace, "dataSize=\""LLD"\">\n", LLD_CAST p->dataSize);
	gf_isom_box_dump_done(name, a, trace);
	return GF_OK;
}

/* dumps a 'moov' box and its children (iods, meta, mvhd, mvex, tracks, udta) */
GF_Err moov_dump(GF_Box *a, FILE * trace)
{
	GF_MovieBox *p;
	p = (GF_MovieBox *)a;
	gf_isom_box_dump_start(a, "MovieBox", trace);
	fprintf(trace, ">\n");
	if (p->iods) gf_isom_box_dump(p->iods, trace);
	if (p->meta) gf_isom_box_dump(p->meta, trace);
	//dump only if size
	if (p->size)
		gf_isom_box_dump_ex(p->mvhd, trace,GF_ISOM_BOX_TYPE_MVHD);
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
	if (p->mvex) gf_isom_box_dump(p->mvex, trace);
#endif
	gf_isom_box_array_dump(p->trackList, trace);
	if (p->udta) gf_isom_box_dump(p->udta, trace);
	gf_isom_box_dump_done("MovieBox", a, trace);
	return GF_OK;
}

/* dumps an 'mvhd' box: timing fields and next track ID */
GF_Err mvhd_dump(GF_Box *a, FILE * trace)
{
	GF_MovieHeaderBox *p;
	p = (GF_MovieHeaderBox *) a;
	gf_isom_box_dump_start(a, "MovieHeaderBox", trace);
	fprintf(trace, "CreationTime=\""LLD"\" ", LLD_CAST p->creationTime);
	fprintf(trace, "ModificationTime=\""LLD"\" ", LLD_CAST p->modificationTime);
	fprintf(trace, "TimeScale=\"%d\" ", p->timeScale);
	fprintf(trace, "Duration=\""LLD"\" ", LLD_CAST p->duration);
	fprintf(trace, "NextTrackID=\"%d\">\n", p->nextTrackID);
	gf_isom_box_dump_done("MovieHeaderBox", a, trace);
	return GF_OK;
}

/* dumps an 'mdhd' box: timing fields plus packed 3-char language code */
GF_Err mdhd_dump(GF_Box *a, FILE * trace)
{
	GF_MediaHeaderBox *p;
	p = (GF_MediaHeaderBox *)a;
	gf_isom_box_dump_start(a, "MediaHeaderBox", trace);
	fprintf(trace, "CreationTime=\""LLD"\" ", LLD_CAST p->creationTime);
	fprintf(trace, "ModificationTime=\""LLD"\" ", LLD_CAST p->modificationTime);
	fprintf(trace, "TimeScale=\"%d\" ", p->timeScale);
	fprintf(trace, "Duration=\""LLD"\" ", LLD_CAST p->duration);
	fprintf(trace, "LanguageCode=\"%c%c%c\">\n", p->packedLanguage[0], p->packedLanguage[1], p->packedLanguage[2]);
	gf_isom_box_dump_done("MediaHeaderBox", a, trace);
	return GF_OK;
}

/* dumps a 'vmhd' box (no attributes beyond the common header) */
GF_Err vmhd_dump(GF_Box *a, FILE * trace)
{
	gf_isom_box_dump_start(a, "VideoMediaHeaderBox", trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("VideoMediaHeaderBox", a, trace);
	return GF_OK;
}

/* dumps an 'smhd' box (no attributes beyond the common header) */
GF_Err smhd_dump(GF_Box *a, FILE * trace)
{
	gf_isom_box_dump_start(a, "SoundMediaHeaderBox", trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("SoundMediaHeaderBox", a, trace);
	return GF_OK;
}

/* dumps an 'hmhd' box: hint track PDU size and bitrate statistics */
GF_Err hmhd_dump(GF_Box *a, FILE * trace)
{
	GF_HintMediaHeaderBox *p;
	p = (GF_HintMediaHeaderBox *)a;
	gf_isom_box_dump_start(a, "HintMediaHeaderBox", trace);
	fprintf(trace, "MaximumPDUSize=\"%d\" ", p->maxPDUSize);
	fprintf(trace, "AveragePDUSize=\"%d\" ", p->avgPDUSize);
	fprintf(trace, "MaxBitRate=\"%d\" ", p->maxBitrate);
	fprintf(trace, "AverageBitRate=\"%d\">\n", p->avgBitrate);
	gf_isom_box_dump_done("HintMediaHeaderBox", a, trace);
	return GF_OK;
}

/* dumps an 'nmhd' box (no attributes beyond the common header) */
GF_Err nmhd_dump(GF_Box *a, FILE * trace)
{
	gf_isom_box_dump_start(a, "MPEGMediaHeaderBox", trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("MPEGMediaHeaderBox", a, trace);
	return GF_OK;
}

/* dumps an 'stbl' box and all sample-table children (continues next chunk) */
GF_Err stbl_dump(GF_Box *a, FILE * trace)
{
	GF_SampleTableBox *p;
	p = (GF_SampleTableBox *)a;
	gf_isom_box_dump_start(a, "SampleTableBox", trace);
/* continuation of stbl_dump from the previous chunk: dump each sample-table
   child; mandatory children are forced (dump_ex) only when the box has size */
fprintf(trace, ">\n");
	if (p->size)
		gf_isom_box_dump_ex(p->SampleDescription, trace, GF_ISOM_BOX_TYPE_STSD);
	if (p->size)
		gf_isom_box_dump_ex(p->TimeToSample, trace, GF_ISOM_BOX_TYPE_STTS);
	if (p->CompositionOffset) gf_isom_box_dump(p->CompositionOffset, trace);
	if (p->CompositionToDecode) gf_isom_box_dump(p->CompositionToDecode, trace);
	if (p->SyncSample) gf_isom_box_dump(p->SyncSample, trace);
	if (p->ShadowSync) gf_isom_box_dump(p->ShadowSync, trace);
	if (p->size)
		gf_isom_box_dump_ex(p->SampleToChunk, trace, GF_ISOM_BOX_TYPE_STSC);
	if (p->size)
		gf_isom_box_dump_ex(p->SampleSize, trace, GF_ISOM_BOX_TYPE_STSZ);
	if (p->size)
		gf_isom_box_dump_ex(p->ChunkOffset, trace, GF_ISOM_BOX_TYPE_STCO);
	if (p->DegradationPriority) gf_isom_box_dump(p->DegradationPriority, trace);
	if (p->SampleDep) gf_isom_box_dump(p->SampleDep, trace);
	if (p->PaddingBits) gf_isom_box_dump(p->PaddingBits, trace);
	if (p->Fragments) gf_isom_box_dump(p->Fragments, trace);
	if (p->sub_samples) gf_isom_box_array_dump(p->sub_samples, trace);
	if (p->sampleGroupsDescription) gf_isom_box_array_dump(p->sampleGroupsDescription, trace);
	if (p->sampleGroups) gf_isom_box_array_dump(p->sampleGroups, trace);
	if (p->sai_sizes) {
		u32 i;
		for (i = 0; i < gf_list_count(p->sai_sizes); i++) {
			GF_SampleAuxiliaryInfoSizeBox *saiz = (GF_SampleAuxiliaryInfoSizeBox *)gf_list_get(p->sai_sizes, i);
			gf_isom_box_dump(saiz, trace);
		}
	}
	if (p->sai_offsets) {
		u32 i;
		for (i = 0; i < gf_list_count(p->sai_offsets); i++) {
			GF_SampleAuxiliaryInfoOffsetBox *saio = (GF_SampleAuxiliaryInfoOffsetBox *)gf_list_get(p->sai_offsets, i);
			gf_isom_box_dump(saio, trace);
		}
	}
	gf_isom_box_dump_done("SampleTableBox", a, trace);
	return GF_OK;
}

/* dumps a 'dinf' box; dref child is forced when the box has size */
GF_Err dinf_dump(GF_Box *a, FILE * trace)
{
	GF_DataInformationBox *p;
	p = (GF_DataInformationBox *)a;
	gf_isom_box_dump_start(a, "DataInformationBox", trace);
	fprintf(trace, ">\n");
	if (p->size)
		gf_isom_box_dump_ex(p->dref, trace, GF_ISOM_BOX_TYPE_DREF);
	gf_isom_box_dump_done("DataInformationBox", a, trace);
	return GF_OK;
}

/* dumps a 'url ' data entry: either the URL, or a note that data is
   self-contained (flags & 1) / missing */
GF_Err url_dump(GF_Box *a, FILE * trace)
{
	GF_DataEntryURLBox *p;
	p = (GF_DataEntryURLBox *)a;
	gf_isom_box_dump_start(a, "URLDataEntryBox", trace);
	if (p->location) {
		fprintf(trace, " URL=\"%s\">\n", p->location);
	} else {
		fprintf(trace, ">\n");
		if (p->size) {
			if (! (p->flags & 1) ) {
				fprintf(trace, "<!--ERROR: No location indicated-->\n");
			} else {
				fprintf(trace, "<!--Data is contained in the movie file-->\n");
			}
		}
	}
	gf_isom_box_dump_done("URLDataEntryBox", a, trace);
	return GF_OK;
}

/* dumps a 'urn ' data entry: optional URN and URL attributes */
GF_Err urn_dump(GF_Box *a, FILE * trace)
{
	GF_DataEntryURNBox *p;
	p = (GF_DataEntryURNBox *)a;
	gf_isom_box_dump_start(a, "URNDataEntryBox", trace);
	if (p->nameURN) fprintf(trace, " URN=\"%s\"", p->nameURN);
	if (p->location) fprintf(trace, " URL=\"%s\"", p->location);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("URNDataEntryBox", a, trace);
	return GF_OK;
}

/* dumps a 'cprt' box: language code and copyright notice */
GF_Err cprt_dump(GF_Box *a, FILE * trace)
{
	GF_CopyrightBox *p;
	p = (GF_CopyrightBox *)a;
	gf_isom_box_dump_start(a, "CopyrightBox", trace);
	fprintf(trace, "LanguageCode=\"%s\" CopyrightNotice=\"%s\">\n", p->packedLanguageCode, p->notice);
	gf_isom_box_dump_done("CopyrightBox", a, trace);
	return GF_OK;
}

/* dumps a 'kind' box (scheme URI + optional value; continues next chunk) */
GF_Err kind_dump(GF_Box *a, FILE * trace)
{
	GF_KindBox *p;
	p = (GF_KindBox *)a;
	gf_isom_box_dump_start(a, "KindBox", trace);
	fprintf(trace, "schemeURI=\"%s\" value=\"%s\">\n", p->schemeURI, (p->value ?
/* continuation of kind_dump from the previous chunk */
p->value : ""));
	gf_isom_box_dump_done("KindBox", a, trace);
	return GF_OK;
}

/* formats a duration (in 'timescale' units) as HH:MM:SS.mmm into szDur;
   returns szDur for convenience */
static char *format_duration(u64 dur, u32 timescale, char *szDur)
{
	u32 h, m, s, ms;
	dur = (u32) (( ((Double) (s64) dur)/timescale)*1000);
	h = (u32) (dur / 3600000);
	dur -= h*3600000;
	m = (u32) (dur / 60000);
	dur -= m*60000;
	s = (u32) (dur/1000);
	dur -= s*1000;
	ms = (u32) (dur);
	sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms);
	return szDur;
}

/* writes 'name' to trace, escaping double quotes as &quot; for XML attrs */
static void dump_escape_string(FILE * trace, char *name)
{
	u32 i, len = (u32) strlen(name);
	for (i=0; i<len; i++) {
		if (name[i]=='"') fprintf(trace, "&quot;");
		else fputc(name[i], trace);
	}
}

/* dumps a 'chpl' box: one <Chapter> per entry, start times in 100ns units
   (hence the 1000*10000 timescale passed to format_duration) */
GF_Err chpl_dump(GF_Box *a, FILE * trace)
{
	u32 i, count;
	char szDur[20];
	GF_ChapterListBox *p = (GF_ChapterListBox *)a;
	gf_isom_box_dump_start(a, "ChapterListBox", trace);
	fprintf(trace, ">\n");
	if (p->size) {
		count = gf_list_count(p->list);
		for (i=0; i<count; i++) {
			GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(p->list, i);
			fprintf(trace, "<Chapter name=\"");
			dump_escape_string(trace, ce->name);
			fprintf(trace, "\" startTime=\"%s\" />\n", format_duration(ce->start_time, 1000*10000, szDur));
		}
	} else {
		/* template dump */
		fprintf(trace, "<Chapter name=\"\" startTime=\"\"/>\n");
	}
	gf_isom_box_dump_done("ChapterListBox", a, trace);
	return GF_OK;
}

/* dumps a 'pdin' box: one <DownloadInfo> per rate/time pair */
GF_Err pdin_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_ProgressiveDownloadBox *p = (GF_ProgressiveDownloadBox *)a;
	gf_isom_box_dump_start(a, "ProgressiveDownloadBox", trace);
	fprintf(trace, ">\n");
	if (p->size) {
		for (i=0; i<p->count; i++) {
			fprintf(trace, "<DownloadInfo rate=\"%d\" estimatedTime=\"%d\" />\n", p->rates[i], p->times[i]);
		}
	} else {
		fprintf(trace, "<DownloadInfo rate=\"\" estimatedTime=\"\" />\n");
	}
	gf_isom_box_dump_done("ProgressiveDownloadBox", a, trace);
	return GF_OK;
}

/* dumps an 'hdlr' box; handles both Pascal-style (length-prefixed) and
   plain C-string handler names */
GF_Err hdlr_dump(GF_Box *a, FILE * trace)
{
	GF_HandlerBox *p = (GF_HandlerBox *)a;
	gf_isom_box_dump_start(a, "HandlerBox", trace);
	if (p->nameUTF8 && (u32) p->nameUTF8[0] == strlen(p->nameUTF8+1)) {
		/* first byte matches remaining length: Pascal string, skip it */
		fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8+1);
	} else {
		fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8);
	}
	fprintf(trace, "reserved1=\"%d\" reserved2=\"", p->reserved1);
	dump_data(trace, (char *) p->reserved2, 12);
	fprintf(trace, "\"");
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("HandlerBox", a, trace);
	return GF_OK;
}

/* dumps an 'iods' box, delegating descriptor dumping to the OD framework */
GF_Err iods_dump(GF_Box *a, FILE * trace)
{
	GF_ObjectDescriptorBox *p;
	p = (GF_ObjectDescriptorBox *)a;
	gf_isom_box_dump_start(a, "ObjectDescriptorBox", trace);
	fprintf(trace, ">\n");
	if (p->descriptor) {
#ifndef GPAC_DISABLE_OD_DUMP
		gf_odf_dump_desc(p->descriptor, trace, 1, GF_TRUE);
#else
		fprintf(trace, "<!-- Object Descriptor Dumping disabled in this build of GPAC -->\n");
#endif
	} else if (p->size) {
		fprintf(trace, "<!--WARNING: Object Descriptor not present-->\n");
	}
	gf_isom_box_dump_done("ObjectDescriptorBox", a, trace);
	return GF_OK;
}

/* dumps a 'trak' box and its children; warns if the track header is missing */
GF_Err trak_dump(GF_Box *a, FILE * trace)
{
	GF_TrackBox *p;
	p = (GF_TrackBox *)a;
	gf_isom_box_dump_start(a, "TrackBox", trace);
	fprintf(trace, ">\n");
	if (p->Header) {
		gf_isom_box_dump(p->Header, trace);
	} else if (p->size) {
		fprintf(trace, "<!--INVALID FILE: Missing Track Header-->\n");
	}
	if (p->References) gf_isom_box_dump(p->References, trace);
	if (p->meta) gf_isom_box_dump(p->meta, trace);
	if (p->editBox) gf_isom_box_dump(p->editBox, trace);
	if (p->Media) gf_isom_box_dump(p->Media, trace);
	if (p->groups) gf_isom_box_dump(p->groups, trace);
	if (p->udta) gf_isom_box_dump(p->udta, trace);
	gf_isom_box_dump_done("TrackBox", a, trace);
	return GF_OK;
}

/* dumps an 'mp4s' sample entry (ESD child; continues next chunk) */
GF_Err mp4s_dump(GF_Box *a, FILE * trace)
{
	GF_MPEGSampleEntryBox *p;
	p = (GF_MPEGSampleEntryBox *)a;
	gf_isom_box_dump_start(a, "MPEGSystemsSampleDescriptionBox", trace);
	fprintf(trace, "DataReferenceIndex=\"%d\">\n", p->dataReferenceIndex);
	if (p->esd) {
		gf_isom_box_dump(p->esd, trace);
	} else if (p->size) {
		fprintf(trace, "<!--INVALID MP4 FILE: ESDBox not present in MPEG Sample Description or corrupted-->\n");
	}
	if
/* continuation of mp4s_dump: dump protection boxes for encrypted ('encs') entries */
(a->type == GF_ISOM_BOX_TYPE_ENCS) {
		gf_isom_box_array_dump(p->protections, trace);
	}
	gf_isom_box_dump_done("MPEGSystemsSampleDescriptionBox", a, trace);
	return GF_OK;
}

/* dumps a visual sample entry; element name chosen from the codec 4CC,
   then codec-specific config boxes are dumped */
GF_Err video_sample_entry_dump(GF_Box *a, FILE * trace)
{
	GF_MPEGVisualSampleEntryBox *p = (GF_MPEGVisualSampleEntryBox *)a;
	const char *name;
	switch (p->type) {
	case GF_ISOM_SUBTYPE_AVC_H264:
	case GF_ISOM_SUBTYPE_AVC2_H264:
	case GF_ISOM_SUBTYPE_AVC3_H264:
	case GF_ISOM_SUBTYPE_AVC4_H264:
		name = "AVCSampleEntryBox";
		break;
	case GF_ISOM_SUBTYPE_MVC_H264:
		name = "MVCSampleEntryBox";
		break;
	case GF_ISOM_SUBTYPE_SVC_H264:
		name = "SVCSampleEntryBox";
		break;
	case GF_ISOM_SUBTYPE_HVC1:
	case GF_ISOM_SUBTYPE_HEV1:
	case GF_ISOM_SUBTYPE_HVC2:
	case GF_ISOM_SUBTYPE_HEV2:
		name = "HEVCSampleEntryBox";
		break;
	case GF_ISOM_SUBTYPE_LHV1:
	case GF_ISOM_SUBTYPE_LHE1:
		name = "LHEVCSampleEntryBox";
		break;
	case GF_ISOM_SUBTYPE_3GP_H263:
		name = "H263SampleDescriptionBox";
		break;
	default:
		name = "MPEGVisualSampleDescriptionBox";
	}
	gf_isom_box_dump_start(a, name, trace);
	fprintf(trace, " DataReferenceIndex=\"%d\" Width=\"%d\" Height=\"%d\"", p->dataReferenceIndex, p->Width, p->Height);
	//dump reserved info
	fprintf(trace, " XDPI=\"%d\" YDPI=\"%d\" BitDepth=\"%d\"", p->horiz_res, p->vert_res, p->bit_depth);
	/* compressor_name is a length-prefixed (Pascal) string: skip first byte */
	if (strlen((const char*)p->compressor_name) )
		fprintf(trace, " CompressorName=\"%s\"\n", p->compressor_name+1);
	fprintf(trace, ">\n");
	if (p->esd) {
		gf_isom_box_dump(p->esd, trace);
	} else {
		if (p->hevc_config) gf_isom_box_dump(p->hevc_config, trace);
		if (p->avc_config) gf_isom_box_dump(p->avc_config, trace);
		if (p->ipod_ext) gf_isom_box_dump(p->ipod_ext, trace);
		if (p->descr) gf_isom_box_dump(p->descr, trace);
		if (p->svc_config) gf_isom_box_dump(p->svc_config, trace);
		if (p->mvc_config) gf_isom_box_dump(p->mvc_config, trace);
		if (p->lhvc_config) gf_isom_box_dump(p->lhvc_config, trace);
		if (p->cfg_3gpp) gf_isom_box_dump(p->cfg_3gpp, trace);
	}
	if (a->type == GF_ISOM_BOX_TYPE_ENCV) {
		gf_isom_box_array_dump(p->protections, trace);
	}
	if (p->pasp) gf_isom_box_dump(p->pasp, trace);
	if (p->rvcc) gf_isom_box_dump(p->rvcc, trace);
	if (p->rinf) gf_isom_box_dump(p->rinf, trace);
	gf_isom_box_dump_done(name, a, trace);
	return GF_OK;
}

/* writes the attributes common to every audio sample entry */
void base_audio_entry_dump(GF_AudioSampleEntryBox *p, FILE * trace)
{
	fprintf(trace, " DataReferenceIndex=\"%d\" SampleRate=\"%d\"", p->dataReferenceIndex, p->samplerate_hi);
	fprintf(trace, " Channels=\"%d\" BitsPerSample=\"%d\"", p->channel_count, p->bitspersample);
}

/* dumps an audio sample entry; element name chosen from the codec 4CC,
   then the matching config box (ESD / 3GPP / AC3) is dumped */
GF_Err audio_sample_entry_dump(GF_Box *a, FILE * trace)
{
	char *szName;
	Bool is_3gpp = GF_FALSE;
	GF_MPEGAudioSampleEntryBox *p = (GF_MPEGAudioSampleEntryBox *)a;
	switch (p->type) {
	case GF_ISOM_SUBTYPE_3GP_AMR:
		szName = "AMRSampleDescriptionBox";
		is_3gpp = GF_TRUE;
		break;
	case GF_ISOM_SUBTYPE_3GP_AMR_WB:
		szName = "AMR_WB_SampleDescriptionBox";
		is_3gpp = GF_TRUE;
		break;
	case GF_ISOM_SUBTYPE_3GP_EVRC:
		szName = "EVRCSampleDescriptionBox";
		is_3gpp = GF_TRUE;
		break;
	case GF_ISOM_SUBTYPE_3GP_QCELP:
		szName = "QCELPSampleDescriptionBox";
		is_3gpp = GF_TRUE;
		break;
	case GF_ISOM_SUBTYPE_3GP_SMV:
		szName = "SMVSampleDescriptionBox";
		is_3gpp = GF_TRUE;
		break;
	case GF_ISOM_BOX_TYPE_MP4A:
		szName = "MPEGAudioSampleDescriptionBox";
		break;
	case GF_ISOM_BOX_TYPE_AC3:
		szName = "AC3SampleEntryBox";
		break;
	case GF_ISOM_BOX_TYPE_EC3:
		szName = "EC3SampleEntryBox";
		break;
	default:
		szName = "AudioSampleDescriptionBox";
		break;
	}
	gf_isom_box_dump_start(a, szName, trace);
	base_audio_entry_dump((GF_AudioSampleEntryBox *)p, trace);
	fprintf(trace, ">\n");
	if (p->esd) {
		gf_isom_box_dump(p->esd, trace);
	} else if (p->cfg_3gpp) {
		gf_isom_box_dump(p->cfg_3gpp, trace);
	} else if (p->cfg_ac3) {
		if (p->size) gf_isom_box_dump(p->cfg_ac3, trace);
	} else if (p->size) {
		if (is_3gpp) {
			fprintf(trace, "<!-- INVALID 3GPP FILE: Config not present in Sample Description-->\n");
		} else {
			fprintf(trace, "<!--INVALID MP4 FILE: ESDBox not present in MPEG Sample Description or corrupted-->\n");
		}
	}
	if (a->type == GF_ISOM_BOX_TYPE_ENCA) {
		gf_isom_box_array_dump(p->protections, trace);
	}
	gf_isom_box_dump_done(szName, a, trace);
	return GF_OK;
}

/* dumps a generic (unrecognized) sample entry: temporarily restores the
   original 4CC so the header prints correctly */
GF_Err gnrm_dump(GF_Box *a, FILE * trace)
{
	GF_GenericSampleEntryBox *p = (GF_GenericSampleEntryBox *)a;
	if (p->EntryType)
		a->type = p->EntryType;
	gf_isom_box_dump_start(a, "SampleDescriptionBox", trace);
	fprintf(trace, "DataReferenceIndex=\"%d\" ExtensionDataSize=\"%d\">\n", p->dataReferenceIndex, p->data_size);
	a->type = GF_ISOM_BOX_TYPE_GNRM;
	gf_isom_box_dump_done("SampleDescriptionBox", a, trace);
	return GF_OK;
}

/* dumps a generic visual sample entry (same 4CC-swap trick as gnrm_dump) */
GF_Err gnrv_dump(GF_Box *a, FILE * trace)
{
	GF_GenericVisualSampleEntryBox *p = (GF_GenericVisualSampleEntryBox *)a;
	if (p->EntryType)
		a->type = p->EntryType;
	gf_isom_box_dump_start(a, "VisualSampleDescriptionBox", trace);
	fprintf(trace, "DataReferenceIndex=\"%d\" Version=\"%d\" Revision=\"%d\" Vendor=\"%d\" TemporalQuality=\"%d\" SpacialQuality=\"%d\" Width=\"%d\" Height=\"%d\" HorizontalResolution=\"%d\" VerticalResolution=\"%d\" CompressorName=\"%s\" BitDepth=\"%d\">\n", p->dataReferenceIndex, p->version, p->revision, p->vendor, p->temporal_quality, p->spatial_quality, p->Width, p->Height, p->horiz_res, p->vert_res, p->compressor_name+1, p->bit_depth);
	a->type = GF_ISOM_BOX_TYPE_GNRV;
	gf_isom_box_dump_done("VisualSampleDescriptionBox", a, trace);
	return GF_OK;
}

/* dumps a generic audio sample entry (same 4CC-swap trick as gnrm_dump) */
GF_Err gnra_dump(GF_Box *a, FILE * trace)
{
	GF_GenericAudioSampleEntryBox *p = (GF_GenericAudioSampleEntryBox *)a;
	if (p->EntryType)
		a->type = p->EntryType;
	gf_isom_box_dump_start(a, "AudioSampleDescriptionBox", trace);
	fprintf(trace, "DataReferenceIndex=\"%d\" Version=\"%d\" Revision=\"%d\" Vendor=\"%d\" ChannelCount=\"%d\" BitsPerSample=\"%d\" Samplerate=\"%d\">\n", p->dataReferenceIndex, p->version, p->revision, p->vendor, p->channel_count, p->bitspersample, p->samplerate_hi);
	a->type = GF_ISOM_BOX_TYPE_GNRA;
	gf_isom_box_dump_done("AudioSampleDescriptionBox", a, trace);
	return GF_OK;
}

/* dumps an 'edts' box (continues next chunk) */
GF_Err edts_dump(GF_Box *a, FILE * trace)
{
	GF_EditBox *p;
	p = (GF_EditBox *)a;
/* continuation of edts_dump from the previous chunk */
gf_isom_box_dump_start(a, "EditBox", trace);
	fprintf(trace, ">\n");
	if (p->size)
		gf_isom_box_dump_ex(p->editList, trace, GF_ISOM_BOX_TYPE_ELST);
	gf_isom_box_dump_done("EditBox", a, trace);
	return GF_OK;
}

/* dumps a 'udta' box: iterates the per-type record maps */
GF_Err udta_dump(GF_Box *a, FILE * trace)
{
	GF_UserDataBox *p;
	GF_UserDataMap *map;
	u32 i;
	p = (GF_UserDataBox *)a;
	gf_isom_box_dump_start(a, "UserDataBox", trace);
	fprintf(trace, ">\n");
	i=0;
	while ((map = (GF_UserDataMap *)gf_list_enum(p->recordList, &i))) {
		gf_isom_box_array_dump(map->other_boxes, trace);
	}
	gf_isom_box_dump_done("UserDataBox", a, trace);
	return GF_OK;
}

/* dumps a 'dref' box (children dumped via the generic machinery) */
GF_Err dref_dump(GF_Box *a, FILE * trace)
{
	//	GF_DataReferenceBox *p = (GF_DataReferenceBox *)a;
	gf_isom_box_dump_start(a, "DataReferenceBox", trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("DataReferenceBox", a, trace);
	return GF_OK;
}

/* dumps an 'stsd' box (children dumped via the generic machinery) */
GF_Err stsd_dump(GF_Box *a, FILE * trace)
{
	//	GF_SampleDescriptionBox *p = (GF_SampleDescriptionBox *)a;
	gf_isom_box_dump_start(a, "SampleDescriptionBox", trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("SampleDescriptionBox", a, trace);
	return GF_OK;
}

/* dumps an 'stts' box and a comment totaling the sample count */
GF_Err stts_dump(GF_Box *a, FILE * trace)
{
	GF_TimeToSampleBox *p;
	u32 i, nb_samples;
	p = (GF_TimeToSampleBox *)a;
	gf_isom_box_dump_start(a, "TimeToSampleBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
	nb_samples = 0;
	for (i=0; i<p->nb_entries; i++) {
		fprintf(trace, "<TimeToSampleEntry SampleDelta=\"%d\" SampleCount=\"%d\"/>\n", p->entries[i].sampleDelta, p->entries[i].sampleCount);
		nb_samples += p->entries[i].sampleCount;
	}
	if (p->size)
		fprintf(trace, "<!-- counted %d samples in STTS entries -->\n", nb_samples);
	else
		fprintf(trace, "<TimeToSampleEntry SampleDelta=\"\" SampleCount=\"\"/>\n");
	gf_isom_box_dump_done("TimeToSampleBox", a, trace);
	return GF_OK;
}

/* dumps a 'ctts' box and a comment totaling the sample count */
GF_Err ctts_dump(GF_Box *a, FILE * trace)
{
	GF_CompositionOffsetBox *p;
	u32 i, nb_samples;
	p = (GF_CompositionOffsetBox *)a;
	gf_isom_box_dump_start(a, "CompositionOffsetBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
	nb_samples = 0;
	for (i=0; i<p->nb_entries; i++) {
		fprintf(trace, "<CompositionOffsetEntry CompositionOffset=\"%d\" SampleCount=\"%d\"/>\n", p->entries[i].decodingOffset, p->entries[i].sampleCount);
		nb_samples += p->entries[i].sampleCount;
	}
	if (p->size)
		fprintf(trace, "<!-- counted %d samples in CTTS entries -->\n", nb_samples);
	else
		fprintf(trace, "<CompositionOffsetEntry CompositionOffset=\"\" SampleCount=\"\"/>\n");
	gf_isom_box_dump_done("CompositionOffsetBox", a, trace);
	return GF_OK;
}

/* dumps a 'cslg' box.
   NOTE(review): the format string names 4 attributes (compositionToDTSShift,
   leastDecodeToDisplayDelta, compositionStartTime, compositionEndTime) but the
   arguments start at leastDecodeToDisplayDelta — the compositionToDTSShift
   field itself is never printed and each attribute is paired with the next
   field's value. Looks like a mismatch — confirm against the box definition. */
GF_Err cslg_dump(GF_Box *a, FILE * trace)
{
	GF_CompositionToDecodeBox *p;
	p = (GF_CompositionToDecodeBox *)a;
	gf_isom_box_dump_start(a, "CompositionToDecodeBox", trace);
	fprintf(trace, "compositionToDTSShift=\"%d\" leastDecodeToDisplayDelta=\"%d\" compositionStartTime=\"%d\" compositionEndTime=\"%d\">\n", p->leastDecodeToDisplayDelta, p->greatestDecodeToDisplayDelta, p->compositionStartTime, p->compositionEndTime);
	gf_isom_box_dump_done("CompositionToDecodeBox", a, trace);
	return GF_OK;
}

/* dumps a 'ccst' box: coding-constraint flags */
GF_Err ccst_dump(GF_Box *a, FILE * trace)
{
	GF_CodingConstraintsBox *p = (GF_CodingConstraintsBox *)a;
	gf_isom_box_dump_start(a, "CodingConstraintsBox", trace);
	fprintf(trace, "all_ref_pics_intra=\"%d\" intra_pred_used=\"%d\" max_ref_per_pic=\"%d\" reserved=\"%d\">\n", p->all_ref_pics_intra, p->intra_pred_used, p->max_ref_per_pic, p->reserved);
	gf_isom_box_dump_done("CodingConstraintsBox", a, trace);
	return GF_OK;
}

/* dumps an 'stsh' box: shadowed/sync sample pairs */
GF_Err stsh_dump(GF_Box *a, FILE * trace)
{
	GF_ShadowSyncBox *p;
	u32 i;
	GF_StshEntry *t;
	p = (GF_ShadowSyncBox *)a;
	gf_isom_box_dump_start(a, "SyncShadowBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", gf_list_count(p->entries));
	i=0;
	while ((t = (GF_StshEntry *)gf_list_enum(p->entries, &i))) {
		fprintf(trace, "<SyncShadowEntry ShadowedSample=\"%d\" SyncSample=\"%d\"/>\n", t->shadowedSampleNumber, t->syncSampleNumber);
	}
	if (!p->size) {
		fprintf(trace, "<SyncShadowEntry ShadowedSample=\"\" SyncSample=\"\"/>\n");
	}
	gf_isom_box_dump_done("SyncShadowBox", a, trace);
	return GF_OK;
}

/* dumps an 'elst' box: edit-list entries */
GF_Err elst_dump(GF_Box *a, FILE * trace)
{
	GF_EditListBox *p;
	u32 i;
	GF_EdtsEntry *t;
	p = (GF_EditListBox *)a;
	gf_isom_box_dump_start(a, "EditListBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", gf_list_count(p->entryList));
	i=0;
	while ((t = (GF_EdtsEntry *)gf_list_enum(p->entryList, &i))) {
		fprintf(trace, "<EditListEntry Duration=\""LLD"\" MediaTime=\""LLD"\" MediaRate=\"%u\"/>\n", LLD_CAST t->segmentDuration, LLD_CAST t->mediaTime, t->mediaRate);
	}
	if (!p->size) {
		fprintf(trace, "<EditListEntry Duration=\"\" MediaTime=\"\" MediaRate=\"\"/>\n");
	}
	gf_isom_box_dump_done("EditListBox", a, trace);
	return GF_OK;
}

/* dumps an 'stsc' box; estimates the covered sample count from consecutive
   firstChunk deltas (last run counted as a single chunk) */
GF_Err stsc_dump(GF_Box *a, FILE * trace)
{
	GF_SampleToChunkBox *p;
	u32 i, nb_samples;
	p = (GF_SampleToChunkBox *)a;
	gf_isom_box_dump_start(a, "SampleToChunkBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
	nb_samples = 0;
	for (i=0; i<p->nb_entries; i++) {
		fprintf(trace, "<SampleToChunkEntry FirstChunk=\"%d\" SamplesPerChunk=\"%d\" SampleDescriptionIndex=\"%d\"/>\n", p->entries[i].firstChunk, p->entries[i].samplesPerChunk, p->entries[i].sampleDescriptionIndex);
		if (i+1<p->nb_entries) {
			nb_samples += (p->entries[i+1].firstChunk - p->entries[i].firstChunk) * p->entries[i].samplesPerChunk;
		} else {
			nb_samples += p->entries[i].samplesPerChunk;
		}
	}
	if (p->size)
		fprintf(trace, "<!-- counted %d samples in STSC entries (could be less than sample count) -->\n", nb_samples);
	else
		fprintf(trace, "<SampleToChunkEntry FirstChunk=\"\" SamplesPerChunk=\"\" SampleDescriptionIndex=\"\"/>\n");
	gf_isom_box_dump_done("SampleToChunkBox", a, trace);
	return GF_OK;
}

/* dumps an 'stsz' or 'stz2' box (continues next chunk) */
GF_Err stsz_dump(GF_Box *a, FILE * trace)
{
	GF_SampleSizeBox *p;
	u32 i;
	p = (GF_SampleSizeBox *)a;
	if (a->type == GF_ISOM_BOX_TYPE_STSZ) {
		gf_isom_box_dump_start(a, "SampleSizeBox", trace);
	} else {
		gf_isom_box_dump_start(a, "CompactSampleSizeBox", trace);
	}
	fprintf(trace, "SampleCount=\"%d\"", p->sampleCount);
	if (a->type == GF_ISOM_BOX_TYPE_STSZ) {
		if (p->sampleSize) {
/* continuation of stsz_dump: constant-size shortcut for 'stsz', field-size
   attribute for 'stz2', then per-sample sizes when non-constant */
fprintf(trace, " ConstantSampleSize=\"%d\"", p->sampleSize);
		}
	} else {
		fprintf(trace, " SampleSizeBits=\"%d\"", p->sampleSize);
	}
	fprintf(trace, ">\n");
	if ((a->type != GF_ISOM_BOX_TYPE_STSZ) || !p->sampleSize) {
		if (!p->sizes && p->size) {
			fprintf(trace, "<!--WARNING: No Sample Size indications-->\n");
		} else {
			for (i=0; i<p->sampleCount; i++) {
				fprintf(trace, "<SampleSizeEntry Size=\"%d\"/>\n", p->sizes[i]);
			}
		}
	}
	if (!p->size) {
		fprintf(trace, "<SampleSizeEntry Size=\"\"/>\n");
	}
	gf_isom_box_dump_done((a->type == GF_ISOM_BOX_TYPE_STSZ) ? "SampleSizeBox" : "CompactSampleSizeBox", a, trace);
	return GF_OK;
}

/* dumps an 'stco' box: 32-bit chunk offsets */
GF_Err stco_dump(GF_Box *a, FILE * trace)
{
	GF_ChunkOffsetBox *p;
	u32 i;
	p = (GF_ChunkOffsetBox *)a;
	gf_isom_box_dump_start(a, "ChunkOffsetBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
	if (!p->offsets && p->size) {
		fprintf(trace, "<!--Warning: No Chunk Offsets indications-->\n");
	} else {
		for (i=0; i<p->nb_entries; i++) {
			fprintf(trace, "<ChunkEntry offset=\"%u\"/>\n", p->offsets[i]);
		}
	}
	if (!p->size) {
		fprintf(trace, "<ChunkEntry offset=\"\"/>\n");
	}
	gf_isom_box_dump_done("ChunkOffsetBox", a, trace);
	return GF_OK;
}

/* dumps an 'stss' box: sync (key frame) sample numbers */
GF_Err stss_dump(GF_Box *a, FILE * trace)
{
	GF_SyncSampleBox *p;
	u32 i;
	p = (GF_SyncSampleBox *)a;
	gf_isom_box_dump_start(a, "SyncSampleBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
	if (!p->sampleNumbers && p->size) {
		fprintf(trace, "<!--Warning: No Key Frames indications-->\n");
	} else {
		for (i=0; i<p->nb_entries; i++) {
			fprintf(trace, "<SyncSampleEntry sampleNumber=\"%u\"/>\n", p->sampleNumbers[i]);
		}
	}
	if (!p->size) {
		fprintf(trace, "<SyncSampleEntry sampleNumber=\"\"/>\n");
	}
	gf_isom_box_dump_done("SyncSampleBox", a, trace);
	return GF_OK;
}

/* dumps an 'stdp' box: per-sample degradation priorities */
GF_Err stdp_dump(GF_Box *a, FILE * trace)
{
	GF_DegradationPriorityBox *p;
	u32 i;
	p = (GF_DegradationPriorityBox *)a;
	gf_isom_box_dump_start(a, "DegradationPriorityBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
	if (!p->priorities && p->size) {
		fprintf(trace, "<!--Warning: No Degradation Priority indications-->\n");
	} else {
		for (i=0; i<p->nb_entries; i++) {
			fprintf(trace, "<DegradationPriorityEntry DegradationPriority=\"%d\"/>\n", p->priorities[i]);
		}
	}
	if (!p->size) {
		fprintf(trace, "<DegradationPriorityEntry DegradationPriority=\"\"/>\n");
	}
	gf_isom_box_dump_done("DegradationPriorityBox", a, trace);
	return GF_OK;
}

/* dumps an 'sdtp' box: decodes the 2-bit depends-on / depended-on /
   redundancy fields of each sample-dependency byte */
GF_Err sdtp_dump(GF_Box *a, FILE * trace)
{
	GF_SampleDependencyTypeBox *p;
	u32 i;
	p = (GF_SampleDependencyTypeBox*)a;
	gf_isom_box_dump_start(a, "SampleDependencyTypeBox", trace);
	fprintf(trace, "SampleCount=\"%d\">\n", p->sampleCount);
	if (!p->sample_info && p->size) {
		fprintf(trace, "<!--Warning: No sample dependencies indications-->\n");
	} else {
		for (i=0; i<p->sampleCount; i++) {
			u8 flag = p->sample_info[i];
			fprintf(trace, "<SampleDependencyEntry ");
			switch ( (flag >> 4) & 3) {
			case 0:
				fprintf(trace, "dependsOnOther=\"unknown\" ");
				break;
			case 1:
				fprintf(trace, "dependsOnOther=\"yes\" ");
				break;
			case 2:
				fprintf(trace, "dependsOnOther=\"no\" ");
				break;
			case 3:
				fprintf(trace, "dependsOnOther=\"RESERVED\" ");
				break;
			}
			switch ( (flag >> 2) & 3) {
			case 0:
				fprintf(trace, "dependedOn=\"unknown\" ");
				break;
			case 1:
				fprintf(trace, "dependedOn=\"yes\" ");
				break;
			case 2:
				fprintf(trace, "dependedOn=\"no\" ");
				break;
			case 3:
				fprintf(trace, "dependedOn=\"RESERVED\" ");
				break;
			}
			switch ( flag & 3) {
			case 0:
				fprintf(trace, "hasRedundancy=\"unknown\" ");
				break;
			case 1:
				fprintf(trace, "hasRedundancy=\"yes\" ");
				break;
			case 2:
				fprintf(trace, "hasRedundancy=\"no\" ");
				break;
			case 3:
				fprintf(trace, "hasRedundancy=\"RESERVED\" ");
				break;
			}
			fprintf(trace, " />\n");
		}
	}
	if (!p->size) {
		fprintf(trace, "<SampleDependencyEntry dependsOnOther=\"unknown|yes|no|RESERVED\" dependedOn=\"unknown|yes|no|RESERVED\" hasRedundancy=\"unknown|yes|no|RESERVED\"/>\n");
	}
	gf_isom_box_dump_done("SampleDependencyTypeBox", a, trace);
	return GF_OK;
}

/* dumps a 'co64' box: 64-bit chunk offsets */
GF_Err co64_dump(GF_Box *a, FILE * trace)
{
	GF_ChunkLargeOffsetBox *p;
	u32 i;
	p = (GF_ChunkLargeOffsetBox *)a;
	gf_isom_box_dump_start(a, "ChunkLargeOffsetBox", trace);
	fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries);
	if (!p->offsets && p->size) {
		fprintf(trace, "<!-- Warning: No Chunk Offsets indications/>\n");
	} else {
		for (i=0; i<p->nb_entries; i++)
			fprintf(trace, "<ChunkOffsetEntry offset=\""LLU"\"/>\n", LLU_CAST p->offsets[i]);
	}
	if (!p->size) {
		fprintf(trace, "<ChunkOffsetEntry offset=\"\"/>\n");
	}
	gf_isom_box_dump_done("ChunkLargeOffsetBox", a, trace);
	return GF_OK;
}

/* dumps an 'esds' box, delegating the descriptor to the OD framework */
GF_Err esds_dump(GF_Box *a, FILE * trace)
{
	GF_ESDBox *p;
	p = (GF_ESDBox *)a;
	gf_isom_box_dump_start(a, "MPEG4ESDescriptorBox", trace);
	fprintf(trace, ">\n");
	if (p->desc) {
#ifndef GPAC_DISABLE_OD_DUMP
		gf_odf_dump_desc((GF_Descriptor *) p->desc, trace, 1, GF_TRUE);
#else
		fprintf(trace, "<!-- Object Descriptor Dumping disabled in this build of GPAC -->\n");
#endif
	} else if (p->size) {
		fprintf(trace, "<!--INVALID MP4 FILE: ESD not present in MPEG Sample Description or corrupted-->\n");
	}
	gf_isom_box_dump_done("MPEG4ESDescriptorBox", a, trace);
	return GF_OK;
}

/* dumps a 'minf' box; mandatory children forced when the box has size */
GF_Err minf_dump(GF_Box *a, FILE * trace)
{
	GF_MediaInformationBox *p;
	p = (GF_MediaInformationBox *)a;
	gf_isom_box_dump_start(a, "MediaInformationBox", trace);
	fprintf(trace, ">\n");
	if (p->size)
		gf_isom_box_dump_ex(p->InfoHeader, trace, GF_ISOM_BOX_TYPE_NMHD);
	if (p->size)
		gf_isom_box_dump_ex(p->dataInformation, trace, GF_ISOM_BOX_TYPE_DINF);
	if (p->size)
		gf_isom_box_dump_ex(p->sampleTable, trace, GF_ISOM_BOX_TYPE_STBL);
	gf_isom_box_dump_done("MediaInformationBox", a, trace);
	return GF_OK;
}

/* dumps a 'tkhd' box (continues next chunk) */
GF_Err tkhd_dump(GF_Box *a, FILE * trace)
{
	GF_TrackHeaderBox *p;
	p = (GF_TrackHeaderBox *)a;
	gf_isom_box_dump_start(a, "TrackHeaderBox", trace);
	fprintf(trace, "CreationTime=\""LLD"\" ModificationTime=\""LLD"\" TrackID=\"%u\" Duration=\""LLD"\"", LLD_CAST p->creationTime, LLD_CAST p->modificationTime, p->trackID, LLD_CAST p->duration);
	if (p->alternate_group) fprintf(trace, " AlternateGroupID=\"%d\"",
/* continuation of tkhd_dump: audio tracks print Volume, visual tracks print
   Width/Height/Layer (16.16 and 8.8 fixed-point converted to float), then
   the 3x3 transformation matrix for visual tracks */
p->alternate_group);
	if (p->volume) {
		fprintf(trace, " Volume=\"%.2f\"", (Float)p->volume / 256);
	} else if (p->width || p->height) {
		fprintf(trace, " Width=\"%.2f\" Height=\"%.2f\"", (Float)p->width / 65536, (Float)p->height / 65536);
		if (p->layer) fprintf(trace, " Layer=\"%d\"", p->layer);
	}
	fprintf(trace, ">\n");
	if (p->width || p->height) {
		fprintf(trace, "<Matrix m11=\"0x%.8x\" m12=\"0x%.8x\" m13=\"0x%.8x\" ", p->matrix[0], p->matrix[1], p->matrix[2]);
		fprintf(trace, "m21=\"0x%.8x\" m22=\"0x%.8x\" m23=\"0x%.8x\" ", p->matrix[3], p->matrix[4], p->matrix[5]);
		fprintf(trace, "m31=\"0x%.8x\" m32=\"0x%.8x\" m33=\"0x%.8x\"/>\n", p->matrix[6], p->matrix[7], p->matrix[8]);
	}
	gf_isom_box_dump_done("TrackHeaderBox", a, trace);
	return GF_OK;
}

/* dumps a 'tref' box (children dumped via the generic machinery) */
GF_Err tref_dump(GF_Box *a, FILE * trace)
{
	//	GF_TrackReferenceBox *p = (GF_TrackReferenceBox *)a;
	gf_isom_box_dump_start(a, "TrackReferenceBox", trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("TrackReferenceBox", a, trace);
	return GF_OK;
}

/* dumps an 'mdia' box; mandatory children forced when the box has size */
GF_Err mdia_dump(GF_Box *a, FILE * trace)
{
	GF_MediaBox *p = (GF_MediaBox *)a;
	gf_isom_box_dump_start(a, "MediaBox", trace);
	fprintf(trace, ">\n");
	if (p->size)
		gf_isom_box_dump_ex(p->mediaHeader, trace, GF_ISOM_BOX_TYPE_MDHD);
	if (p->size)
		gf_isom_box_dump_ex(p->handler, trace,GF_ISOM_BOX_TYPE_HDLR);
	if (p->size)
		gf_isom_box_dump_ex(p->information, trace, GF_ISOM_BOX_TYPE_MINF);
	gf_isom_box_dump_done("MediaBox", a, trace);
	return GF_OK;
}

/* dumps an 'mfra' box: one forced 'tfra' dump per track entry */
GF_Err mfra_dump(GF_Box *a, FILE * trace)
{
	GF_MovieFragmentRandomAccessBox *p = (GF_MovieFragmentRandomAccessBox *)a;
	u32 i, count;
	GF_TrackFragmentRandomAccessBox *tfra;
	gf_isom_box_dump_start(a, "MovieFragmentRandomAccessBox", trace);
	fprintf(trace, ">\n");
	count = gf_list_count(p->tfra_list);
	for (i=0; i<count; i++) {
		tfra = (GF_TrackFragmentRandomAccessBox *)gf_list_get(p->tfra_list, i);
		gf_isom_box_dump_ex(tfra, trace, GF_ISOM_BOX_TYPE_TFRA);
	}
	gf_isom_box_dump_done("MovieFragmentRandomAccessBox", a, trace);
	return GF_OK;
}

/* dumps a 'tfra' box: random-access entries per track */
GF_Err tfra_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_TrackFragmentRandomAccessBox *p = (GF_TrackFragmentRandomAccessBox *)a;
	gf_isom_box_dump_start(a, "TrackFragmentRandomAccessBox", trace);
	fprintf(trace, "TrackId=\"%u\" number_of_entries=\"%u\">\n", p->track_id, p->nb_entries);
	for (i=0; i<p->nb_entries; i++) {
		fprintf(trace, "<RandomAccessEntry time=\""LLU"\" moof_offset=\""LLU"\" traf=\"%u\" trun=\"%u\" sample=\"%u\"/>\n", p->entries[i].time, p->entries[i].moof_offset, p->entries[i].traf_number, p->entries[i].trun_number, p->entries[i].sample_number);
	}
	if (!p->size) {
		fprintf(trace, "<RandomAccessEntry time=\"\" moof_offset=\"\" traf=\"\" trun=\"\" sample=\"\"/>\n");
	}
	gf_isom_box_dump_done("TrackFragmentRandomAccessBox", a, trace);
	return GF_OK;
}

/* dumps an 'mfro' box: container size back-pointer */
GF_Err mfro_dump(GF_Box *a, FILE * trace)
{
	GF_MovieFragmentRandomAccessOffsetBox *p = (GF_MovieFragmentRandomAccessOffsetBox *)a;
	gf_isom_box_dump_start(a, "MovieFragmentRandomAccessOffsetBox", trace);
	fprintf(trace, "container_size=\"%d\" >\n", p->container_size);
	gf_isom_box_dump_done("MovieFragmentRandomAccessOffsetBox", a, trace);
	return GF_OK;
}

/* dumps an 'elng' box: extended language tag */
GF_Err elng_dump(GF_Box *a, FILE * trace)
{
	GF_ExtendedLanguageBox *p = (GF_ExtendedLanguageBox *)a;
	gf_isom_box_dump_start(a, "ExtendedLanguageBox", trace);
	fprintf(trace, "LanguageCode=\"%s\">\n", p->extended_language);
	gf_isom_box_dump_done("ExtendedLanguageBox", a, trace);
	return GF_OK;
}

/* dumps an unknown box: restores original 4CC for the header, inlines the
   payload as a data attribute only when small (<100 bytes) */
GF_Err unkn_dump(GF_Box *a, FILE * trace)
{
	GF_UnknownBox *u = (GF_UnknownBox *)a;
	u->type = u->original_4cc;
	gf_isom_box_dump_start(a, "UnknownBox", trace);
	u->type = GF_ISOM_BOX_TYPE_UNKNOWN;
	if (u->dataSize<100)
		dump_data_attribute(trace, "data", u->data, u->dataSize);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("UnknownBox", a, trace);
	return GF_OK;
}

/* dumps a 'uuid' box (no attributes beyond the common header) */
GF_Err uuid_dump(GF_Box *a, FILE * trace)
{
	gf_isom_box_dump_start(a, "UUIDBox", trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("UUIDBox", a, trace);
	return GF_OK;
}

/* dumps a void box (continues next chunk) */
GF_Err void_dump(GF_Box *a, FILE * trace)
{
	gf_isom_box_dump_start(a,
"VoidBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("VoidBox", a, trace); return GF_OK; } GF_Err ftyp_dump(GF_Box *a, FILE * trace) { GF_FileTypeBox *p; u32 i; p = (GF_FileTypeBox *)a; gf_isom_box_dump_start(a, (a->type == GF_ISOM_BOX_TYPE_FTYP ? "FileTypeBox" : "SegmentTypeBox"), trace); fprintf(trace, "MajorBrand=\"%s\" MinorVersion=\"%d\">\n", gf_4cc_to_str(p->majorBrand), p->minorVersion); for (i=0; i<p->altCount; i++) { fprintf(trace, "<BrandEntry AlternateBrand=\"%s\"/>\n", gf_4cc_to_str(p->altBrand[i])); } if (!p->type) { fprintf(trace, "<BrandEntry AlternateBrand=\"4CC\"/>\n"); } gf_isom_box_dump_done((a->type == GF_ISOM_BOX_TYPE_FTYP ? "FileTypeBox" : "SegmentTypeBox"), a, trace); return GF_OK; } GF_Err padb_dump(GF_Box *a, FILE * trace) { GF_PaddingBitsBox *p; u32 i; p = (GF_PaddingBitsBox *)a; gf_isom_box_dump_start(a, "PaddingBitsBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->SampleCount); for (i=0; i<p->SampleCount; i+=1) { fprintf(trace, "<PaddingBitsEntry PaddingBits=\"%d\"/>\n", p->padbits[i]); } if (!p->size) { fprintf(trace, "<PaddingBitsEntry PaddingBits=\"\"/>\n"); } gf_isom_box_dump_done("PaddingBitsBox", a, trace); return GF_OK; } GF_Err stsf_dump(GF_Box *a, FILE * trace) { GF_SampleFragmentBox *p; GF_StsfEntry *ent; u32 i, j, count; p = (GF_SampleFragmentBox *)a; count = gf_list_count(p->entryList); gf_isom_box_dump_start(a, "SampleFragmentBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", count); for (i=0; i<count; i++) { ent = (GF_StsfEntry *)gf_list_get(p->entryList, i); fprintf(trace, "<SampleFragmentEntry SampleNumber=\"%d\" FragmentCount=\"%d\">\n", ent->SampleNumber, ent->fragmentCount); for (j=0; j<ent->fragmentCount; j++) fprintf(trace, "<FragmentSizeEntry size=\"%d\"/>\n", ent->fragmentSizes[j]); fprintf(trace, "</SampleFragmentEntry>\n"); } if (!p->size) { fprintf(trace, "<SampleFragmentEntry SampleNumber=\"\" FragmentCount=\"\">\n"); fprintf(trace, "<FragmentSizeEntry size=\"\"/>\n"); fprintf(trace, 
"</SampleFragmentEntry>\n"); } gf_isom_box_dump_done("SampleFragmentBox", a, trace); return GF_OK; } GF_Err gppc_dump(GF_Box *a, FILE * trace) { GF_3GPPConfigBox *p = (GF_3GPPConfigBox *)a; const char *name = gf_4cc_to_str(p->cfg.vendor); switch (p->cfg.type) { case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: gf_isom_box_dump_start(a, "AMRConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\"", name, p->cfg.decoder_version); fprintf(trace, " FramesPerSample=\"%d\" SupportedModes=\"%x\" ModeRotating=\"%d\"", p->cfg.frames_per_sample, p->cfg.AMR_mode_set, p->cfg.AMR_mode_change_period); fprintf(trace, ">\n"); gf_isom_box_dump_done("AMRConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_EVRC: gf_isom_box_dump_start(a, "EVRCConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\" FramesPerSample=\"%d\" >\n", name, p->cfg.decoder_version, p->cfg.frames_per_sample); gf_isom_box_dump_done("EVRCConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_QCELP: gf_isom_box_dump_start(a, "QCELPConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\" FramesPerSample=\"%d\" >\n", name, p->cfg.decoder_version, p->cfg.frames_per_sample); gf_isom_box_dump_done("QCELPConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_SMV: gf_isom_box_dump_start(a, "SMVConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\" FramesPerSample=\"%d\" >\n", name, p->cfg.decoder_version, p->cfg.frames_per_sample); gf_isom_box_dump_done("SMVConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_H263: gf_isom_box_dump_start(a, "H263ConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\"", name, p->cfg.decoder_version); fprintf(trace, " Profile=\"%d\" Level=\"%d\"", p->cfg.H263_profile, p->cfg.H263_level); fprintf(trace, ">\n"); gf_isom_box_dump_done("H263ConfigurationBox", a, trace); break; default: break; } return GF_OK; } GF_Err avcc_dump(GF_Box *a, FILE * trace) { u32 i, count; 
GF_AVCConfigurationBox *p = (GF_AVCConfigurationBox *) a; const char *name = (p->type==GF_ISOM_BOX_TYPE_MVCC) ? "MVC" : (p->type==GF_ISOM_BOX_TYPE_SVCC) ? "SVC" : "AVC"; char boxname[256]; sprintf(boxname, "%sConfigurationBox", name); gf_isom_box_dump_start(a, boxname, trace); fprintf(trace, ">\n"); fprintf(trace, "<%sDecoderConfigurationRecord", name); /* no parsed config record: flag it as invalid if the box had content, otherwise emit the empty syntax template */ if (! p->config) { if (p->size) { fprintf(trace, ">\n"); fprintf(trace, "<!-- INVALID AVC ENTRY : no AVC/SVC config record -->\n"); } else { fprintf(trace, " configurationVersion=\"\" AVCProfileIndication=\"\" profile_compatibility=\"\" AVCLevelIndication=\"\" nal_unit_size=\"\" complete_representation=\"\""); fprintf(trace, " chroma_format=\"\" luma_bit_depth=\"\" chroma_bit_depth=\"\""); fprintf(trace, ">\n"); fprintf(trace, "<SequenceParameterSet size=\"\" content=\"\"/>\n"); fprintf(trace, "<PictureParameterSet size=\"\" content=\"\"/>\n"); fprintf(trace, "<SequenceParameterSetExtensions size=\"\" content=\"\"/>\n"); } fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); gf_isom_box_dump_done(boxname, a, trace); return GF_OK; } fprintf(trace, " configurationVersion=\"%d\" AVCProfileIndication=\"%d\" profile_compatibility=\"%d\" AVCLevelIndication=\"%d\" nal_unit_size=\"%d\"", p->config->configurationVersion, p->config->AVCProfileIndication, p->config->profile_compatibility, p->config->AVCLevelIndication, p->config->nal_unit_size); if ((p->type==GF_ISOM_BOX_TYPE_SVCC) || (p->type==GF_ISOM_BOX_TYPE_MVCC) ) fprintf(trace, " complete_representation=\"%d\"", p->config->complete_representation); /* chroma/bit-depth fields are only present for Range Extensions profiles */ if (p->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(p->config->AVCProfileIndication)) { fprintf(trace, " chroma_format=\"%s\" luma_bit_depth=\"%d\" chroma_bit_depth=\"%d\"", gf_avc_hevc_get_chroma_format_name(p->config->chroma_format), p->config->luma_bit_depth, p->config->chroma_bit_depth); } } fprintf(trace, ">\n"); /* SPS, PPS and SPS-extension NAL units, hex-dumped inline */ count = gf_list_count(p->config->sequenceParameterSets); for (i=0; i<count; i++) { GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(p->config->sequenceParameterSets, i); fprintf(trace, "<SequenceParameterSet size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } count = gf_list_count(p->config->pictureParameterSets); for (i=0; i<count; i++) { GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(p->config->pictureParameterSets, i); fprintf(trace, "<PictureParameterSet size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } if (p->config->sequenceParameterSetExtensions) { count = gf_list_count(p->config->sequenceParameterSetExtensions); for (i=0; i<count; i++) { GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(p->config->sequenceParameterSetExtensions, i); fprintf(trace, "<SequenceParameterSetExtensions size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } } fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); gf_isom_box_dump_done(boxname, a, trace); return GF_OK; }

/* Dumps an HEVC/L-HEVC configuration box ('hvcC'/'lhvC') and its decoder configuration record. (continues) */
GF_Err hvcc_dump(GF_Box *a, FILE * trace) { u32 i, count; const char *name = (a->type==GF_ISOM_BOX_TYPE_HVCC) ? "HEVC" : "L-HEVC"; char boxname[256]; GF_HEVCConfigurationBox *p = (GF_HEVCConfigurationBox *) a; sprintf(boxname, "%sConfigurationBox", name); gf_isom_box_dump_start(a, boxname, trace); fprintf(trace, ">\n"); /* missing config record: invalid-entry marker or empty syntax template */ if (! p->config) { if (p->size) { fprintf(trace, "<!-- INVALID HEVC ENTRY: no HEVC/SHVC config record -->\n"); } else { fprintf(trace, "<%sDecoderConfigurationRecord nal_unit_size=\"\" configurationVersion=\"\" ", name); if (a->type==GF_ISOM_BOX_TYPE_HVCC) { fprintf(trace, "profile_space=\"\" tier_flag=\"\" profile_idc=\"\" general_profile_compatibility_flags=\"\" progressive_source_flag=\"\" interlaced_source_flag=\"\" non_packed_constraint_flag=\"\" frame_only_constraint_flag=\"\" constraint_indicator_flags=\"\" level_idc=\"\" "); } fprintf(trace, "min_spatial_segmentation_idc=\"\" parallelismType=\"\" "); if (a->type==GF_ISOM_BOX_TYPE_HVCC) fprintf(trace, "chroma_format=\"\" luma_bit_depth=\"\" chroma_bit_depth=\"\" avgFrameRate=\"\" constantFrameRate=\"\" numTemporalLayers=\"\" temporalIdNested=\"\""); fprintf(trace, ">\n"); fprintf(trace, "<ParameterSetArray nalu_type=\"\" complete_set=\"\">\n"); fprintf(trace, "<ParameterSet size=\"\" content=\"\"/>\n"); fprintf(trace, "</ParameterSetArray>\n"); fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); } fprintf(trace, "</%sConfigurationBox>\n", name); return GF_OK; } fprintf(trace, "<%sDecoderConfigurationRecord nal_unit_size=\"%d\" ", name, p->config->nal_unit_size); fprintf(trace, "configurationVersion=\"%u\" ", p->config->configurationVersion); if (a->type==GF_ISOM_BOX_TYPE_HVCC) { fprintf(trace, "profile_space=\"%u\" ", p->config->profile_space); fprintf(trace, "tier_flag=\"%u\" ", p->config->tier_flag); fprintf(trace, "profile_idc=\"%u\" ", p->config->profile_idc); fprintf(trace, "general_profile_compatibility_flags=\"%X\" ", p->config->general_profile_compatibility_flags); fprintf(trace, "progressive_source_flag=\"%u\" ", p->config->progressive_source_flag); fprintf(trace, "interlaced_source_flag=\"%u\" ", p->config->interlaced_source_flag); fprintf(trace, "non_packed_constraint_flag=\"%u\" ", p->config->non_packed_constraint_flag); fprintf(trace, "frame_only_constraint_flag=\"%u\" ",
p->config->frame_only_constraint_flag); fprintf(trace, "constraint_indicator_flags=\""LLX"\" ", p->config->constraint_indicator_flags); fprintf(trace, "level_idc=\"%d\" ", p->config->level_idc); } fprintf(trace, "min_spatial_segmentation_idc=\"%u\" ", p->config->min_spatial_segmentation_idc); fprintf(trace, "parallelismType=\"%u\" ", p->config->parallelismType); if (a->type==GF_ISOM_BOX_TYPE_HVCC) fprintf(trace, "chroma_format=\"%s\" luma_bit_depth=\"%u\" chroma_bit_depth=\"%u\" avgFrameRate=\"%u\" constantFrameRate=\"%u\" numTemporalLayers=\"%u\" temporalIdNested=\"%u\"", gf_avc_hevc_get_chroma_format_name(p->config->chromaFormat), p->config->luma_bit_depth, p->config->chroma_bit_depth, p->config->avgFrameRate, p->config->constantFrameRate, p->config->numTemporalLayers, p->config->temporalIdNested); fprintf(trace, ">\n"); /* one ParameterSetArray per NALU type (VPS/SPS/PPS/SEI), each NALU hex-dumped inline */ count = gf_list_count(p->config->param_array); for (i=0; i<count; i++) { u32 nalucount, j; GF_HEVCParamArray *ar = (GF_HEVCParamArray*)gf_list_get(p->config->param_array, i); fprintf(trace, "<ParameterSetArray nalu_type=\"%d\" complete_set=\"%d\">\n", ar->type, ar->array_completeness); nalucount = gf_list_count(ar->nalus); for (j=0; j<nalucount; j++) { GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(ar->nalus, j); fprintf(trace, "<ParameterSet size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } fprintf(trace, "</ParameterSetArray>\n"); } fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); gf_isom_box_dump_done(boxname, a, trace); return GF_OK; }

/* Dumps an 'm4ds' MPEG4ExtensionDescriptorsBox; each contained OD descriptor
 * is dumped through the OD framework when that module is built in. */
GF_Err m4ds_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_Descriptor *desc;
	GF_MPEG4ExtensionDescriptorsBox *p = (GF_MPEG4ExtensionDescriptorsBox *) a;
	gf_isom_box_dump_start(a, "MPEG4ExtensionDescriptorsBox", trace);
	fprintf(trace, ">\n");
	i=0;
	while ((desc = (GF_Descriptor *)gf_list_enum(p->descriptors, &i))) {
#ifndef GPAC_DISABLE_OD_DUMP
		gf_odf_dump_desc(desc, trace, 1, GF_TRUE);
#else
		fprintf(trace, "<!-- Object Descriptor Dumping disabled in this build of GPAC -->\n");
#endif
	}
	gf_isom_box_dump_done("MPEG4ExtensionDescriptorsBox", a, trace);
	return GF_OK;
}

/* Dumps a 'btrt' BitRateBox. */
GF_Err btrt_dump(GF_Box *a, FILE * trace)
{
	GF_BitRateBox *p = (GF_BitRateBox*)a;
	gf_isom_box_dump_start(a, "BitRateBox", trace);
	fprintf(trace, "BufferSizeDB=\"%d\" avgBitRate=\"%d\" maxBitRate=\"%d\">\n", p->bufferSizeDB, p->avgBitrate, p->maxBitrate);
	gf_isom_box_dump_done("BitRateBox", a, trace);
	return GF_OK;
}

/* Dumps an 'ftab' FontTableBox; emits an empty template record in syntax-dump mode (size==0). */
GF_Err ftab_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_FontTableBox *p = (GF_FontTableBox *)a;
	gf_isom_box_dump_start(a, "FontTableBox", trace);
	fprintf(trace, ">\n");
	for (i=0; i<p->entry_count; i++) {
		fprintf(trace, "<FontRecord ID=\"%d\" name=\"%s\"/>\n", p->fonts[i].fontID, p->fonts[i].fontName ? p->fonts[i].fontName : "NULL");
	}
	if (!p->size) {
		fprintf(trace, "<FontRecord ID=\"\" name=\"\"/>\n");
	}
	gf_isom_box_dump_done("FontTableBox", a, trace);
	return GF_OK;
}

/* Dumps a 32-bit ARGB colour as "R G B A" hex components. */
static void tx3g_dump_rgba8(FILE * trace, char *name, u32 col)
{
	fprintf(trace, "%s=\"%x %x %x %x\"", name, (col>>16)&0xFF, (col>>8)&0xFF, (col)&0xFF, (col>>24)&0xFF);
}

/* Dumps three 16-bit colour components packed back-to-back in a 6-byte array.
 * BUGFIX: the components live at byte offsets 0, 2 and 4; the previous code
 * read overlapping u16 values at offsets 0, 1 and 2, producing wrong
 * green/blue values. */
static void tx3g_dump_rgb16(FILE * trace, char *name, char col[6])
{
	fprintf(trace, "%s=\"%x %x %x\"", name, *((u16*)col), *((u16*)(col+2)), *((u16*)(col+4)));
}

/* Dumps a tx3g BoxRecord (text positioning rectangle). */
static void tx3g_dump_box(FILE * trace, GF_BoxRecord *rec)
{
	fprintf(trace, "<BoxRecord top=\"%d\" left=\"%d\" bottom=\"%d\" right=\"%d\"/>\n", rec->top, rec->left, rec->bottom, rec->right);
}

/* Dumps a tx3g StyleRecord: character range, font, style flags and colour. */
static void tx3g_dump_style(FILE * trace, GF_StyleRecord *rec)
{
	fprintf(trace, "<StyleRecord startChar=\"%d\" endChar=\"%d\" fontID=\"%d\" styles=\"", rec->startCharOffset, rec->endCharOffset, rec->fontID);
	if (!rec->style_flags) {
		fprintf(trace, "Normal");
	} else {
		/* style_flags is a bit-field: 1=bold, 2=italic, 4=underline */
		if (rec->style_flags & 1) fprintf(trace, "Bold ");
		if (rec->style_flags & 2) fprintf(trace, "Italic ");
		if (rec->style_flags & 4) fprintf(trace, "Underlined ");
	}
	fprintf(trace, "\" fontSize=\"%d\" ", rec->font_size);
	tx3g_dump_rgba8(trace, "textColor", rec->text_color);
	fprintf(trace, "/>\n");
}
/* Dumps a 'tx3g' Tx3gSampleEntryBox: display flags, justification, colours, default box/style and the font table. */
GF_Err tx3g_dump(GF_Box *a, FILE * trace) { GF_Tx3gSampleEntryBox *p = (GF_Tx3gSampleEntryBox *)a; gf_isom_box_dump_start(a, "Tx3gSampleEntryBox", trace); fprintf(trace, "dataReferenceIndex=\"%d\" displayFlags=\"%x\" horizontal-justification=\"%d\" vertical-justification=\"%d\" ", p->dataReferenceIndex, p->displayFlags, p->horizontal_justification, p->vertical_justification); tx3g_dump_rgba8(trace, "backgroundColor", p->back_color); fprintf(trace, ">\n"); fprintf(trace, "<DefaultBox>\n"); tx3g_dump_box(trace, &p->default_box); gf_isom_box_dump_done("DefaultBox", a, trace); fprintf(trace, "<DefaultStyle>\n"); tx3g_dump_style(trace, &p->default_style); fprintf(trace, "</DefaultStyle>\n"); if (p->size) { gf_isom_box_dump_ex(p->font_table, trace, GF_ISOM_BOX_TYPE_FTAB); } gf_isom_box_dump_done("Tx3gSampleEntryBox", a, trace); return GF_OK; }

/* Dumps a QuickTime 'text' TextSampleEntryBox. */
GF_Err text_dump(GF_Box *a, FILE * trace) { GF_TextSampleEntryBox *p = (GF_TextSampleEntryBox *)a; gf_isom_box_dump_start(a, "TextSampleEntryBox", trace); fprintf(trace, "dataReferenceIndex=\"%d\" displayFlags=\"%x\" textJustification=\"%d\" ", p->dataReferenceIndex, p->displayFlags, p->textJustification); if (p->textName) fprintf(trace, "textName=\"%s\" ", p->textName); tx3g_dump_rgb16(trace, "background-color", p->background_color); tx3g_dump_rgb16(trace, " foreground-color", p->foreground_color); fprintf(trace, ">\n"); fprintf(trace, "<DefaultBox>\n"); tx3g_dump_box(trace, &p->default_box); gf_isom_box_dump_done("DefaultBox", a, trace); gf_isom_box_dump_done("TextSampleEntryBox", a, trace); return GF_OK; }

/* Dumps a 'styl' TextStyleBox (list of style records). */
GF_Err styl_dump(GF_Box *a, FILE * trace) { u32 i; GF_TextStyleBox*p = (GF_TextStyleBox*)a; gf_isom_box_dump_start(a, "TextStyleBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->entry_count; i++) tx3g_dump_style(trace, &p->styles[i]); if (!p->size) { fprintf(trace, "<StyleRecord startChar=\"\" endChar=\"\" fontID=\"\" styles=\"Normal|Bold|Italic|Underlined\" fontSize=\"\" textColor=\"\" />\n"); } gf_isom_box_dump_done("TextStyleBox", a, trace); return GF_OK; }

/* Dumps a 'hlit' TextHighlightBox (character range to highlight). */
GF_Err hlit_dump(GF_Box *a, FILE * trace) { GF_TextHighlightBox*p = (GF_TextHighlightBox*)a; gf_isom_box_dump_start(a, "TextHighlightBox", trace); fprintf(trace, "startcharoffset=\"%d\" endcharoffset=\"%d\">\n", p->startcharoffset, p->endcharoffset); gf_isom_box_dump_done("TextHighlightBox", a, trace); return GF_OK; }

/* Dumps a 'hclr' TextHighlightColorBox. */
GF_Err hclr_dump(GF_Box *a, FILE * trace) { GF_TextHighlightColorBox*p = (GF_TextHighlightColorBox*)a; gf_isom_box_dump_start(a, "TextHighlightColorBox", trace); tx3g_dump_rgba8(trace, "highlight_color", p->hil_color); fprintf(trace, ">\n"); gf_isom_box_dump_done("TextHighlightColorBox", a, trace); return GF_OK; }

/* Dumps a 'krok' TextKaraokeBox with its timed highlight records. */
GF_Err krok_dump(GF_Box *a, FILE * trace) { u32 i; GF_TextKaraokeBox*p = (GF_TextKaraokeBox*)a; gf_isom_box_dump_start(a, "TextKaraokeBox", trace); fprintf(trace, "highlight_starttime=\"%d\">\n", p->highlight_starttime); for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<KaraokeRecord highlight_endtime=\"%d\" start_charoffset=\"%d\" end_charoffset=\"%d\"/>\n", p->records[i].highlight_endtime, p->records[i].start_charoffset, p->records[i].end_charoffset); } if (!p->size) { fprintf(trace, "<KaraokeRecord highlight_endtime=\"\" start_charoffset=\"\" end_charoffset=\"\"/>\n"); } gf_isom_box_dump_done("TextKaraokeBox", a, trace); return GF_OK; }

/* Dumps a 'dlay' TextScrollDelayBox. */
GF_Err dlay_dump(GF_Box *a, FILE * trace) { GF_TextScrollDelayBox*p = (GF_TextScrollDelayBox*)a; gf_isom_box_dump_start(a, "TextScrollDelayBox", trace); fprintf(trace, "scroll_delay=\"%d\">\n", p->scroll_delay); gf_isom_box_dump_done("TextScrollDelayBox", a, trace); return GF_OK; }

/* Dumps an 'href' TextHyperTextBox (hyperlink on a character range). */
GF_Err href_dump(GF_Box *a, FILE * trace) { GF_TextHyperTextBox*p = (GF_TextHyperTextBox*)a; gf_isom_box_dump_start(a, "TextHyperTextBox", trace); fprintf(trace, "startcharoffset=\"%d\" endcharoffset=\"%d\" URL=\"%s\" altString=\"%s\">\n", p->startcharoffset, p->endcharoffset, p->URL ? p->URL : "NULL", p->URL_hint ? p->URL_hint : "NULL"); gf_isom_box_dump_done("TextHyperTextBox", a, trace); return GF_OK; }

/* Dumps a 'tbox' TextBoxBox. */
GF_Err tbox_dump(GF_Box *a, FILE * trace) { GF_TextBoxBox*p = (GF_TextBoxBox*)a; gf_isom_box_dump_start(a, "TextBoxBox", trace); fprintf(trace, ">\n"); tx3g_dump_box(trace, &p->box); gf_isom_box_dump_done("TextBoxBox", a, trace); return GF_OK; }

/* Dumps a 'blnk' TextBlinkBox. */
GF_Err blnk_dump(GF_Box *a, FILE * trace) { GF_TextBlinkBox*p = (GF_TextBlinkBox*)a; gf_isom_box_dump_start(a, "TextBlinkBox", trace); fprintf(trace, "start_charoffset=\"%d\" end_charoffset=\"%d\">\n", p->startcharoffset, p->endcharoffset); gf_isom_box_dump_done("TextBlinkBox", a, trace); return GF_OK; }

/* Dumps a 'twrp' TextWrapBox. */
GF_Err twrp_dump(GF_Box *a, FILE * trace) { GF_TextWrapBox*p = (GF_TextWrapBox*)a; gf_isom_box_dump_start(a, "TextWrapBox", trace); fprintf(trace, "wrap_flag=\"%s\">\n", p->wrap_flag ? ( (p->wrap_flag>1) ? "Reserved" : "Automatic" ) : "No Wrap"); gf_isom_box_dump_done("TextWrapBox", a, trace); return GF_OK; }

/* Dumps a 'meta' MetaBox and each optional child it may carry. */
GF_Err meta_dump(GF_Box *a, FILE * trace) { GF_MetaBox *p; p = (GF_MetaBox *)a; gf_isom_box_dump_start(a, "MetaBox", trace); fprintf(trace, ">\n"); if (p->handler) gf_isom_box_dump(p->handler, trace); if (p->primary_resource) gf_isom_box_dump(p->primary_resource, trace); if (p->file_locations) gf_isom_box_dump(p->file_locations, trace); if (p->item_locations) gf_isom_box_dump(p->item_locations, trace); if (p->protections) gf_isom_box_dump(p->protections, trace); if (p->item_infos) gf_isom_box_dump(p->item_infos, trace); if (p->IPMP_control) gf_isom_box_dump(p->IPMP_control, trace); if (p->item_refs) gf_isom_box_dump(p->item_refs, trace); if (p->item_props) gf_isom_box_dump(p->item_props, trace); gf_isom_box_dump_done("MetaBox", a, trace); return GF_OK; }

/* Dumps an 'xml ' XMLBox; the raw XML payload is emitted inside a CDATA section. */
GF_Err xml_dump(GF_Box *a, FILE * trace) { GF_XMLBox *p = (GF_XMLBox *)a; gf_isom_box_dump_start(a, "XMLBox", trace); fprintf(trace, ">\n"); fprintf(trace, "<![CDATA[\n"); if (p->xml) gf_fwrite(p->xml, strlen(p->xml), 1, trace); fprintf(trace, "]]>\n"); gf_isom_box_dump_done("XMLBox", a, trace); return GF_OK; }

/* Dumps a 'bxml' BinaryXMLBox (size only, payload not decoded). */
GF_Err bxml_dump(GF_Box *a, FILE * trace) { GF_BinaryXMLBox *p = (GF_BinaryXMLBox *)a; gf_isom_box_dump_start(a, "BinaryXMLBox", trace); fprintf(trace, "binarySize=\"%d\">\n", p->data_length); gf_isom_box_dump_done("BinaryXMLBox", a, trace); return GF_OK; }

/* Dumps a 'pitm' PrimaryItemBox. */
GF_Err pitm_dump(GF_Box *a, FILE * trace) { GF_PrimaryItemBox *p = (GF_PrimaryItemBox *)a; gf_isom_box_dump_start(a, "PrimaryItemBox", trace); fprintf(trace, "item_ID=\"%d\">\n", p->item_ID); gf_isom_box_dump_done("PrimaryItemBox", a, trace); return GF_OK; }

/* Dumps an 'ipro' ItemProtectionBox and its protection scheme info children. */
GF_Err ipro_dump(GF_Box *a, FILE * trace) { GF_ItemProtectionBox *p = (GF_ItemProtectionBox *)a; gf_isom_box_dump_start(a, "ItemProtectionBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(p->protection_information, trace); gf_isom_box_dump_done("ItemProtectionBox", a, trace); return GF_OK; }

/* Dumps an 'infe' ItemInfoEntryBox. */
GF_Err infe_dump(GF_Box *a, FILE * trace) { GF_ItemInfoEntryBox *p = (GF_ItemInfoEntryBox *)a; gf_isom_box_dump_start(a, "ItemInfoEntryBox", trace); fprintf(trace, "item_ID=\"%d\" item_protection_index=\"%d\" item_name=\"%s\" content_type=\"%s\" content_encoding=\"%s\" item_type=\"%s\">\n", p->item_ID, p->item_protection_index, p->item_name, p->content_type, p->content_encoding, gf_4cc_to_str(p->item_type)); gf_isom_box_dump_done("ItemInfoEntryBox", a, trace); return GF_OK; }

/* Dumps an 'iinf' ItemInfoBox and its entries. */
GF_Err iinf_dump(GF_Box *a, FILE * trace) { GF_ItemInfoBox *p = (GF_ItemInfoBox *)a; gf_isom_box_dump_start(a, "ItemInfoBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(p->item_infos, trace); gf_isom_box_dump_done("ItemInfoBox", a, trace); return GF_OK; }

/* Dumps an 'iloc' ItemLocationBox: per-item location entries with their extent lists. (continues) */
GF_Err iloc_dump(GF_Box *a, FILE * trace) { u32 i, j, count, count2; GF_ItemLocationBox *p = (GF_ItemLocationBox*)a; gf_isom_box_dump_start(a, "ItemLocationBox", trace); fprintf(trace, "offset_size=\"%d\" length_size=\"%d\" base_offset_size=\"%d\" index_size=\"%d\">\n", p->offset_size, p->length_size, p->base_offset_size, p->index_size); count =
gf_list_count(p->location_entries); for (i=0; i<count; i++) { GF_ItemLocationEntry *ie = (GF_ItemLocationEntry *)gf_list_get(p->location_entries, i); count2 = gf_list_count(ie->extent_entries); fprintf(trace, "<ItemLocationEntry item_ID=\"%d\" data_reference_index=\"%d\" base_offset=\""LLD"\" construction_method=\"%d\">\n", ie->item_ID, ie->data_reference_index, LLD_CAST ie->base_offset, ie->construction_method); for (j=0; j<count2; j++) { GF_ItemExtentEntry *iee = (GF_ItemExtentEntry *)gf_list_get(ie->extent_entries, j); fprintf(trace, "<ItemExtentEntry extent_offset=\""LLD"\" extent_length=\""LLD"\" extent_index=\""LLD"\" />\n", LLD_CAST iee->extent_offset, LLD_CAST iee->extent_length, LLD_CAST iee->extent_index); } fprintf(trace, "</ItemLocationEntry>\n"); } if (!p->size) { fprintf(trace, "<ItemLocationEntry item_ID=\"\" data_reference_index=\"\" base_offset=\"\" construction_method=\"\">\n"); fprintf(trace, "<ItemExtentEntry extent_offset=\"\" extent_length=\"\" extent_index=\"\" />\n"); fprintf(trace, "</ItemLocationEntry>\n"); } gf_isom_box_dump_done("ItemLocationBox", a, trace); return GF_OK; }

/* Dumps an 'iref' ItemReferenceBox and its reference-type children. */
GF_Err iref_dump(GF_Box *a, FILE * trace) { GF_ItemReferenceBox *p = (GF_ItemReferenceBox *)a; gf_isom_box_dump_start(a, "ItemReferenceBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(p->references, trace); gf_isom_box_dump_done("ItemReferenceBox", a, trace); return GF_OK; }

/* Dumps a 'hinf' HintInfoBox; statistics children are emitted by the generic walker. */
GF_Err hinf_dump(GF_Box *a, FILE * trace) { // GF_HintInfoBox *p = (GF_HintInfoBox *)a;
gf_isom_box_dump_start(a, "HintInfoBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("HintInfoBox", a, trace); return GF_OK; }

/* Dumps a 'trpy' box (64-bit total RTP bytes sent, including headers). */
GF_Err trpy_dump(GF_Box *a, FILE * trace) { GF_TRPYBox *p = (GF_TRPYBox *)a; gf_isom_box_dump_start(a, "LargeTotalRTPBytesBox", trace); fprintf(trace, "RTPBytesSent=\""LLD"\">\n", LLD_CAST p->nbBytes); gf_isom_box_dump_done("LargeTotalRTPBytesBox", a, trace); return GF_OK; }

/* Dumps a 'totl' box (32-bit total RTP bytes sent). */
GF_Err totl_dump(GF_Box *a, FILE * trace) { GF_TOTLBox *p; p = (GF_TOTLBox *)a; gf_isom_box_dump_start(a, "TotalRTPBytesBox", trace); fprintf(trace, "RTPBytesSent=\"%d\">\n", p->nbBytes); gf_isom_box_dump_done("TotalRTPBytesBox", a, trace); return GF_OK; }

/* Dumps a 'nump' box (64-bit packet count). */
GF_Err nump_dump(GF_Box *a, FILE * trace) { GF_NUMPBox *p; p = (GF_NUMPBox *)a; gf_isom_box_dump_start(a, "LargeTotalPacketBox", trace); fprintf(trace, "PacketsSent=\""LLD"\">\n", LLD_CAST p->nbPackets); gf_isom_box_dump_done("LargeTotalPacketBox", a, trace); return GF_OK; }

/* Dumps an 'npck' box (32-bit packet count). */
GF_Err npck_dump(GF_Box *a, FILE * trace) { GF_NPCKBox *p; p = (GF_NPCKBox *)a; gf_isom_box_dump_start(a, "TotalPacketBox", trace); fprintf(trace, "packetsSent=\"%d\">\n", p->nbPackets); gf_isom_box_dump_done("TotalPacketBox", a, trace); return GF_OK; }

/* Dumps a 'tpyl' box (64-bit media bytes sent, payload only). */
GF_Err tpyl_dump(GF_Box *a, FILE * trace) { GF_NTYLBox *p; p = (GF_NTYLBox *)a; gf_isom_box_dump_start(a, "LargeTotalMediaBytesBox", trace); fprintf(trace, "BytesSent=\""LLD"\">\n", LLD_CAST p->nbBytes); gf_isom_box_dump_done("LargeTotalMediaBytesBox", a, trace); return GF_OK; }

/* Dumps a 'tpay' box (32-bit media bytes sent). */
GF_Err tpay_dump(GF_Box *a, FILE * trace) { GF_TPAYBox *p; p = (GF_TPAYBox *)a; gf_isom_box_dump_start(a, "TotalMediaBytesBox", trace); fprintf(trace, "BytesSent=\"%d\">\n", p->nbBytes); gf_isom_box_dump_done("TotalMediaBytesBox", a, trace); return GF_OK; }

/* Dumps a 'maxr' box (max data rate over a given granularity window). */
GF_Err maxr_dump(GF_Box *a, FILE * trace) { GF_MAXRBox *p; p = (GF_MAXRBox *)a; gf_isom_box_dump_start(a, "MaxDataRateBox", trace); fprintf(trace, "MaxDataRate=\"%d\" Granularity=\"%d\">\n", p->maxDataRate, p->granularity); gf_isom_box_dump_done("MaxDataRateBox", a, trace); return GF_OK; }

/* Dumps a 'dmed' box (bytes sent from media tracks). */
GF_Err dmed_dump(GF_Box *a, FILE * trace) { GF_DMEDBox *p; p = (GF_DMEDBox *)a; gf_isom_box_dump_start(a, "BytesFromMediaTrackBox", trace); fprintf(trace, "BytesSent=\""LLD"\">\n", LLD_CAST p->nbBytes); gf_isom_box_dump_done("BytesFromMediaTrackBox", a, trace); return GF_OK; }

/* Dumps a 'dimm' box (immediate-mode bytes sent). */
GF_Err dimm_dump(GF_Box *a, FILE * trace) { GF_DIMMBox *p; p = (GF_DIMMBox *)a; gf_isom_box_dump_start(a, "ImmediateDataBytesBox", trace); fprintf(trace, "BytesSent=\""LLD"\">\n", LLD_CAST p->nbBytes); gf_isom_box_dump_done("ImmediateDataBytesBox", a, trace); return GF_OK; }

/* Dumps a 'drep' box (repeated bytes sent). */
GF_Err drep_dump(GF_Box *a, FILE * trace) { GF_DREPBox *p; p = (GF_DREPBox *)a; gf_isom_box_dump_start(a, "RepeatedDataBytesBox", trace); fprintf(trace, "RepeatedBytes=\""LLD"\">\n", LLD_CAST p->nbBytes); gf_isom_box_dump_done("RepeatedDataBytesBox", a, trace); return GF_OK; }

/* Dumps a 'tssy' TimeStampSynchronyBox. */
GF_Err tssy_dump(GF_Box *a, FILE * trace) { GF_TimeStampSynchronyBox *p = (GF_TimeStampSynchronyBox *)a; gf_isom_box_dump_start(a, "TimeStampSynchronyBox", trace); fprintf(trace, "timestamp_sync=\"%d\">\n", p->timestamp_sync); gf_isom_box_dump_done("TimeStampSynchronyBox", a, trace); return GF_OK; }

/* Dumps an 'rssr' ReceivedSsrcBox. */
GF_Err rssr_dump(GF_Box *a, FILE * trace) { GF_ReceivedSsrcBox *p = (GF_ReceivedSsrcBox *)a; gf_isom_box_dump_start(a, "ReceivedSsrcBox", trace); fprintf(trace, "SSRC=\"%d\">\n", p->ssrc); gf_isom_box_dump_done("ReceivedSsrcBox", a, trace); return GF_OK; }

/* Dumps a 'tmin' box (min relative transmission time). */
GF_Err tmin_dump(GF_Box *a, FILE * trace) { GF_TMINBox *p; p = (GF_TMINBox *)a; gf_isom_box_dump_start(a, "MinTransmissionTimeBox", trace); fprintf(trace, "MinimumTransmitTime=\"%d\">\n", p->minTime); gf_isom_box_dump_done("MinTransmissionTimeBox", a, trace); return GF_OK; }

/* Dumps a 'tmax' box (max relative transmission time). */
GF_Err tmax_dump(GF_Box *a, FILE * trace) { GF_TMAXBox *p; p = (GF_TMAXBox *)a; gf_isom_box_dump_start(a, "MaxTransmissionTimeBox", trace); fprintf(trace, "MaximumTransmitTime=\"%d\">\n", p->maxTime); gf_isom_box_dump_done("MaxTransmissionTimeBox", a, trace); return GF_OK; }

/* Dumps a 'pmax' box (largest packet size). */
GF_Err pmax_dump(GF_Box *a, FILE * trace) { GF_PMAXBox *p; p = (GF_PMAXBox *)a; gf_isom_box_dump_start(a, "MaxPacketSizeBox", trace); fprintf(trace, "MaximumSize=\"%d\">\n", p->maxSize); gf_isom_box_dump_done("MaxPacketSizeBox", a, trace); return GF_OK; }

/* Dumps a 'dmax' box (longest packet duration). */
GF_Err dmax_dump(GF_Box *a, FILE * trace) { GF_DMAXBox *p; p = (GF_DMAXBox *)a; gf_isom_box_dump_start(a, "MaxPacketDurationBox", trace); fprintf(trace, "MaximumDuration=\"%d\">\n", p->maxDur); gf_isom_box_dump_done("MaxPacketDurationBox", a, trace); return GF_OK; }

/* Dumps a 'payt' PayloadTypeBox. */
GF_Err payt_dump(GF_Box *a, FILE * trace) { GF_PAYTBox *p; p = (GF_PAYTBox *)a; gf_isom_box_dump_start(a, "PayloadTypeBox", trace); fprintf(trace, "PayloadID=\"%d\" PayloadString=\"%s\">\n", p->payloadCode, p->payloadString); gf_isom_box_dump_done("PayloadTypeBox", a, trace); return GF_OK; }

/* Dumps a 'name' NameBox. */
GF_Err name_dump(GF_Box *a, FILE * trace) { GF_NameBox *p; p = (GF_NameBox *)a; gf_isom_box_dump_start(a, "NameBox", trace); fprintf(trace, "Name=\"%s\">\n", p->string); gf_isom_box_dump_done("NameBox", a, trace); return GF_OK; }

/* Dumps a 'rely' RelyTransmissionBox. */
GF_Err rely_dump(GF_Box *a, FILE * trace) { GF_RelyHintBox *p; p = (GF_RelyHintBox *)a; gf_isom_box_dump_start(a, "RelyTransmissionBox", trace); fprintf(trace, "Prefered=\"%d\" required=\"%d\">\n", p->prefered, p->required); gf_isom_box_dump_done("RelyTransmissionBox", a, trace); return GF_OK; }

/* Dumps an 'snro' PacketSequenceOffsetBox. */
GF_Err snro_dump(GF_Box *a, FILE * trace) { GF_SeqOffHintEntryBox *p; p = (GF_SeqOffHintEntryBox *)a; gf_isom_box_dump_start(a, "PacketSequenceOffsetBox", trace); fprintf(trace, "SeqNumOffset=\"%d\">\n", p->SeqOffset); gf_isom_box_dump_done("PacketSequenceOffsetBox", a, trace); return GF_OK; }

/* Dumps a 'tims' RTPTimeScaleBox. */
GF_Err tims_dump(GF_Box *a, FILE * trace) { GF_TSHintEntryBox *p; p = (GF_TSHintEntryBox *)a; gf_isom_box_dump_start(a, "RTPTimeScaleBox", trace); fprintf(trace, "TimeScale=\"%d\">\n", p->timeScale); gf_isom_box_dump_done("RTPTimeScaleBox", a, trace); return GF_OK; }

/* Dumps a 'tsro' TimeStampOffsetBox. */
GF_Err tsro_dump(GF_Box *a, FILE * trace) { GF_TimeOffHintEntryBox *p; p = (GF_TimeOffHintEntryBox *)a; gf_isom_box_dump_start(a, "TimeStampOffsetBox", trace); fprintf(trace, "TimeStampOffset=\"%d\">\n", p->TimeOffset); gf_isom_box_dump_done("TimeStampOffsetBox", a, trace); return GF_OK; }

/* Dumps a generic hint sample entry; the XML element name is chosen from the box 4CC. (continues) */
GF_Err ghnt_dump(GF_Box *a, FILE * trace) { char *name; GF_HintSampleEntryBox *p; p = (GF_HintSampleEntryBox *)a; if (a->type == GF_ISOM_BOX_TYPE_RTP_STSD) { name = "RTPHintSampleEntryBox"; } else if (a->type == GF_ISOM_BOX_TYPE_SRTP_STSD) { name = "SRTPHintSampleEntryBox"; } else if (a->type ==
GF_ISOM_BOX_TYPE_FDP_STSD) { name = "FDPHintSampleEntryBox"; } else if (a->type == GF_ISOM_BOX_TYPE_RRTP_STSD) { name = "RTPReceptionHintSampleEntryBox"; } else if (a->type == GF_ISOM_BOX_TYPE_RTCP_STSD) { name = "RTCPReceptionHintSampleEntryBox"; } else { name = "GenericHintSampleEntryBox"; } gf_isom_box_dump_start(a, name, trace); fprintf(trace, "DataReferenceIndex=\"%d\" HintTrackVersion=\"%d\" LastCompatibleVersion=\"%d\"", p->dataReferenceIndex, p->HintTrackVersion, p->LastCompatibleVersion); if ((a->type == GF_ISOM_BOX_TYPE_RTP_STSD) || (a->type == GF_ISOM_BOX_TYPE_SRTP_STSD) || (a->type == GF_ISOM_BOX_TYPE_RRTP_STSD) || (a->type == GF_ISOM_BOX_TYPE_RTCP_STSD)) { fprintf(trace, " MaxPacketSize=\"%d\"", p->MaxPacketSize); } else if (a->type == GF_ISOM_BOX_TYPE_FDP_STSD) { fprintf(trace, " partition_entry_ID=\"%d\" FEC_overhead=\"%d\"", p->partition_entry_ID, p->FEC_overhead); } fprintf(trace, ">\n"); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err hnti_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "HintTrackInfoBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("HintTrackInfoBox", NULL, trace); return GF_OK; } GF_Err sdp_dump(GF_Box *a, FILE * trace) { GF_SDPBox *p = (GF_SDPBox *)a; gf_isom_box_dump_start(a, "SDPBox", trace); fprintf(trace, ">\n"); if (p->sdpText) fprintf(trace, "<!-- sdp text: %s -->\n", p->sdpText); gf_isom_box_dump_done("SDPBox", a, trace); return GF_OK; } GF_Err rtp_hnti_dump(GF_Box *a, FILE * trace) { GF_RTPBox *p = (GF_RTPBox *)a; gf_isom_box_dump_start(a, "RTPMovieHintInformationBox", trace); fprintf(trace, "descriptionformat=\"%s\">\n", gf_4cc_to_str(p->subType)); if (p->sdpText) fprintf(trace, "<!-- sdp text: %s -->\n", p->sdpText); gf_isom_box_dump_done("RTPMovieHintInformationBox", a, trace); return GF_OK; } GF_Err rtpo_dump(GF_Box *a, FILE * trace) { GF_RTPOBox *p; p = (GF_RTPOBox *)a; gf_isom_box_dump_start(a, "RTPTimeOffsetBox", trace); fprintf(trace, "PacketTimeOffset=\"%d\">\n", p->timeOffset); 
	gf_isom_box_dump_done("RTPTimeOffsetBox", a, trace);
	return GF_OK;
}

#ifndef GPAC_DISABLE_ISOM_FRAGMENTS

/* Dumps the 'mvex' MovieExtends box and its mehd/trex/trep children. */
GF_Err mvex_dump(GF_Box *a, FILE * trace)
{
	GF_MovieExtendsBox *p;
	p = (GF_MovieExtendsBox *)a;
	gf_isom_box_dump_start(a, "MovieExtendsBox", trace);
	fprintf(trace, ">\n");
	if (p->mehd) gf_isom_box_dump(p->mehd, trace);
	gf_isom_box_array_dump(p->TrackExList, trace);
	gf_isom_box_array_dump(p->TrackExPropList, trace);
	gf_isom_box_dump_done("MovieExtendsBox", a, trace);
	return GF_OK;
}

/* Dumps the 'mehd' MovieExtendsHeader box (overall fragmented duration). */
GF_Err mehd_dump(GF_Box *a, FILE * trace)
{
	GF_MovieExtendsHeaderBox *p = (GF_MovieExtendsHeaderBox*)a;
	gf_isom_box_dump_start(a, "MovieExtendsHeaderBox", trace);
	fprintf(trace, "fragmentDuration=\""LLD"\" >\n", LLD_CAST p->fragment_duration);
	gf_isom_box_dump_done("MovieExtendsHeaderBox", a, trace);
	return GF_OK;
}

/* Emits one <name .../> element decoding every field packed in a sample_flags word. */
void sample_flags_dump(const char *name, u32 sample_flags, FILE * trace)
{
	fprintf(trace, "<%s", name);
	fprintf(trace, " IsLeading=\"%d\"", GF_ISOM_GET_FRAG_LEAD(sample_flags) );
	fprintf(trace, " SampleDependsOn=\"%d\"", GF_ISOM_GET_FRAG_DEPENDS(sample_flags) );
	fprintf(trace, " SampleIsDependedOn=\"%d\"", GF_ISOM_GET_FRAG_DEPENDED(sample_flags) );
	fprintf(trace, " SampleHasRedundancy=\"%d\"", GF_ISOM_GET_FRAG_REDUNDANT(sample_flags) );
	fprintf(trace, " SamplePadding=\"%d\"", GF_ISOM_GET_FRAG_PAD(sample_flags) );
	fprintf(trace, " SampleSync=\"%d\"", GF_ISOM_GET_FRAG_SYNC(sample_flags));
	fprintf(trace, " SampleDegradationPriority=\"%d\"", GF_ISOM_GET_FRAG_DEG(sample_flags));
	fprintf(trace, "/>\n");
}

/* Dumps the 'trex' TrackExtends box (per-track fragment defaults). */
GF_Err trex_dump(GF_Box *a, FILE * trace)
{
	GF_TrackExtendsBox *p;
	p = (GF_TrackExtendsBox *)a;
	gf_isom_box_dump_start(a, "TrackExtendsBox", trace);
	fprintf(trace, "TrackID=\"%d\"", p->trackID);
	fprintf(trace, " SampleDescriptionIndex=\"%d\" SampleDuration=\"%d\" SampleSize=\"%d\"", p->def_sample_desc_index, p->def_sample_duration, p->def_sample_size);
	fprintf(trace, ">\n");
	sample_flags_dump("DefaultSampleFlags", p->def_sample_flags, trace);
	gf_isom_box_dump_done("TrackExtendsBox", a, trace);
	return GF_OK;
}

/* Dumps the 'trep' TrackExtensionProperties box. */
GF_Err trep_dump(GF_Box *a, FILE * trace)
{
	GF_TrackExtensionPropertiesBox *p = (GF_TrackExtensionPropertiesBox*)a;
	gf_isom_box_dump_start(a, "TrackExtensionPropertiesBox", trace);
	fprintf(trace, "TrackID=\"%d\">\n", p->trackID);
	gf_isom_box_dump_done("TrackExtensionPropertiesBox", a, trace);
	return GF_OK;
}

/* Dumps the 'moof' MovieFragment box with its header and track fragments. */
GF_Err moof_dump(GF_Box *a, FILE * trace)
{
	GF_MovieFragmentBox *p;
	p = (GF_MovieFragmentBox *)a;
	gf_isom_box_dump_start(a, "MovieFragmentBox", trace);
	fprintf(trace, "TrackFragments=\"%d\">\n", gf_list_count(p->TrackList));
	if (p->mfhd) gf_isom_box_dump(p->mfhd, trace);
	gf_isom_box_array_dump(p->TrackList, trace);
	gf_isom_box_dump_done("MovieFragmentBox", a, trace);
	return GF_OK;
}

/* Dumps the 'mfhd' MovieFragmentHeader box (fragment sequence number). */
GF_Err mfhd_dump(GF_Box *a, FILE * trace)
{
	GF_MovieFragmentHeaderBox *p;
	p = (GF_MovieFragmentHeaderBox *)a;
	gf_isom_box_dump_start(a, "MovieFragmentHeaderBox", trace);
	fprintf(trace, "FragmentSequenceNumber=\"%d\">\n", p->sequence_number);
	gf_isom_box_dump_done("MovieFragmentHeaderBox", a, trace);
	return GF_OK;
}

/* Dumps the 'traf' TrackFragment box and all its optional children. */
GF_Err traf_dump(GF_Box *a, FILE * trace)
{
	GF_TrackFragmentBox *p;
	p = (GF_TrackFragmentBox *)a;
	gf_isom_box_dump_start(a, "TrackFragmentBox", trace);
	fprintf(trace, ">\n");
	if (p->tfhd) gf_isom_box_dump(p->tfhd, trace);
	if (p->sdtp) gf_isom_box_dump(p->sdtp, trace);
	if (p->tfdt) gf_isom_box_dump(p->tfdt, trace);
	if (p->sub_samples) gf_isom_box_array_dump(p->sub_samples, trace);
	if (p->sampleGroupsDescription) gf_isom_box_array_dump(p->sampleGroupsDescription, trace);
	if (p->sampleGroups) gf_isom_box_array_dump(p->sampleGroups, trace);
	gf_isom_box_array_dump(p->TrackRuns, trace);
	if (p->sai_sizes) gf_isom_box_array_dump(p->sai_sizes, trace);
	if (p->sai_offsets) gf_isom_box_array_dump(p->sai_offsets, trace);
	if (p->sample_encryption) gf_isom_box_dump(p->sample_encryption, trace);
	gf_isom_box_dump_done("TrackFragmentBox", a, trace);
	return GF_OK;
}

/* Prints sample_flags as inline attributes (no element wrapper); body continues in next chunk. */
static void frag_dump_sample_flags(FILE * trace, u32
flags)
{
	fprintf(trace, " SamplePadding=\"%d\" Sync=\"%d\" DegradationPriority=\"%d\" IsLeading=\"%d\" DependsOn=\"%d\" IsDependedOn=\"%d\" HasRedundancy=\"%d\"", GF_ISOM_GET_FRAG_PAD(flags), GF_ISOM_GET_FRAG_SYNC(flags), GF_ISOM_GET_FRAG_DEG(flags), GF_ISOM_GET_FRAG_LEAD(flags), GF_ISOM_GET_FRAG_DEPENDS(flags), GF_ISOM_GET_FRAG_DEPENDED(flags), GF_ISOM_GET_FRAG_REDUNDANT(flags));
}

/* Dumps the 'tfhd' TrackFragmentHeader box; optional fields are emitted only when
   the corresponding tf_flags bit is set. */
GF_Err tfhd_dump(GF_Box *a, FILE * trace)
{
	GF_TrackFragmentHeaderBox *p;
	p = (GF_TrackFragmentHeaderBox *)a;
	gf_isom_box_dump_start(a, "TrackFragmentHeaderBox", trace);
	fprintf(trace, "TrackID=\"%u\"", p->trackID);

	if (p->flags & GF_ISOM_TRAF_BASE_OFFSET) {
		fprintf(trace, " BaseDataOffset=\""LLU"\"", p->base_data_offset);
	} else {
		fprintf(trace, " BaseDataOffset=\"%s\"", (p->flags & GF_ISOM_MOOF_BASE_OFFSET) ? "moof" : "moof-or-previous-traf");
	}
	if (p->flags & GF_ISOM_TRAF_SAMPLE_DESC)
		fprintf(trace, " SampleDescriptionIndex=\"%u\"", p->sample_desc_index);
	if (p->flags & GF_ISOM_TRAF_SAMPLE_DUR)
		fprintf(trace, " SampleDuration=\"%u\"", p->def_sample_duration);
	if (p->flags & GF_ISOM_TRAF_SAMPLE_SIZE)
		fprintf(trace, " SampleSize=\"%u\"", p->def_sample_size);
	if (p->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) {
		frag_dump_sample_flags(trace, p->def_sample_flags);
	}
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("TrackFragmentHeaderBox", a, trace);
	return GF_OK;
}

/* Dumps the Smooth-Streaming 'tfxd' time extension box.
   NOTE(review): ptr is assigned from a before the !a check; harmless (no deref
   before the check) but the check is effectively dead when a is non-NULL cast. */
GF_Err tfxd_dump(GF_Box *a, FILE * trace)
{
	GF_MSSTimeExtBox *ptr = (GF_MSSTimeExtBox*)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "MSSTimeExtensionBox", trace);
	fprintf(trace, "AbsoluteTime=\""LLU"\" FragmentDuration=\""LLU"\">\n", ptr->absolute_time_in_track_timescale, ptr->fragment_duration_in_track_timescale);
	fprintf(trace, "<FullBoxInfo Version=\"%d\" Flags=\"%d\"/>\n", ptr->version, ptr->flags);
	gf_isom_box_dump_done("MSSTimeExtensionBox", a, trace);
	return GF_OK;
}

/* Dumps the 'trun' TrackRun box; per-sample attributes depend on tr_flags. */
GF_Err trun_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_TrunEntry *ent;
	GF_TrackFragmentRunBox *p;
	p = (GF_TrackFragmentRunBox *)a;
	gf_isom_box_dump_start(a, "TrackRunBox", trace);
	fprintf(trace, "SampleCount=\"%d\"", p->sample_count);

	if (p->flags & GF_ISOM_TRUN_DATA_OFFSET)
		fprintf(trace, " DataOffset=\"%d\"", p->data_offset);
	fprintf(trace, ">\n");

	if (p->flags & GF_ISOM_TRUN_FIRST_FLAG) {
		sample_flags_dump("FirstSampleFlags", p->first_sample_flags, trace);
	}

	if (p->flags & (GF_ISOM_TRUN_DURATION|GF_ISOM_TRUN_SIZE|GF_ISOM_TRUN_CTS_OFFSET|GF_ISOM_TRUN_FLAGS)) {
		i=0;
		while ((ent = (GF_TrunEntry *)gf_list_enum(p->entries, &i))) {
			fprintf(trace, "<TrackRunEntry");
			if (p->flags & GF_ISOM_TRUN_DURATION)
				fprintf(trace, " Duration=\"%u\"", ent->Duration);
			if (p->flags & GF_ISOM_TRUN_SIZE)
				fprintf(trace, " Size=\"%u\"", ent->size);
			if (p->flags & GF_ISOM_TRUN_CTS_OFFSET) {
				/* version 0 trun has unsigned CTS offsets, version 1 signed */
				if (p->version == 0)
					fprintf(trace, " CTSOffset=\"%u\"", (u32) ent->CTS_Offset);
				else
					fprintf(trace, " CTSOffset=\"%d\"", ent->CTS_Offset);
			}
			if (p->flags & GF_ISOM_TRUN_FLAGS) {
				frag_dump_sample_flags(trace, ent->flags);
			}
			fprintf(trace, "/>\n");
		}
	} else if (p->size) {
		fprintf(trace, "<!-- all default values used -->\n");
	} else {
		/* box created in memory (size==0): emit an attribute template */
		fprintf(trace, "<TrackRunEntry Duration=\"\" Size=\"\" CTSOffset=\"\"");
		frag_dump_sample_flags(trace, 0);
		fprintf(trace, "/>\n");
	}
	gf_isom_box_dump_done("TrackRunBox", a, trace);
	return GF_OK;
}

#endif

#ifndef GPAC_DISABLE_ISOM_HINTING

/* Dumps a list of hint Data Table Entries (DTEs); one element per entry kind. */
GF_Err DTE_Dump(GF_List *dte, FILE * trace)
{
	GF_GenericDTE *p;
	GF_ImmediateDTE *i_p;
	GF_SampleDTE *s_p;
	GF_StreamDescDTE *sd_p;
	u32 i, count;
	count = gf_list_count(dte);
	for (i=0; i<count; i++) {
		p = (GF_GenericDTE *)gf_list_get(dte, i);
		switch (p->source) {
		case 0:
			fprintf(trace, "<EmptyDataEntry/>\n");
			break;
		case 1:
			i_p = (GF_ImmediateDTE *) p;
			fprintf(trace, "<ImmediateDataEntry DataSize=\"%d\"/>\n", i_p->dataLength);
			break;
		case 2:
			s_p = (GF_SampleDTE *) p;
			fprintf(trace, "<SampleDataEntry DataSize=\"%d\" SampleOffset=\"%d\" SampleNumber=\"%d\" TrackReference=\"%d\"/>\n", s_p->dataLength, s_p->byteOffset, s_p->sampleNumber, s_p->trackRefIndex);
			break;
		case 3:
			sd_p = (GF_StreamDescDTE *) p;
			fprintf(trace, "<SampleDescriptionEntry DataSize=\"%d\" DescriptionOffset=\"%d\" StreamDescriptionindex=\"%d\" TrackReference=\"%d\"/>\n", sd_p->dataLength, sd_p->byteOffset, sd_p->streamDescIndex, sd_p->trackRefIndex);
			break;
		default:
			fprintf(trace, "<UnknownTableEntry/>\n");
			break;
		}
	}
	return GF_OK;
}

/* Parses hint sample SampleNum of the given hint track and dumps it as XML:
   RTP/SRTP/RRTP, RTCP and FDP entries are supported; anything else returns
   GF_NOT_SUPPORTED. */
GF_EXPORT
GF_Err gf_isom_dump_hint_sample(GF_ISOFile *the_file, u32 trackNumber, u32 SampleNum, FILE * trace)
{
	GF_ISOSample *tmp;
	GF_HintSampleEntryBox *entry;
	u32 descIndex, count, count2, i;
	GF_Err e=GF_OK;
	GF_BitStream *bs;
	GF_HintSample *s;
	GF_TrackBox *trak;
	GF_RTPPacket *pck;
	char *szName;

	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak || !IsHintTrack(trak)) return GF_BAD_PARAM;

	tmp = gf_isom_get_sample(the_file, trackNumber, SampleNum, &descIndex);
	if (!tmp) return GF_BAD_PARAM;

	e = Media_GetSampleDesc(trak->Media, descIndex, (GF_SampleEntryBox **) &entry, &count);
	if (e) {
		gf_isom_sample_del(&tmp);
		return e;
	}

	//check we can read the sample
	switch (entry->type) {
	case GF_ISOM_BOX_TYPE_RTP_STSD:
	case GF_ISOM_BOX_TYPE_SRTP_STSD:
	case GF_ISOM_BOX_TYPE_RRTP_STSD:
		szName = "RTP";
		break;
	case GF_ISOM_BOX_TYPE_RTCP_STSD:
		szName = "RCTP";
		break;
	case GF_ISOM_BOX_TYPE_FDP_STSD:
		szName = "FDP";
		break;
	default:
		gf_isom_sample_del(&tmp);
		return GF_NOT_SUPPORTED;
	}

	/* re-parse the raw sample payload as a hint sample */
	bs = gf_bs_new(tmp->data, tmp->dataLength, GF_BITSTREAM_READ);
	s = gf_isom_hint_sample_new(entry->type);
	s->trackID = trak->Header->trackID;
	s->sampleNumber = SampleNum;
	gf_isom_hint_sample_read(s, bs, tmp->dataLength);
	gf_bs_del(bs);

	count = gf_list_count(s->packetTable);

	fprintf(trace, "<%sHintSample SampleNumber=\"%d\" DecodingTime=\""LLD"\" RandomAccessPoint=\"%d\" PacketCount=\"%u\" reserved=\"%u\">\n", szName, SampleNum, LLD_CAST tmp->DTS, tmp->IsRAP, s->packetCount, s->reserved);

	/* FDP samples are box-structured: delegate to the generic box dumper */
	if (s->hint_subtype==GF_ISOM_BOX_TYPE_FDP_STSD) {
		e = gf_isom_box_dump((GF_Box*) s, trace);
		goto err_exit;
	}

	if (s->packetCount != count) {
		fprintf(trace, "<!-- WARNING: Broken %s hint sample, %d entries indicated but only %d parsed -->\n", szName, s->packetCount, count);
	}
	for (i=0; i<count; i++) {
		pck = (GF_RTPPacket *)gf_list_get(s->packetTable, i);

		if (pck->hint_subtype==GF_ISOM_BOX_TYPE_RTCP_STSD) {
			GF_RTCPPacket *rtcp_pck = (GF_RTCPPacket *) pck;
			fprintf(trace, "<RTCPHintPacket PacketNumber=\"%d\" V=\"%d\" P=\"%d\" Count=\"%d\" PayloadType=\"%d\" ", i+1, rtcp_pck->Version, rtcp_pck->Padding, rtcp_pck->Count, rtcp_pck->PayloadType);
			if (rtcp_pck->data) dump_data_attribute(trace, "payload", (char*)rtcp_pck->data, rtcp_pck->length);
			fprintf(trace, ">\n");
			fprintf(trace, "</RTCPHintPacket>\n");
		} else {
			fprintf(trace, "<RTPHintPacket PacketNumber=\"%d\" P=\"%d\" X=\"%d\" M=\"%d\" PayloadType=\"%d\"", i+1, pck->P_bit, pck->X_bit, pck->M_bit, pck->payloadType);
			fprintf(trace, " SequenceNumber=\"%d\" RepeatedPacket=\"%d\" DropablePacket=\"%d\" RelativeTransmissionTime=\"%d\" FullPacketSize=\"%d\">\n", pck->SequenceNumber, pck->R_bit, pck->B_bit, pck->relativeTransTime, gf_isom_hint_rtp_length(pck));

			//TLV is made of Boxes
			count2 = gf_list_count(pck->TLV);
			if (count2) {
				fprintf(trace, "<PrivateExtensionTable EntryCount=\"%d\">\n", count2);
				gf_isom_box_array_dump(pck->TLV, trace);
				fprintf(trace, "</PrivateExtensionTable>\n");
			}
			//DTE is made of NON boxes
			count2 = gf_list_count(pck->DataTable);
			if (count2) {
				fprintf(trace, "<PacketDataTable EntryCount=\"%d\">\n", count2);
				DTE_Dump(pck->DataTable, trace);
				fprintf(trace, "</PacketDataTable>\n");
			}
			fprintf(trace, "</RTPHintPacket>\n");
		}
	}

err_exit:
	fprintf(trace, "</%sHintSample>\n", szName);
	gf_isom_sample_del(&tmp);
	gf_isom_hint_sample_del(s);
	return e;
}
#endif /*GPAC_DISABLE_ISOM_HINTING*/

/* Emits a tx3g text box record as a <TextBox/> element (non-box XML form). */
static void tx3g_dump_box_nobox(FILE * trace, GF_BoxRecord *rec)
{
	fprintf(trace, "<TextBox top=\"%d\" left=\"%d\" bottom=\"%d\" right=\"%d\"/>\n", rec->top, rec->left, rec->bottom, rec->right);
}

/* Prints fromChar/toChar attributes, compensating character offsets for CR/LF
   pairs recorded in shift_offset; body continues in next chunk. */
static void tx3g_print_char_offsets(FILE * trace, u32 start, u32 end, u32 *shift_offset, u32 so_count)
{
	u32 i;
	if (shift_offset) {
		for
(i=0; i<so_count; i++) { if (start>shift_offset[i]) { start --; break; } } for (i=0; i<so_count; i++) { if (end>shift_offset[i]) { end --; break; } } } if (start || end) fprintf(trace, "fromChar=\"%d\" toChar=\"%d\" ", start, end); } static void tx3g_dump_style_nobox(FILE * trace, GF_StyleRecord *rec, u32 *shift_offset, u32 so_count) { fprintf(trace, "<Style "); if (rec->startCharOffset || rec->endCharOffset) tx3g_print_char_offsets(trace, rec->startCharOffset, rec->endCharOffset, shift_offset, so_count); fprintf(trace, "styles=\""); if (!rec->style_flags) { fprintf(trace, "Normal"); } else { if (rec->style_flags & 1) fprintf(trace, "Bold "); if (rec->style_flags & 2) fprintf(trace, "Italic "); if (rec->style_flags & 4) fprintf(trace, "Underlined "); } fprintf(trace, "\" fontID=\"%d\" fontSize=\"%d\" ", rec->fontID, rec->font_size); tx3g_dump_rgba8(trace, "color", rec->text_color); fprintf(trace, "/>\n"); } static char *tx3g_format_time(u64 ts, u32 timescale, char *szDur, Bool is_srt) { u32 h, m, s, ms; ts = (u32) (ts*1000 / timescale); h = (u32) (ts / 3600000); m = (u32) (ts/ 60000) - h*60; s = (u32) (ts/1000) - h*3600 - m*60; ms = (u32) (ts) - h*3600000 - m*60000 - s*1000; if (is_srt) { sprintf(szDur, "%02d:%02d:%02d,%03d", h, m, s, ms); } else { sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms); } return szDur; } static GF_Err gf_isom_dump_ttxt_track(GF_ISOFile *the_file, u32 track, FILE *dump, Bool box_dump) { u32 i, j, count, di, nb_descs, shift_offset[20], so_count; u64 last_DTS; size_t len; GF_Box *a; Bool has_scroll; char szDur[100]; GF_Tx3gSampleEntryBox *txt; GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, track); if (!trak) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } txt = (GF_Tx3gSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, 0); switch (txt->type) { case GF_ISOM_BOX_TYPE_TX3G: 
case GF_ISOM_BOX_TYPE_TEXT: break; case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: default: return GF_BAD_PARAM; } if (box_dump) { fprintf(dump, "<TextTrack trackID=\"%d\" version=\"1.1\">\n", gf_isom_get_track_id(the_file, track) ); } else { fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- GPAC 3GPP Text Stream -->\n"); fprintf(dump, "<TextStream version=\"1.1\">\n"); } fprintf(dump, "<TextStreamHeader width=\"%d\" height=\"%d\" layer=\"%d\" translation_x=\"%d\" translation_y=\"%d\">\n", trak->Header->width >> 16 , trak->Header->height >> 16, trak->Header->layer, trak->Header->matrix[6] >> 16, trak->Header->matrix[7] >> 16); nb_descs = gf_list_count(trak->Media->information->sampleTable->SampleDescription->other_boxes); for (i=0; i<nb_descs; i++) { GF_Tx3gSampleEntryBox *txt = (GF_Tx3gSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, i); if (box_dump) { gf_isom_box_dump((GF_Box*) txt, dump); } else if (txt->type==GF_ISOM_BOX_TYPE_TX3G) { fprintf(dump, "<TextSampleDescription horizontalJustification=\""); switch (txt->horizontal_justification) { case 1: fprintf(dump, "center"); break; case -1: fprintf(dump, "right"); break; default: fprintf(dump, "left"); break; } fprintf(dump, "\" verticalJustification=\""); switch (txt->vertical_justification) { case 1: fprintf(dump, "center"); break; case -1: fprintf(dump, "bottom"); break; default: fprintf(dump, "top"); break; } fprintf(dump, "\" "); tx3g_dump_rgba8(dump, "backColor", txt->back_color); fprintf(dump, " verticalText=\"%s\"", (txt->displayFlags & GF_TXT_VERTICAL) ? "yes" : "no"); fprintf(dump, " fillTextRegion=\"%s\"", (txt->displayFlags & GF_TXT_FILL_REGION) ? "yes" : "no"); fprintf(dump, " continuousKaraoke=\"%s\"", (txt->displayFlags & GF_TXT_KARAOKE) ? 
"yes" : "no"); has_scroll = GF_FALSE; if (txt->displayFlags & GF_TXT_SCROLL_IN) { has_scroll = GF_TRUE; if (txt->displayFlags & GF_TXT_SCROLL_OUT) fprintf(dump, " scroll=\"InOut\""); else fprintf(dump, " scroll=\"In\""); } else if (txt->displayFlags & GF_TXT_SCROLL_OUT) { has_scroll = GF_TRUE; fprintf(dump, " scroll=\"Out\""); } else { fprintf(dump, " scroll=\"None\""); } if (has_scroll) { u32 mode = (txt->displayFlags & GF_TXT_SCROLL_DIRECTION)>>7; switch (mode) { case GF_TXT_SCROLL_CREDITS: fprintf(dump, " scrollMode=\"Credits\""); break; case GF_TXT_SCROLL_MARQUEE: fprintf(dump, " scrollMode=\"Marquee\""); break; case GF_TXT_SCROLL_DOWN: fprintf(dump, " scrollMode=\"Down\""); break; case GF_TXT_SCROLL_RIGHT: fprintf(dump, " scrollMode=\"Right\""); break; default: fprintf(dump, " scrollMode=\"Unknown\""); break; } } fprintf(dump, ">\n"); fprintf(dump, "<FontTable>\n"); if (txt->font_table) { for (j=0; j<txt->font_table->entry_count; j++) { fprintf(dump, "<FontTableEntry fontName=\"%s\" fontID=\"%d\"/>\n", txt->font_table->fonts[j].fontName, txt->font_table->fonts[j].fontID); } } fprintf(dump, "</FontTable>\n"); if ((txt->default_box.bottom == txt->default_box.top) || (txt->default_box.right == txt->default_box.left)) { txt->default_box.top = txt->default_box.left = 0; txt->default_box.right = trak->Header->width / 65536; txt->default_box.bottom = trak->Header->height / 65536; } tx3g_dump_box_nobox(dump, &txt->default_box); tx3g_dump_style_nobox(dump, &txt->default_style, NULL, 0); fprintf(dump, "</TextSampleDescription>\n"); } else { GF_TextSampleEntryBox *text = (GF_TextSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, i); fprintf(dump, "<TextSampleDescription horizontalJustification=\""); switch (text->textJustification) { case 1: fprintf(dump, "center"); break; case -1: fprintf(dump, "right"); break; default: fprintf(dump, "left"); break; } fprintf(dump, "\""); tx3g_dump_rgb16(dump, " backColor", 
text->background_color); if ((text->default_box.bottom == text->default_box.top) || (text->default_box.right == text->default_box.left)) { text->default_box.top = text->default_box.left = 0; text->default_box.right = trak->Header->width / 65536; text->default_box.bottom = trak->Header->height / 65536; } if (text->displayFlags & GF_TXT_SCROLL_IN) { if (text->displayFlags & GF_TXT_SCROLL_OUT) fprintf(dump, " scroll=\"InOut\""); else fprintf(dump, " scroll=\"In\""); } else if (text->displayFlags & GF_TXT_SCROLL_OUT) { fprintf(dump, " scroll=\"Out\""); } else { fprintf(dump, " scroll=\"None\""); } fprintf(dump, ">\n"); tx3g_dump_box_nobox(dump, &text->default_box); fprintf(dump, "</TextSampleDescription>\n"); } } fprintf(dump, "</TextStreamHeader>\n"); last_DTS = 0; count = gf_isom_get_sample_count(the_file, track); for (i=0; i<count; i++) { GF_BitStream *bs; GF_TextSample *txt; GF_ISOSample *s = gf_isom_get_sample(the_file, track, i+1, &di); if (!s) continue; fprintf(dump, "<TextSample sampleTime=\"%s\" sampleDescriptionIndex=\"%d\"", tx3g_format_time(s->DTS, trak->Media->mediaHeader->timeScale, szDur, GF_FALSE), di); bs = gf_bs_new(s->data, s->dataLength, GF_BITSTREAM_READ); txt = gf_isom_parse_texte_sample(bs); gf_bs_del(bs); if (!box_dump) { if (txt->highlight_color) { fprintf(dump, " "); tx3g_dump_rgba8(dump, "highlightColor", txt->highlight_color->hil_color); } if (txt->scroll_delay) { Double delay = txt->scroll_delay->scroll_delay; delay /= trak->Media->mediaHeader->timeScale; fprintf(dump, " scrollDelay=\"%g\"", delay); } if (txt->wrap) fprintf(dump, " wrap=\"%s\"", (txt->wrap->wrap_flag==0x01) ? 
"Automatic" : "None"); } so_count = 0; fprintf(dump, " xml:space=\"preserve\">"); if (!txt->len) { last_DTS = (u32) trak->Media->mediaHeader->duration; } else { unsigned short utf16Line[10000]; last_DTS = s->DTS; /*UTF16*/ if ((txt->len>2) && ((unsigned char) txt->text[0] == (unsigned char) 0xFE) && ((unsigned char) txt->text[1] == (unsigned char) 0xFF)) { /*copy 2 more chars because the lib always add 2 '0' at the end for UTF16 end of string*/ memcpy((char *) utf16Line, txt->text+2, sizeof(char) * (txt->len)); len = gf_utf8_wcslen((const u16*)utf16Line); } else { char *str; str = txt->text; len = gf_utf8_mbstowcs((u16*)utf16Line, 10000, (const char **) &str); } if (len != (size_t) -1) { utf16Line[len] = 0; for (j=0; j<len; j++) { if ((utf16Line[j]=='\n') || (utf16Line[j]=='\r') || (utf16Line[j]==0x85) || (utf16Line[j]==0x2028) || (utf16Line[j]==0x2029) ) { fprintf(dump, "\n"); if ((utf16Line[j]=='\r') && (utf16Line[j+1]=='\n')) { shift_offset[so_count] = j; so_count++; j++; } } else { switch (utf16Line[j]) { case '\'': fprintf(dump, "&apos;"); break; case '\"': fprintf(dump, "&quot;"); break; case '&': fprintf(dump, "&amp;"); break; case '>': fprintf(dump, "&gt;"); break; case '<': fprintf(dump, "&lt;"); break; default: if (utf16Line[j] < 128) { fprintf(dump, "%c", (u8) utf16Line[j]); } else { fprintf(dump, "&#%d;", utf16Line[j]); } break; } } } } } if (box_dump) { if (txt->highlight_color) gf_isom_box_dump((GF_Box*) txt->highlight_color, dump); if (txt->scroll_delay) gf_isom_box_dump((GF_Box*) txt->scroll_delay, dump); if (txt->wrap) gf_isom_box_dump((GF_Box*) txt->wrap, dump); if (txt->box) gf_isom_box_dump((GF_Box*) txt->box, dump); if (txt->styles) gf_isom_box_dump((GF_Box*) txt->styles, dump); } else { if (txt->box) tx3g_dump_box_nobox(dump, &txt->box->box); if (txt->styles) { for (j=0; j<txt->styles->entry_count; j++) { tx3g_dump_style_nobox(dump, &txt->styles->styles[j], shift_offset, so_count); } } } j=0; while ((a = (GF_Box *)gf_list_enum(txt->others, 
&j))) { if (box_dump) { gf_isom_box_dump((GF_Box*) a, dump); continue; } switch (a->type) { case GF_ISOM_BOX_TYPE_HLIT: fprintf(dump, "<Highlight "); tx3g_print_char_offsets(dump, ((GF_TextHighlightBox *)a)->startcharoffset, ((GF_TextHighlightBox *)a)->endcharoffset, shift_offset, so_count); fprintf(dump, "/>\n"); break; case GF_ISOM_BOX_TYPE_HREF: { GF_TextHyperTextBox *ht = (GF_TextHyperTextBox *)a; fprintf(dump, "<HyperLink "); tx3g_print_char_offsets(dump, ht->startcharoffset, ht->endcharoffset, shift_offset, so_count); fprintf(dump, "URL=\"%s\" URLToolTip=\"%s\"/>\n", ht->URL ? ht->URL : "", ht->URL_hint ? ht->URL_hint : ""); } break; case GF_ISOM_BOX_TYPE_BLNK: fprintf(dump, "<Blinking "); tx3g_print_char_offsets(dump, ((GF_TextBlinkBox *)a)->startcharoffset, ((GF_TextBlinkBox *)a)->endcharoffset, shift_offset, so_count); fprintf(dump, "/>\n"); break; case GF_ISOM_BOX_TYPE_KROK: { u32 k; Double t; GF_TextKaraokeBox *krok = (GF_TextKaraokeBox *)a; t = krok->highlight_starttime; t /= trak->Media->mediaHeader->timeScale; fprintf(dump, "<Karaoke startTime=\"%g\">\n", t); for (k=0; k<krok->nb_entries; k++) { t = krok->records[k].highlight_endtime; t /= trak->Media->mediaHeader->timeScale; fprintf(dump, "<KaraokeRange "); tx3g_print_char_offsets(dump, krok->records[k].start_charoffset, krok->records[k].end_charoffset, shift_offset, so_count); fprintf(dump, "endTime=\"%g\"/>\n", t); } fprintf(dump, "</Karaoke>\n"); } break; } } fprintf(dump, "</TextSample>\n"); gf_isom_sample_del(&s); gf_isom_delete_text_sample(txt); gf_set_progress("TTXT Extract", i, count); } if (last_DTS < trak->Media->mediaHeader->duration) { fprintf(dump, "<TextSample sampleTime=\"%s\" text=\"\" />\n", tx3g_format_time(trak->Media->mediaHeader->duration, trak->Media->mediaHeader->timeScale, szDur, GF_FALSE)); } if (box_dump) { fprintf(dump, "</TextTrack>\n"); } else { fprintf(dump, "</TextStream>\n"); } if (count) gf_set_progress("TTXT Extract", count, count); return GF_OK; } static GF_Err 
gf_isom_dump_srt_track(GF_ISOFile *the_file, u32 track, FILE *dump) { u32 i, j, k, count, di, len, ts, cur_frame; u64 start, end; GF_Tx3gSampleEntryBox *txtd; GF_BitStream *bs; char szDur[100]; GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, track); if (!trak) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } ts = trak->Media->mediaHeader->timeScale; cur_frame = 0; end = 0; count = gf_isom_get_sample_count(the_file, track); for (i=0; i<count; i++) { GF_TextSample *txt; GF_ISOSample *s = gf_isom_get_sample(the_file, track, i+1, &di); if (!s) continue; start = s->DTS; if (s->dataLength==2) { gf_isom_sample_del(&s); continue; } if (i+1<count) { GF_ISOSample *next = gf_isom_get_sample_info(the_file, track, i+2, NULL, NULL); if (next) { end = next->DTS; gf_isom_sample_del(&next); } } else { end = gf_isom_get_media_duration(the_file, track) ; } cur_frame++; fprintf(dump, "%d\n", cur_frame); tx3g_format_time(start, ts, szDur, GF_TRUE); fprintf(dump, "%s --> ", szDur); tx3g_format_time(end, ts, szDur, GF_TRUE); fprintf(dump, "%s\n", szDur); bs = gf_bs_new(s->data, s->dataLength, GF_BITSTREAM_READ); txt = gf_isom_parse_texte_sample(bs); gf_bs_del(bs); txtd = (GF_Tx3gSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, di-1); if (!txt->len) { fprintf(dump, "\n"); } else { u32 styles, char_num, new_styles, color, new_color; u16 utf16Line[10000]; /*UTF16*/ if ((txt->len>2) && ((unsigned char) txt->text[0] == (unsigned char) 0xFE) && ((unsigned char) txt->text[1] == (unsigned char) 0xFF)) { memcpy(utf16Line, txt->text+2, sizeof(char)*txt->len); ( ((char *)utf16Line)[txt->len] ) = 0; len = txt->len; } else { u8 *str = (u8 *) (txt->text); size_t res = gf_utf8_mbstowcs(utf16Line, 10000, (const char **) &str); if (res==(size_t)-1) return GF_NON_COMPLIANT_BITSTREAM; len = (u32) res; utf16Line[len] = 0; } char_num = 0; 
styles = 0; new_styles = txtd->default_style.style_flags; color = new_color = txtd->default_style.text_color; for (j=0; j<len; j++) { Bool is_new_line; if (txt->styles) { new_styles = txtd->default_style.style_flags; new_color = txtd->default_style.text_color; for (k=0; k<txt->styles->entry_count; k++) { if (txt->styles->styles[k].startCharOffset>char_num) continue; if (txt->styles->styles[k].endCharOffset<char_num+1) continue; if (txt->styles->styles[k].style_flags & (GF_TXT_STYLE_ITALIC | GF_TXT_STYLE_BOLD | GF_TXT_STYLE_UNDERLINED)) { new_styles = txt->styles->styles[k].style_flags; new_color = txt->styles->styles[k].text_color; break; } } } if (new_styles != styles) { if ((new_styles & GF_TXT_STYLE_BOLD) && !(styles & GF_TXT_STYLE_BOLD)) fprintf(dump, "<b>"); if ((new_styles & GF_TXT_STYLE_ITALIC) && !(styles & GF_TXT_STYLE_ITALIC)) fprintf(dump, "<i>"); if ((new_styles & GF_TXT_STYLE_UNDERLINED) && !(styles & GF_TXT_STYLE_UNDERLINED)) fprintf(dump, "<u>"); if ((styles & GF_TXT_STYLE_UNDERLINED) && !(new_styles & GF_TXT_STYLE_UNDERLINED)) fprintf(dump, "</u>"); if ((styles & GF_TXT_STYLE_ITALIC) && !(new_styles & GF_TXT_STYLE_ITALIC)) fprintf(dump, "</i>"); if ((styles & GF_TXT_STYLE_BOLD) && !(new_styles & GF_TXT_STYLE_BOLD)) fprintf(dump, "</b>"); styles = new_styles; } if (new_color != color) { if (new_color ==txtd->default_style.text_color) { fprintf(dump, "</font>"); } else { fprintf(dump, "<font color=\"%s\">", gf_color_get_name(new_color) ); } color = new_color; } /*not sure if styles must be reseted at line breaks in srt...*/ is_new_line = GF_FALSE; if ((utf16Line[j]=='\n') || (utf16Line[j]=='\r') ) { if ((utf16Line[j]=='\r') && (utf16Line[j+1]=='\n')) j++; fprintf(dump, "\n"); is_new_line = GF_TRUE; } if (!is_new_line) { size_t sl; char szChar[30]; s16 swT[2], *swz; swT[0] = utf16Line[j]; swT[1] = 0; swz= (s16 *)swT; sl = gf_utf8_wcstombs(szChar, 30, (const unsigned short **) &swz); if (sl == (size_t)-1) sl=0; szChar[(u32) sl]=0; fprintf(dump, "%s", 
szChar); } char_num++; } new_styles = 0; if (new_styles != styles) { if (styles & GF_TXT_STYLE_UNDERLINED) fprintf(dump, "</u>"); if (styles & GF_TXT_STYLE_ITALIC) fprintf(dump, "</i>"); if (styles & GF_TXT_STYLE_BOLD) fprintf(dump, "</b>"); // styles = 0; } if (color != txtd->default_style.text_color) { fprintf(dump, "</font>"); // color = txtd->default_style.text_color; } fprintf(dump, "\n"); } gf_isom_sample_del(&s); gf_isom_delete_text_sample(txt); fprintf(dump, "\n"); gf_set_progress("SRT Extract", i, count); } if (count) gf_set_progress("SRT Extract", i, count); return GF_OK; } static GF_Err gf_isom_dump_svg_track(GF_ISOFile *the_file, u32 track, FILE *dump) { char nhmlFileName[1024]; FILE *nhmlFile; u32 i, count, di, ts, cur_frame; u64 start, end; GF_BitStream *bs; GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, track); if (!trak) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } strcpy(nhmlFileName, the_file->fileName); strcat(nhmlFileName, ".nhml"); nhmlFile = gf_fopen(nhmlFileName, "wt"); fprintf(nhmlFile, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(nhmlFile, "<NHNTStream streamType=\"3\" objectTypeIndication=\"10\" timeScale=\"%d\" baseMediaFile=\"file.svg\" inRootOD=\"yes\">\n", trak->Media->mediaHeader->timeScale); fprintf(nhmlFile, "<NHNTSample isRAP=\"yes\" DTS=\"0\" xmlFrom=\"doc.start\" xmlTo=\"text_1.start\"/>\n"); ts = trak->Media->mediaHeader->timeScale; cur_frame = 0; end = 0; fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(dump, "<svg version=\"1.2\" baseProfile=\"tiny\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%d\" height=\"%d\" fill=\"black\">\n", trak->Header->width >> 16 , trak->Header->height >> 16); fprintf(dump, "<g transform=\"translate(%d, %d)\" text-anchor=\"middle\">\n", (trak->Header->width >> 16)/2 , (trak->Header->height >> 16)/2); 
count = gf_isom_get_sample_count(the_file, track); for (i=0; i<count; i++) { GF_TextSample *txt; GF_ISOSample *s = gf_isom_get_sample(the_file, track, i+1, &di); if (!s) continue; start = s->DTS; if (s->dataLength==2) { gf_isom_sample_del(&s); continue; } if (i+1<count) { GF_ISOSample *next = gf_isom_get_sample_info(the_file, track, i+2, NULL, NULL); if (next) { end = next->DTS; gf_isom_sample_del(&next); } } cur_frame++; bs = gf_bs_new(s->data, s->dataLength, GF_BITSTREAM_READ); txt = gf_isom_parse_texte_sample(bs); gf_bs_del(bs); if (!txt->len) continue; fprintf(dump, " <text id=\"text_%d\" display=\"none\">%s\n", cur_frame, txt->text); fprintf(dump, " <set attributeName=\"display\" to=\"inline\" begin=\"%g\" end=\"%g\"/>\n", ((s64)start*1.0)/ts, ((s64)end*1.0)/ts); fprintf(dump, " <discard begin=\"%g\"/>\n", ((s64)end*1.0)/ts); fprintf(dump, " </text>\n"); gf_isom_sample_del(&s); gf_isom_delete_text_sample(txt); fprintf(dump, "\n"); gf_set_progress("SRT Extract", i, count); if (i == count - 2) { fprintf(nhmlFile, "<NHNTSample isRAP=\"no\" DTS=\"%f\" xmlFrom=\"text_%d.start\" xmlTo=\"doc.end\"/>\n", ((s64)start*1.0), cur_frame); } else { fprintf(nhmlFile, "<NHNTSample isRAP=\"no\" DTS=\"%f\" xmlFrom=\"text_%d.start\" xmlTo=\"text_%d.start\"/>\n", ((s64)start*1.0), cur_frame, cur_frame+1); } } fprintf(dump, "</g>\n"); fprintf(dump, "</svg>\n"); fprintf(nhmlFile, "</NHNTStream>\n"); gf_fclose(nhmlFile); if (count) gf_set_progress("SRT Extract", i, count); return GF_OK; } GF_EXPORT GF_Err gf_isom_text_dump(GF_ISOFile *the_file, u32 track, FILE *dump, GF_TextDumpType dump_type) { switch (dump_type) { case GF_TEXTDUMPTYPE_SVG: return gf_isom_dump_svg_track(the_file, track, dump); case GF_TEXTDUMPTYPE_SRT: return gf_isom_dump_srt_track(the_file, track, dump); case GF_TEXTDUMPTYPE_TTXT: case GF_TEXTDUMPTYPE_TTXT_BOXES: return gf_isom_dump_ttxt_track(the_file, track, dump, (dump_type==GF_TEXTDUMPTYPE_TTXT_BOXES) ? 
GF_TRUE : GF_FALSE);
	default:
		return GF_BAD_PARAM;
	}
}

/* ISMA 1.0 Encryption and Authentication V 1.0 dump */

/* Dumps the 'sinf' ProtectionSchemeInfo box and its frma/schm/schi children. */
GF_Err sinf_dump(GF_Box *a, FILE * trace)
{
	GF_ProtectionSchemeInfoBox *p;
	p = (GF_ProtectionSchemeInfoBox *)a;
	gf_isom_box_dump_start(a, "ProtectionSchemeInfoBox", trace);
	fprintf(trace, ">\n");
	/* p->size guards: children are only dumped for boxes parsed from a file */
	if (p->size) gf_isom_box_dump_ex(p->original_format, trace, GF_ISOM_BOX_TYPE_FRMA);
	if (p->size) gf_isom_box_dump_ex(p->scheme_type, trace, GF_ISOM_BOX_TYPE_SCHM);
	if (p->size) gf_isom_box_dump_ex(p->info, trace, GF_ISOM_BOX_TYPE_SCHI);
	gf_isom_box_dump_done("ProtectionSchemeInfoBox", a, trace);
	return GF_OK;
}

/* Dumps the 'frma' OriginalFormat box (pre-protection sample entry 4CC). */
GF_Err frma_dump(GF_Box *a, FILE * trace)
{
	GF_OriginalFormatBox *p;
	p = (GF_OriginalFormatBox *)a;
	gf_isom_box_dump_start(a, "OriginalFormatBox", trace);
	fprintf(trace, "data_format=\"%s\">\n", gf_4cc_to_str(p->data_format));
	gf_isom_box_dump_done("OriginalFormatBox", a, trace);
	return GF_OK;
}

/* Dumps the 'schm' SchemeType box. */
GF_Err schm_dump(GF_Box *a, FILE * trace)
{
	GF_SchemeTypeBox *p;
	p = (GF_SchemeTypeBox *)a;
	gf_isom_box_dump_start(a, "SchemeTypeBox", trace);
	fprintf(trace, "scheme_type=\"%s\" scheme_version=\"%d\" ", gf_4cc_to_str(p->scheme_type), p->scheme_version);
	if (p->URI) fprintf(trace, "scheme_uri=\"%s\"", p->URI);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("SchemeTypeBox", a, trace);
	return GF_OK;
}

/* Dumps the 'schi' SchemeInformation box and whichever DRM child it holds. */
GF_Err schi_dump(GF_Box *a, FILE * trace)
{
	GF_SchemeInformationBox *p;
	p = (GF_SchemeInformationBox *)a;
	gf_isom_box_dump_start(a, "SchemeInformationBox", trace);
	fprintf(trace, ">\n");
	if (p->ikms) gf_isom_box_dump(p->ikms, trace);
	if (p->isfm) gf_isom_box_dump(p->isfm, trace);
	if (p->islt) gf_isom_box_dump(p->islt, trace);
	if (p->odkm) gf_isom_box_dump(p->odkm, trace);
	if (p->tenc) gf_isom_box_dump(p->tenc, trace);
	if (p->adkm) gf_isom_box_dump(p->adkm, trace);
	gf_isom_box_dump_done("SchemeInformationBox", a, trace);
	return GF_OK;
}

/* Dumps the ISMA 'iKMS' key management system box (KMS URI). */
GF_Err iKMS_dump(GF_Box *a, FILE * trace)
{
	GF_ISMAKMSBox *p;
	p = (GF_ISMAKMSBox *)a;
	gf_isom_box_dump_start(a, "KMSBox", trace);
	fprintf(trace, "kms_URI=\"%s\">\n", p->URI);
	gf_isom_box_dump_done("KMSBox", a, trace);
	return GF_OK;
}

/* Dumps the ISMA/OMA sample format box; the element name depends on the 4CC. */
GF_Err iSFM_dump(GF_Box *a, FILE * trace)
{
	GF_ISMASampleFormatBox *p;
	const char *name = (a->type==GF_ISOM_BOX_TYPE_ISFM) ? "ISMASampleFormat" : "OMADRMAUFormatBox";
	p = (GF_ISMASampleFormatBox *)a;
	gf_isom_box_dump_start(a, name, trace);
	fprintf(trace, "selective_encryption=\"%d\" key_indicator_length=\"%d\" IV_length=\"%d\">\n", p->selective_encryption, p->key_indicator_length, p->IV_length);
	gf_isom_box_dump_done(name, a, trace);
	return GF_OK;
}

/* Dumps the ISMACryp salt box. */
GF_Err iSLT_dump(GF_Box *a, FILE * trace)
{
	GF_ISMACrypSaltBox *p = (GF_ISMACrypSaltBox *)a;
	gf_isom_box_dump_start(a, "ISMACrypSaltBox", trace);
	fprintf(trace, "salt=\""LLU"\">\n", p->salt);
	gf_isom_box_dump_done("ISMACrypSaltBox", a, trace);
	return GF_OK;
}

/* Dumps every encrypted (enc*) sample description of the given track. */
GF_EXPORT
GF_Err gf_isom_dump_ismacryp_protection(GF_ISOFile *the_file, u32 trackNumber, FILE * trace)
{
	u32 i, count;
	GF_SampleEntryBox *entry;
	GF_Err e;
	GF_TrackBox *trak;

	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak) return GF_BAD_PARAM;

	fprintf(trace, "<ISMACrypSampleDescriptions>\n");
	count = gf_isom_get_sample_description_count(the_file, trackNumber);
	for (i=0; i<count; i++) {
		e = Media_GetSampleDesc(trak->Media, i+1, (GF_SampleEntryBox **) &entry, NULL);
		if (e) return e;

		switch (entry->type) {
		case GF_ISOM_BOX_TYPE_ENCA:
		case GF_ISOM_BOX_TYPE_ENCV:
		case GF_ISOM_BOX_TYPE_ENCT:
		case GF_ISOM_BOX_TYPE_ENCS:
			break;
		default:
			continue;
		}
		gf_isom_box_dump(entry, trace);
	}
	fprintf(trace, "</ISMACrypSampleDescriptions>\n");
	return GF_OK;
}

/* Dumps one ISMACryp-protected sample (IV, key indicator, sync info) as XML. */
GF_EXPORT
GF_Err gf_isom_dump_ismacryp_sample(GF_ISOFile *the_file, u32 trackNumber, u32 SampleNum, FILE * trace)
{
	GF_ISOSample *samp;
	GF_ISMASample *isma_samp;
	u32 descIndex;

	samp = gf_isom_get_sample(the_file, trackNumber, SampleNum, &descIndex);
	if (!samp) return GF_BAD_PARAM;

	isma_samp = gf_isom_get_ismacryp_sample(the_file, trackNumber, samp, descIndex);
	if (!isma_samp) {
		gf_isom_sample_del(&samp);
		return GF_NOT_SUPPORTED;
	}

	fprintf(trace, "<ISMACrypSample SampleNumber=\"%d\" DataSize=\"%d\" CompositionTime=\""LLD"\" ", SampleNum, isma_samp->dataLength, LLD_CAST (samp->DTS+samp->CTS_Offset) );
	if (samp->CTS_Offset) fprintf(trace, "DecodingTime=\""LLD"\" ", LLD_CAST samp->DTS);
	if (gf_isom_has_sync_points(the_file, trackNumber)) fprintf(trace, "RandomAccessPoint=\"%s\" ", samp->IsRAP ? "Yes" : "No");
	fprintf(trace, "IsEncrypted=\"%s\" ", (isma_samp->flags & GF_ISOM_ISMA_IS_ENCRYPTED) ? "Yes" : "No");
	if (isma_samp->flags & GF_ISOM_ISMA_IS_ENCRYPTED) {
		fprintf(trace, "IV=\""LLD"\" ", LLD_CAST isma_samp->IV);
		if (isma_samp->key_indicator) dump_data_attribute(trace, "KeyIndicator", (char*)isma_samp->key_indicator, isma_samp->KI_length);
	}
	fprintf(trace, "/>\n");

	gf_isom_sample_del(&samp);
	gf_isom_ismacryp_delete_sample(isma_samp);
	return GF_OK;
}

/* end of ISMA 1.0 Encryption and Authentication V 1.0 */

/* Apple extensions */

/* Dumps one iTunes metadata ('ilst') item; the element name is picked from the
   item 4CC, and a few numeric items (disk/track/tempo/...) are decoded inline.
   NOTE(review): the DISK/TRKN/TMPO branches dereference itune->data->data
   without a NULL check, unlike the CPIL/PGAP branches below — verify upstream. */
GF_Err ilst_item_dump(GF_Box *a, FILE * trace)
{
	GF_BitStream *bs;
	u32 val;
	Bool no_dump = GF_FALSE;
	char *name = "UnknownBox";
	GF_ListItemBox *itune = (GF_ListItemBox *)a;
	switch (itune->type) {
	case GF_ISOM_BOX_TYPE_0xA9NAM:
		name = "NameBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9CMT:
		name = "CommentBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9DAY:
		name = "CreatedBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9ART:
		name = "ArtistBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9TRK:
		name = "TrackBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9ALB:
		name = "AlbumBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9COM:
		name = "CompositorBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9WRT:
		name = "WriterBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9TOO:
		name = "ToolBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9CPY:
		name = "CopyrightBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9DES:
		name = "DescriptionBox";
		break;
	case GF_ISOM_BOX_TYPE_0xA9GEN:
	case GF_ISOM_BOX_TYPE_GNRE:
		name = "GenreBox";
		break;
	case GF_ISOM_BOX_TYPE_aART:
		name = "AlbumArtistBox";
		break;
	case GF_ISOM_BOX_TYPE_PGAP:
		name = "GapelessBox";
		break;
	case GF_ISOM_BOX_TYPE_DISK:
		name = "DiskBox";
		break;
	case GF_ISOM_BOX_TYPE_TRKN:
		name = "TrackNumberBox";
		break;
	case GF_ISOM_BOX_TYPE_TMPO:
		name = "TempoBox";
		break;
	case GF_ISOM_BOX_TYPE_CPIL:
		name = "CompilationBox";
		break;
	case GF_ISOM_BOX_TYPE_COVR:
		name = "CoverArtBox";
		no_dump = GF_TRUE;
		break;
	case GF_ISOM_BOX_TYPE_iTunesSpecificInfo:
		name = "iTunesSpecificBox";
		no_dump = GF_TRUE;
		break;
	case GF_ISOM_BOX_TYPE_0xA9GRP:
		name = "GroupBox";
		break;
	case GF_ISOM_ITUNE_ENCODER:
		name = "EncoderBox";
		break;
	}
	gf_isom_box_dump_start(a, name, trace);

	if (!no_dump) {
		switch (itune->type) {
		case GF_ISOM_BOX_TYPE_DISK:
		case GF_ISOM_BOX_TYPE_TRKN:
			/* payload layout: 16-bit pad, 16-bit number, 16-bit total */
			bs = gf_bs_new(itune->data->data, itune->data->dataSize, GF_BITSTREAM_READ);
			gf_bs_read_int(bs, 16);
			val = gf_bs_read_int(bs, 16);
			if (itune->type==GF_ISOM_BOX_TYPE_DISK) {
				fprintf(trace, " DiskNumber=\"%d\" NbDisks=\"%d\" ", val, gf_bs_read_int(bs, 16) );
			} else {
				fprintf(trace, " TrackNumber=\"%d\" NbTracks=\"%d\" ", val, gf_bs_read_int(bs, 16) );
			}
			gf_bs_del(bs);
			break;
		case GF_ISOM_BOX_TYPE_TMPO:
			bs = gf_bs_new(itune->data->data, itune->data->dataSize, GF_BITSTREAM_READ);
			fprintf(trace, " BPM=\"%d\" ", gf_bs_read_int(bs, 16) );
			gf_bs_del(bs);
			break;
		case GF_ISOM_BOX_TYPE_CPIL:
			fprintf(trace, " IsCompilation=\"%s\" ", (itune->data && itune->data->data && itune->data->data[0]) ? "yes" : "no");
			break;
		case GF_ISOM_BOX_TYPE_PGAP:
			fprintf(trace, " IsGapeless=\"%s\" ", (itune->data && itune->data->data && itune->data->data[0]) ?
"yes" : "no"); break; default: if (strcmp(name, "UnknownBox") && itune->data && itune->data->data) { fprintf(trace, " value=\""); if (itune->data && itune->data->data[0]) { dump_data_string(trace, itune->data->data, itune->data->dataSize); } else { dump_data(trace, itune->data->data, itune->data->dataSize); } fprintf(trace, "\" "); } break; } } fprintf(trace, ">\n"); gf_isom_box_dump_done(name, a, trace); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_ADOBE GF_Err abst_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeBootstrapInfoBox *p = (GF_AdobeBootstrapInfoBox*)a; gf_isom_box_dump_start(a, "AdobeBootstrapBox", trace); fprintf(trace, "BootstrapinfoVersion=\"%u\" Profile=\"%u\" Live=\"%u\" Update=\"%u\" TimeScale=\"%u\" CurrentMediaTime=\""LLU"\" SmpteTimeCodeOffset=\""LLU"\" ", p->bootstrapinfo_version, p->profile, p->live, p->update, p->time_scale, p->current_media_time, p->smpte_time_code_offset); if (p->movie_identifier) fprintf(trace, "MovieIdentifier=\"%s\" ", p->movie_identifier); if (p->drm_data) fprintf(trace, "DrmData=\"%s\" ", p->drm_data); if (p->meta_data) fprintf(trace, "MetaData=\"%s\" ", p->meta_data); fprintf(trace, ">\n"); for (i=0; i<p->server_entry_count; i++) { char *str = (char*)gf_list_get(p->server_entry_table, i); fprintf(trace, "<ServerEntry>%s</ServerEntry>\n", str); } for (i=0; i<p->quality_entry_count; i++) { char *str = (char*)gf_list_get(p->quality_entry_table, i); fprintf(trace, "<QualityEntry>%s</QualityEntry>\n", str); } for (i=0; i<p->segment_run_table_count; i++) gf_isom_box_dump(gf_list_get(p->segment_run_table_entries, i), trace); for (i=0; i<p->fragment_run_table_count; i++) gf_isom_box_dump(gf_list_get(p->fragment_run_table_entries, i), trace); gf_isom_box_dump_done("AdobeBootstrapBox", a, trace); return GF_OK; } GF_Err afra_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeFragRandomAccessBox *p = (GF_AdobeFragRandomAccessBox*)a; gf_isom_box_dump_start(a, "AdobeFragmentRandomAccessBox", trace); fprintf(trace, "LongIDs=\"%u\" 
LongOffsets=\"%u\" TimeScale=\"%u\">\n", p->long_ids, p->long_offsets, p->time_scale); for (i=0; i<p->entry_count; i++) { GF_AfraEntry *ae = (GF_AfraEntry *)gf_list_get(p->local_access_entries, i); fprintf(trace, "<LocalAccessEntry Time=\""LLU"\" Offset=\""LLU"\"/>\n", ae->time, ae->offset); } for (i=0; i<p->global_entry_count; i++) { GF_GlobalAfraEntry *gae = (GF_GlobalAfraEntry *)gf_list_get(p->global_access_entries, i); fprintf(trace, "<GlobalAccessEntry Time=\""LLU"\" Segment=\"%u\" Fragment=\"%u\" AfraOffset=\""LLU"\" OffsetFromAfra=\""LLU"\"/>\n", gae->time, gae->segment, gae->fragment, gae->afra_offset, gae->offset_from_afra); } gf_isom_box_dump_done("AdobeFragmentRandomAccessBox", a, trace); return GF_OK; } GF_Err afrt_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeFragmentRunTableBox *p = (GF_AdobeFragmentRunTableBox*)a; gf_isom_box_dump_start(a, "AdobeFragmentRunTableBox", trace); fprintf(trace, "TimeScale=\"%u\">\n", p->timescale); for (i=0; i<p->quality_entry_count; i++) { char *str = (char*)gf_list_get(p->quality_segment_url_modifiers, i); fprintf(trace, "<QualityEntry>%s</QualityEntry>\n", str); } for (i=0; i<p->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = (GF_AdobeFragmentRunEntry *)gf_list_get(p->fragment_run_entry_table, i); fprintf(trace, "<FragmentRunEntry FirstFragment=\"%u\" FirstFragmentTimestamp=\""LLU"\" FirstFragmentDuration=\"%u\"", fre->first_fragment, fre->first_fragment_timestamp, fre->fragment_duration); if (!fre->fragment_duration) fprintf(trace, " DiscontinuityIndicator=\"%u\"", fre->discontinuity_indicator); fprintf(trace, "/>\n"); } gf_isom_box_dump_done("AdobeFragmentRunTableBox", a, trace); return GF_OK; } GF_Err asrt_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeSegmentRunTableBox *p = (GF_AdobeSegmentRunTableBox*)a; gf_isom_box_dump_start(a, "AdobeSegmentRunTableBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->quality_entry_count; i++) { char *str = (char*)gf_list_get(p->quality_segment_url_modifiers, 
i); fprintf(trace, "<QualityEntry>%s</QualityEntry>\n", str); } for (i=0; i<p->segment_run_entry_count; i++) { GF_AdobeSegmentRunEntry *sre = (GF_AdobeSegmentRunEntry *)gf_list_get(p->segment_run_entry_table, i); fprintf(trace, "<SegmentRunEntry FirstSegment=\"%u\" FragmentsPerSegment=\"%u\"/>\n", sre->first_segment, sre->fragment_per_segment); } gf_isom_box_dump_done("AdobeSegmentRunTableBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_ADOBE*/ GF_Err ilst_dump(GF_Box *a, FILE * trace) { u32 i; GF_Box *tag; GF_Err e; GF_ItemListBox *ptr; ptr = (GF_ItemListBox *)a; gf_isom_box_dump_start(a, "ItemListBox", trace); fprintf(trace, ">\n"); i=0; while ( (tag = (GF_Box*)gf_list_enum(ptr->other_boxes, &i))) { e = ilst_item_dump(tag, trace); if(e) return e; } gf_isom_box_dump_done("ItemListBox", NULL, trace); return GF_OK; } GF_Err databox_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "data", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("data", a, trace); return GF_OK; } GF_Err ohdr_dump(GF_Box *a, FILE * trace) { GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox *)a; gf_isom_box_dump_start(a, "OMADRMCommonHeaderBox", trace); fprintf(trace, "EncryptionMethod=\"%d\" PaddingScheme=\"%d\" PlaintextLength=\""LLD"\" ", ptr->EncryptionMethod, ptr->PaddingScheme, ptr->PlaintextLength); if (ptr->RightsIssuerURL) fprintf(trace, "RightsIssuerURL=\"%s\" ", ptr->RightsIssuerURL); if (ptr->ContentID) fprintf(trace, "ContentID=\"%s\" ", ptr->ContentID); if (ptr->TextualHeaders) { u32 i, offset; char *start = ptr->TextualHeaders; fprintf(trace, "TextualHeaders=\""); i=offset=0; while (i<ptr->TextualHeadersLen) { if (start[i]==0) { fprintf(trace, "%s ", start+offset); offset=i+1; } i++; } fprintf(trace, "%s\" ", start+offset); } fprintf(trace, ">\n"); gf_isom_box_dump_done("OMADRMCommonHeaderBox", a, trace); return GF_OK; } GF_Err grpi_dump(GF_Box *a, FILE * trace) { GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)a; gf_isom_box_dump_start(a, 
"OMADRMGroupIDBox", trace); fprintf(trace, "GroupID=\"%s\" EncryptionMethod=\"%d\" GroupKey=\" ", ptr->GroupID, ptr->GKEncryptionMethod); if (ptr->GroupKey) dump_data(trace, ptr->GroupKey, ptr->GKLength); fprintf(trace, "\">\n"); gf_isom_box_dump_done("OMADRMGroupIDBox", a, trace); return GF_OK; } GF_Err mdri_dump(GF_Box *a, FILE * trace) { //GF_OMADRMMutableInformationBox *ptr = (GF_OMADRMMutableInformationBox*)a; gf_isom_box_dump_start(a, "OMADRMMutableInformationBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("OMADRMMutableInformationBox", a, trace); return GF_OK; } GF_Err odtt_dump(GF_Box *a, FILE * trace) { GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox *)a; gf_isom_box_dump_start(a, "OMADRMTransactionTrackingBox", trace); fprintf(trace, "TransactionID=\""); dump_data(trace, ptr->TransactionID, 16); fprintf(trace, "\">\n"); gf_isom_box_dump_done("OMADRMTransactionTrackingBox", a, trace); return GF_OK; } GF_Err odrb_dump(GF_Box *a, FILE * trace) { GF_OMADRMRightsObjectBox*ptr = (GF_OMADRMRightsObjectBox*)a; gf_isom_box_dump_start(a, "OMADRMRightsObjectBox", trace); fprintf(trace, "OMARightsObject=\""); dump_data(trace, ptr->oma_ro, ptr->oma_ro_size); fprintf(trace, "\">\n"); gf_isom_box_dump_done("OMADRMRightsObjectBox", a, trace); return GF_OK; } GF_Err odkm_dump(GF_Box *a, FILE * trace) { GF_OMADRMKMSBox *ptr = (GF_OMADRMKMSBox*)a; gf_isom_box_dump_start(a, "OMADRMKMSBox", trace); fprintf(trace, ">\n"); if (ptr->hdr) gf_isom_box_dump((GF_Box *)ptr->hdr, trace); if (ptr->fmt) gf_isom_box_dump((GF_Box *)ptr->fmt, trace); gf_isom_box_dump_done("OMADRMKMSBox", a, trace); return GF_OK; } GF_Err pasp_dump(GF_Box *a, FILE * trace) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)a; gf_isom_box_dump_start(a, "PixelAspectRatioBox", trace); fprintf(trace, "hSpacing=\"%d\" vSpacing=\"%d\" >\n", ptr->hSpacing, ptr->vSpacing); gf_isom_box_dump_done("PixelAspectRatioBox", a, trace); return GF_OK; } GF_Err clap_dump(GF_Box *a, 
FILE * trace) { GF_CleanAppertureBox *ptr = (GF_CleanAppertureBox*)a; gf_isom_box_dump_start(a, "CleanAppertureBox", trace); fprintf(trace, "cleanApertureWidthN=\"%d\" cleanApertureWidthD=\"%d\" ", ptr->cleanApertureWidthN, ptr->cleanApertureWidthD); fprintf(trace, "cleanApertureHeightN=\"%d\" cleanApertureHeightD=\"%d\" ", ptr->cleanApertureHeightN, ptr->cleanApertureHeightD); fprintf(trace, "horizOffN=\"%d\" horizOffD=\"%d\" ", ptr->horizOffN, ptr->horizOffD); fprintf(trace, "vertOffN=\"%d\" vertOffD=\"%d\"", ptr->vertOffN, ptr->vertOffD); fprintf(trace, ">\n"); gf_isom_box_dump_done("CleanAppertureBox", a, trace); return GF_OK; } GF_Err tsel_dump(GF_Box *a, FILE * trace) { u32 i; GF_TrackSelectionBox *ptr = (GF_TrackSelectionBox *)a; gf_isom_box_dump_start(a, "TrackSelectionBox", trace); fprintf(trace, "switchGroup=\"%d\" >\n", ptr->switchGroup); for (i=0; i<ptr->attributeListCount; i++) { fprintf(trace, "<TrackSelectionCriteria value=\"%s\"/>\n", gf_4cc_to_str(ptr->attributeList[i]) ); } if (!ptr->size) fprintf(trace, "<TrackSelectionCriteria value=\"\"/>\n"); gf_isom_box_dump_done("TrackSelectionBox", a, trace); return GF_OK; } GF_Err metx_dump(GF_Box *a, FILE * trace) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)a; const char *name; switch (ptr->type) { case GF_ISOM_BOX_TYPE_METX: name = "XMLMetaDataSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_METT: name = "TextMetaDataSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_SBTT: name = "SubtitleSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_STXT: name = "SimpleTextSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_STPP: name = "XMLSubtitleSampleEntryBox"; break; default: name = "UnknownTextSampleEntryBox"; break; } gf_isom_box_dump_start(a, name, trace); if (ptr->type==GF_ISOM_BOX_TYPE_METX) { fprintf(trace, "namespace=\"%s\" ", ptr->xml_namespace); if (ptr->xml_schema_loc) fprintf(trace, "schema_location=\"%s\" ", ptr->xml_schema_loc); if (ptr->content_encoding) fprintf(trace, "content_encoding=\"%s\" ", 
ptr->content_encoding); } else if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { fprintf(trace, "namespace=\"%s\" ", ptr->xml_namespace); if (ptr->xml_schema_loc) fprintf(trace, "schema_location=\"%s\" ", ptr->xml_schema_loc); if (ptr->mime_type) fprintf(trace, "auxiliary_mime_types=\"%s\" ", ptr->mime_type); } //mett, sbtt, stxt else { fprintf(trace, "mime_type=\"%s\" ", ptr->mime_type); if (ptr->content_encoding) fprintf(trace, "content_encoding=\"%s\" ", ptr->content_encoding); } fprintf(trace, ">\n"); if ((ptr->type!=GF_ISOM_BOX_TYPE_METX) && (ptr->type!=GF_ISOM_BOX_TYPE_STPP) ) { if (ptr->config) gf_isom_box_dump(ptr->config, trace); } gf_isom_box_array_dump(ptr->protections, trace); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err txtc_dump(GF_Box *a, FILE * trace) { GF_TextConfigBox *ptr = (GF_TextConfigBox*)a; const char *name = "TextConfigBox"; gf_isom_box_dump_start(a, name, trace); fprintf(trace, ">\n"); if (ptr->config) fprintf(trace, "<![CDATA[%s]]>", ptr->config); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err dims_dump(GF_Box *a, FILE * trace) { GF_DIMSSampleEntryBox *p = (GF_DIMSSampleEntryBox*)a; gf_isom_box_dump_start(a, "DIMSSampleEntryBox", trace); fprintf(trace, "dataReferenceIndex=\"%d\">\n", p->dataReferenceIndex); if (p->config) gf_isom_box_dump(p->config, trace); if (p->scripts) gf_isom_box_dump(p->scripts, trace); gf_isom_box_array_dump(p->protections, trace); gf_isom_box_dump_done("DIMSSampleEntryBox", a, trace); return GF_OK; } GF_Err diST_dump(GF_Box *a, FILE * trace) { GF_DIMSScriptTypesBox *p = (GF_DIMSScriptTypesBox*)a; gf_isom_box_dump_start(a, "DIMSScriptTypesBox", trace); fprintf(trace, "types=\"%s\">\n", p->content_script_types); gf_isom_box_dump_done("DIMSScriptTypesBox", a, trace); return GF_OK; } GF_Err dimC_dump(GF_Box *a, FILE * trace) { GF_DIMSSceneConfigBox *p = (GF_DIMSSceneConfigBox *)a; gf_isom_box_dump_start(a, "DIMSSceneConfigBox", trace); fprintf(trace, "profile=\"%d\" level=\"%d\" 
pathComponents=\"%d\" useFullRequestHosts=\"%d\" streamType=\"%d\" containsRedundant=\"%d\" textEncoding=\"%s\" contentEncoding=\"%s\" >\n", p->profile, p->level, p->pathComponents, p->fullRequestHost, p->streamType, p->containsRedundant, p->textEncoding, p->contentEncoding); gf_isom_box_dump_done("DIMSSceneConfigBox", a, trace); return GF_OK; } GF_Err dac3_dump(GF_Box *a, FILE * trace) { GF_AC3ConfigBox *p = (GF_AC3ConfigBox *)a; if (p->cfg.is_ec3) { u32 i; a->type = GF_ISOM_BOX_TYPE_DEC3; gf_isom_box_dump_start(a, "EC3SpecificBox", trace); a->type = GF_ISOM_BOX_TYPE_DAC3; fprintf(trace, "nb_streams=\"%d\" data_rate=\"%d\">\n", p->cfg.nb_streams, p->cfg.brcode); for (i=0; i<p->cfg.nb_streams; i++) { fprintf(trace, "<EC3StreamConfig fscod=\"%d\" bsid=\"%d\" bsmod=\"%d\" acmod=\"%d\" lfon=\"%d\" num_sub_dep=\"%d\" chan_loc=\"%d\"/>\n", p->cfg.streams[i].fscod, p->cfg.streams[i].bsid, p->cfg.streams[i].bsmod, p->cfg.streams[i].acmod, p->cfg.streams[i].lfon, p->cfg.streams[i].nb_dep_sub, p->cfg.streams[i].chan_loc); } gf_isom_box_dump_done("EC3SpecificBox", a, trace); } else { gf_isom_box_dump_start(a, "AC3SpecificBox", trace); fprintf(trace, "fscod=\"%d\" bsid=\"%d\" bsmod=\"%d\" acmod=\"%d\" lfon=\"%d\" bit_rate_code=\"%d\">\n", p->cfg.streams[0].fscod, p->cfg.streams[0].bsid, p->cfg.streams[0].bsmod, p->cfg.streams[0].acmod, p->cfg.streams[0].lfon, p->cfg.brcode); gf_isom_box_dump_done("AC3SpecificBox", a, trace); } return GF_OK; } GF_Err lsrc_dump(GF_Box *a, FILE * trace) { GF_LASERConfigurationBox *p = (GF_LASERConfigurationBox *)a; gf_isom_box_dump_start(a, "LASeRConfigurationBox", trace); dump_data_attribute(trace, "LASeRHeader", p->hdr, p->hdr_size); fprintf(trace, ">"); gf_isom_box_dump_done("LASeRConfigurationBox", a, trace); return GF_OK; } GF_Err lsr1_dump(GF_Box *a, FILE * trace) { GF_LASeRSampleEntryBox *p = (GF_LASeRSampleEntryBox*)a; gf_isom_box_dump_start(a, "LASeRSampleEntryBox", trace); fprintf(trace, "DataReferenceIndex=\"%d\">\n", 
p->dataReferenceIndex); if (p->lsr_config) gf_isom_box_dump(p->lsr_config, trace); if (p->descr) gf_isom_box_dump(p->descr, trace); gf_isom_box_dump_done("LASeRSampleEntryBox", a, trace); return GF_OK; } GF_Err sidx_dump(GF_Box *a, FILE * trace) { u32 i; GF_SegmentIndexBox *p = (GF_SegmentIndexBox *)a; gf_isom_box_dump_start(a, "SegmentIndexBox", trace); fprintf(trace, "reference_ID=\"%d\" timescale=\"%d\" earliest_presentation_time=\""LLD"\" first_offset=\""LLD"\" ", p->reference_ID, p->timescale, p->earliest_presentation_time, p->first_offset); fprintf(trace, ">\n"); for (i=0; i<p->nb_refs; i++) { fprintf(trace, "<Reference type=\"%d\" size=\"%d\" duration=\"%d\" startsWithSAP=\"%d\" SAP_type=\"%d\" SAPDeltaTime=\"%d\"/>\n", p->refs[i].reference_type, p->refs[i].reference_size, p->refs[i].subsegment_duration, p->refs[i].starts_with_SAP, p->refs[i].SAP_type, p->refs[i].SAP_delta_time); } if (!p->size) { fprintf(trace, "<Reference type=\"\" size=\"\" duration=\"\" startsWithSAP=\"\" SAP_type=\"\" SAPDeltaTime=\"\"/>\n"); } gf_isom_box_dump_done("SegmentIndexBox", a, trace); return GF_OK; } GF_Err ssix_dump(GF_Box *a, FILE * trace) { u32 i, j; GF_SubsegmentIndexBox *p = (GF_SubsegmentIndexBox *)a; gf_isom_box_dump_start(a, "SubsegmentIndexBox", trace); fprintf(trace, "subsegment_count=\"%d\" >\n", p->subsegment_count); for (i = 0; i < p->subsegment_count; i++) { fprintf(trace, "<Subsegment range_count=\"%d\">\n", p->subsegments[i].range_count); for (j = 0; j < p->subsegments[i].range_count; j++) { fprintf(trace, "<Range level=\"%d\" range_size=\"%d\"/>\n", p->subsegments[i].levels[j], p->subsegments[i].range_sizes[j]); } fprintf(trace, "</Subsegment>\n"); } if (!p->size) { fprintf(trace, "<Subsegment range_count=\"\">\n"); fprintf(trace, "<Range level=\"\" range_size=\"\"/>\n"); fprintf(trace, "</Subsegment>\n"); } gf_isom_box_dump_done("SubsegmentIndexBox", a, trace); return GF_OK; } GF_Err leva_dump(GF_Box *a, FILE * trace) { u32 i; GF_LevelAssignmentBox *p = 
(GF_LevelAssignmentBox *)a; gf_isom_box_dump_start(a, "LevelAssignmentBox", trace); fprintf(trace, "level_count=\"%d\" >\n", p->level_count); for (i = 0; i < p->level_count; i++) { fprintf(trace, "<Assignement track_id=\"%d\" padding_flag=\"%d\" assignement_type=\"%d\" grouping_type=\"%s\" grouping_type_parameter=\"%d\" sub_track_id=\"%d\" />\n", p->levels[i].track_id, p->levels[i].padding_flag, p->levels[i].type, gf_4cc_to_str(p->levels[i].grouping_type) , p->levels[i].grouping_type_parameter, p->levels[i].sub_track_id); } if (!p->size) { fprintf(trace, "<Assignement track_id=\"\" padding_flag=\"\" assignement_type=\"\" grouping_type=\"\" grouping_type_parameter=\"\" sub_track_id=\"\" />\n"); } gf_isom_box_dump_done("LevelAssignmentBox", a, trace); return GF_OK; } GF_Err strk_dump(GF_Box *a, FILE * trace) { GF_SubTrackBox *p = (GF_SubTrackBox *)a; gf_isom_box_dump_start(a, "SubTrackBox", trace); fprintf(trace, ">\n"); if (p->info) { gf_isom_box_dump(p->info, trace); } gf_isom_box_dump_done("SubTrackBox", a, trace); return GF_OK; } GF_Err stri_dump(GF_Box *a, FILE * trace) { u32 i; GF_SubTrackInformationBox *p = (GF_SubTrackInformationBox *)a; gf_isom_box_dump_start(a, "SubTrackInformationBox", trace); fprintf(trace, "switch_group=\"%d\" alternate_group=\"%d\" sub_track_id=\"%d\">\n", p->switch_group, p->alternate_group, p->sub_track_id); for (i = 0; i < p->attribute_count; i++) { fprintf(trace, "<SubTrackInformationAttribute value=\"%s\"/>\n", gf_4cc_to_str(p->attribute_list[i]) ); } if (!p->size) fprintf(trace, "<SubTrackInformationAttribute value=\"\"/>\n"); gf_isom_box_dump_done("SubTrackInformationBox", a, trace); return GF_OK; } GF_Err stsg_dump(GF_Box *a, FILE * trace) { u32 i; GF_SubTrackSampleGroupBox *p = (GF_SubTrackSampleGroupBox *)a; gf_isom_box_dump_start(a, "SubTrackSampleGroupBox", trace); if (p->grouping_type) fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(p->grouping_type) ); fprintf(trace, ">\n"); for (i = 0; i < p->nb_groups; i++) { 
fprintf(trace, "<SubTrackSampleGroupBoxEntry group_description_index=\"%d\"/>\n", p->group_description_index[i]); } if (!p->size) fprintf(trace, "<SubTrackSampleGroupBoxEntry group_description_index=\"\"/>\n"); gf_isom_box_dump_done("SubTrackSampleGroupBox", a, trace); return GF_OK; } GF_Err pcrb_dump(GF_Box *a, FILE * trace) { u32 i; GF_PcrInfoBox *p = (GF_PcrInfoBox *)a; gf_isom_box_dump_start(a, "MPEG2TSPCRInfoBox", trace); fprintf(trace, "subsegment_count=\"%d\">\n", p->subsegment_count); for (i=0; i<p->subsegment_count; i++) { fprintf(trace, "<PCRInfo PCR=\""LLU"\" />\n", p->pcr_values[i]); } if (!p->size) { fprintf(trace, "<PCRInfo PCR=\"\" />\n"); } gf_isom_box_dump_done("MPEG2TSPCRInfoBox", a, trace); return GF_OK; } GF_Err subs_dump(GF_Box *a, FILE * trace) { u32 entry_count, i, j; u16 subsample_count; GF_SubSampleInfoEntry *pSamp; GF_SubSampleEntry *pSubSamp; GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) a; if (!a) return GF_BAD_PARAM; entry_count = gf_list_count(ptr->Samples); gf_isom_box_dump_start(a, "SubSampleInformationBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", entry_count); for (i=0; i<entry_count; i++) { pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); fprintf(trace, "<SampleEntry SampleDelta=\"%d\" SubSampleCount=\"%d\">\n", pSamp->sample_delta, subsample_count); for (j=0; j<subsample_count; j++) { pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, j); fprintf(trace, "<SubSample Size=\"%u\" Priority=\"%u\" Discardable=\"%d\" Reserved=\"%08X\"/>\n", pSubSamp->subsample_size, pSubSamp->subsample_priority, pSubSamp->discardable, pSubSamp->reserved); } fprintf(trace, "</SampleEntry>\n"); } if (!ptr->size) { fprintf(trace, "<SampleEntry SampleDelta=\"\" SubSampleCount=\"\">\n"); fprintf(trace, "<SubSample Size=\"\" Priority=\"\" Discardable=\"\" Reserved=\"\"/>\n"); fprintf(trace, "</SampleEntry>\n"); } 
gf_isom_box_dump_done("SubSampleInformationBox", a, trace); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Err tfdt_dump(GF_Box *a, FILE * trace) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "TrackFragmentBaseMediaDecodeTimeBox", trace); fprintf(trace, "baseMediaDecodeTime=\""LLD"\">\n", ptr->baseMediaDecodeTime); gf_isom_box_dump_done("TrackFragmentBaseMediaDecodeTimeBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ GF_Err rvcc_dump(GF_Box *a, FILE * trace) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "RVCConfigurationBox", trace); fprintf(trace, "predefined=\"%d\"", ptr->predefined_rvc_config); if (! ptr->predefined_rvc_config) fprintf(trace, " rvc_meta_idx=\"%d\"", ptr->rvc_meta_idx); fprintf(trace, ">\n"); gf_isom_box_dump_done("RVCConfigurationBox", a, trace); return GF_OK; } GF_Err sbgp_dump(GF_Box *a, FILE * trace) { u32 i; GF_SampleGroupBox *ptr = (GF_SampleGroupBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleGroupBox", trace); if (ptr->grouping_type) fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(ptr->grouping_type) ); if (ptr->version==1) { if (isalnum(ptr->grouping_type_parameter&0xFF)) { fprintf(trace, " grouping_type_parameter=\"%s\"", gf_4cc_to_str(ptr->grouping_type_parameter) ); } else { fprintf(trace, " grouping_type_parameter=\"%d\"", ptr->grouping_type_parameter); } } fprintf(trace, ">\n"); for (i=0; i<ptr->entry_count; i++) { fprintf(trace, "<SampleGroupBoxEntry sample_count=\"%d\" group_description_index=\"%d\"/>\n", ptr->sample_entries[i].sample_count, ptr->sample_entries[i].group_description_index ); } if (!ptr->size) { fprintf(trace, "<SampleGroupBoxEntry sample_count=\"\" group_description_index=\"\"/>\n"); } gf_isom_box_dump_done("SampleGroupBox", a, trace); return GF_OK; } static void oinf_entry_dump(GF_OperatingPointsInformation 
*ptr, FILE * trace) { u32 i, count; if (!ptr) { fprintf(trace, "<OperatingPointsInformation scalability_mask=\"Multiview|Spatial scalability|Auxilary|unknown\" num_profile_tier_level=\"\" num_operating_points=\"\" dependency_layers=\"\">\n"); fprintf(trace, " <ProfileTierLevel general_profile_space=\"\" general_tier_flag=\"\" general_profile_idc=\"\" general_profile_compatibility_flags=\"\" general_constraint_indicator_flags=\"\" />\n"); fprintf(trace, "<OperatingPoint output_layer_set_idx=\"\" max_temporal_id=\"\" layer_count=\"\" minPicWidth=\"\" minPicHeight=\"\" maxPicWidth=\"\" maxPicHeight=\"\" maxChromaFormat=\"\" maxBitDepth=\"\" frame_rate_info_flag=\"\" bit_rate_info_flag=\"\" avgFrameRate=\"\" constantFrameRate=\"\" maxBitRate=\"\" avgBitRate=\"\"/>\n"); fprintf(trace, "<Layer dependent_layerID=\"\" num_layers_dependent_on=\"\" dependent_on_layerID=\"\" dimension_identifier=\"\"/>\n"); fprintf(trace, "</OperatingPointsInformation>\n"); return; } fprintf(trace, "<OperatingPointsInformation"); fprintf(trace, " scalability_mask=\"%u (", ptr->scalability_mask); switch (ptr->scalability_mask) { case 2: fprintf(trace, "Multiview"); break; case 4: fprintf(trace, "Spatial scalability"); break; case 8: fprintf(trace, "Auxilary"); break; default: fprintf(trace, "unknown"); } fprintf(trace, ")\" num_profile_tier_level=\"%u\"", gf_list_count(ptr->profile_tier_levels) ); fprintf(trace, " num_operating_points=\"%u\" dependency_layers=\"%u\"", gf_list_count(ptr->operating_points), gf_list_count(ptr->dependency_layers)); fprintf(trace, ">\n"); count=gf_list_count(ptr->profile_tier_levels); for (i = 0; i < count; i++) { LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, i); fprintf(trace, " <ProfileTierLevel general_profile_space=\"%u\" general_tier_flag=\"%u\" general_profile_idc=\"%u\" general_profile_compatibility_flags=\"%X\" general_constraint_indicator_flags=\""LLX"\" />\n", ptl->general_profile_space, 
ptl->general_tier_flag, ptl->general_profile_idc, ptl->general_profile_compatibility_flags, ptl->general_constraint_indicator_flags); } count=gf_list_count(ptr->operating_points); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i); fprintf(trace, "<OperatingPoint output_layer_set_idx=\"%u\"", op->output_layer_set_idx); fprintf(trace, " max_temporal_id=\"%u\" layer_count=\"%u\"", op->max_temporal_id, op->layer_count); fprintf(trace, " minPicWidth=\"%u\" minPicHeight=\"%u\"", op->minPicWidth, op->minPicHeight); fprintf(trace, " maxPicWidth=\"%u\" maxPicHeight=\"%u\"", op->maxPicWidth, op->maxPicHeight); fprintf(trace, " maxChromaFormat=\"%u\" maxBitDepth=\"%u\"", op->maxChromaFormat, op->maxBitDepth); fprintf(trace, " frame_rate_info_flag=\"%u\" bit_rate_info_flag=\"%u\"", op->frame_rate_info_flag, op->bit_rate_info_flag); if (op->frame_rate_info_flag) fprintf(trace, " avgFrameRate=\"%u\" constantFrameRate=\"%u\"", op->avgFrameRate, op->constantFrameRate); if (op->bit_rate_info_flag) fprintf(trace, " maxBitRate=\"%u\" avgBitRate=\"%u\"", op->maxBitRate, op->avgBitRate); fprintf(trace, "/>\n"); } count=gf_list_count(ptr->dependency_layers); for (i = 0; i < count; i++) { u32 j; LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i); fprintf(trace, "<Layer dependent_layerID=\"%u\" num_layers_dependent_on=\"%u\"", dep->dependent_layerID, dep->num_layers_dependent_on); if (dep->num_layers_dependent_on) { fprintf(trace, " dependent_on_layerID=\""); for (j = 0; j < dep->num_layers_dependent_on; j++) fprintf(trace, "%d ", dep->dependent_on_layerID[j]); fprintf(trace, "\""); } fprintf(trace, " dimension_identifier=\""); for (j = 0; j < 16; j++) if (ptr->scalability_mask & (1 << j)) fprintf(trace, "%d ", dep->dimension_identifier[j]); fprintf(trace, "\"/>\n"); } fprintf(trace, "</OperatingPointsInformation>\n"); return; } static void linf_dump(GF_LHVCLayerInformation *ptr, 
FILE * trace) { u32 i, count; if (!ptr) { fprintf(trace, "<LayerInformation num_layers=\"\">\n"); fprintf(trace, "<LayerInfoItem layer_id=\"\" min_temporalId=\"\" max_temporalId=\"\" sub_layer_presence_flags=\"\"/>\n"); fprintf(trace, "</LayerInformation>\n"); return; } count = gf_list_count(ptr->num_layers_in_track); fprintf(trace, "<LayerInformation num_layers=\"%d\">\n", count ); for (i = 0; i < count; i++) { LHVCLayerInfoItem *li = (LHVCLayerInfoItem *)gf_list_get(ptr->num_layers_in_track, i); fprintf(trace, "<LayerInfoItem layer_id=\"%d\" min_temporalId=\"%d\" max_temporalId=\"%d\" sub_layer_presence_flags=\"%d\"/>\n", li->layer_id, li->min_TemporalId, li->max_TemporalId, li->sub_layer_presence_flags); } fprintf(trace, "</LayerInformation>\n"); return; } static void trif_dump(FILE * trace, char *data, u32 data_size) { GF_BitStream *bs; u32 id, independent, filter_disabled; Bool full_picture, has_dep, tile_group; if (!data) { fprintf(trace, "<TileRegionGroupEntry ID=\"\" tileGroup=\"\" independent=\"\" full_picture=\"\" filter_disabled=\"\" x=\"\" y=\"\" w=\"\" h=\"\">\n"); fprintf(trace, "<TileRegionDependency tileID=\"\"/>\n"); fprintf(trace, "</TileRegionGroupEntry>\n"); return; } bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); id = gf_bs_read_u16(bs); tile_group = gf_bs_read_int(bs, 1); fprintf(trace, "<TileRegionGroupEntry ID=\"%d\" tileGroup=\"%d\" ", id, tile_group); if (tile_group) { independent = gf_bs_read_int(bs, 2); full_picture = (Bool)gf_bs_read_int(bs, 1); filter_disabled = gf_bs_read_int(bs, 1); has_dep = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 2); fprintf(trace, "independent=\"%d\" full_picture=\"%d\" filter_disabled=\"%d\" ", independent, full_picture, filter_disabled); if (!full_picture) { fprintf(trace, "x=\"%d\" y=\"%d\" ", gf_bs_read_u16(bs), gf_bs_read_u16(bs)); } fprintf(trace, "w=\"%d\" h=\"%d\" ", gf_bs_read_u16(bs), gf_bs_read_u16(bs)); if (!has_dep) { fprintf(trace, "/>\n"); } else { u32 count = gf_bs_read_u16(bs); 
fprintf(trace, ">\n"); while (count) { count--; fprintf(trace, "<TileRegionDependency tileID=\"%d\"/>\n", gf_bs_read_u16(bs) ); } fprintf(trace, "</TileRegionGroupEntry>\n"); } } gf_bs_del(bs); } static void nalm_dump(FILE * trace, char *data, u32 data_size) { GF_BitStream *bs; Bool rle, large_size; u32 entry_count; if (!data) { fprintf(trace, "<NALUMap rle=\"\" large_size=\"\">\n"); fprintf(trace, "<NALUMapEntry NALU_startNumber=\"\" groupID=\"\"/>\n"); fprintf(trace, "</NALUMap>\n"); return; } bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); gf_bs_read_int(bs, 6); large_size = gf_bs_read_int(bs, 1); rle = gf_bs_read_int(bs, 1); entry_count = gf_bs_read_int(bs, large_size ? 16 : 8); fprintf(trace, "<NALUMap rle=\"%d\" large_size=\"%d\">\n", rle, large_size); while (entry_count) { u32 ID; fprintf(trace, "<NALUMapEntry "); if (rle) { u32 start_num = gf_bs_read_int(bs, large_size ? 16 : 8); fprintf(trace, "NALU_startNumber=\"%d\" ", start_num); } ID = gf_bs_read_u16(bs); fprintf(trace, "groupID=\"%d\"/>\n", ID); entry_count--; } gf_bs_del(bs); fprintf(trace, "</NALUMap>\n"); return; } GF_Err sgpd_dump(GF_Box *a, FILE * trace) { u32 i; GF_SampleGroupDescriptionBox *ptr = (GF_SampleGroupDescriptionBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleGroupDescriptionBox", trace); if (ptr->grouping_type) fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(ptr->grouping_type) ); if (ptr->version==1) fprintf(trace, " default_length=\"%d\"", ptr->default_length); if ((ptr->version>=2) && ptr->default_description_index) fprintf(trace, " default_group_index=\"%d\"", ptr->default_description_index); fprintf(trace, ">\n"); for (i=0; i<gf_list_count(ptr->group_descriptions); i++) { void *entry = gf_list_get(ptr->group_descriptions, i); switch (ptr->grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: fprintf(trace, "<RollRecoveryEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance ); break; case GF_ISOM_SAMPLE_GROUP_PROL: 
fprintf(trace, "<AudioPreRollEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance ); break; case GF_ISOM_SAMPLE_GROUP_TELE: fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"%d\"/>\n", ((GF_TemporalLevelEntry*)entry)->level_independently_decodable); break; case GF_ISOM_SAMPLE_GROUP_RAP: fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"%s\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known ? "yes" : "no"); if (((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known) fprintf(trace, " num_leading_samples=\"%d\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples); fprintf(trace, "/>\n"); break; case GF_ISOM_SAMPLE_GROUP_SYNC: fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"%d\"/>\n", ((GF_SYNCEntry*)entry)->NALU_type); break; case GF_ISOM_SAMPLE_GROUP_SEIG: fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"%d\" IV_size=\"%d\" KID=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected, ((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size); dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->KID, 16); if ((((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size) { fprintf(trace, "\" constant_IV_size=\"%d\" constant_IV=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size); dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV, ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size); } fprintf(trace, "\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_OINF: oinf_entry_dump(entry, trace); break; case GF_ISOM_SAMPLE_GROUP_LINF: linf_dump(entry, trace); break; case GF_ISOM_SAMPLE_GROUP_TRIF: trif_dump(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); break; case GF_ISOM_SAMPLE_GROUP_NALM: nalm_dump(trace, (char *) 
((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); break; case GF_ISOM_SAMPLE_GROUP_SAP: fprintf(trace, "<SAPEntry dependent_flag=\"%d\" SAP_type=\"%d\" />\n", ((GF_SAPEntry*)entry)->dependent_flag, ((GF_SAPEntry*)entry)->SAP_type); break; default: fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"%d\" data=\"", ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); dump_data(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length); fprintf(trace, "\"/>\n"); } } if (!ptr->size) { switch (ptr->grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: fprintf(trace, "<RollRecoveryEntry roll_distance=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_PROL: fprintf(trace, "<AudioPreRollEntry roll_distance=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_TELE: fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_RAP: fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"yes|no\" num_leading_samples=\"\" />\n"); break; case GF_ISOM_SAMPLE_GROUP_SYNC: fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"\" />\n"); break; case GF_ISOM_SAMPLE_GROUP_SEIG: fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"\" IV_size=\"\" KID=\"\" constant_IV_size=\"\" constant_IV=\"\"/>\n"); break; case GF_ISOM_SAMPLE_GROUP_OINF: oinf_entry_dump(NULL, trace); break; case GF_ISOM_SAMPLE_GROUP_LINF: linf_dump(NULL, trace); break; case GF_ISOM_SAMPLE_GROUP_TRIF: trif_dump(trace, NULL, 0); break; case GF_ISOM_SAMPLE_GROUP_NALM: nalm_dump(trace, NULL, 0); break; case GF_ISOM_SAMPLE_GROUP_SAP: fprintf(trace, "<SAPEntry dependent_flag=\"\" SAP_type=\"\" />\n"); break; default: fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"\" data=\"\"/>\n"); } } gf_isom_box_dump_done("SampleGroupDescriptionBox", a, trace); return GF_OK; } GF_Err saiz_dump(GF_Box *a, FILE * trace) { u32 i; 
GF_SampleAuxiliaryInfoSizeBox *ptr = (GF_SampleAuxiliaryInfoSizeBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleAuxiliaryInfoSizeBox", trace); fprintf(trace, "default_sample_info_size=\"%d\" sample_count=\"%d\"", ptr->default_sample_info_size, ptr->sample_count); if (ptr->flags & 1) { if (isalnum(ptr->aux_info_type>>24)) { fprintf(trace, " aux_info_type=\"%s\" aux_info_type_parameter=\"%d\"", gf_4cc_to_str(ptr->aux_info_type), ptr->aux_info_type_parameter); } else { fprintf(trace, " aux_info_type=\"%d\" aux_info_type_parameter=\"%d\"", ptr->aux_info_type, ptr->aux_info_type_parameter); } } fprintf(trace, ">\n"); if (ptr->default_sample_info_size==0) { for (i=0; i<ptr->sample_count; i++) { fprintf(trace, "<SAISize size=\"%d\" />\n", ptr->sample_info_size[i]); } } if (!ptr->size) { fprintf(trace, "<SAISize size=\"\" />\n"); } gf_isom_box_dump_done("SampleAuxiliaryInfoSizeBox", a, trace); return GF_OK; } GF_Err saio_dump(GF_Box *a, FILE * trace) { u32 i; GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleAuxiliaryInfoOffsetBox", trace); fprintf(trace, "entry_count=\"%d\"", ptr->entry_count); if (ptr->flags & 1) { if (isalnum(ptr->aux_info_type>>24)) { fprintf(trace, " aux_info_type=\"%s\" aux_info_type_parameter=\"%d\"", gf_4cc_to_str(ptr->aux_info_type), ptr->aux_info_type_parameter); } else { fprintf(trace, " aux_info_type=\"%d\" aux_info_type_parameter=\"%d\"", ptr->aux_info_type, ptr->aux_info_type_parameter); } } fprintf(trace, ">\n"); if (ptr->version==0) { for (i=0; i<ptr->entry_count; i++) { fprintf(trace, "<SAIChunkOffset offset=\"%d\"/>\n", ptr->offsets[i]); } } else { for (i=0; i<ptr->entry_count; i++) { fprintf(trace, "<SAIChunkOffset offset=\""LLD"\"/>\n", ptr->offsets_large[i]); } } if (!ptr->size) { fprintf(trace, "<SAIChunkOffset offset=\"\"/>\n"); } gf_isom_box_dump_done("SampleAuxiliaryInfoOffsetBox", a, trace); return GF_OK; } GF_Err 
pssh_dump(GF_Box *a, FILE * trace) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ProtectionSystemHeaderBox", trace); fprintf(trace, "SystemID=\""); dump_data_hex(trace, (char *) ptr->SystemID, 16); fprintf(trace, "\">\n"); if (ptr->KID_count) { u32 i; for (i=0; i<ptr->KID_count; i++) { fprintf(trace, " <PSSHKey KID=\""); dump_data_hex(trace, (char *) ptr->KIDs[i], 16); fprintf(trace, "\"/>\n"); } } if (ptr->private_data_size) { fprintf(trace, " <PSSHData size=\"%d\" value=\"", ptr->private_data_size); dump_data_hex(trace, (char *) ptr->private_data, ptr->private_data_size); fprintf(trace, "\"/>\n"); } if (!ptr->size) { fprintf(trace, " <PSSHKey KID=\"\"/>\n"); fprintf(trace, " <PSSHData size=\"\" value=\"\"/>\n"); } gf_isom_box_dump_done("ProtectionSystemHeaderBox", a, trace); return GF_OK; } GF_Err tenc_dump(GF_Box *a, FILE * trace) { GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "TrackEncryptionBox", trace); fprintf(trace, "isEncrypted=\"%d\"", ptr->isProtected); if (ptr->Per_Sample_IV_Size) fprintf(trace, " IV_size=\"%d\" KID=\"", ptr->Per_Sample_IV_Size); else { fprintf(trace, " constant_IV_size=\"%d\" constant_IV=\"", ptr->constant_IV_size); dump_data_hex(trace, (char *) ptr->constant_IV, ptr->constant_IV_size); fprintf(trace, "\" KID=\""); } dump_data_hex(trace, (char *) ptr->KID, 16); if (ptr->version) fprintf(trace, "\" crypt_byte_block=\"%d\" skip_byte_block=\"%d", ptr->crypt_byte_block, ptr->skip_byte_block); fprintf(trace, "\">\n"); gf_isom_box_dump_done("TrackEncryptionBox", a, trace); return GF_OK; } GF_Err piff_pssh_dump(GF_Box *a, FILE * trace) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "PIFFProtectionSystemHeaderBox", trace); fprintf(trace, "SystemID=\""); dump_data_hex(trace, (char *) ptr->SystemID, 16); 
fprintf(trace, "\" PrivateData=\""); dump_data_hex(trace, (char *) ptr->private_data, ptr->private_data_size); fprintf(trace, "\">\n"); gf_isom_box_dump_done("PIFFProtectionSystemHeaderBox", a, trace); return GF_OK; } GF_Err piff_tenc_dump(GF_Box *a, FILE * trace) { GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "PIFFTrackEncryptionBox", trace); fprintf(trace, "AlgorithmID=\"%d\" IV_size=\"%d\" KID=\"", ptr->AlgorithmID, ptr->IV_size); dump_data_hex(trace,(char *) ptr->KID, 16); fprintf(trace, "\">\n"); gf_isom_box_dump_done("PIFFTrackEncryptionBox", a, trace); return GF_OK; } GF_Err piff_psec_dump(GF_Box *a, FILE * trace) { u32 i, j, sample_count; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "PIFFSampleEncryptionBox", trace); sample_count = gf_list_count(ptr->samp_aux_info); fprintf(trace, "sampleCount=\"%d\"", sample_count); if (ptr->flags & 1) { fprintf(trace, " AlgorithmID=\"%d\" IV_size=\"%d\" KID=\"", ptr->AlgorithmID, ptr->IV_size); dump_data(trace, (char *) ptr->KID, 16); fprintf(trace, "\""); } fprintf(trace, ">\n"); if (sample_count) { for (i=0; i<sample_count; i++) { GF_CENCSampleAuxInfo *cenc_sample = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (cenc_sample) { if (!strlen((char *)cenc_sample->IV)) continue; fprintf(trace, "<PIFFSampleEncryptionEntry IV_size=\"%u\" IV=\"", cenc_sample->IV_size); dump_data_hex(trace, (char *) cenc_sample->IV, cenc_sample->IV_size); if (ptr->flags & 0x2) { fprintf(trace, "\" SubsampleCount=\"%d\"", cenc_sample->subsample_count); fprintf(trace, ">\n"); for (j=0; j<cenc_sample->subsample_count; j++) { fprintf(trace, "<PIFFSubSampleEncryptionEntry NumClearBytes=\"%d\" NumEncryptedBytes=\"%d\"/>\n", cenc_sample->subsamples[j].bytes_clear_data, cenc_sample->subsamples[j].bytes_encrypted_data); } } fprintf(trace, "</PIFFSampleEncryptionEntry>\n"); } } } if (!ptr->size) 
{ fprintf(trace, "<PIFFSampleEncryptionEntry IV=\"\" SubsampleCount=\"\">\n"); fprintf(trace, "<PIFFSubSampleEncryptionEntry NumClearBytes=\"\" NumEncryptedBytes=\"\"/>\n"); fprintf(trace, "</PIFFSampleEncryptionEntry>\n"); } gf_isom_box_dump_done("PIFFSampleEncryptionBox", a, trace); return GF_OK; } GF_Err senc_dump(GF_Box *a, FILE * trace) { u32 i, j, sample_count; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleEncryptionBox", trace); sample_count = gf_list_count(ptr->samp_aux_info); fprintf(trace, "sampleCount=\"%d\">\n", sample_count); //WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS fprintf(trace, "<FullBoxInfo Version=\"%d\" Flags=\"0x%X\"/>\n", ptr->version, ptr->flags); for (i=0; i<sample_count; i++) { GF_CENCSampleAuxInfo *cenc_sample = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (cenc_sample) { fprintf(trace, "<SampleEncryptionEntry sampleNumber=\"%d\" IV_size=\"%u\" IV=\"", i+1, cenc_sample->IV_size); dump_data_hex(trace, (char *) cenc_sample->IV, cenc_sample->IV_size); fprintf(trace, "\""); if (ptr->flags & 0x2) { fprintf(trace, " SubsampleCount=\"%d\"", cenc_sample->subsample_count); fprintf(trace, ">\n"); for (j=0; j<cenc_sample->subsample_count; j++) { fprintf(trace, "<SubSampleEncryptionEntry NumClearBytes=\"%d\" NumEncryptedBytes=\"%d\"/>\n", cenc_sample->subsamples[j].bytes_clear_data, cenc_sample->subsamples[j].bytes_encrypted_data); } } else { fprintf(trace, ">\n"); } fprintf(trace, "</SampleEncryptionEntry>\n"); } } if (!ptr->size) { fprintf(trace, "<SampleEncryptionEntry sampleCount=\"\" IV=\"\" SubsampleCount=\"\">\n"); fprintf(trace, "<SubSampleEncryptionEntry NumClearBytes=\"\" NumEncryptedBytes=\"\"/>\n"); fprintf(trace, "</SampleEncryptionEntry>\n"); } gf_isom_box_dump_done("SampleEncryptionBox", a, trace); return GF_OK; } GF_Err prft_dump(GF_Box *a, FILE * trace) { Double fracs; 
GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) a; time_t secs; struct tm t; secs = (ptr->ntp >> 32) - GF_NTP_SEC_1900_TO_1970; if (secs < 0) { if (ptr->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("NTP time is not valid, using value 0\n")); } secs = 0; } t = *gmtime(&secs); fracs = (Double) (ptr->ntp & 0xFFFFFFFFULL); fracs /= 0xFFFFFFFF; fracs *= 1000; gf_isom_box_dump_start(a, "ProducerReferenceTimeBox", trace); fprintf(trace, "referenceTrackID=\"%d\" timestamp=\""LLU"\" NTP=\""LLU"\" UTC=\"%d-%02d-%02dT%02d:%02d:%02d.%03dZ\">\n", ptr->refTrackID, ptr->timestamp, ptr->ntp, 1900+t.tm_year, t.tm_mon+1, t.tm_mday, t.tm_hour, t.tm_min, (u32) t.tm_sec, (u32) fracs); gf_isom_box_dump_done("ProducerReferenceTimeBox", a, trace); return GF_OK; } GF_Err adkm_dump(GF_Box *a, FILE * trace) { GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeDRMKeyManagementSystemBox", trace); fprintf(trace, ">\n"); if (ptr->header) gf_isom_box_dump((GF_Box *)ptr->header, trace); if (ptr->au_format) gf_isom_box_dump((GF_Box *)ptr->au_format, trace); gf_isom_box_dump_done("AdobeDRMKeyManagementSystemBox", a, trace); return GF_OK; } GF_Err ahdr_dump(GF_Box *a, FILE * trace) { GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeDRMHeaderBox", trace); fprintf(trace, ">\n"); if (ptr->std_enc_params) gf_isom_box_dump((GF_Box *)ptr->std_enc_params, trace); gf_isom_box_dump_done("AdobeDRMHeaderBox", a, trace); return GF_OK; } GF_Err aprm_dump(GF_Box *a, FILE * trace) { GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeStdEncryptionParamsBox", trace); fprintf(trace, ">\n"); if (ptr->enc_info) gf_isom_box_dump((GF_Box *)ptr->enc_info, trace); if (ptr->key_info) gf_isom_box_dump((GF_Box *)ptr->key_info, trace); 
gf_isom_box_dump_done("AdobeStdEncryptionParamsBox", a, trace); return GF_OK; } GF_Err aeib_dump(GF_Box *a, FILE * trace) { GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeEncryptionInfoBox", trace); fprintf(trace, "EncryptionAlgorithm=\"%s\" KeyLength=\"%d\">\n", ptr->enc_algo, ptr->key_length); gf_isom_box_dump_done("AdobeEncryptionInfoBox", a, trace); return GF_OK; } GF_Err akey_dump(GF_Box *a, FILE * trace) { GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeKeyInfoBox", trace); fprintf(trace, ">\n"); if (ptr->params) gf_isom_box_dump((GF_Box *)ptr->params, trace); gf_isom_box_dump_done("AdobeKeyInfoBox", a, trace); return GF_OK; } GF_Err flxs_dump(GF_Box *a, FILE * trace) { GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeFlashAccessParamsBox", trace); fprintf(trace, ">\n"); if (ptr->metadata) fprintf(trace, "<FmrmsV2Metadata=\"%s\"/>\n", ptr->metadata); gf_isom_box_dump_done("AdobeFlashAccessParamsBox", a, trace); return GF_OK; } GF_Err adaf_dump(GF_Box *a, FILE * trace) { GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeDRMAUFormatBox ", trace); fprintf(trace, "SelectiveEncryption=\"%d\" IV_length=\"%d\">\n", ptr->selective_enc ? 
1 : 0, ptr->IV_length); gf_isom_box_dump_done("AdobeDRMAUFormatBox", a, trace); return GF_OK; } /* Image File Format dump */ GF_Err ispe_dump(GF_Box *a, FILE * trace) { GF_ImageSpatialExtentsPropertyBox *ptr = (GF_ImageSpatialExtentsPropertyBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ImageSpatialExtentsPropertyBox", trace); fprintf(trace, "image_width=\"%d\" image_height=\"%d\">\n", ptr->image_width, ptr->image_height); gf_isom_box_dump_done("ImageSpatialExtentsPropertyBox", a, trace); return GF_OK; } GF_Err colr_dump(GF_Box *a, FILE * trace) { GF_ColourInformationBox *ptr = (GF_ColourInformationBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ColourInformationBox", trace); fprintf(trace, "colour_type=\"%s\" colour_primaries=\"%d\" transfer_characteristics=\"%d\" matrix_coefficients=\"%d\" full_range_flag=\"%d\">\n", gf_4cc_to_str(ptr->colour_type), ptr->colour_primaries, ptr->transfer_characteristics, ptr->matrix_coefficients, ptr->full_range_flag); gf_isom_box_dump_done("ColourInformationBox", a, trace); return GF_OK; } GF_Err pixi_dump(GF_Box *a, FILE * trace) { u32 i; GF_PixelInformationPropertyBox *ptr = (GF_PixelInformationPropertyBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "PixelInformationPropertyBox", trace); fprintf(trace, ">\n"); for (i = 0; i < ptr->num_channels; i++) { fprintf(trace, "<BitPerChannel bits_per_channel=\"%d\"/>\n", ptr->bits_per_channel[i]); } if (!ptr->size) fprintf(trace, "<BitPerChannel bits_per_channel=\"\"/>\n"); gf_isom_box_dump_done("PixelInformationPropertyBox", a, trace); return GF_OK; } GF_Err rloc_dump(GF_Box *a, FILE * trace) { GF_RelativeLocationPropertyBox *ptr = (GF_RelativeLocationPropertyBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "RelativeLocationPropertyBox", trace); fprintf(trace, "horizontal_offset=\"%d\" vertical_offset=\"%d\">\n", ptr->horizontal_offset, ptr->vertical_offset); gf_isom_box_dump_done("RelativeLocationPropertyBox", a, trace); 
return GF_OK; } GF_Err irot_dump(GF_Box *a, FILE * trace) { GF_ImageRotationBox *ptr = (GF_ImageRotationBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ImageRotationBox", trace); fprintf(trace, "angle=\"%d\">\n", (ptr->angle*90)); gf_isom_box_dump_done("ImageRotationBox", a, trace); return GF_OK; } GF_Err ipco_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "ItemPropertyContainerBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("ItemPropertyContainerBox", a, trace); return GF_OK; } GF_Err iprp_dump(GF_Box *a, FILE * trace) { GF_ItemPropertiesBox *ptr = (GF_ItemPropertiesBox *)a; gf_isom_box_dump_start(a, "ItemPropertiesBox", trace); fprintf(trace, ">\n"); if (ptr->property_container) gf_isom_box_dump(ptr->property_container, trace); if (ptr->property_association) gf_isom_box_dump(ptr->property_association, trace); gf_isom_box_dump_done("ItemPropertiesBox", a, trace); return GF_OK; } GF_Err ipma_dump(GF_Box *a, FILE * trace) { u32 i, j; GF_ItemPropertyAssociationBox *ptr = (GF_ItemPropertyAssociationBox *)a; u32 entry_count = gf_list_count(ptr->entries); if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ItemPropertyAssociationBox", trace); fprintf(trace, "entry_count=\"%d\">\n", entry_count); for (i = 0; i < entry_count; i++) { GF_ItemPropertyAssociationEntry *entry = (GF_ItemPropertyAssociationEntry *)gf_list_get(ptr->entries, i); u32 association_count = gf_list_count(entry->essential); fprintf(trace, "<AssociationEntry item_ID=\"%d\" association_count=\"%d\">\n", entry->item_id, association_count); for (j = 0; j < association_count; j++) { Bool *ess = (Bool *)gf_list_get(entry->essential, j); u32 *prop_index = (u32 *)gf_list_get(entry->property_index, j); fprintf(trace, "<Property index=\"%d\" essential=\"%d\"/>\n", *prop_index, *ess); } fprintf(trace, "</AssociationEntry>\n"); } if (!ptr->size) { fprintf(trace, "<AssociationEntry item_ID=\"\" association_count=\"\">\n"); fprintf(trace, "<Property index=\"\" 
essential=\"\"/>\n"); fprintf(trace, "</AssociationEntry>\n"); } gf_isom_box_dump_done("ItemPropertyAssociationBox", a, trace); return GF_OK; } GF_Err auxc_dump(GF_Box *a, FILE * trace) { GF_AuxiliaryTypePropertyBox *ptr = (GF_AuxiliaryTypePropertyBox *)a; gf_isom_box_dump_start(a, "AuxiliaryTypePropertyBox", trace); fprintf(trace, "aux_type=\"%s\" ", ptr->aux_urn); dump_data_attribute(trace, "aux_subtype", ptr->data, ptr->data_size); fprintf(trace, ">\n"); gf_isom_box_dump_done("AuxiliaryTypePropertyBox", a, trace); return GF_OK; } GF_Err oinf_dump(GF_Box *a, FILE * trace) { GF_OINFPropertyBox *ptr = (GF_OINFPropertyBox *)a; gf_isom_box_dump_start(a, "OperatingPointsInformationPropertyBox", trace); fprintf(trace, ">\n"); oinf_entry_dump(ptr->oinf, trace); gf_isom_box_dump_done("OperatingPointsInformationPropertyBox", a, trace); return GF_OK; } GF_Err tols_dump(GF_Box *a, FILE * trace) { GF_TargetOLSPropertyBox *ptr = (GF_TargetOLSPropertyBox *)a; gf_isom_box_dump_start(a, "TargetOLSPropertyBox", trace); fprintf(trace, "target_ols_index=\"%d\">\n", ptr->target_ols_index); gf_isom_box_dump_done("TargetOLSPropertyBox", a, trace); return GF_OK; } GF_Err trgr_dump(GF_Box *a, FILE * trace) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *) a; gf_isom_box_dump_start(a, "TrackGroupBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(ptr->groups, trace); gf_isom_box_dump_done("TrackGroupBox", a, trace); return GF_OK; } GF_Err trgt_dump(GF_Box *a, FILE * trace) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *) a; a->type = ptr->group_type; gf_isom_box_dump_start(a, "TrackGroupTypeBox", trace); a->type = GF_ISOM_BOX_TYPE_TRGT; fprintf(trace, "track_group_id=\"%d\">\n", ptr->track_group_id); gf_isom_box_dump_done("TrackGroupTypeBox", a, trace); return GF_OK; } GF_Err grpl_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "GroupListBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("GroupListBox", a, trace); return GF_OK; } GF_Err grptype_dump(GF_Box 
*a, FILE * trace) { u32 i; GF_EntityToGroupTypeBox *ptr = (GF_EntityToGroupTypeBox *) a; a->type = ptr->grouping_type; gf_isom_box_dump_start(a, "EntityToGroupTypeBox", trace); a->type = GF_ISOM_BOX_TYPE_GRPT; fprintf(trace, "group_id=\"%d\">\n", ptr->group_id); for (i=0; i<ptr->entity_id_count ; i++) fprintf(trace, "<EntityToGroupTypeBoxEntry EntityID=\"%d\"/>\n", ptr->entity_ids[i]); if (!ptr->size) fprintf(trace, "<EntityToGroupTypeBoxEntry EntityID=\"\"/>\n"); gf_isom_box_dump_done("EntityToGroupTypeBox", a, trace); return GF_OK; } GF_Err stvi_dump(GF_Box *a, FILE * trace) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *) a; gf_isom_box_dump_start(a, "StereoVideoBox", trace); fprintf(trace, "single_view_allowed=\"%d\" stereo_scheme=\"%d\" ", ptr->single_view_allowed, ptr->stereo_scheme); dump_data_attribute(trace, "stereo_indication_type", ptr->stereo_indication_type, ptr->sit_len); fprintf(trace, ">\n"); gf_isom_box_dump_done("StereoVideoBox", a, trace); return GF_OK; } GF_Err def_cont_box_dump(GF_Box *a, FILE *trace) { char *name = "SubTrackDefinitionBox"; //only one using generic box container for now gf_isom_box_dump_start(a, name, trace); fprintf(trace, ">\n"); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err fiin_dump(GF_Box *a, FILE * trace) { FDItemInformationBox *ptr = (FDItemInformationBox *) a; gf_isom_box_dump_start(a, "FDItemInformationBox", trace); fprintf(trace, ">\n"); if (ptr->partition_entries) gf_isom_box_array_dump(ptr->partition_entries, trace); if (ptr->session_info) gf_isom_box_dump(ptr->session_info, trace); if (ptr->group_id_to_name) gf_isom_box_dump(ptr->group_id_to_name, trace); gf_isom_box_dump_done("FDItemInformationBox", a, trace); return GF_OK; } GF_Err fecr_dump(GF_Box *a, FILE * trace) { u32 i; char *box_name; FECReservoirBox *ptr = (FECReservoirBox *) a; if (a->type==GF_ISOM_BOX_TYPE_FIRE) { box_name = "FILEReservoirBox"; } else { box_name = "FECReservoirBox"; } gf_isom_box_dump_start(a, box_name, trace); 
fprintf(trace, ">\n"); for (i=0; i<ptr->nb_entries; i++) { fprintf(trace, "<%sEntry itemID=\"%d\" symbol_count=\"%d\"/>\n", box_name, ptr->entries[i].item_id, ptr->entries[i].symbol_count); } if (!ptr->size) { fprintf(trace, "<%sEntry itemID=\"\" symbol_count=\"\"/>\n", box_name); } gf_isom_box_dump_done(box_name, a, trace); return GF_OK; } GF_Err gitn_dump(GF_Box *a, FILE * trace) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *) a; gf_isom_box_dump_start(a, "GroupIdToNameBox", trace); fprintf(trace, ">\n"); for (i=0; i<ptr->nb_entries; i++) { fprintf(trace, "<GroupIdToNameBoxEntry groupID=\"%d\" name=\"%s\"/>\n", ptr->entries[i].group_id, ptr->entries[i].name); } if (!ptr->size) { fprintf(trace, "<GroupIdToNameBoxEntryEntry groupID=\"\" name=\"\"/>\n"); } gf_isom_box_dump_done("GroupIdToNameBox", a, trace); return GF_OK; } GF_Err paen_dump(GF_Box *a, FILE * trace) { FDPartitionEntryBox *ptr = (FDPartitionEntryBox *) a; gf_isom_box_dump_start(a, "FDPartitionEntryBox", trace); fprintf(trace, ">\n"); if (ptr->blocks_and_symbols) gf_isom_box_dump(ptr->blocks_and_symbols, trace); if (ptr->FEC_symbol_locations) gf_isom_box_dump(ptr->FEC_symbol_locations, trace); if (ptr->FEC_symbol_locations) gf_isom_box_dump(ptr->FEC_symbol_locations, trace); gf_isom_box_dump_done("FDPartitionEntryBox", a, trace); return GF_OK; } GF_Err fpar_dump(GF_Box *a, FILE * trace) { u32 i; FilePartitionBox *ptr = (FilePartitionBox *) a; gf_isom_box_dump_start(a, "FilePartitionBox", trace); fprintf(trace, "itemID=\"%d\" FEC_encoding_ID=\"%d\" FEC_instance_ID=\"%d\" max_source_block_length=\"%d\" encoding_symbol_length=\"%d\" max_number_of_encoding_symbols=\"%d\" ", ptr->itemID, ptr->FEC_encoding_ID, ptr->FEC_instance_ID, ptr->max_source_block_length, ptr->encoding_symbol_length, ptr->max_number_of_encoding_symbols); if (ptr->scheme_specific_info) dump_data_attribute(trace, "scheme_specific_info", (char*)ptr->scheme_specific_info, (u32)strlen(ptr->scheme_specific_info) ); fprintf(trace, 
">\n"); for (i=0; i<ptr->nb_entries; i++) { fprintf(trace, "<FilePartitionBoxEntry block_count=\"%d\" block_size=\"%d\"/>\n", ptr->entries[i].block_count, ptr->entries[i].block_size); } if (!ptr->size) { fprintf(trace, "<FilePartitionBoxEntry block_count=\"\" block_size=\"\"/>\n"); } gf_isom_box_dump_done("FilePartitionBox", a, trace); return GF_OK; } GF_Err segr_dump(GF_Box *a, FILE * trace) { u32 i, k; FDSessionGroupBox *ptr = (FDSessionGroupBox *) a; gf_isom_box_dump_start(a, "FDSessionGroupBox", trace); fprintf(trace, ">\n"); for (i=0; i<ptr->num_session_groups; i++) { fprintf(trace, "<FDSessionGroupBoxEntry groupIDs=\""); for (k=0; k<ptr->session_groups[i].nb_groups; k++) { fprintf(trace, "%d ", ptr->session_groups[i].group_ids[k]); } fprintf(trace, "\" channels=\""); for (k=0; k<ptr->session_groups[i].nb_channels; k++) { fprintf(trace, "%d ", ptr->session_groups[i].channels[k]); } fprintf(trace, "\"/>\n"); } if (!ptr->size) { fprintf(trace, "<FDSessionGroupBoxEntry groupIDs=\"\" channels=\"\"/>\n"); } gf_isom_box_dump_done("FDSessionGroupBox", a, trace); return GF_OK; } GF_Err srpp_dump(GF_Box *a, FILE * trace) { GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *) a; gf_isom_box_dump_start(a, "SRTPProcessBox", trace); fprintf(trace, "encryption_algorithm_rtp=\"%d\" encryption_algorithm_rtcp=\"%d\" integrity_algorithm_rtp=\"%d\" integrity_algorithm_rtcp=\"%d\">\n", ptr->encryption_algorithm_rtp, ptr->encryption_algorithm_rtcp, ptr->integrity_algorithm_rtp, ptr->integrity_algorithm_rtcp); if (ptr->info) gf_isom_box_dump(ptr->info, trace); if (ptr->scheme_type) gf_isom_box_dump(ptr->scheme_type, trace); gf_isom_box_dump_done("SRTPProcessBox", a, trace); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_HINTING GF_Err fdpa_dump(GF_Box *a, FILE * trace) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "FDpacketBox", trace); fprintf(trace, "sender_current_time_present=\"%d\" expected_residual_time_present=\"%d\" 
session_close_bit=\"%d\" object_close_bit=\"%d\" transport_object_identifier=\"%d\">\n", ptr->info.sender_current_time_present, ptr->info.expected_residual_time_present, ptr->info.session_close_bit, ptr->info.object_close_bit, ptr->info.transport_object_identifier); for (i=0; i<ptr->header_ext_count; i++) { fprintf(trace, "<FDHeaderExt type=\"%d\"", ptr->headers[i].header_extension_type); if (ptr->headers[i].header_extension_type > 127) { dump_data_attribute(trace, "content", (char *) ptr->headers[i].content, 3); } else if (ptr->headers[i].data_length) { dump_data_attribute(trace, "data", ptr->headers[i].data, ptr->headers[i].data_length); } fprintf(trace, "/>\n"); } if (!ptr->size) { fprintf(trace, "<FDHeaderExt type=\"\" content=\"\" data=\"\"/>\n"); } gf_isom_box_dump_done("FDpacketBox", a, trace); return GF_OK; } GF_Err extr_dump(GF_Box *a, FILE * trace) { GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ExtraDataBox", trace); dump_data_attribute(trace, "data", ptr->data, ptr->data_length); fprintf(trace, ">\n"); if (ptr->feci) { gf_isom_box_dump((GF_Box *)ptr->feci, trace); } gf_isom_box_dump_done("ExtraDataBox", a, trace); return GF_OK; } GF_Err fdsa_dump(GF_Box *a, FILE * trace) { GF_Err e; GF_HintSample *ptr = (GF_HintSample *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "FDSampleBox", trace); fprintf(trace, ">\n"); e = gf_isom_box_array_dump(ptr->packetTable, trace); if (e) return e; if (ptr->extra_data) { e = gf_isom_box_dump((GF_Box *)ptr->extra_data, trace); if (e) return e; } gf_isom_box_dump_done("FDSampleBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_HINTING*/ GF_Err trik_dump(GF_Box *a, FILE * trace) { u32 i; GF_TrickPlayBox *p = (GF_TrickPlayBox *) a; gf_isom_box_dump_start(a, "TrickPlayBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->entry_count; i++) { fprintf(trace, "<TrickPlayBoxEntry pic_type=\"%d\" dependency_level=\"%d\"/>\n", p->entries[i].pic_type, 
p->entries[i].dependency_level); } if (!p->size) fprintf(trace, "<TrickPlayBoxEntry pic_type=\"\" dependency_level=\"\"/>\n"); gf_isom_box_dump_done("TrickPlayBox", a, trace); return GF_OK; } GF_Err bloc_dump(GF_Box *a, FILE * trace) { GF_BaseLocationBox *p = (GF_BaseLocationBox *) a; gf_isom_box_dump_start(a, "BaseLocationBox", trace); fprintf(trace, "baseLocation=\"%s\" basePurlLocation=\"%s\">\n", p->baseLocation, p->basePurlLocation); gf_isom_box_dump_done("BaseLocationBox", a, trace); return GF_OK; } GF_Err ainf_dump(GF_Box *a, FILE * trace) { GF_AssetInformationBox *p = (GF_AssetInformationBox *) a; gf_isom_box_dump_start(a, "AssetInformationBox", trace); fprintf(trace, "profile_version=\"%d\" APID=\"%s\">\n", p->profile_version, p->APID); gf_isom_box_dump_done("AssetInformationBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_DUMP*/
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2012 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #include <gpac/utf.h> #include <gpac/network.h> #include <gpac/color.h> #include <gpac/avparse.h> #include <time.h> #ifndef GPAC_DISABLE_ISOM_DUMP static void dump_data(FILE *trace, char *data, u32 dataLength) { u32 i; fprintf(trace, "data:application/octet-string,"); for (i=0; i<dataLength; i++) { fprintf(trace, "%02X", (unsigned char) data[i]); } } static void dump_data_hex(FILE *trace, char *data, u32 dataLength) { u32 i; fprintf(trace, "0x"); for (i=0; i<dataLength; i++) { fprintf(trace, "%02X", (unsigned char) data[i]); } } static void dump_data_attribute(FILE *trace, char *name, char *data, u32 data_size) { u32 i; if (!data || !data_size) { fprintf(trace, "%s=\"\"", name); return; } fprintf(trace, "%s=\"0x", name); for (i=0; i<data_size; i++) fprintf(trace, "%02X", (unsigned char) data[i]); fprintf(trace, "\" "); } static void dump_data_string(FILE *trace, char *data, u32 dataLength) { u32 i; for (i=0; i<dataLength; i++) { switch ((unsigned char) data[i]) { case '\'': fprintf(trace, "&apos;"); break; case '\"': fprintf(trace, "&quot;"); 
break; case '&': fprintf(trace, "&amp;"); break; case '>': fprintf(trace, "&gt;"); break; case '<': fprintf(trace, "&lt;"); break; default: fprintf(trace, "%c", (u8) data[i]); break; } } } GF_Err gf_isom_box_dump(void *ptr, FILE * trace) { return gf_isom_box_dump_ex(ptr, trace, 0); } GF_Err gf_isom_box_array_dump(GF_List *list, FILE * trace) { u32 i; GF_Box *a; if (!list) return GF_OK; i=0; while ((a = (GF_Box *)gf_list_enum(list, &i))) { gf_isom_box_dump(a, trace); } return GF_OK; } extern Bool use_dump_mode; GF_EXPORT GF_Err gf_isom_dump(GF_ISOFile *mov, FILE * trace) { u32 i; GF_Box *box; if (!mov || !trace) return GF_BAD_PARAM; use_dump_mode = mov->dump_mode_alloc; fprintf(trace, "<!--MP4Box dump trace-->\n"); fprintf(trace, "<IsoMediaFile xmlns=\"urn:mpeg:isobmff:schema:file:2016\" Name=\"%s\">\n", mov->fileName); i=0; while ((box = (GF_Box *)gf_list_enum(mov->TopBoxes, &i))) { if (box->type==GF_ISOM_BOX_TYPE_UNKNOWN) { fprintf(trace, "<!--WARNING: Unknown Top-level Box Found -->\n"); } else if (box->type==GF_ISOM_BOX_TYPE_UUID) { } else if (!gf_isom_box_is_file_level(box)) { fprintf(trace, "<!--ERROR: Invalid Top-level Box Found (\"%s\")-->\n", gf_4cc_to_str(box->type)); } gf_isom_box_dump(box, trace); } fprintf(trace, "</IsoMediaFile>\n"); return GF_OK; } GF_Err reftype_dump(GF_Box *a, FILE * trace) { u32 i; GF_TrackReferenceTypeBox *p = (GF_TrackReferenceTypeBox *)a; if (!p->reference_type) return GF_OK; p->type = p->reference_type; gf_isom_box_dump_start(a, "TrackReferenceTypeBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->trackIDCount; i++) { fprintf(trace, "<TrackReferenceEntry TrackID=\"%d\"/>\n", p->trackIDs[i]); } if (!p->size) fprintf(trace, "<TrackReferenceEntry TrackID=\"\"/>\n"); gf_isom_box_dump_done("TrackReferenceTypeBox", a, trace); p->type = GF_ISOM_BOX_TYPE_REFT; return GF_OK; } GF_Err ireftype_dump(GF_Box *a, FILE * trace) { u32 i; GF_ItemReferenceTypeBox *p = (GF_ItemReferenceTypeBox *)a; if (!p->reference_type) return GF_OK; p->type = 
p->reference_type; gf_isom_box_dump_start(a, "ItemReferenceBox", trace); fprintf(trace, "from_item_id=\"%d\">\n", p->from_item_id); for (i = 0; i < p->reference_count; i++) { fprintf(trace, "<ItemReferenceBoxEntry ItemID=\"%d\"/>\n", p->to_item_IDs[i]); } if (!p->size) fprintf(trace, "<ItemReferenceBoxEntry ItemID=\"\"/>\n"); gf_isom_box_dump_done("ItemReferenceBox", a, trace); p->type = GF_ISOM_BOX_TYPE_REFI; return GF_OK; } GF_Err free_dump(GF_Box *a, FILE * trace) { GF_FreeSpaceBox *p = (GF_FreeSpaceBox *)a; gf_isom_box_dump_start(a, (a->type==GF_ISOM_BOX_TYPE_FREE) ? "FreeSpaceBox" : "SkipBox", trace); fprintf(trace, "dataSize=\"%d\">\n", p->dataSize); gf_isom_box_dump_done( (a->type==GF_ISOM_BOX_TYPE_FREE) ? "FreeSpaceBox" : "SkipBox", a, trace); return GF_OK; } GF_Err mdat_dump(GF_Box *a, FILE * trace) { GF_MediaDataBox *p; const char *name = (a->type==GF_ISOM_BOX_TYPE_IDAT ? "ItemDataBox" : "MediaDataBox"); p = (GF_MediaDataBox *)a; gf_isom_box_dump_start(a, name, trace); fprintf(trace, "dataSize=\""LLD"\">\n", LLD_CAST p->dataSize); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err moov_dump(GF_Box *a, FILE * trace) { GF_MovieBox *p; p = (GF_MovieBox *)a; gf_isom_box_dump_start(a, "MovieBox", trace); fprintf(trace, ">\n"); if (p->iods) gf_isom_box_dump(p->iods, trace); if (p->meta) gf_isom_box_dump(p->meta, trace); //dump only if size if (p->size) gf_isom_box_dump_ex(p->mvhd, trace,GF_ISOM_BOX_TYPE_MVHD); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (p->mvex) gf_isom_box_dump(p->mvex, trace); #endif gf_isom_box_array_dump(p->trackList, trace); if (p->udta) gf_isom_box_dump(p->udta, trace); gf_isom_box_dump_done("MovieBox", a, trace); return GF_OK; } GF_Err mvhd_dump(GF_Box *a, FILE * trace) { GF_MovieHeaderBox *p; p = (GF_MovieHeaderBox *) a; gf_isom_box_dump_start(a, "MovieHeaderBox", trace); fprintf(trace, "CreationTime=\""LLD"\" ", LLD_CAST p->creationTime); fprintf(trace, "ModificationTime=\""LLD"\" ", LLD_CAST p->modificationTime); fprintf(trace, 
"TimeScale=\"%d\" ", p->timeScale); fprintf(trace, "Duration=\""LLD"\" ", LLD_CAST p->duration); fprintf(trace, "NextTrackID=\"%d\">\n", p->nextTrackID); gf_isom_box_dump_done("MovieHeaderBox", a, trace); return GF_OK; } GF_Err mdhd_dump(GF_Box *a, FILE * trace) { GF_MediaHeaderBox *p; p = (GF_MediaHeaderBox *)a; gf_isom_box_dump_start(a, "MediaHeaderBox", trace); fprintf(trace, "CreationTime=\""LLD"\" ", LLD_CAST p->creationTime); fprintf(trace, "ModificationTime=\""LLD"\" ", LLD_CAST p->modificationTime); fprintf(trace, "TimeScale=\"%d\" ", p->timeScale); fprintf(trace, "Duration=\""LLD"\" ", LLD_CAST p->duration); fprintf(trace, "LanguageCode=\"%c%c%c\">\n", p->packedLanguage[0], p->packedLanguage[1], p->packedLanguage[2]); gf_isom_box_dump_done("MediaHeaderBox", a, trace); return GF_OK; } GF_Err vmhd_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "VideoMediaHeaderBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("VideoMediaHeaderBox", a, trace); return GF_OK; } GF_Err smhd_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "SoundMediaHeaderBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("SoundMediaHeaderBox", a, trace); return GF_OK; } GF_Err hmhd_dump(GF_Box *a, FILE * trace) { GF_HintMediaHeaderBox *p; p = (GF_HintMediaHeaderBox *)a; gf_isom_box_dump_start(a, "HintMediaHeaderBox", trace); fprintf(trace, "MaximumPDUSize=\"%d\" ", p->maxPDUSize); fprintf(trace, "AveragePDUSize=\"%d\" ", p->avgPDUSize); fprintf(trace, "MaxBitRate=\"%d\" ", p->maxBitrate); fprintf(trace, "AverageBitRate=\"%d\">\n", p->avgBitrate); gf_isom_box_dump_done("HintMediaHeaderBox", a, trace); return GF_OK; } GF_Err nmhd_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "MPEGMediaHeaderBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("MPEGMediaHeaderBox", a, trace); return GF_OK; } GF_Err stbl_dump(GF_Box *a, FILE * trace) { GF_SampleTableBox *p; p = (GF_SampleTableBox *)a; gf_isom_box_dump_start(a, "SampleTableBox", trace); 
fprintf(trace, ">\n"); if (p->size) gf_isom_box_dump_ex(p->SampleDescription, trace, GF_ISOM_BOX_TYPE_STSD); if (p->size) gf_isom_box_dump_ex(p->TimeToSample, trace, GF_ISOM_BOX_TYPE_STTS); if (p->CompositionOffset) gf_isom_box_dump(p->CompositionOffset, trace); if (p->CompositionToDecode) gf_isom_box_dump(p->CompositionToDecode, trace); if (p->SyncSample) gf_isom_box_dump(p->SyncSample, trace); if (p->ShadowSync) gf_isom_box_dump(p->ShadowSync, trace); if (p->size) gf_isom_box_dump_ex(p->SampleToChunk, trace, GF_ISOM_BOX_TYPE_STSC); if (p->size) gf_isom_box_dump_ex(p->SampleSize, trace, GF_ISOM_BOX_TYPE_STSZ); if (p->size) gf_isom_box_dump_ex(p->ChunkOffset, trace, GF_ISOM_BOX_TYPE_STCO); if (p->DegradationPriority) gf_isom_box_dump(p->DegradationPriority, trace); if (p->SampleDep) gf_isom_box_dump(p->SampleDep, trace); if (p->PaddingBits) gf_isom_box_dump(p->PaddingBits, trace); if (p->Fragments) gf_isom_box_dump(p->Fragments, trace); if (p->sub_samples) gf_isom_box_array_dump(p->sub_samples, trace); if (p->sampleGroupsDescription) gf_isom_box_array_dump(p->sampleGroupsDescription, trace); if (p->sampleGroups) gf_isom_box_array_dump(p->sampleGroups, trace); if (p->sai_sizes) { u32 i; for (i = 0; i < gf_list_count(p->sai_sizes); i++) { GF_SampleAuxiliaryInfoSizeBox *saiz = (GF_SampleAuxiliaryInfoSizeBox *)gf_list_get(p->sai_sizes, i); gf_isom_box_dump(saiz, trace); } } if (p->sai_offsets) { u32 i; for (i = 0; i < gf_list_count(p->sai_offsets); i++) { GF_SampleAuxiliaryInfoOffsetBox *saio = (GF_SampleAuxiliaryInfoOffsetBox *)gf_list_get(p->sai_offsets, i); gf_isom_box_dump(saio, trace); } } gf_isom_box_dump_done("SampleTableBox", a, trace); return GF_OK; } GF_Err dinf_dump(GF_Box *a, FILE * trace) { GF_DataInformationBox *p; p = (GF_DataInformationBox *)a; gf_isom_box_dump_start(a, "DataInformationBox", trace); fprintf(trace, ">\n"); if (p->size) gf_isom_box_dump_ex(p->dref, trace, GF_ISOM_BOX_TYPE_DREF); gf_isom_box_dump_done("DataInformationBox", a, trace); 
return GF_OK; } GF_Err url_dump(GF_Box *a, FILE * trace) { GF_DataEntryURLBox *p; p = (GF_DataEntryURLBox *)a; gf_isom_box_dump_start(a, "URLDataEntryBox", trace); if (p->location) { fprintf(trace, " URL=\"%s\">\n", p->location); } else { fprintf(trace, ">\n"); if (p->size) { if (! (p->flags & 1) ) { fprintf(trace, "<!--ERROR: No location indicated-->\n"); } else { fprintf(trace, "<!--Data is contained in the movie file-->\n"); } } } gf_isom_box_dump_done("URLDataEntryBox", a, trace); return GF_OK; } GF_Err urn_dump(GF_Box *a, FILE * trace) { GF_DataEntryURNBox *p; p = (GF_DataEntryURNBox *)a; gf_isom_box_dump_start(a, "URNDataEntryBox", trace); if (p->nameURN) fprintf(trace, " URN=\"%s\"", p->nameURN); if (p->location) fprintf(trace, " URL=\"%s\"", p->location); fprintf(trace, ">\n"); gf_isom_box_dump_done("URNDataEntryBox", a, trace); return GF_OK; } GF_Err cprt_dump(GF_Box *a, FILE * trace) { GF_CopyrightBox *p; p = (GF_CopyrightBox *)a; gf_isom_box_dump_start(a, "CopyrightBox", trace); fprintf(trace, "LanguageCode=\"%s\" CopyrightNotice=\"%s\">\n", p->packedLanguageCode, p->notice); gf_isom_box_dump_done("CopyrightBox", a, trace); return GF_OK; } GF_Err kind_dump(GF_Box *a, FILE * trace) { GF_KindBox *p; p = (GF_KindBox *)a; gf_isom_box_dump_start(a, "KindBox", trace); fprintf(trace, "schemeURI=\"%s\" value=\"%s\">\n", p->schemeURI, (p->value ? 
p->value : "")); gf_isom_box_dump_done("KindBox", a, trace); return GF_OK; } static char *format_duration(u64 dur, u32 timescale, char *szDur) { u32 h, m, s, ms; dur = (u32) (( ((Double) (s64) dur)/timescale)*1000); h = (u32) (dur / 3600000); dur -= h*3600000; m = (u32) (dur / 60000); dur -= m*60000; s = (u32) (dur/1000); dur -= s*1000; ms = (u32) (dur); sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms); return szDur; } static void dump_escape_string(FILE * trace, char *name) { u32 i, len = (u32) strlen(name); for (i=0; i<len; i++) { if (name[i]=='"') fprintf(trace, "&quot;"); else fputc(name[i], trace); } } GF_Err chpl_dump(GF_Box *a, FILE * trace) { u32 i, count; char szDur[20]; GF_ChapterListBox *p = (GF_ChapterListBox *)a; gf_isom_box_dump_start(a, "ChapterListBox", trace); fprintf(trace, ">\n"); if (p->size) { count = gf_list_count(p->list); for (i=0; i<count; i++) { GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(p->list, i); fprintf(trace, "<Chapter name=\""); dump_escape_string(trace, ce->name); fprintf(trace, "\" startTime=\"%s\" />\n", format_duration(ce->start_time, 1000*10000, szDur)); } } else { fprintf(trace, "<Chapter name=\"\" startTime=\"\"/>\n"); } gf_isom_box_dump_done("ChapterListBox", a, trace); return GF_OK; } GF_Err pdin_dump(GF_Box *a, FILE * trace) { u32 i; GF_ProgressiveDownloadBox *p = (GF_ProgressiveDownloadBox *)a; gf_isom_box_dump_start(a, "ProgressiveDownloadBox", trace); fprintf(trace, ">\n"); if (p->size) { for (i=0; i<p->count; i++) { fprintf(trace, "<DownloadInfo rate=\"%d\" estimatedTime=\"%d\" />\n", p->rates[i], p->times[i]); } } else { fprintf(trace, "<DownloadInfo rate=\"\" estimatedTime=\"\" />\n"); } gf_isom_box_dump_done("ProgressiveDownloadBox", a, trace); return GF_OK; } GF_Err hdlr_dump(GF_Box *a, FILE * trace) { GF_HandlerBox *p = (GF_HandlerBox *)a; gf_isom_box_dump_start(a, "HandlerBox", trace); if (p->nameUTF8 && (u32) p->nameUTF8[0] == strlen(p->nameUTF8)-1) { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", 
gf_4cc_to_str(p->handlerType), p->nameUTF8+1); } else { fprintf(trace, "hdlrType=\"%s\" Name=\"%s\" ", gf_4cc_to_str(p->handlerType), p->nameUTF8); } fprintf(trace, "reserved1=\"%d\" reserved2=\"", p->reserved1); dump_data(trace, (char *) p->reserved2, 12); fprintf(trace, "\""); fprintf(trace, ">\n"); gf_isom_box_dump_done("HandlerBox", a, trace); return GF_OK; } GF_Err iods_dump(GF_Box *a, FILE * trace) { GF_ObjectDescriptorBox *p; p = (GF_ObjectDescriptorBox *)a; gf_isom_box_dump_start(a, "ObjectDescriptorBox", trace); fprintf(trace, ">\n"); if (p->descriptor) { #ifndef GPAC_DISABLE_OD_DUMP gf_odf_dump_desc(p->descriptor, trace, 1, GF_TRUE); #else fprintf(trace, "<!-- Object Descriptor Dumping disabled in this build of GPAC -->\n"); #endif } else if (p->size) { fprintf(trace, "<!--WARNING: Object Descriptor not present-->\n"); } gf_isom_box_dump_done("ObjectDescriptorBox", a, trace); return GF_OK; } GF_Err trak_dump(GF_Box *a, FILE * trace) { GF_TrackBox *p; p = (GF_TrackBox *)a; gf_isom_box_dump_start(a, "TrackBox", trace); fprintf(trace, ">\n"); if (p->Header) { gf_isom_box_dump(p->Header, trace); } else if (p->size) { fprintf(trace, "<!--INVALID FILE: Missing Track Header-->\n"); } if (p->References) gf_isom_box_dump(p->References, trace); if (p->meta) gf_isom_box_dump(p->meta, trace); if (p->editBox) gf_isom_box_dump(p->editBox, trace); if (p->Media) gf_isom_box_dump(p->Media, trace); if (p->groups) gf_isom_box_dump(p->groups, trace); if (p->udta) gf_isom_box_dump(p->udta, trace); gf_isom_box_dump_done("TrackBox", a, trace); return GF_OK; } GF_Err mp4s_dump(GF_Box *a, FILE * trace) { GF_MPEGSampleEntryBox *p; p = (GF_MPEGSampleEntryBox *)a; gf_isom_box_dump_start(a, "MPEGSystemsSampleDescriptionBox", trace); fprintf(trace, "DataReferenceIndex=\"%d\">\n", p->dataReferenceIndex); if (p->esd) { gf_isom_box_dump(p->esd, trace); } else if (p->size) { fprintf(trace, "<!--INVALID MP4 FILE: ESDBox not present in MPEG Sample Description or corrupted-->\n"); } if 
(a->type == GF_ISOM_BOX_TYPE_ENCS) { gf_isom_box_array_dump(p->protections, trace); } gf_isom_box_dump_done("MPEGSystemsSampleDescriptionBox", a, trace); return GF_OK; } GF_Err video_sample_entry_dump(GF_Box *a, FILE * trace) { GF_MPEGVisualSampleEntryBox *p = (GF_MPEGVisualSampleEntryBox *)a; const char *name; switch (p->type) { case GF_ISOM_SUBTYPE_AVC_H264: case GF_ISOM_SUBTYPE_AVC2_H264: case GF_ISOM_SUBTYPE_AVC3_H264: case GF_ISOM_SUBTYPE_AVC4_H264: name = "AVCSampleEntryBox"; break; case GF_ISOM_SUBTYPE_MVC_H264: name = "MVCSampleEntryBox"; break; case GF_ISOM_SUBTYPE_SVC_H264: name = "SVCSampleEntryBox"; break; case GF_ISOM_SUBTYPE_HVC1: case GF_ISOM_SUBTYPE_HEV1: case GF_ISOM_SUBTYPE_HVC2: case GF_ISOM_SUBTYPE_HEV2: name = "HEVCSampleEntryBox"; break; case GF_ISOM_SUBTYPE_LHV1: case GF_ISOM_SUBTYPE_LHE1: name = "LHEVCSampleEntryBox"; break; case GF_ISOM_SUBTYPE_3GP_H263: name = "H263SampleDescriptionBox"; break; default: name = "MPEGVisualSampleDescriptionBox"; } gf_isom_box_dump_start(a, name, trace); fprintf(trace, " DataReferenceIndex=\"%d\" Width=\"%d\" Height=\"%d\"", p->dataReferenceIndex, p->Width, p->Height); //dump reserved info fprintf(trace, " XDPI=\"%d\" YDPI=\"%d\" BitDepth=\"%d\"", p->horiz_res, p->vert_res, p->bit_depth); if (strlen((const char*)p->compressor_name) ) fprintf(trace, " CompressorName=\"%s\"\n", p->compressor_name+1); fprintf(trace, ">\n"); if (p->esd) { gf_isom_box_dump(p->esd, trace); } else { if (p->hevc_config) gf_isom_box_dump(p->hevc_config, trace); if (p->avc_config) gf_isom_box_dump(p->avc_config, trace); if (p->ipod_ext) gf_isom_box_dump(p->ipod_ext, trace); if (p->descr) gf_isom_box_dump(p->descr, trace); if (p->svc_config) gf_isom_box_dump(p->svc_config, trace); if (p->mvc_config) gf_isom_box_dump(p->mvc_config, trace); if (p->lhvc_config) gf_isom_box_dump(p->lhvc_config, trace); if (p->cfg_3gpp) gf_isom_box_dump(p->cfg_3gpp, trace); } if (a->type == GF_ISOM_BOX_TYPE_ENCV) { gf_isom_box_array_dump(p->protections, 
trace); } if (p->pasp) gf_isom_box_dump(p->pasp, trace); if (p->rvcc) gf_isom_box_dump(p->rvcc, trace); if (p->rinf) gf_isom_box_dump(p->rinf, trace); gf_isom_box_dump_done(name, a, trace); return GF_OK; } void base_audio_entry_dump(GF_AudioSampleEntryBox *p, FILE * trace) { fprintf(trace, " DataReferenceIndex=\"%d\" SampleRate=\"%d\"", p->dataReferenceIndex, p->samplerate_hi); fprintf(trace, " Channels=\"%d\" BitsPerSample=\"%d\"", p->channel_count, p->bitspersample); } GF_Err audio_sample_entry_dump(GF_Box *a, FILE * trace) { char *szName; Bool is_3gpp = GF_FALSE; GF_MPEGAudioSampleEntryBox *p = (GF_MPEGAudioSampleEntryBox *)a; switch (p->type) { case GF_ISOM_SUBTYPE_3GP_AMR: szName = "AMRSampleDescriptionBox"; is_3gpp = GF_TRUE; break; case GF_ISOM_SUBTYPE_3GP_AMR_WB: szName = "AMR_WB_SampleDescriptionBox"; is_3gpp = GF_TRUE; break; case GF_ISOM_SUBTYPE_3GP_EVRC: szName = "EVRCSampleDescriptionBox"; is_3gpp = GF_TRUE; break; case GF_ISOM_SUBTYPE_3GP_QCELP: szName = "QCELPSampleDescriptionBox"; is_3gpp = GF_TRUE; break; case GF_ISOM_SUBTYPE_3GP_SMV: szName = "SMVSampleDescriptionBox"; is_3gpp = GF_TRUE; break; case GF_ISOM_BOX_TYPE_MP4A: szName = "MPEGAudioSampleDescriptionBox"; break; case GF_ISOM_BOX_TYPE_AC3: szName = "AC3SampleEntryBox"; break; case GF_ISOM_BOX_TYPE_EC3: szName = "EC3SampleEntryBox"; break; default: szName = "AudioSampleDescriptionBox"; break; } gf_isom_box_dump_start(a, szName, trace); base_audio_entry_dump((GF_AudioSampleEntryBox *)p, trace); fprintf(trace, ">\n"); if (p->esd) { gf_isom_box_dump(p->esd, trace); } else if (p->cfg_3gpp) { gf_isom_box_dump(p->cfg_3gpp, trace); } else if (p->cfg_ac3) { if (p->size) gf_isom_box_dump(p->cfg_ac3, trace); } else if (p->size) { if (is_3gpp) { fprintf(trace, "<!-- INVALID 3GPP FILE: Config not present in Sample Description-->\n"); } else { fprintf(trace, "<!--INVALID MP4 FILE: ESDBox not present in MPEG Sample Description or corrupted-->\n"); } } if (a->type == GF_ISOM_BOX_TYPE_ENCA) { 
gf_isom_box_array_dump(p->protections, trace); } gf_isom_box_dump_done(szName, a, trace); return GF_OK; } GF_Err gnrm_dump(GF_Box *a, FILE * trace) { GF_GenericSampleEntryBox *p = (GF_GenericSampleEntryBox *)a; if (p->EntryType) a->type = p->EntryType; gf_isom_box_dump_start(a, "SampleDescriptionBox", trace); fprintf(trace, "DataReferenceIndex=\"%d\" ExtensionDataSize=\"%d\">\n", p->dataReferenceIndex, p->data_size); a->type = GF_ISOM_BOX_TYPE_GNRM; gf_isom_box_dump_done("SampleDescriptionBox", a, trace); return GF_OK; } GF_Err gnrv_dump(GF_Box *a, FILE * trace) { GF_GenericVisualSampleEntryBox *p = (GF_GenericVisualSampleEntryBox *)a; if (p->EntryType) a->type = p->EntryType; gf_isom_box_dump_start(a, "VisualSampleDescriptionBox", trace); fprintf(trace, "DataReferenceIndex=\"%d\" Version=\"%d\" Revision=\"%d\" Vendor=\"%d\" TemporalQuality=\"%d\" SpacialQuality=\"%d\" Width=\"%d\" Height=\"%d\" HorizontalResolution=\"%d\" VerticalResolution=\"%d\" CompressorName=\"%s\" BitDepth=\"%d\">\n", p->dataReferenceIndex, p->version, p->revision, p->vendor, p->temporal_quality, p->spatial_quality, p->Width, p->Height, p->horiz_res, p->vert_res, p->compressor_name+1, p->bit_depth); a->type = GF_ISOM_BOX_TYPE_GNRV; gf_isom_box_dump_done("VisualSampleDescriptionBox", a, trace); return GF_OK; } GF_Err gnra_dump(GF_Box *a, FILE * trace) { GF_GenericAudioSampleEntryBox *p = (GF_GenericAudioSampleEntryBox *)a; if (p->EntryType) a->type = p->EntryType; gf_isom_box_dump_start(a, "AudioSampleDescriptionBox", trace); fprintf(trace, "DataReferenceIndex=\"%d\" Version=\"%d\" Revision=\"%d\" Vendor=\"%d\" ChannelCount=\"%d\" BitsPerSample=\"%d\" Samplerate=\"%d\">\n", p->dataReferenceIndex, p->version, p->revision, p->vendor, p->channel_count, p->bitspersample, p->samplerate_hi); a->type = GF_ISOM_BOX_TYPE_GNRA; gf_isom_box_dump_done("AudioSampleDescriptionBox", a, trace); return GF_OK; } GF_Err edts_dump(GF_Box *a, FILE * trace) { GF_EditBox *p; p = (GF_EditBox *)a; 
gf_isom_box_dump_start(a, "EditBox", trace); fprintf(trace, ">\n"); if (p->size) gf_isom_box_dump_ex(p->editList, trace, GF_ISOM_BOX_TYPE_ELST); gf_isom_box_dump_done("EditBox", a, trace); return GF_OK; } GF_Err udta_dump(GF_Box *a, FILE * trace) { GF_UserDataBox *p; GF_UserDataMap *map; u32 i; p = (GF_UserDataBox *)a; gf_isom_box_dump_start(a, "UserDataBox", trace); fprintf(trace, ">\n"); i=0; while ((map = (GF_UserDataMap *)gf_list_enum(p->recordList, &i))) { gf_isom_box_array_dump(map->other_boxes, trace); } gf_isom_box_dump_done("UserDataBox", a, trace); return GF_OK; } GF_Err dref_dump(GF_Box *a, FILE * trace) { // GF_DataReferenceBox *p = (GF_DataReferenceBox *)a; gf_isom_box_dump_start(a, "DataReferenceBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("DataReferenceBox", a, trace); return GF_OK; } GF_Err stsd_dump(GF_Box *a, FILE * trace) { // GF_SampleDescriptionBox *p = (GF_SampleDescriptionBox *)a; gf_isom_box_dump_start(a, "SampleDescriptionBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("SampleDescriptionBox", a, trace); return GF_OK; } GF_Err stts_dump(GF_Box *a, FILE * trace) { GF_TimeToSampleBox *p; u32 i, nb_samples; p = (GF_TimeToSampleBox *)a; gf_isom_box_dump_start(a, "TimeToSampleBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries); nb_samples = 0; for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<TimeToSampleEntry SampleDelta=\"%d\" SampleCount=\"%d\"/>\n", p->entries[i].sampleDelta, p->entries[i].sampleCount); nb_samples += p->entries[i].sampleCount; } if (p->size) fprintf(trace, "<!-- counted %d samples in STTS entries -->\n", nb_samples); else fprintf(trace, "<TimeToSampleEntry SampleDelta=\"\" SampleCount=\"\"/>\n"); gf_isom_box_dump_done("TimeToSampleBox", a, trace); return GF_OK; } GF_Err ctts_dump(GF_Box *a, FILE * trace) { GF_CompositionOffsetBox *p; u32 i, nb_samples; p = (GF_CompositionOffsetBox *)a; gf_isom_box_dump_start(a, "CompositionOffsetBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", 
p->nb_entries); nb_samples = 0; for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<CompositionOffsetEntry CompositionOffset=\"%d\" SampleCount=\"%d\"/>\n", p->entries[i].decodingOffset, p->entries[i].sampleCount); nb_samples += p->entries[i].sampleCount; } if (p->size) fprintf(trace, "<!-- counted %d samples in CTTS entries -->\n", nb_samples); else fprintf(trace, "<CompositionOffsetEntry CompositionOffset=\"\" SampleCount=\"\"/>\n"); gf_isom_box_dump_done("CompositionOffsetBox", a, trace); return GF_OK; } GF_Err cslg_dump(GF_Box *a, FILE * trace) { GF_CompositionToDecodeBox *p; p = (GF_CompositionToDecodeBox *)a; gf_isom_box_dump_start(a, "CompositionToDecodeBox", trace); fprintf(trace, "compositionToDTSShift=\"%d\" leastDecodeToDisplayDelta=\"%d\" compositionStartTime=\"%d\" compositionEndTime=\"%d\">\n", p->leastDecodeToDisplayDelta, p->greatestDecodeToDisplayDelta, p->compositionStartTime, p->compositionEndTime); gf_isom_box_dump_done("CompositionToDecodeBox", a, trace); return GF_OK; } GF_Err ccst_dump(GF_Box *a, FILE * trace) { GF_CodingConstraintsBox *p = (GF_CodingConstraintsBox *)a; gf_isom_box_dump_start(a, "CodingConstraintsBox", trace); fprintf(trace, "all_ref_pics_intra=\"%d\" intra_pred_used=\"%d\" max_ref_per_pic=\"%d\" reserved=\"%d\">\n", p->all_ref_pics_intra, p->intra_pred_used, p->max_ref_per_pic, p->reserved); gf_isom_box_dump_done("CodingConstraintsBox", a, trace); return GF_OK; } GF_Err stsh_dump(GF_Box *a, FILE * trace) { GF_ShadowSyncBox *p; u32 i; GF_StshEntry *t; p = (GF_ShadowSyncBox *)a; gf_isom_box_dump_start(a, "SyncShadowBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", gf_list_count(p->entries)); i=0; while ((t = (GF_StshEntry *)gf_list_enum(p->entries, &i))) { fprintf(trace, "<SyncShadowEntry ShadowedSample=\"%d\" SyncSample=\"%d\"/>\n", t->shadowedSampleNumber, t->syncSampleNumber); } if (!p->size) { fprintf(trace, "<SyncShadowEntry ShadowedSample=\"\" SyncSample=\"\"/>\n"); } gf_isom_box_dump_done("SyncShadowBox", a, trace); 
return GF_OK; } GF_Err elst_dump(GF_Box *a, FILE * trace) { GF_EditListBox *p; u32 i; GF_EdtsEntry *t; p = (GF_EditListBox *)a; gf_isom_box_dump_start(a, "EditListBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", gf_list_count(p->entryList)); i=0; while ((t = (GF_EdtsEntry *)gf_list_enum(p->entryList, &i))) { fprintf(trace, "<EditListEntry Duration=\""LLD"\" MediaTime=\""LLD"\" MediaRate=\"%u\"/>\n", LLD_CAST t->segmentDuration, LLD_CAST t->mediaTime, t->mediaRate); } if (!p->size) { fprintf(trace, "<EditListEntry Duration=\"\" MediaTime=\"\" MediaRate=\"\"/>\n"); } gf_isom_box_dump_done("EditListBox", a, trace); return GF_OK; } GF_Err stsc_dump(GF_Box *a, FILE * trace) { GF_SampleToChunkBox *p; u32 i, nb_samples; p = (GF_SampleToChunkBox *)a; gf_isom_box_dump_start(a, "SampleToChunkBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries); nb_samples = 0; for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<SampleToChunkEntry FirstChunk=\"%d\" SamplesPerChunk=\"%d\" SampleDescriptionIndex=\"%d\"/>\n", p->entries[i].firstChunk, p->entries[i].samplesPerChunk, p->entries[i].sampleDescriptionIndex); if (i+1<p->nb_entries) { nb_samples += (p->entries[i+1].firstChunk - p->entries[i].firstChunk) * p->entries[i].samplesPerChunk; } else { nb_samples += p->entries[i].samplesPerChunk; } } if (p->size) fprintf(trace, "<!-- counted %d samples in STSC entries (could be less than sample count) -->\n", nb_samples); else fprintf(trace, "<SampleToChunkEntry FirstChunk=\"\" SamplesPerChunk=\"\" SampleDescriptionIndex=\"\"/>\n"); gf_isom_box_dump_done("SampleToChunkBox", a, trace); return GF_OK; } GF_Err stsz_dump(GF_Box *a, FILE * trace) { GF_SampleSizeBox *p; u32 i; p = (GF_SampleSizeBox *)a; if (a->type == GF_ISOM_BOX_TYPE_STSZ) { gf_isom_box_dump_start(a, "SampleSizeBox", trace); } else { gf_isom_box_dump_start(a, "CompactSampleSizeBox", trace); } fprintf(trace, "SampleCount=\"%d\"", p->sampleCount); if (a->type == GF_ISOM_BOX_TYPE_STSZ) { if (p->sampleSize) { 
fprintf(trace, " ConstantSampleSize=\"%d\"", p->sampleSize); } } else { fprintf(trace, " SampleSizeBits=\"%d\"", p->sampleSize); } fprintf(trace, ">\n"); if ((a->type != GF_ISOM_BOX_TYPE_STSZ) || !p->sampleSize) { if (!p->sizes && p->size) { fprintf(trace, "<!--WARNING: No Sample Size indications-->\n"); } else { for (i=0; i<p->sampleCount; i++) { fprintf(trace, "<SampleSizeEntry Size=\"%d\"/>\n", p->sizes[i]); } } } if (!p->size) { fprintf(trace, "<SampleSizeEntry Size=\"\"/>\n"); } gf_isom_box_dump_done((a->type == GF_ISOM_BOX_TYPE_STSZ) ? "SampleSizeBox" : "CompactSampleSizeBox", a, trace); return GF_OK; } GF_Err stco_dump(GF_Box *a, FILE * trace) { GF_ChunkOffsetBox *p; u32 i; p = (GF_ChunkOffsetBox *)a; gf_isom_box_dump_start(a, "ChunkOffsetBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries); if (!p->offsets && p->size) { fprintf(trace, "<!--Warning: No Chunk Offsets indications-->\n"); } else { for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<ChunkEntry offset=\"%u\"/>\n", p->offsets[i]); } } if (!p->size) { fprintf(trace, "<ChunkEntry offset=\"\"/>\n"); } gf_isom_box_dump_done("ChunkOffsetBox", a, trace); return GF_OK; } GF_Err stss_dump(GF_Box *a, FILE * trace) { GF_SyncSampleBox *p; u32 i; p = (GF_SyncSampleBox *)a; gf_isom_box_dump_start(a, "SyncSampleBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries); if (!p->sampleNumbers && p->size) { fprintf(trace, "<!--Warning: No Key Frames indications-->\n"); } else { for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<SyncSampleEntry sampleNumber=\"%u\"/>\n", p->sampleNumbers[i]); } } if (!p->size) { fprintf(trace, "<SyncSampleEntry sampleNumber=\"\"/>\n"); } gf_isom_box_dump_done("SyncSampleBox", a, trace); return GF_OK; } GF_Err stdp_dump(GF_Box *a, FILE * trace) { GF_DegradationPriorityBox *p; u32 i; p = (GF_DegradationPriorityBox *)a; gf_isom_box_dump_start(a, "DegradationPriorityBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries); if (!p->priorities && 
p->size) { fprintf(trace, "<!--Warning: No Degradation Priority indications-->\n"); } else { for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<DegradationPriorityEntry DegradationPriority=\"%d\"/>\n", p->priorities[i]); } } if (!p->size) { fprintf(trace, "<DegradationPriorityEntry DegradationPriority=\"\"/>\n"); } gf_isom_box_dump_done("DegradationPriorityBox", a, trace); return GF_OK; } GF_Err sdtp_dump(GF_Box *a, FILE * trace) { GF_SampleDependencyTypeBox *p; u32 i; p = (GF_SampleDependencyTypeBox*)a; gf_isom_box_dump_start(a, "SampleDependencyTypeBox", trace); fprintf(trace, "SampleCount=\"%d\">\n", p->sampleCount); if (!p->sample_info && p->size) { fprintf(trace, "<!--Warning: No sample dependencies indications-->\n"); } else { for (i=0; i<p->sampleCount; i++) { u8 flag = p->sample_info[i]; fprintf(trace, "<SampleDependencyEntry "); switch ( (flag >> 4) & 3) { case 0: fprintf(trace, "dependsOnOther=\"unknown\" "); break; case 1: fprintf(trace, "dependsOnOther=\"yes\" "); break; case 2: fprintf(trace, "dependsOnOther=\"no\" "); break; case 3: fprintf(trace, "dependsOnOther=\"RESERVED\" "); break; } switch ( (flag >> 2) & 3) { case 0: fprintf(trace, "dependedOn=\"unknown\" "); break; case 1: fprintf(trace, "dependedOn=\"yes\" "); break; case 2: fprintf(trace, "dependedOn=\"no\" "); break; case 3: fprintf(trace, "dependedOn=\"RESERVED\" "); break; } switch ( flag & 3) { case 0: fprintf(trace, "hasRedundancy=\"unknown\" "); break; case 1: fprintf(trace, "hasRedundancy=\"yes\" "); break; case 2: fprintf(trace, "hasRedundancy=\"no\" "); break; case 3: fprintf(trace, "hasRedundancy=\"RESERVED\" "); break; } fprintf(trace, " />\n"); } } if (!p->size) { fprintf(trace, "<SampleDependencyEntry dependsOnOther=\"unknown|yes|no|RESERVED\" dependedOn=\"unknown|yes|no|RESERVED\" hasRedundancy=\"unknown|yes|no|RESERVED\"/>\n"); } gf_isom_box_dump_done("SampleDependencyTypeBox", a, trace); return GF_OK; } GF_Err co64_dump(GF_Box *a, FILE * trace) { GF_ChunkLargeOffsetBox *p; 
u32 i; p = (GF_ChunkLargeOffsetBox *)a; gf_isom_box_dump_start(a, "ChunkLargeOffsetBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->nb_entries); if (!p->offsets && p->size) { fprintf(trace, "<!-- Warning: No Chunk Offsets indications/>\n"); } else { for (i=0; i<p->nb_entries; i++) fprintf(trace, "<ChunkOffsetEntry offset=\""LLU"\"/>\n", LLU_CAST p->offsets[i]); } if (!p->size) { fprintf(trace, "<ChunkOffsetEntry offset=\"\"/>\n"); } gf_isom_box_dump_done("ChunkLargeOffsetBox", a, trace); return GF_OK; } GF_Err esds_dump(GF_Box *a, FILE * trace) { GF_ESDBox *p; p = (GF_ESDBox *)a; gf_isom_box_dump_start(a, "MPEG4ESDescriptorBox", trace); fprintf(trace, ">\n"); if (p->desc) { #ifndef GPAC_DISABLE_OD_DUMP gf_odf_dump_desc((GF_Descriptor *) p->desc, trace, 1, GF_TRUE); #else fprintf(trace, "<!-- Object Descriptor Dumping disabled in this build of GPAC -->\n"); #endif } else if (p->size) { fprintf(trace, "<!--INVALID MP4 FILE: ESD not present in MPEG Sample Description or corrupted-->\n"); } gf_isom_box_dump_done("MPEG4ESDescriptorBox", a, trace); return GF_OK; } GF_Err minf_dump(GF_Box *a, FILE * trace) { GF_MediaInformationBox *p; p = (GF_MediaInformationBox *)a; gf_isom_box_dump_start(a, "MediaInformationBox", trace); fprintf(trace, ">\n"); if (p->size) gf_isom_box_dump_ex(p->InfoHeader, trace, GF_ISOM_BOX_TYPE_NMHD); if (p->size) gf_isom_box_dump_ex(p->dataInformation, trace, GF_ISOM_BOX_TYPE_DINF); if (p->size) gf_isom_box_dump_ex(p->sampleTable, trace, GF_ISOM_BOX_TYPE_STBL); gf_isom_box_dump_done("MediaInformationBox", a, trace); return GF_OK; } GF_Err tkhd_dump(GF_Box *a, FILE * trace) { GF_TrackHeaderBox *p; p = (GF_TrackHeaderBox *)a; gf_isom_box_dump_start(a, "TrackHeaderBox", trace); fprintf(trace, "CreationTime=\""LLD"\" ModificationTime=\""LLD"\" TrackID=\"%u\" Duration=\""LLD"\"", LLD_CAST p->creationTime, LLD_CAST p->modificationTime, p->trackID, LLD_CAST p->duration); if (p->alternate_group) fprintf(trace, " AlternateGroupID=\"%d\"", 
p->alternate_group); if (p->volume) { fprintf(trace, " Volume=\"%.2f\"", (Float)p->volume / 256); } else if (p->width || p->height) { fprintf(trace, " Width=\"%.2f\" Height=\"%.2f\"", (Float)p->width / 65536, (Float)p->height / 65536); if (p->layer) fprintf(trace, " Layer=\"%d\"", p->layer); } fprintf(trace, ">\n"); if (p->width || p->height) { fprintf(trace, "<Matrix m11=\"0x%.8x\" m12=\"0x%.8x\" m13=\"0x%.8x\" ", p->matrix[0], p->matrix[1], p->matrix[2]); fprintf(trace, "m21=\"0x%.8x\" m22=\"0x%.8x\" m23=\"0x%.8x\" ", p->matrix[3], p->matrix[4], p->matrix[5]); fprintf(trace, "m31=\"0x%.8x\" m32=\"0x%.8x\" m33=\"0x%.8x\"/>\n", p->matrix[6], p->matrix[7], p->matrix[8]); } gf_isom_box_dump_done("TrackHeaderBox", a, trace); return GF_OK; } GF_Err tref_dump(GF_Box *a, FILE * trace) { // GF_TrackReferenceBox *p = (GF_TrackReferenceBox *)a; gf_isom_box_dump_start(a, "TrackReferenceBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("TrackReferenceBox", a, trace); return GF_OK; } GF_Err mdia_dump(GF_Box *a, FILE * trace) { GF_MediaBox *p = (GF_MediaBox *)a; gf_isom_box_dump_start(a, "MediaBox", trace); fprintf(trace, ">\n"); if (p->size) gf_isom_box_dump_ex(p->mediaHeader, trace, GF_ISOM_BOX_TYPE_MDHD); if (p->size) gf_isom_box_dump_ex(p->handler, trace,GF_ISOM_BOX_TYPE_HDLR); if (p->size) gf_isom_box_dump_ex(p->information, trace, GF_ISOM_BOX_TYPE_MINF); gf_isom_box_dump_done("MediaBox", a, trace); return GF_OK; } GF_Err mfra_dump(GF_Box *a, FILE * trace) { GF_MovieFragmentRandomAccessBox *p = (GF_MovieFragmentRandomAccessBox *)a; u32 i, count; GF_TrackFragmentRandomAccessBox *tfra; gf_isom_box_dump_start(a, "MovieFragmentRandomAccessBox", trace); fprintf(trace, ">\n"); count = gf_list_count(p->tfra_list); for (i=0; i<count; i++) { tfra = (GF_TrackFragmentRandomAccessBox *)gf_list_get(p->tfra_list, i); gf_isom_box_dump_ex(tfra, trace, GF_ISOM_BOX_TYPE_TFRA); } gf_isom_box_dump_done("MovieFragmentRandomAccessBox", a, trace); return GF_OK; } GF_Err tfra_dump(GF_Box 
*a, FILE * trace) { u32 i; GF_TrackFragmentRandomAccessBox *p = (GF_TrackFragmentRandomAccessBox *)a; gf_isom_box_dump_start(a, "TrackFragmentRandomAccessBox", trace); fprintf(trace, "TrackId=\"%u\" number_of_entries=\"%u\">\n", p->track_id, p->nb_entries); for (i=0; i<p->nb_entries; i++) { fprintf(trace, "<RandomAccessEntry time=\""LLU"\" moof_offset=\""LLU"\" traf=\"%u\" trun=\"%u\" sample=\"%u\"/>\n", p->entries[i].time, p->entries[i].moof_offset, p->entries[i].traf_number, p->entries[i].trun_number, p->entries[i].sample_number); } if (!p->size) { fprintf(trace, "<RandomAccessEntry time=\"\" moof_offset=\"\" traf=\"\" trun=\"\" sample=\"\"/>\n"); } gf_isom_box_dump_done("TrackFragmentRandomAccessBox", a, trace); return GF_OK; } GF_Err mfro_dump(GF_Box *a, FILE * trace) { GF_MovieFragmentRandomAccessOffsetBox *p = (GF_MovieFragmentRandomAccessOffsetBox *)a; gf_isom_box_dump_start(a, "MovieFragmentRandomAccessOffsetBox", trace); fprintf(trace, "container_size=\"%d\" >\n", p->container_size); gf_isom_box_dump_done("MovieFragmentRandomAccessOffsetBox", a, trace); return GF_OK; } GF_Err elng_dump(GF_Box *a, FILE * trace) { GF_ExtendedLanguageBox *p = (GF_ExtendedLanguageBox *)a; gf_isom_box_dump_start(a, "ExtendedLanguageBox", trace); fprintf(trace, "LanguageCode=\"%s\">\n", p->extended_language); gf_isom_box_dump_done("ExtendedLanguageBox", a, trace); return GF_OK; } GF_Err unkn_dump(GF_Box *a, FILE * trace) { GF_UnknownBox *u = (GF_UnknownBox *)a; u->type = u->original_4cc; gf_isom_box_dump_start(a, "UnknownBox", trace); u->type = GF_ISOM_BOX_TYPE_UNKNOWN; if (u->dataSize<100) dump_data_attribute(trace, "data", u->data, u->dataSize); fprintf(trace, ">\n"); gf_isom_box_dump_done("UnknownBox", a, trace); return GF_OK; } GF_Err uuid_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "UUIDBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("UUIDBox", a, trace); return GF_OK; } GF_Err void_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, 
"VoidBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("VoidBox", a, trace); return GF_OK; } GF_Err ftyp_dump(GF_Box *a, FILE * trace) { GF_FileTypeBox *p; u32 i; p = (GF_FileTypeBox *)a; gf_isom_box_dump_start(a, (a->type == GF_ISOM_BOX_TYPE_FTYP ? "FileTypeBox" : "SegmentTypeBox"), trace); fprintf(trace, "MajorBrand=\"%s\" MinorVersion=\"%d\">\n", gf_4cc_to_str(p->majorBrand), p->minorVersion); for (i=0; i<p->altCount; i++) { fprintf(trace, "<BrandEntry AlternateBrand=\"%s\"/>\n", gf_4cc_to_str(p->altBrand[i])); } if (!p->type) { fprintf(trace, "<BrandEntry AlternateBrand=\"4CC\"/>\n"); } gf_isom_box_dump_done((a->type == GF_ISOM_BOX_TYPE_FTYP ? "FileTypeBox" : "SegmentTypeBox"), a, trace); return GF_OK; } GF_Err padb_dump(GF_Box *a, FILE * trace) { GF_PaddingBitsBox *p; u32 i; p = (GF_PaddingBitsBox *)a; gf_isom_box_dump_start(a, "PaddingBitsBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", p->SampleCount); for (i=0; i<p->SampleCount; i+=1) { fprintf(trace, "<PaddingBitsEntry PaddingBits=\"%d\"/>\n", p->padbits[i]); } if (!p->size) { fprintf(trace, "<PaddingBitsEntry PaddingBits=\"\"/>\n"); } gf_isom_box_dump_done("PaddingBitsBox", a, trace); return GF_OK; } GF_Err stsf_dump(GF_Box *a, FILE * trace) { GF_SampleFragmentBox *p; GF_StsfEntry *ent; u32 i, j, count; p = (GF_SampleFragmentBox *)a; count = gf_list_count(p->entryList); gf_isom_box_dump_start(a, "SampleFragmentBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", count); for (i=0; i<count; i++) { ent = (GF_StsfEntry *)gf_list_get(p->entryList, i); fprintf(trace, "<SampleFragmentEntry SampleNumber=\"%d\" FragmentCount=\"%d\">\n", ent->SampleNumber, ent->fragmentCount); for (j=0; j<ent->fragmentCount; j++) fprintf(trace, "<FragmentSizeEntry size=\"%d\"/>\n", ent->fragmentSizes[j]); fprintf(trace, "</SampleFragmentEntry>\n"); } if (!p->size) { fprintf(trace, "<SampleFragmentEntry SampleNumber=\"\" FragmentCount=\"\">\n"); fprintf(trace, "<FragmentSizeEntry size=\"\"/>\n"); fprintf(trace, 
"</SampleFragmentEntry>\n"); } gf_isom_box_dump_done("SampleFragmentBox", a, trace); return GF_OK; } GF_Err gppc_dump(GF_Box *a, FILE * trace) { GF_3GPPConfigBox *p = (GF_3GPPConfigBox *)a; const char *name = gf_4cc_to_str(p->cfg.vendor); switch (p->cfg.type) { case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: gf_isom_box_dump_start(a, "AMRConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\"", name, p->cfg.decoder_version); fprintf(trace, " FramesPerSample=\"%d\" SupportedModes=\"%x\" ModeRotating=\"%d\"", p->cfg.frames_per_sample, p->cfg.AMR_mode_set, p->cfg.AMR_mode_change_period); fprintf(trace, ">\n"); gf_isom_box_dump_done("AMRConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_EVRC: gf_isom_box_dump_start(a, "EVRCConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\" FramesPerSample=\"%d\" >\n", name, p->cfg.decoder_version, p->cfg.frames_per_sample); gf_isom_box_dump_done("EVRCConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_QCELP: gf_isom_box_dump_start(a, "QCELPConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\" FramesPerSample=\"%d\" >\n", name, p->cfg.decoder_version, p->cfg.frames_per_sample); gf_isom_box_dump_done("QCELPConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_SMV: gf_isom_box_dump_start(a, "SMVConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\" FramesPerSample=\"%d\" >\n", name, p->cfg.decoder_version, p->cfg.frames_per_sample); gf_isom_box_dump_done("SMVConfigurationBox", a, trace); break; case GF_ISOM_SUBTYPE_3GP_H263: gf_isom_box_dump_start(a, "H263ConfigurationBox", trace); fprintf(trace, "Vendor=\"%s\" Version=\"%d\"", name, p->cfg.decoder_version); fprintf(trace, " Profile=\"%d\" Level=\"%d\"", p->cfg.H263_profile, p->cfg.H263_level); fprintf(trace, ">\n"); gf_isom_box_dump_done("H263ConfigurationBox", a, trace); break; default: break; } return GF_OK; } GF_Err avcc_dump(GF_Box *a, FILE * trace) { u32 i, count; 
GF_AVCConfigurationBox *p = (GF_AVCConfigurationBox *) a; const char *name = (p->type==GF_ISOM_BOX_TYPE_MVCC) ? "MVC" : (p->type==GF_ISOM_BOX_TYPE_SVCC) ? "SVC" : "AVC"; char boxname[256]; sprintf(boxname, "%sConfigurationBox", name); gf_isom_box_dump_start(a, boxname, trace); fprintf(trace, ">\n"); fprintf(trace, "<%sDecoderConfigurationRecord", name); if (! p->config) { if (p->size) { fprintf(trace, ">\n"); fprintf(trace, "<!-- INVALID AVC ENTRY : no AVC/SVC config record -->\n"); } else { fprintf(trace, " configurationVersion=\"\" AVCProfileIndication=\"\" profile_compatibility=\"\" AVCLevelIndication=\"\" nal_unit_size=\"\" complete_representation=\"\""); fprintf(trace, " chroma_format=\"\" luma_bit_depth=\"\" chroma_bit_depth=\"\""); fprintf(trace, ">\n"); fprintf(trace, "<SequenceParameterSet size=\"\" content=\"\"/>\n"); fprintf(trace, "<PictureParameterSet size=\"\" content=\"\"/>\n"); fprintf(trace, "<SequenceParameterSetExtensions size=\"\" content=\"\"/>\n"); } fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); gf_isom_box_dump_done(boxname, a, trace); return GF_OK; } fprintf(trace, " configurationVersion=\"%d\" AVCProfileIndication=\"%d\" profile_compatibility=\"%d\" AVCLevelIndication=\"%d\" nal_unit_size=\"%d\"", p->config->configurationVersion, p->config->AVCProfileIndication, p->config->profile_compatibility, p->config->AVCLevelIndication, p->config->nal_unit_size); if ((p->type==GF_ISOM_BOX_TYPE_SVCC) || (p->type==GF_ISOM_BOX_TYPE_MVCC) ) fprintf(trace, " complete_representation=\"%d\"", p->config->complete_representation); if (p->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(p->config->AVCProfileIndication)) { fprintf(trace, " chroma_format=\"%s\" luma_bit_depth=\"%d\" chroma_bit_depth=\"%d\"", gf_avc_hevc_get_chroma_format_name(p->config->chroma_format), p->config->luma_bit_depth, p->config->chroma_bit_depth); } } fprintf(trace, ">\n"); count = gf_list_count(p->config->sequenceParameterSets); for (i=0; i<count; i++) { 
GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(p->config->sequenceParameterSets, i); fprintf(trace, "<SequenceParameterSet size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } count = gf_list_count(p->config->pictureParameterSets); for (i=0; i<count; i++) { GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(p->config->pictureParameterSets, i); fprintf(trace, "<PictureParameterSet size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } if (p->config->sequenceParameterSetExtensions) { count = gf_list_count(p->config->sequenceParameterSetExtensions); for (i=0; i<count; i++) { GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(p->config->sequenceParameterSetExtensions, i); fprintf(trace, "<SequenceParameterSetExtensions size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } } fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); gf_isom_box_dump_done(boxname, a, trace); return GF_OK; } GF_Err hvcc_dump(GF_Box *a, FILE * trace) { u32 i, count; const char *name = (a->type==GF_ISOM_BOX_TYPE_HVCC) ? "HEVC" : "L-HEVC"; char boxname[256]; GF_HEVCConfigurationBox *p = (GF_HEVCConfigurationBox *) a; sprintf(boxname, "%sConfigurationBox", name); gf_isom_box_dump_start(a, boxname, trace); fprintf(trace, ">\n"); if (! 
p->config) { if (p->size) { fprintf(trace, "<!-- INVALID HEVC ENTRY: no HEVC/SHVC config record -->\n"); } else { fprintf(trace, "<%sDecoderConfigurationRecord nal_unit_size=\"\" configurationVersion=\"\" ", name); if (a->type==GF_ISOM_BOX_TYPE_HVCC) { fprintf(trace, "profile_space=\"\" tier_flag=\"\" profile_idc=\"\" general_profile_compatibility_flags=\"\" progressive_source_flag=\"\" interlaced_source_flag=\"\" non_packed_constraint_flag=\"\" frame_only_constraint_flag=\"\" constraint_indicator_flags=\"\" level_idc=\"\" "); } fprintf(trace, "min_spatial_segmentation_idc=\"\" parallelismType=\"\" "); if (a->type==GF_ISOM_BOX_TYPE_HVCC) fprintf(trace, "chroma_format=\"\" luma_bit_depth=\"\" chroma_bit_depth=\"\" avgFrameRate=\"\" constantFrameRate=\"\" numTemporalLayers=\"\" temporalIdNested=\"\""); fprintf(trace, ">\n"); fprintf(trace, "<ParameterSetArray nalu_type=\"\" complete_set=\"\">\n"); fprintf(trace, "<ParameterSet size=\"\" content=\"\"/>\n"); fprintf(trace, "</ParameterSetArray>\n"); fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); } fprintf(trace, "</%sConfigurationBox>\n", name); return GF_OK; } fprintf(trace, "<%sDecoderConfigurationRecord nal_unit_size=\"%d\" ", name, p->config->nal_unit_size); fprintf(trace, "configurationVersion=\"%u\" ", p->config->configurationVersion); if (a->type==GF_ISOM_BOX_TYPE_HVCC) { fprintf(trace, "profile_space=\"%u\" ", p->config->profile_space); fprintf(trace, "tier_flag=\"%u\" ", p->config->tier_flag); fprintf(trace, "profile_idc=\"%u\" ", p->config->profile_idc); fprintf(trace, "general_profile_compatibility_flags=\"%X\" ", p->config->general_profile_compatibility_flags); fprintf(trace, "progressive_source_flag=\"%u\" ", p->config->progressive_source_flag); fprintf(trace, "interlaced_source_flag=\"%u\" ", p->config->interlaced_source_flag); fprintf(trace, "non_packed_constraint_flag=\"%u\" ", p->config->non_packed_constraint_flag); fprintf(trace, "frame_only_constraint_flag=\"%u\" ", 
p->config->frame_only_constraint_flag); fprintf(trace, "constraint_indicator_flags=\""LLX"\" ", p->config->constraint_indicator_flags); fprintf(trace, "level_idc=\"%d\" ", p->config->level_idc); } fprintf(trace, "min_spatial_segmentation_idc=\"%u\" ", p->config->min_spatial_segmentation_idc); fprintf(trace, "parallelismType=\"%u\" ", p->config->parallelismType); if (a->type==GF_ISOM_BOX_TYPE_HVCC) fprintf(trace, "chroma_format=\"%s\" luma_bit_depth=\"%u\" chroma_bit_depth=\"%u\" avgFrameRate=\"%u\" constantFrameRate=\"%u\" numTemporalLayers=\"%u\" temporalIdNested=\"%u\"", gf_avc_hevc_get_chroma_format_name(p->config->chromaFormat), p->config->luma_bit_depth, p->config->chroma_bit_depth, p->config->avgFrameRate, p->config->constantFrameRate, p->config->numTemporalLayers, p->config->temporalIdNested); fprintf(trace, ">\n"); count = gf_list_count(p->config->param_array); for (i=0; i<count; i++) { u32 nalucount, j; GF_HEVCParamArray *ar = (GF_HEVCParamArray*)gf_list_get(p->config->param_array, i); fprintf(trace, "<ParameterSetArray nalu_type=\"%d\" complete_set=\"%d\">\n", ar->type, ar->array_completeness); nalucount = gf_list_count(ar->nalus); for (j=0; j<nalucount; j++) { GF_AVCConfigSlot *c = (GF_AVCConfigSlot *)gf_list_get(ar->nalus, j); fprintf(trace, "<ParameterSet size=\"%d\" content=\"", c->size); dump_data(trace, c->data, c->size); fprintf(trace, "\"/>\n"); } fprintf(trace, "</ParameterSetArray>\n"); } fprintf(trace, "</%sDecoderConfigurationRecord>\n", name); gf_isom_box_dump_done(boxname, a, trace); return GF_OK; } GF_Err m4ds_dump(GF_Box *a, FILE * trace) { u32 i; GF_Descriptor *desc; GF_MPEG4ExtensionDescriptorsBox *p = (GF_MPEG4ExtensionDescriptorsBox *) a; gf_isom_box_dump_start(a, "MPEG4ExtensionDescriptorsBox", trace); fprintf(trace, ">\n"); i=0; while ((desc = (GF_Descriptor *)gf_list_enum(p->descriptors, &i))) { #ifndef GPAC_DISABLE_OD_DUMP gf_odf_dump_desc(desc, trace, 1, GF_TRUE); #else fprintf(trace, "<!-- Object Descriptor Dumping disabled in 
this build of GPAC -->\n"); #endif } gf_isom_box_dump_done("MPEG4ExtensionDescriptorsBox", a, trace); return GF_OK; } GF_Err btrt_dump(GF_Box *a, FILE * trace) { GF_BitRateBox *p = (GF_BitRateBox*)a; gf_isom_box_dump_start(a, "BitRateBox", trace); fprintf(trace, "BufferSizeDB=\"%d\" avgBitRate=\"%d\" maxBitRate=\"%d\">\n", p->bufferSizeDB, p->avgBitrate, p->maxBitrate); gf_isom_box_dump_done("BitRateBox", a, trace); return GF_OK; } GF_Err ftab_dump(GF_Box *a, FILE * trace) { u32 i; GF_FontTableBox *p = (GF_FontTableBox *)a; gf_isom_box_dump_start(a, "FontTableBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->entry_count; i++) { fprintf(trace, "<FontRecord ID=\"%d\" name=\"%s\"/>\n", p->fonts[i].fontID, p->fonts[i].fontName ? p->fonts[i].fontName : "NULL"); } if (!p->size) { fprintf(trace, "<FontRecord ID=\"\" name=\"\"/>\n"); } gf_isom_box_dump_done("FontTableBox", a, trace); return GF_OK; } static void tx3g_dump_rgba8(FILE * trace, char *name, u32 col) { fprintf(trace, "%s=\"%x %x %x %x\"", name, (col>>16)&0xFF, (col>>8)&0xFF, (col)&0xFF, (col>>24)&0xFF); } static void tx3g_dump_rgb16(FILE * trace, char *name, char col[6]) { fprintf(trace, "%s=\"%x %x %x\"", name, *((u16*)col), *((u16*)(col+1)), *((u16*)(col+2))); } static void tx3g_dump_box(FILE * trace, GF_BoxRecord *rec) { fprintf(trace, "<BoxRecord top=\"%d\" left=\"%d\" bottom=\"%d\" right=\"%d\"/>\n", rec->top, rec->left, rec->bottom, rec->right); } static void tx3g_dump_style(FILE * trace, GF_StyleRecord *rec) { fprintf(trace, "<StyleRecord startChar=\"%d\" endChar=\"%d\" fontID=\"%d\" styles=\"", rec->startCharOffset, rec->endCharOffset, rec->fontID); if (!rec->style_flags) { fprintf(trace, "Normal"); } else { if (rec->style_flags & 1) fprintf(trace, "Bold "); if (rec->style_flags & 2) fprintf(trace, "Italic "); if (rec->style_flags & 4) fprintf(trace, "Underlined "); } fprintf(trace, "\" fontSize=\"%d\" ", rec->font_size); tx3g_dump_rgba8(trace, "textColor", rec->text_color); fprintf(trace, "/>\n"); } 
/*dumps a 3GPP timed text ('tx3g') sample entry*/
GF_Err tx3g_dump(GF_Box *a, FILE * trace)
{
	GF_Tx3gSampleEntryBox *p = (GF_Tx3gSampleEntryBox *)a;
	gf_isom_box_dump_start(a, "Tx3gSampleEntryBox", trace);
	fprintf(trace, "dataReferenceIndex=\"%d\" displayFlags=\"%x\" horizontal-justification=\"%d\" vertical-justification=\"%d\" ", p->dataReferenceIndex, p->displayFlags, p->horizontal_justification, p->vertical_justification);

	tx3g_dump_rgba8(trace, "backgroundColor", p->back_color);
	fprintf(trace, ">\n");
	fprintf(trace, "<DefaultBox>\n");
	tx3g_dump_box(trace, &p->default_box);
	gf_isom_box_dump_done("DefaultBox", a, trace);

	fprintf(trace, "<DefaultStyle>\n");
	tx3g_dump_style(trace, &p->default_style);
	fprintf(trace, "</DefaultStyle>\n");
	/*only dump the font table when dumping a parsed box (size set); in
	syntax-dump mode (size==0) the child table may not exist*/
	if (p->size) {
		gf_isom_box_dump_ex(p->font_table, trace, GF_ISOM_BOX_TYPE_FTAB);
	}
	gf_isom_box_dump_done("Tx3gSampleEntryBox", a, trace);
	return GF_OK;
}

/*dumps a QuickTime 'text' sample entry*/
GF_Err text_dump(GF_Box *a, FILE * trace)
{
	GF_TextSampleEntryBox *p = (GF_TextSampleEntryBox *)a;
	gf_isom_box_dump_start(a, "TextSampleEntryBox", trace);
	fprintf(trace, "dataReferenceIndex=\"%d\" displayFlags=\"%x\" textJustification=\"%d\" ", p->dataReferenceIndex, p->displayFlags, p->textJustification);
	if (p->textName)
		fprintf(trace, "textName=\"%s\" ", p->textName);
	tx3g_dump_rgb16(trace, "background-color", p->background_color);
	/*leading space in the attribute name acts as the separator in the output*/
	tx3g_dump_rgb16(trace, " foreground-color", p->foreground_color);
	fprintf(trace, ">\n");

	fprintf(trace, "<DefaultBox>\n");
	tx3g_dump_box(trace, &p->default_box);
	gf_isom_box_dump_done("DefaultBox", a, trace);
	gf_isom_box_dump_done("TextSampleEntryBox", a, trace);
	return GF_OK;
}

/*dumps a text style ('styl') modifier and its style records*/
GF_Err styl_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_TextStyleBox*p = (GF_TextStyleBox*)a;
	gf_isom_box_dump_start(a, "TextStyleBox", trace);
	fprintf(trace, ">\n");
	for (i=0; i<p->entry_count; i++) tx3g_dump_style(trace, &p->styles[i]);
	/*size==0 means syntax dump: print an empty template record*/
	if (!p->size) {
		fprintf(trace, "<StyleRecord startChar=\"\" endChar=\"\" fontID=\"\" styles=\"Normal|Bold|Italic|Underlined\" fontSize=\"\" textColor=\"\" />\n");
	}
gf_isom_box_dump_done("TextStyleBox", a, trace);
	return GF_OK;
}

/*dumps the 'hlit' text highlight modifier box*/
GF_Err hlit_dump(GF_Box *a, FILE * trace)
{
	GF_TextHighlightBox*p = (GF_TextHighlightBox*)a;
	gf_isom_box_dump_start(a, "TextHighlightBox", trace);
	fprintf(trace, "startcharoffset=\"%d\" endcharoffset=\"%d\">\n", p->startcharoffset, p->endcharoffset);
	gf_isom_box_dump_done("TextHighlightBox", a, trace);
	return GF_OK;
}

/*dumps the 'hclr' text highlight color modifier box*/
GF_Err hclr_dump(GF_Box *a, FILE * trace)
{
	GF_TextHighlightColorBox*p = (GF_TextHighlightColorBox*)a;
	gf_isom_box_dump_start(a, "TextHighlightColorBox", trace);
	tx3g_dump_rgba8(trace, "highlight_color", p->hil_color);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done("TextHighlightColorBox", a, trace);
	return GF_OK;
}

/*dumps the 'krok' karaoke modifier box and its timing records*/
GF_Err krok_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_TextKaraokeBox*p = (GF_TextKaraokeBox*)a;
	gf_isom_box_dump_start(a, "TextKaraokeBox", trace);
	fprintf(trace, "highlight_starttime=\"%d\">\n", p->highlight_starttime);
	for (i=0; i<p->nb_entries; i++) {
		fprintf(trace, "<KaraokeRecord highlight_endtime=\"%d\" start_charoffset=\"%d\" end_charoffset=\"%d\"/>\n", p->records[i].highlight_endtime, p->records[i].start_charoffset, p->records[i].end_charoffset);
	}
	/*size==0 means syntax dump: print an empty template record*/
	if (!p->size) {
		fprintf(trace, "<KaraokeRecord highlight_endtime=\"\" start_charoffset=\"\" end_charoffset=\"\"/>\n");
	}
	gf_isom_box_dump_done("TextKaraokeBox", a, trace);
	return GF_OK;
}

/*dumps the 'dlay' scroll delay modifier box*/
GF_Err dlay_dump(GF_Box *a, FILE * trace)
{
	GF_TextScrollDelayBox*p = (GF_TextScrollDelayBox*)a;
	gf_isom_box_dump_start(a, "TextScrollDelayBox", trace);
	fprintf(trace, "scroll_delay=\"%d\">\n", p->scroll_delay);
	gf_isom_box_dump_done("TextScrollDelayBox", a, trace);
	return GF_OK;
}

/*dumps the 'href' hypertext modifier box; URL / URL_hint may be NULL*/
GF_Err href_dump(GF_Box *a, FILE * trace)
{
	GF_TextHyperTextBox*p = (GF_TextHyperTextBox*)a;
	gf_isom_box_dump_start(a, "TextHyperTextBox", trace);
	fprintf(trace, "startcharoffset=\"%d\" endcharoffset=\"%d\" URL=\"%s\" altString=\"%s\">\n", p->startcharoffset, p->endcharoffset, p->URL ? p->URL : "NULL", p->URL_hint ?
p->URL_hint : "NULL"); gf_isom_box_dump_done("TextHyperTextBox", a, trace); return GF_OK; } GF_Err tbox_dump(GF_Box *a, FILE * trace) { GF_TextBoxBox*p = (GF_TextBoxBox*)a; gf_isom_box_dump_start(a, "TextBoxBox", trace); fprintf(trace, ">\n"); tx3g_dump_box(trace, &p->box); gf_isom_box_dump_done("TextBoxBox", a, trace); return GF_OK; } GF_Err blnk_dump(GF_Box *a, FILE * trace) { GF_TextBlinkBox*p = (GF_TextBlinkBox*)a; gf_isom_box_dump_start(a, "TextBlinkBox", trace); fprintf(trace, "start_charoffset=\"%d\" end_charoffset=\"%d\">\n", p->startcharoffset, p->endcharoffset); gf_isom_box_dump_done("TextBlinkBox", a, trace); return GF_OK; } GF_Err twrp_dump(GF_Box *a, FILE * trace) { GF_TextWrapBox*p = (GF_TextWrapBox*)a; gf_isom_box_dump_start(a, "TextWrapBox", trace); fprintf(trace, "wrap_flag=\"%s\">\n", p->wrap_flag ? ( (p->wrap_flag>1) ? "Reserved" : "Automatic" ) : "No Wrap"); gf_isom_box_dump_done("TextWrapBox", a, trace); return GF_OK; } GF_Err meta_dump(GF_Box *a, FILE * trace) { GF_MetaBox *p; p = (GF_MetaBox *)a; gf_isom_box_dump_start(a, "MetaBox", trace); fprintf(trace, ">\n"); if (p->handler) gf_isom_box_dump(p->handler, trace); if (p->primary_resource) gf_isom_box_dump(p->primary_resource, trace); if (p->file_locations) gf_isom_box_dump(p->file_locations, trace); if (p->item_locations) gf_isom_box_dump(p->item_locations, trace); if (p->protections) gf_isom_box_dump(p->protections, trace); if (p->item_infos) gf_isom_box_dump(p->item_infos, trace); if (p->IPMP_control) gf_isom_box_dump(p->IPMP_control, trace); if (p->item_refs) gf_isom_box_dump(p->item_refs, trace); if (p->item_props) gf_isom_box_dump(p->item_props, trace); gf_isom_box_dump_done("MetaBox", a, trace); return GF_OK; } GF_Err xml_dump(GF_Box *a, FILE * trace) { GF_XMLBox *p = (GF_XMLBox *)a; gf_isom_box_dump_start(a, "XMLBox", trace); fprintf(trace, ">\n"); fprintf(trace, "<![CDATA[\n"); if (p->xml) gf_fwrite(p->xml, strlen(p->xml), 1, trace); fprintf(trace, "]]>\n"); 
gf_isom_box_dump_done("XMLBox", a, trace); return GF_OK; } GF_Err bxml_dump(GF_Box *a, FILE * trace) { GF_BinaryXMLBox *p = (GF_BinaryXMLBox *)a; gf_isom_box_dump_start(a, "BinaryXMLBox", trace); fprintf(trace, "binarySize=\"%d\">\n", p->data_length); gf_isom_box_dump_done("BinaryXMLBox", a, trace); return GF_OK; } GF_Err pitm_dump(GF_Box *a, FILE * trace) { GF_PrimaryItemBox *p = (GF_PrimaryItemBox *)a; gf_isom_box_dump_start(a, "PrimaryItemBox", trace); fprintf(trace, "item_ID=\"%d\">\n", p->item_ID); gf_isom_box_dump_done("PrimaryItemBox", a, trace); return GF_OK; } GF_Err ipro_dump(GF_Box *a, FILE * trace) { GF_ItemProtectionBox *p = (GF_ItemProtectionBox *)a; gf_isom_box_dump_start(a, "ItemProtectionBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(p->protection_information, trace); gf_isom_box_dump_done("ItemProtectionBox", a, trace); return GF_OK; } GF_Err infe_dump(GF_Box *a, FILE * trace) { GF_ItemInfoEntryBox *p = (GF_ItemInfoEntryBox *)a; gf_isom_box_dump_start(a, "ItemInfoEntryBox", trace); fprintf(trace, "item_ID=\"%d\" item_protection_index=\"%d\" item_name=\"%s\" content_type=\"%s\" content_encoding=\"%s\" item_type=\"%s\">\n", p->item_ID, p->item_protection_index, p->item_name, p->content_type, p->content_encoding, gf_4cc_to_str(p->item_type)); gf_isom_box_dump_done("ItemInfoEntryBox", a, trace); return GF_OK; } GF_Err iinf_dump(GF_Box *a, FILE * trace) { GF_ItemInfoBox *p = (GF_ItemInfoBox *)a; gf_isom_box_dump_start(a, "ItemInfoBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(p->item_infos, trace); gf_isom_box_dump_done("ItemInfoBox", a, trace); return GF_OK; } GF_Err iloc_dump(GF_Box *a, FILE * trace) { u32 i, j, count, count2; GF_ItemLocationBox *p = (GF_ItemLocationBox*)a; gf_isom_box_dump_start(a, "ItemLocationBox", trace); fprintf(trace, "offset_size=\"%d\" length_size=\"%d\" base_offset_size=\"%d\" index_size=\"%d\">\n", p->offset_size, p->length_size, p->base_offset_size, p->index_size); count = 
gf_list_count(p->location_entries); for (i=0; i<count; i++) { GF_ItemLocationEntry *ie = (GF_ItemLocationEntry *)gf_list_get(p->location_entries, i); count2 = gf_list_count(ie->extent_entries); fprintf(trace, "<ItemLocationEntry item_ID=\"%d\" data_reference_index=\"%d\" base_offset=\""LLD"\" construction_method=\"%d\">\n", ie->item_ID, ie->data_reference_index, LLD_CAST ie->base_offset, ie->construction_method); for (j=0; j<count2; j++) { GF_ItemExtentEntry *iee = (GF_ItemExtentEntry *)gf_list_get(ie->extent_entries, j); fprintf(trace, "<ItemExtentEntry extent_offset=\""LLD"\" extent_length=\""LLD"\" extent_index=\""LLD"\" />\n", LLD_CAST iee->extent_offset, LLD_CAST iee->extent_length, LLD_CAST iee->extent_index); } fprintf(trace, "</ItemLocationEntry>\n"); } if (!p->size) { fprintf(trace, "<ItemLocationEntry item_ID=\"\" data_reference_index=\"\" base_offset=\"\" construction_method=\"\">\n"); fprintf(trace, "<ItemExtentEntry extent_offset=\"\" extent_length=\"\" extent_index=\"\" />\n"); fprintf(trace, "</ItemLocationEntry>\n"); } gf_isom_box_dump_done("ItemLocationBox", a, trace); return GF_OK; } GF_Err iref_dump(GF_Box *a, FILE * trace) { GF_ItemReferenceBox *p = (GF_ItemReferenceBox *)a; gf_isom_box_dump_start(a, "ItemReferenceBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(p->references, trace); gf_isom_box_dump_done("ItemReferenceBox", a, trace); return GF_OK; } GF_Err hinf_dump(GF_Box *a, FILE * trace) { // GF_HintInfoBox *p = (GF_HintInfoBox *)a; gf_isom_box_dump_start(a, "HintInfoBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("HintInfoBox", a, trace); return GF_OK; } GF_Err trpy_dump(GF_Box *a, FILE * trace) { GF_TRPYBox *p = (GF_TRPYBox *)a; gf_isom_box_dump_start(a, "LargeTotalRTPBytesBox", trace); fprintf(trace, "RTPBytesSent=\""LLD"\">\n", LLD_CAST p->nbBytes); gf_isom_box_dump_done("LargeTotalRTPBytesBox", a, trace); return GF_OK; } GF_Err totl_dump(GF_Box *a, FILE * trace) { GF_TOTLBox *p; p = (GF_TOTLBox *)a; 
gf_isom_box_dump_start(a, "TotalRTPBytesBox", trace);
	fprintf(trace, "RTPBytesSent=\"%d\">\n", p->nbBytes);
	gf_isom_box_dump_done("TotalRTPBytesBox", a, trace);
	return GF_OK;
}

/*dumps the 'nump' hint statistics box (64-bit packet count)*/
GF_Err nump_dump(GF_Box *a, FILE * trace)
{
	GF_NUMPBox *p;
	p = (GF_NUMPBox *)a;
	gf_isom_box_dump_start(a, "LargeTotalPacketBox", trace);
	fprintf(trace, "PacketsSent=\""LLD"\">\n", LLD_CAST p->nbPackets);
	gf_isom_box_dump_done("LargeTotalPacketBox", a, trace);
	return GF_OK;
}

/*dumps the 'npck' hint statistics box (32-bit packet count)*/
GF_Err npck_dump(GF_Box *a, FILE * trace)
{
	GF_NPCKBox *p;
	p = (GF_NPCKBox *)a;
	gf_isom_box_dump_start(a, "TotalPacketBox", trace);
	fprintf(trace, "packetsSent=\"%d\">\n", p->nbPackets);
	gf_isom_box_dump_done("TotalPacketBox", a, trace);
	return GF_OK;
}

/*dumps the 'tpyl' hint statistics box (64-bit media byte count)*/
GF_Err tpyl_dump(GF_Box *a, FILE * trace)
{
	GF_NTYLBox *p;
	p = (GF_NTYLBox *)a;
	gf_isom_box_dump_start(a, "LargeTotalMediaBytesBox", trace);
	fprintf(trace, "BytesSent=\""LLD"\">\n", LLD_CAST p->nbBytes);
	gf_isom_box_dump_done("LargeTotalMediaBytesBox", a, trace);
	return GF_OK;
}

/*dumps the 'tpay' hint statistics box (32-bit media byte count)*/
GF_Err tpay_dump(GF_Box *a, FILE * trace)
{
	GF_TPAYBox *p;
	p = (GF_TPAYBox *)a;
	gf_isom_box_dump_start(a, "TotalMediaBytesBox", trace);
	fprintf(trace, "BytesSent=\"%d\">\n", p->nbBytes);
	gf_isom_box_dump_done("TotalMediaBytesBox", a, trace);
	return GF_OK;
}

/*dumps the 'maxr' box: max data rate over the given granularity window*/
GF_Err maxr_dump(GF_Box *a, FILE * trace)
{
	GF_MAXRBox *p;
	p = (GF_MAXRBox *)a;
	gf_isom_box_dump_start(a, "MaxDataRateBox", trace);
	fprintf(trace, "MaxDataRate=\"%d\" Granularity=\"%d\">\n", p->maxDataRate, p->granularity);
	gf_isom_box_dump_done("MaxDataRateBox", a, trace);
	return GF_OK;
}

/*dumps the 'dmed' box: bytes sent from media tracks*/
GF_Err dmed_dump(GF_Box *a, FILE * trace)
{
	GF_DMEDBox *p;
	p = (GF_DMEDBox *)a;
	gf_isom_box_dump_start(a, "BytesFromMediaTrackBox", trace);
	fprintf(trace, "BytesSent=\""LLD"\">\n", LLD_CAST p->nbBytes);
	gf_isom_box_dump_done("BytesFromMediaTrackBox", a, trace);
	return GF_OK;
}

/*dumps the 'dimm' box: bytes sent as immediate data*/
GF_Err dimm_dump(GF_Box *a, FILE * trace)
{
	GF_DIMMBox *p;
	p = (GF_DIMMBox *)a;
	gf_isom_box_dump_start(a, "ImmediateDataBytesBox", trace);
	fprintf(trace, "BytesSent=\""LLD"\">\n",
LLD_CAST p->nbBytes);
	gf_isom_box_dump_done("ImmediateDataBytesBox", a, trace);
	return GF_OK;
}

/*dumps the 'drep' box: bytes sent as repeated data*/
GF_Err drep_dump(GF_Box *a, FILE * trace)
{
	GF_DREPBox *p;
	p = (GF_DREPBox *)a;
	gf_isom_box_dump_start(a, "RepeatedDataBytesBox", trace);
	fprintf(trace, "RepeatedBytes=\""LLD"\">\n", LLD_CAST p->nbBytes);
	gf_isom_box_dump_done("RepeatedDataBytesBox", a, trace);
	return GF_OK;
}

/*dumps the 'tssy' box: RTP timestamp synchronization method*/
GF_Err tssy_dump(GF_Box *a, FILE * trace)
{
	GF_TimeStampSynchronyBox *p = (GF_TimeStampSynchronyBox *)a;
	gf_isom_box_dump_start(a, "TimeStampSynchronyBox", trace);
	fprintf(trace, "timestamp_sync=\"%d\">\n", p->timestamp_sync);
	gf_isom_box_dump_done("TimeStampSynchronyBox", a, trace);
	return GF_OK;
}

/*dumps the 'rssr' box: received SSRC of an RTP reception hint track*/
GF_Err rssr_dump(GF_Box *a, FILE * trace)
{
	GF_ReceivedSsrcBox *p = (GF_ReceivedSsrcBox *)a;
	gf_isom_box_dump_start(a, "ReceivedSsrcBox", trace);
	fprintf(trace, "SSRC=\"%d\">\n", p->ssrc);
	gf_isom_box_dump_done("ReceivedSsrcBox", a, trace);
	return GF_OK;
}

/*dumps the 'tmin' box: smallest relative transmission time*/
GF_Err tmin_dump(GF_Box *a, FILE * trace)
{
	GF_TMINBox *p;
	p = (GF_TMINBox *)a;
	gf_isom_box_dump_start(a, "MinTransmissionTimeBox", trace);
	fprintf(trace, "MinimumTransmitTime=\"%d\">\n", p->minTime);
	gf_isom_box_dump_done("MinTransmissionTimeBox", a, trace);
	return GF_OK;
}

/*dumps the 'tmax' box: largest relative transmission time*/
GF_Err tmax_dump(GF_Box *a, FILE * trace)
{
	GF_TMAXBox *p;
	p = (GF_TMAXBox *)a;
	gf_isom_box_dump_start(a, "MaxTransmissionTimeBox", trace);
	fprintf(trace, "MaximumTransmitTime=\"%d\">\n", p->maxTime);
	gf_isom_box_dump_done("MaxTransmissionTimeBox", a, trace);
	return GF_OK;
}

/*dumps the 'pmax' box: largest hint packet size*/
GF_Err pmax_dump(GF_Box *a, FILE * trace)
{
	GF_PMAXBox *p;
	p = (GF_PMAXBox *)a;
	gf_isom_box_dump_start(a, "MaxPacketSizeBox", trace);
	fprintf(trace, "MaximumSize=\"%d\">\n", p->maxSize);
	gf_isom_box_dump_done("MaxPacketSizeBox", a, trace);
	return GF_OK;
}

/*dumps the 'dmax' box: largest hint packet duration*/
GF_Err dmax_dump(GF_Box *a, FILE * trace)
{
	GF_DMAXBox *p;
	p = (GF_DMAXBox *)a;
	gf_isom_box_dump_start(a, "MaxPacketDurationBox", trace);
	fprintf(trace, "MaximumDuration=\"%d\">\n", p->maxDur);
	gf_isom_box_dump_done("MaxPacketDurationBox", a, trace);
	return
GF_OK; } GF_Err payt_dump(GF_Box *a, FILE * trace) { GF_PAYTBox *p; p = (GF_PAYTBox *)a; gf_isom_box_dump_start(a, "PayloadTypeBox", trace); fprintf(trace, "PayloadID=\"%d\" PayloadString=\"%s\">\n", p->payloadCode, p->payloadString); gf_isom_box_dump_done("PayloadTypeBox", a, trace); return GF_OK; } GF_Err name_dump(GF_Box *a, FILE * trace) { GF_NameBox *p; p = (GF_NameBox *)a; gf_isom_box_dump_start(a, "NameBox", trace); fprintf(trace, "Name=\"%s\">\n", p->string); gf_isom_box_dump_done("NameBox", a, trace); return GF_OK; } GF_Err rely_dump(GF_Box *a, FILE * trace) { GF_RelyHintBox *p; p = (GF_RelyHintBox *)a; gf_isom_box_dump_start(a, "RelyTransmissionBox", trace); fprintf(trace, "Prefered=\"%d\" required=\"%d\">\n", p->prefered, p->required); gf_isom_box_dump_done("RelyTransmissionBox", a, trace); return GF_OK; } GF_Err snro_dump(GF_Box *a, FILE * trace) { GF_SeqOffHintEntryBox *p; p = (GF_SeqOffHintEntryBox *)a; gf_isom_box_dump_start(a, "PacketSequenceOffsetBox", trace); fprintf(trace, "SeqNumOffset=\"%d\">\n", p->SeqOffset); gf_isom_box_dump_done("PacketSequenceOffsetBox", a, trace); return GF_OK; } GF_Err tims_dump(GF_Box *a, FILE * trace) { GF_TSHintEntryBox *p; p = (GF_TSHintEntryBox *)a; gf_isom_box_dump_start(a, "RTPTimeScaleBox", trace); fprintf(trace, "TimeScale=\"%d\">\n", p->timeScale); gf_isom_box_dump_done("RTPTimeScaleBox", a, trace); return GF_OK; } GF_Err tsro_dump(GF_Box *a, FILE * trace) { GF_TimeOffHintEntryBox *p; p = (GF_TimeOffHintEntryBox *)a; gf_isom_box_dump_start(a, "TimeStampOffsetBox", trace); fprintf(trace, "TimeStampOffset=\"%d\">\n", p->TimeOffset); gf_isom_box_dump_done("TimeStampOffsetBox", a, trace); return GF_OK; } GF_Err ghnt_dump(GF_Box *a, FILE * trace) { char *name; GF_HintSampleEntryBox *p; p = (GF_HintSampleEntryBox *)a; if (a->type == GF_ISOM_BOX_TYPE_RTP_STSD) { name = "RTPHintSampleEntryBox"; } else if (a->type == GF_ISOM_BOX_TYPE_SRTP_STSD) { name = "SRTPHintSampleEntryBox"; } else if (a->type == 
/* Continuation of ghnt_dump(): picks the XML element name from the hint
 * sample entry 4CC, then writes the shared attributes and the
 * RTP-family (MaxPacketSize) or FDP (partition/FEC) specific ones.
 * NOTE(review): this chunk starts mid-function; the opening of ghnt_dump()
 * lies before this view. */
GF_ISOM_BOX_TYPE_FDP_STSD) { name = "FDPHintSampleEntryBox"; } else if (a->type == GF_ISOM_BOX_TYPE_RRTP_STSD) { name = "RTPReceptionHintSampleEntryBox"; } else if (a->type == GF_ISOM_BOX_TYPE_RTCP_STSD) { name = "RTCPReceptionHintSampleEntryBox"; } else { name = "GenericHintSampleEntryBox"; } gf_isom_box_dump_start(a, name, trace); fprintf(trace, "DataReferenceIndex=\"%d\" HintTrackVersion=\"%d\" LastCompatibleVersion=\"%d\"", p->dataReferenceIndex, p->HintTrackVersion, p->LastCompatibleVersion); if ((a->type == GF_ISOM_BOX_TYPE_RTP_STSD) || (a->type == GF_ISOM_BOX_TYPE_SRTP_STSD) || (a->type == GF_ISOM_BOX_TYPE_RRTP_STSD) || (a->type == GF_ISOM_BOX_TYPE_RTCP_STSD)) { fprintf(trace, " MaxPacketSize=\"%d\"", p->MaxPacketSize); } else if (a->type == GF_ISOM_BOX_TYPE_FDP_STSD) { fprintf(trace, " partition_entry_ID=\"%d\" FEC_overhead=\"%d\"", p->partition_entry_ID, p->FEC_overhead); } fprintf(trace, ">\n"); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err hnti_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "HintTrackInfoBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("HintTrackInfoBox", NULL, trace); return GF_OK; } GF_Err sdp_dump(GF_Box *a, FILE * trace) { GF_SDPBox *p = (GF_SDPBox *)a; gf_isom_box_dump_start(a, "SDPBox", trace); fprintf(trace, ">\n"); if (p->sdpText) fprintf(trace, "<!-- sdp text: %s -->\n", p->sdpText); gf_isom_box_dump_done("SDPBox", a, trace); return GF_OK; } GF_Err rtp_hnti_dump(GF_Box *a, FILE * trace) { GF_RTPBox *p = (GF_RTPBox *)a; gf_isom_box_dump_start(a, "RTPMovieHintInformationBox", trace); fprintf(trace, "descriptionformat=\"%s\">\n", gf_4cc_to_str(p->subType)); if (p->sdpText) fprintf(trace, "<!-- sdp text: %s -->\n", p->sdpText); gf_isom_box_dump_done("RTPMovieHintInformationBox", a, trace); return GF_OK; } GF_Err rtpo_dump(GF_Box *a, FILE * trace) { GF_RTPOBox *p; p = (GF_RTPOBox *)a; gf_isom_box_dump_start(a, "RTPTimeOffsetBox", trace); fprintf(trace, "PacketTimeOffset=\"%d\">\n", p->timeOffset); 
/* Tail of rtpo_dump(), then the movie-fragment dumpers: mvex (movie
 * extends container), mehd (fragment duration), sample_flags_dump()
 * (decodes the packed ISOBMFF sample-flags word into XML attributes)
 * and the start of trex_dump() (per-track defaults for fragments). */
gf_isom_box_dump_done("RTPTimeOffsetBox", a, trace); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Err mvex_dump(GF_Box *a, FILE * trace) { GF_MovieExtendsBox *p; p = (GF_MovieExtendsBox *)a; gf_isom_box_dump_start(a, "MovieExtendsBox", trace); fprintf(trace, ">\n"); if (p->mehd) gf_isom_box_dump(p->mehd, trace); gf_isom_box_array_dump(p->TrackExList, trace); gf_isom_box_array_dump(p->TrackExPropList, trace); gf_isom_box_dump_done("MovieExtendsBox", a, trace); return GF_OK; } GF_Err mehd_dump(GF_Box *a, FILE * trace) { GF_MovieExtendsHeaderBox *p = (GF_MovieExtendsHeaderBox*)a; gf_isom_box_dump_start(a, "MovieExtendsHeaderBox", trace); fprintf(trace, "fragmentDuration=\""LLD"\" >\n", LLD_CAST p->fragment_duration); gf_isom_box_dump_done("MovieExtendsHeaderBox", a, trace); return GF_OK; } void sample_flags_dump(const char *name, u32 sample_flags, FILE * trace) { fprintf(trace, "<%s", name); fprintf(trace, " IsLeading=\"%d\"", GF_ISOM_GET_FRAG_LEAD(sample_flags) ); fprintf(trace, " SampleDependsOn=\"%d\"", GF_ISOM_GET_FRAG_DEPENDS(sample_flags) ); fprintf(trace, " SampleIsDependedOn=\"%d\"", GF_ISOM_GET_FRAG_DEPENDED(sample_flags) ); fprintf(trace, " SampleHasRedundancy=\"%d\"", GF_ISOM_GET_FRAG_REDUNDANT(sample_flags) ); fprintf(trace, " SamplePadding=\"%d\"", GF_ISOM_GET_FRAG_PAD(sample_flags) ); fprintf(trace, " SampleSync=\"%d\"", GF_ISOM_GET_FRAG_SYNC(sample_flags)); fprintf(trace, " SampleDegradationPriority=\"%d\"", GF_ISOM_GET_FRAG_DEG(sample_flags)); fprintf(trace, "/>\n"); } GF_Err trex_dump(GF_Box *a, FILE * trace) { GF_TrackExtendsBox *p; p = (GF_TrackExtendsBox *)a; gf_isom_box_dump_start(a, "TrackExtendsBox", trace); fprintf(trace, "TrackID=\"%d\"", p->trackID); fprintf(trace, " SampleDescriptionIndex=\"%d\" SampleDuration=\"%d\" SampleSize=\"%d\"", p->def_sample_desc_index, p->def_sample_duration, p->def_sample_size); fprintf(trace, ">\n"); sample_flags_dump("DefaultSampleFlags", p->def_sample_flags, trace); 
/* Tail of trex_dump(), then trep (track extension properties), moof
 * (movie fragment container), mfhd (fragment sequence number) and traf
 * (track fragment: dumps tfhd/sdtp/tfdt, sample groups, runs, SAI and
 * sample encryption children). The parameter list of
 * frag_dump_sample_flags() is cut at the end of this chunk and completed
 * on the next line. */
gf_isom_box_dump_done("TrackExtendsBox", a, trace); return GF_OK; } GF_Err trep_dump(GF_Box *a, FILE * trace) { GF_TrackExtensionPropertiesBox *p = (GF_TrackExtensionPropertiesBox*)a; gf_isom_box_dump_start(a, "TrackExtensionPropertiesBox", trace); fprintf(trace, "TrackID=\"%d\">\n", p->trackID); gf_isom_box_dump_done("TrackExtensionPropertiesBox", a, trace); return GF_OK; } GF_Err moof_dump(GF_Box *a, FILE * trace) { GF_MovieFragmentBox *p; p = (GF_MovieFragmentBox *)a; gf_isom_box_dump_start(a, "MovieFragmentBox", trace); fprintf(trace, "TrackFragments=\"%d\">\n", gf_list_count(p->TrackList)); if (p->mfhd) gf_isom_box_dump(p->mfhd, trace); gf_isom_box_array_dump(p->TrackList, trace); gf_isom_box_dump_done("MovieFragmentBox", a, trace); return GF_OK; } GF_Err mfhd_dump(GF_Box *a, FILE * trace) { GF_MovieFragmentHeaderBox *p; p = (GF_MovieFragmentHeaderBox *)a; gf_isom_box_dump_start(a, "MovieFragmentHeaderBox", trace); fprintf(trace, "FragmentSequenceNumber=\"%d\">\n", p->sequence_number); gf_isom_box_dump_done("MovieFragmentHeaderBox", a, trace); return GF_OK; } GF_Err traf_dump(GF_Box *a, FILE * trace) { GF_TrackFragmentBox *p; p = (GF_TrackFragmentBox *)a; gf_isom_box_dump_start(a, "TrackFragmentBox", trace); fprintf(trace, ">\n"); if (p->tfhd) gf_isom_box_dump(p->tfhd, trace); if (p->sdtp) gf_isom_box_dump(p->sdtp, trace); if (p->tfdt) gf_isom_box_dump(p->tfdt, trace); if (p->sub_samples) gf_isom_box_array_dump(p->sub_samples, trace); if (p->sampleGroupsDescription) gf_isom_box_array_dump(p->sampleGroupsDescription, trace); if (p->sampleGroups) gf_isom_box_array_dump(p->sampleGroups, trace); gf_isom_box_array_dump(p->TrackRuns, trace); if (p->sai_sizes) gf_isom_box_array_dump(p->sai_sizes, trace); if (p->sai_offsets) gf_isom_box_array_dump(p->sai_offsets, trace); if (p->sample_encryption) gf_isom_box_dump(p->sample_encryption, trace); gf_isom_box_dump_done("TrackFragmentBox", a, trace); return GF_OK; } static void frag_dump_sample_flags(FILE * trace, u32 
/* Completes the frag_dump_sample_flags(FILE *trace, u32 flags) signature
 * started on the previous line: writes the decoded sample-flags fields as
 * attributes (no element open/close — callers embed it in their own tag).
 * Then tfhd_dump() (track fragment header: optional base offset, default
 * sample desc/duration/size/flags, gated by p->flags bits) and tfxd_dump()
 * (Microsoft Smooth Streaming absolute-time extension). */
flags) { fprintf(trace, " SamplePadding=\"%d\" Sync=\"%d\" DegradationPriority=\"%d\" IsLeading=\"%d\" DependsOn=\"%d\" IsDependedOn=\"%d\" HasRedundancy=\"%d\"", GF_ISOM_GET_FRAG_PAD(flags), GF_ISOM_GET_FRAG_SYNC(flags), GF_ISOM_GET_FRAG_DEG(flags), GF_ISOM_GET_FRAG_LEAD(flags), GF_ISOM_GET_FRAG_DEPENDS(flags), GF_ISOM_GET_FRAG_DEPENDED(flags), GF_ISOM_GET_FRAG_REDUNDANT(flags)); } GF_Err tfhd_dump(GF_Box *a, FILE * trace) { GF_TrackFragmentHeaderBox *p; p = (GF_TrackFragmentHeaderBox *)a; gf_isom_box_dump_start(a, "TrackFragmentHeaderBox", trace); fprintf(trace, "TrackID=\"%u\"", p->trackID); if (p->flags & GF_ISOM_TRAF_BASE_OFFSET) { fprintf(trace, " BaseDataOffset=\""LLU"\"", p->base_data_offset); } else { fprintf(trace, " BaseDataOffset=\"%s\"", (p->flags & GF_ISOM_MOOF_BASE_OFFSET) ? "moof" : "moof-or-previous-traf"); } if (p->flags & GF_ISOM_TRAF_SAMPLE_DESC) fprintf(trace, " SampleDescriptionIndex=\"%u\"", p->sample_desc_index); if (p->flags & GF_ISOM_TRAF_SAMPLE_DUR) fprintf(trace, " SampleDuration=\"%u\"", p->def_sample_duration); if (p->flags & GF_ISOM_TRAF_SAMPLE_SIZE) fprintf(trace, " SampleSize=\"%u\"", p->def_sample_size); if (p->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) { frag_dump_sample_flags(trace, p->def_sample_flags); } fprintf(trace, ">\n"); gf_isom_box_dump_done("TrackFragmentHeaderBox", a, trace); return GF_OK; } GF_Err tfxd_dump(GF_Box *a, FILE * trace) { GF_MSSTimeExtBox *ptr = (GF_MSSTimeExtBox*)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "MSSTimeExtensionBox", trace); fprintf(trace, "AbsoluteTime=\""LLU"\" FragmentDuration=\""LLU"\">\n", ptr->absolute_time_in_track_timescale, ptr->fragment_duration_in_track_timescale); fprintf(trace, "<FullBoxInfo Version=\"%d\" Flags=\"%d\"/>\n", ptr->version, ptr->flags); gf_isom_box_dump_done("MSSTimeExtensionBox", a, trace); return GF_OK; } GF_Err trun_dump(GF_Box *a, FILE * trace) { u32 i; GF_TrunEntry *ent; GF_TrackFragmentRunBox *p; p = (GF_TrackFragmentRunBox *)a; gf_isom_box_dump_start(a, 
/* trun_dump() body: per-run-entry dump of duration/size/CTS offset/flags,
 * each field gated by the trun flags word; version 0 prints CTS offsets
 * unsigned, later versions signed. Then the start of DTE_Dump(), which
 * walks a list of hint-track data table entries and prints one element
 * per entry source type (0=empty, 1=immediate, 2=sample, 3=stream desc). */
"TrackRunBox", trace); fprintf(trace, "SampleCount=\"%d\"", p->sample_count); if (p->flags & GF_ISOM_TRUN_DATA_OFFSET) fprintf(trace, " DataOffset=\"%d\"", p->data_offset); fprintf(trace, ">\n"); if (p->flags & GF_ISOM_TRUN_FIRST_FLAG) { sample_flags_dump("FirstSampleFlags", p->first_sample_flags, trace); } if (p->flags & (GF_ISOM_TRUN_DURATION|GF_ISOM_TRUN_SIZE|GF_ISOM_TRUN_CTS_OFFSET|GF_ISOM_TRUN_FLAGS)) { i=0; while ((ent = (GF_TrunEntry *)gf_list_enum(p->entries, &i))) { fprintf(trace, "<TrackRunEntry"); if (p->flags & GF_ISOM_TRUN_DURATION) fprintf(trace, " Duration=\"%u\"", ent->Duration); if (p->flags & GF_ISOM_TRUN_SIZE) fprintf(trace, " Size=\"%u\"", ent->size); if (p->flags & GF_ISOM_TRUN_CTS_OFFSET) { if (p->version == 0) fprintf(trace, " CTSOffset=\"%u\"", (u32) ent->CTS_Offset); else fprintf(trace, " CTSOffset=\"%d\"", ent->CTS_Offset); } if (p->flags & GF_ISOM_TRUN_FLAGS) { frag_dump_sample_flags(trace, ent->flags); } fprintf(trace, "/>\n"); } } else if (p->size) { fprintf(trace, "<!-- all default values used -->\n"); } else { fprintf(trace, "<TrackRunEntry Duration=\"\" Size=\"\" CTSOffset=\"\""); frag_dump_sample_flags(trace, 0); fprintf(trace, "/>\n"); } gf_isom_box_dump_done("TrackRunBox", a, trace); return GF_OK; } #endif #ifndef GPAC_DISABLE_ISOM_HINTING GF_Err DTE_Dump(GF_List *dte, FILE * trace) { GF_GenericDTE *p; GF_ImmediateDTE *i_p; GF_SampleDTE *s_p; GF_StreamDescDTE *sd_p; u32 i, count; count = gf_list_count(dte); for (i=0; i<count; i++) { p = (GF_GenericDTE *)gf_list_get(dte, i); switch (p->source) { case 0: fprintf(trace, "<EmptyDataEntry/>\n"); break; case 1: i_p = (GF_ImmediateDTE *) p; fprintf(trace, "<ImmediateDataEntry DataSize=\"%d\"/>\n", i_p->dataLength); break; case 2: s_p = (GF_SampleDTE *) p; fprintf(trace, "<SampleDataEntry DataSize=\"%d\" SampleOffset=\"%d\" SampleNumber=\"%d\" TrackReference=\"%d\"/>\n", s_p->dataLength, s_p->byteOffset, s_p->sampleNumber, s_p->trackRefIndex); break; case 3: sd_p = (GF_StreamDescDTE *) p; 
/* Tail of DTE_Dump() (case 3: stream description entry, plus the unknown
 * fallback), then gf_isom_dump_hint_sample(): fetches the requested sample,
 * validates that its sample description is a supported hint entry type
 * (RTP/SRTP/RRTP -> "RTP", RTCP, FDP), re-parses the sample payload as a
 * hint sample and dumps it as XML.
 * NOTE(review): gf_isom_hint_sample_new()'s result is used without a NULL
 * check, and a mismatch between s->packetCount and the parsed packet table
 * only emits a warning comment — confirm the parser guarantees are intended. */
fprintf(trace, "<SampleDescriptionEntry DataSize=\"%d\" DescriptionOffset=\"%d\" StreamDescriptionindex=\"%d\" TrackReference=\"%d\"/>\n", sd_p->dataLength, sd_p->byteOffset, sd_p->streamDescIndex, sd_p->trackRefIndex); break; default: fprintf(trace, "<UnknownTableEntry/>\n"); break; } } return GF_OK; } GF_EXPORT GF_Err gf_isom_dump_hint_sample(GF_ISOFile *the_file, u32 trackNumber, u32 SampleNum, FILE * trace) { GF_ISOSample *tmp; GF_HintSampleEntryBox *entry; u32 descIndex, count, count2, i; GF_Err e=GF_OK; GF_BitStream *bs; GF_HintSample *s; GF_TrackBox *trak; GF_RTPPacket *pck; char *szName; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !IsHintTrack(trak)) return GF_BAD_PARAM; tmp = gf_isom_get_sample(the_file, trackNumber, SampleNum, &descIndex); if (!tmp) return GF_BAD_PARAM; e = Media_GetSampleDesc(trak->Media, descIndex, (GF_SampleEntryBox **) &entry, &count); if (e) { gf_isom_sample_del(&tmp); return e; } //check we can read the sample switch (entry->type) { case GF_ISOM_BOX_TYPE_RTP_STSD: case GF_ISOM_BOX_TYPE_SRTP_STSD: case GF_ISOM_BOX_TYPE_RRTP_STSD: szName = "RTP"; break; case GF_ISOM_BOX_TYPE_RTCP_STSD: szName = "RCTP"; break; case GF_ISOM_BOX_TYPE_FDP_STSD: szName = "FDP"; break; default: gf_isom_sample_del(&tmp); return GF_NOT_SUPPORTED; } bs = gf_bs_new(tmp->data, tmp->dataLength, GF_BITSTREAM_READ); s = gf_isom_hint_sample_new(entry->type); s->trackID = trak->Header->trackID; s->sampleNumber = SampleNum; gf_isom_hint_sample_read(s, bs, tmp->dataLength); gf_bs_del(bs); count = gf_list_count(s->packetTable); fprintf(trace, "<%sHintSample SampleNumber=\"%d\" DecodingTime=\""LLD"\" RandomAccessPoint=\"%d\" PacketCount=\"%u\" reserved=\"%u\">\n", szName, SampleNum, LLD_CAST tmp->DTS, tmp->IsRAP, s->packetCount, s->reserved); if (s->hint_subtype==GF_ISOM_BOX_TYPE_FDP_STSD) { e = gf_isom_box_dump((GF_Box*) s, trace); goto err_exit; } if (s->packetCount != count) { fprintf(trace, "<!-- WARNING: Broken %s hint sample, %d entries 
/* Continuation: per-packet dump loop — RTCP packets get a compact element,
 * RTP packets dump header bits, TLV extension boxes and the DTE data table.
 * After err_exit the sample and hint sample are released. Then the tx3g
 * text-dump helpers begin: tx3g_dump_box_nobox() (text box rectangle) and
 * the head of tx3g_print_char_offsets() (adjusts char offsets for CR/LF
 * pairs removed during dumping). */
indicated but only %d parsed -->\n", szName, s->packetCount, count); } for (i=0; i<count; i++) { pck = (GF_RTPPacket *)gf_list_get(s->packetTable, i); if (pck->hint_subtype==GF_ISOM_BOX_TYPE_RTCP_STSD) { GF_RTCPPacket *rtcp_pck = (GF_RTCPPacket *) pck; fprintf(trace, "<RTCPHintPacket PacketNumber=\"%d\" V=\"%d\" P=\"%d\" Count=\"%d\" PayloadType=\"%d\" ", i+1, rtcp_pck->Version, rtcp_pck->Padding, rtcp_pck->Count, rtcp_pck->PayloadType); if (rtcp_pck->data) dump_data_attribute(trace, "payload", (char*)rtcp_pck->data, rtcp_pck->length); fprintf(trace, ">\n"); fprintf(trace, "</RTCPHintPacket>\n"); } else { fprintf(trace, "<RTPHintPacket PacketNumber=\"%d\" P=\"%d\" X=\"%d\" M=\"%d\" PayloadType=\"%d\"", i+1, pck->P_bit, pck->X_bit, pck->M_bit, pck->payloadType); fprintf(trace, " SequenceNumber=\"%d\" RepeatedPacket=\"%d\" DropablePacket=\"%d\" RelativeTransmissionTime=\"%d\" FullPacketSize=\"%d\">\n", pck->SequenceNumber, pck->R_bit, pck->B_bit, pck->relativeTransTime, gf_isom_hint_rtp_length(pck)); //TLV is made of Boxes count2 = gf_list_count(pck->TLV); if (count2) { fprintf(trace, "<PrivateExtensionTable EntryCount=\"%d\">\n", count2); gf_isom_box_array_dump(pck->TLV, trace); fprintf(trace, "</PrivateExtensionTable>\n"); } //DTE is made of NON boxes count2 = gf_list_count(pck->DataTable); if (count2) { fprintf(trace, "<PacketDataTable EntryCount=\"%d\">\n", count2); DTE_Dump(pck->DataTable, trace); fprintf(trace, "</PacketDataTable>\n"); } fprintf(trace, "</RTPHintPacket>\n"); } } err_exit: fprintf(trace, "</%sHintSample>\n", szName); gf_isom_sample_del(&tmp); gf_isom_hint_sample_del(s); return e; } #endif /*GPAC_DISABLE_ISOM_HINTING*/ static void tx3g_dump_box_nobox(FILE * trace, GF_BoxRecord *rec) { fprintf(trace, "<TextBox top=\"%d\" left=\"%d\" bottom=\"%d\" right=\"%d\"/>\n", rec->top, rec->left, rec->bottom, rec->right); } static void tx3g_print_char_offsets(FILE * trace, u32 start, u32 end, u32 *shift_offset, u32 so_count) { u32 i; if (shift_offset) { for 
(i=0; i<so_count; i++) { if (start>shift_offset[i]) { start --; break; } } for (i=0; i<so_count; i++) { if (end>shift_offset[i]) { end --; break; } } } if (start || end) fprintf(trace, "fromChar=\"%d\" toChar=\"%d\" ", start, end); } static void tx3g_dump_style_nobox(FILE * trace, GF_StyleRecord *rec, u32 *shift_offset, u32 so_count) { fprintf(trace, "<Style "); if (rec->startCharOffset || rec->endCharOffset) tx3g_print_char_offsets(trace, rec->startCharOffset, rec->endCharOffset, shift_offset, so_count); fprintf(trace, "styles=\""); if (!rec->style_flags) { fprintf(trace, "Normal"); } else { if (rec->style_flags & 1) fprintf(trace, "Bold "); if (rec->style_flags & 2) fprintf(trace, "Italic "); if (rec->style_flags & 4) fprintf(trace, "Underlined "); } fprintf(trace, "\" fontID=\"%d\" fontSize=\"%d\" ", rec->fontID, rec->font_size); tx3g_dump_rgba8(trace, "color", rec->text_color); fprintf(trace, "/>\n"); } static char *tx3g_format_time(u64 ts, u32 timescale, char *szDur, Bool is_srt) { u32 h, m, s, ms; ts = (u32) (ts*1000 / timescale); h = (u32) (ts / 3600000); m = (u32) (ts/ 60000) - h*60; s = (u32) (ts/1000) - h*3600 - m*60; ms = (u32) (ts) - h*3600000 - m*60000 - s*1000; if (is_srt) { sprintf(szDur, "%02d:%02d:%02d,%03d", h, m, s, ms); } else { sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms); } return szDur; } static GF_Err gf_isom_dump_ttxt_track(GF_ISOFile *the_file, u32 track, FILE *dump, Bool box_dump) { u32 i, j, count, di, nb_descs, shift_offset[20], so_count; u64 last_DTS; size_t len; GF_Box *a; Bool has_scroll; char szDur[100]; GF_Tx3gSampleEntryBox *txt; GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, track); if (!trak) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } txt = (GF_Tx3gSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, 0); switch (txt->type) { case GF_ISOM_BOX_TYPE_TX3G: 
/* gf_isom_dump_ttxt_track() continued: accept only tx3g/text entry types,
 * emit the TTXT XML header (standalone <TextStream> or embedded
 * <TextTrack> when box_dump), then dump each sample description — either
 * as raw boxes (box_dump) or as <TextSampleDescription> attributes for
 * justification, colors, scroll mode, font table and default box/style. */
case GF_ISOM_BOX_TYPE_TEXT: break; case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: default: return GF_BAD_PARAM; } if (box_dump) { fprintf(dump, "<TextTrack trackID=\"%d\" version=\"1.1\">\n", gf_isom_get_track_id(the_file, track) ); } else { fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- GPAC 3GPP Text Stream -->\n"); fprintf(dump, "<TextStream version=\"1.1\">\n"); } fprintf(dump, "<TextStreamHeader width=\"%d\" height=\"%d\" layer=\"%d\" translation_x=\"%d\" translation_y=\"%d\">\n", trak->Header->width >> 16 , trak->Header->height >> 16, trak->Header->layer, trak->Header->matrix[6] >> 16, trak->Header->matrix[7] >> 16); nb_descs = gf_list_count(trak->Media->information->sampleTable->SampleDescription->other_boxes); for (i=0; i<nb_descs; i++) { GF_Tx3gSampleEntryBox *txt = (GF_Tx3gSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, i); if (box_dump) { gf_isom_box_dump((GF_Box*) txt, dump); } else if (txt->type==GF_ISOM_BOX_TYPE_TX3G) { fprintf(dump, "<TextSampleDescription horizontalJustification=\""); switch (txt->horizontal_justification) { case 1: fprintf(dump, "center"); break; case -1: fprintf(dump, "right"); break; default: fprintf(dump, "left"); break; } fprintf(dump, "\" verticalJustification=\""); switch (txt->vertical_justification) { case 1: fprintf(dump, "center"); break; case -1: fprintf(dump, "bottom"); break; default: fprintf(dump, "top"); break; } fprintf(dump, "\" "); tx3g_dump_rgba8(dump, "backColor", txt->back_color); fprintf(dump, " verticalText=\"%s\"", (txt->displayFlags & GF_TXT_VERTICAL) ? "yes" : "no"); fprintf(dump, " fillTextRegion=\"%s\"", (txt->displayFlags & GF_TXT_FILL_REGION) ? "yes" : "no"); fprintf(dump, " continuousKaraoke=\"%s\"", (txt->displayFlags & GF_TXT_KARAOKE) ? 
/* tx3g sample description continued: scroll in/out flags and scroll mode,
 * the font table, and a fallback that resizes a degenerate default text
 * box to the full track dimensions; the 'text' (QT) entry variant gets a
 * reduced attribute set. */
"yes" : "no"); has_scroll = GF_FALSE; if (txt->displayFlags & GF_TXT_SCROLL_IN) { has_scroll = GF_TRUE; if (txt->displayFlags & GF_TXT_SCROLL_OUT) fprintf(dump, " scroll=\"InOut\""); else fprintf(dump, " scroll=\"In\""); } else if (txt->displayFlags & GF_TXT_SCROLL_OUT) { has_scroll = GF_TRUE; fprintf(dump, " scroll=\"Out\""); } else { fprintf(dump, " scroll=\"None\""); } if (has_scroll) { u32 mode = (txt->displayFlags & GF_TXT_SCROLL_DIRECTION)>>7; switch (mode) { case GF_TXT_SCROLL_CREDITS: fprintf(dump, " scrollMode=\"Credits\""); break; case GF_TXT_SCROLL_MARQUEE: fprintf(dump, " scrollMode=\"Marquee\""); break; case GF_TXT_SCROLL_DOWN: fprintf(dump, " scrollMode=\"Down\""); break; case GF_TXT_SCROLL_RIGHT: fprintf(dump, " scrollMode=\"Right\""); break; default: fprintf(dump, " scrollMode=\"Unknown\""); break; } } fprintf(dump, ">\n"); fprintf(dump, "<FontTable>\n"); if (txt->font_table) { for (j=0; j<txt->font_table->entry_count; j++) { fprintf(dump, "<FontTableEntry fontName=\"%s\" fontID=\"%d\"/>\n", txt->font_table->fonts[j].fontName, txt->font_table->fonts[j].fontID); } } fprintf(dump, "</FontTable>\n"); if ((txt->default_box.bottom == txt->default_box.top) || (txt->default_box.right == txt->default_box.left)) { txt->default_box.top = txt->default_box.left = 0; txt->default_box.right = trak->Header->width / 65536; txt->default_box.bottom = trak->Header->height / 65536; } tx3g_dump_box_nobox(dump, &txt->default_box); tx3g_dump_style_nobox(dump, &txt->default_style, NULL, 0); fprintf(dump, "</TextSampleDescription>\n"); } else { GF_TextSampleEntryBox *text = (GF_TextSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, i); fprintf(dump, "<TextSampleDescription horizontalJustification=\""); switch (text->textJustification) { case 1: fprintf(dump, "center"); break; case -1: fprintf(dump, "right"); break; default: fprintf(dump, "left"); break; } fprintf(dump, "\""); tx3g_dump_rgb16(dump, " backColor", 
/* QT 'text' entry tail (default box fix-up, scroll attributes), then the
 * per-sample loop begins: each sample is parsed back into a GF_TextSample
 * and dumped with its modifier attributes (highlight color, scroll delay,
 * wrap) unless box_dump is set. */
text->background_color); if ((text->default_box.bottom == text->default_box.top) || (text->default_box.right == text->default_box.left)) { text->default_box.top = text->default_box.left = 0; text->default_box.right = trak->Header->width / 65536; text->default_box.bottom = trak->Header->height / 65536; } if (text->displayFlags & GF_TXT_SCROLL_IN) { if (text->displayFlags & GF_TXT_SCROLL_OUT) fprintf(dump, " scroll=\"InOut\""); else fprintf(dump, " scroll=\"In\""); } else if (text->displayFlags & GF_TXT_SCROLL_OUT) { fprintf(dump, " scroll=\"Out\""); } else { fprintf(dump, " scroll=\"None\""); } fprintf(dump, ">\n"); tx3g_dump_box_nobox(dump, &text->default_box); fprintf(dump, "</TextSampleDescription>\n"); } } fprintf(dump, "</TextStreamHeader>\n"); last_DTS = 0; count = gf_isom_get_sample_count(the_file, track); for (i=0; i<count; i++) { GF_BitStream *bs; GF_TextSample *txt; GF_ISOSample *s = gf_isom_get_sample(the_file, track, i+1, &di); if (!s) continue; fprintf(dump, "<TextSample sampleTime=\"%s\" sampleDescriptionIndex=\"%d\"", tx3g_format_time(s->DTS, trak->Media->mediaHeader->timeScale, szDur, GF_FALSE), di); bs = gf_bs_new(s->data, s->dataLength, GF_BITSTREAM_READ); txt = gf_isom_parse_texte_sample(bs); gf_bs_del(bs); if (!box_dump) { if (txt->highlight_color) { fprintf(dump, " "); tx3g_dump_rgba8(dump, "highlightColor", txt->highlight_color->hil_color); } if (txt->scroll_delay) { Double delay = txt->scroll_delay->scroll_delay; delay /= trak->Media->mediaHeader->timeScale; fprintf(dump, " scrollDelay=\"%g\"", delay); } if (txt->wrap) fprintf(dump, " wrap=\"%s\"", (txt->wrap->wrap_flag==0x01) ? 
/* Sample text emission: decode the payload as UTF-16 (BOM-detected) or
 * UTF-8, XML-escape each character, convert line breaks, and record CR/LF
 * positions in shift_offset[] so style/modifier char offsets can be
 * corrected later.
 * NOTE(review): shift_offset has a fixed capacity of 20 and so_count is not
 * bounds-checked here — verify inputs cannot exceed 20 CR/LF pairs. */
"Automatic" : "None"); } so_count = 0; fprintf(dump, " xml:space=\"preserve\">"); if (!txt->len) { last_DTS = (u32) trak->Media->mediaHeader->duration; } else { unsigned short utf16Line[10000]; last_DTS = s->DTS; /*UTF16*/ if ((txt->len>2) && ((unsigned char) txt->text[0] == (unsigned char) 0xFE) && ((unsigned char) txt->text[1] == (unsigned char) 0xFF)) { /*copy 2 more chars because the lib always add 2 '0' at the end for UTF16 end of string*/ memcpy((char *) utf16Line, txt->text+2, sizeof(char) * (txt->len)); len = gf_utf8_wcslen((const u16*)utf16Line); } else { char *str; str = txt->text; len = gf_utf8_mbstowcs((u16*)utf16Line, 10000, (const char **) &str); } if (len != (size_t) -1) { utf16Line[len] = 0; for (j=0; j<len; j++) { if ((utf16Line[j]=='\n') || (utf16Line[j]=='\r') || (utf16Line[j]==0x85) || (utf16Line[j]==0x2028) || (utf16Line[j]==0x2029) ) { fprintf(dump, "\n"); if ((utf16Line[j]=='\r') && (utf16Line[j+1]=='\n')) { shift_offset[so_count] = j; so_count++; j++; } } else { switch (utf16Line[j]) { case '\'': fprintf(dump, "&apos;"); break; case '\"': fprintf(dump, "&quot;"); break; case '&': fprintf(dump, "&amp;"); break; case '>': fprintf(dump, "&gt;"); break; case '<': fprintf(dump, "&lt;"); break; default: if (utf16Line[j] < 128) { fprintf(dump, "%c", (u8) utf16Line[j]); } else { fprintf(dump, "&#%d;", utf16Line[j]); } break; } } } } } if (box_dump) { if (txt->highlight_color) gf_isom_box_dump((GF_Box*) txt->highlight_color, dump); if (txt->scroll_delay) gf_isom_box_dump((GF_Box*) txt->scroll_delay, dump); if (txt->wrap) gf_isom_box_dump((GF_Box*) txt->wrap, dump); if (txt->box) gf_isom_box_dump((GF_Box*) txt->box, dump); if (txt->styles) gf_isom_box_dump((GF_Box*) txt->styles, dump); } else { if (txt->box) tx3g_dump_box_nobox(dump, &txt->box->box); if (txt->styles) { for (j=0; j<txt->styles->entry_count; j++) { tx3g_dump_style_nobox(dump, &txt->styles->styles[j], shift_offset, so_count); } } } j=0; while ((a = (GF_Box *)gf_list_enum(txt->others, 
/* Remaining per-sample modifier boxes (highlight, hyperlink, blink,
 * karaoke) are dumped with char offsets corrected via shift_offset; the
 * function closes each <TextSample>, emits a final empty sample if the
 * track duration extends past the last DTS, and closes the stream/track
 * element. */
&j))) { if (box_dump) { gf_isom_box_dump((GF_Box*) a, dump); continue; } switch (a->type) { case GF_ISOM_BOX_TYPE_HLIT: fprintf(dump, "<Highlight "); tx3g_print_char_offsets(dump, ((GF_TextHighlightBox *)a)->startcharoffset, ((GF_TextHighlightBox *)a)->endcharoffset, shift_offset, so_count); fprintf(dump, "/>\n"); break; case GF_ISOM_BOX_TYPE_HREF: { GF_TextHyperTextBox *ht = (GF_TextHyperTextBox *)a; fprintf(dump, "<HyperLink "); tx3g_print_char_offsets(dump, ht->startcharoffset, ht->endcharoffset, shift_offset, so_count); fprintf(dump, "URL=\"%s\" URLToolTip=\"%s\"/>\n", ht->URL ? ht->URL : "", ht->URL_hint ? ht->URL_hint : ""); } break; case GF_ISOM_BOX_TYPE_BLNK: fprintf(dump, "<Blinking "); tx3g_print_char_offsets(dump, ((GF_TextBlinkBox *)a)->startcharoffset, ((GF_TextBlinkBox *)a)->endcharoffset, shift_offset, so_count); fprintf(dump, "/>\n"); break; case GF_ISOM_BOX_TYPE_KROK: { u32 k; Double t; GF_TextKaraokeBox *krok = (GF_TextKaraokeBox *)a; t = krok->highlight_starttime; t /= trak->Media->mediaHeader->timeScale; fprintf(dump, "<Karaoke startTime=\"%g\">\n", t); for (k=0; k<krok->nb_entries; k++) { t = krok->records[k].highlight_endtime; t /= trak->Media->mediaHeader->timeScale; fprintf(dump, "<KaraokeRange "); tx3g_print_char_offsets(dump, krok->records[k].start_charoffset, krok->records[k].end_charoffset, shift_offset, so_count); fprintf(dump, "endTime=\"%g\"/>\n", t); } fprintf(dump, "</Karaoke>\n"); } break; } } fprintf(dump, "</TextSample>\n"); gf_isom_sample_del(&s); gf_isom_delete_text_sample(txt); gf_set_progress("TTXT Extract", i, count); } if (last_DTS < trak->Media->mediaHeader->duration) { fprintf(dump, "<TextSample sampleTime=\"%s\" text=\"\" />\n", tx3g_format_time(trak->Media->mediaHeader->duration, trak->Media->mediaHeader->timeScale, szDur, GF_FALSE)); } if (box_dump) { fprintf(dump, "</TextTrack>\n"); } else { fprintf(dump, "</TextStream>\n"); } if (count) gf_set_progress("TTXT Extract", count, count); return GF_OK; } static GF_Err 
/* gf_isom_dump_srt_track(): exports a 3GPP text track as SubRip (.srt).
 * Each sample becomes a numbered cue whose end time is the next sample's
 * DTS (or the media duration for the last one); 2-byte samples (empty
 * text) are skipped. The payload is decoded as UTF-16 (BOM) or UTF-8. */
gf_isom_dump_srt_track(GF_ISOFile *the_file, u32 track, FILE *dump) { u32 i, j, k, count, di, len, ts, cur_frame; u64 start, end; GF_Tx3gSampleEntryBox *txtd; GF_BitStream *bs; char szDur[100]; GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, track); if (!trak) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } ts = trak->Media->mediaHeader->timeScale; cur_frame = 0; end = 0; count = gf_isom_get_sample_count(the_file, track); for (i=0; i<count; i++) { GF_TextSample *txt; GF_ISOSample *s = gf_isom_get_sample(the_file, track, i+1, &di); if (!s) continue; start = s->DTS; if (s->dataLength==2) { gf_isom_sample_del(&s); continue; } if (i+1<count) { GF_ISOSample *next = gf_isom_get_sample_info(the_file, track, i+2, NULL, NULL); if (next) { end = next->DTS; gf_isom_sample_del(&next); } } else { end = gf_isom_get_media_duration(the_file, track) ; } cur_frame++; fprintf(dump, "%d\n", cur_frame); tx3g_format_time(start, ts, szDur, GF_TRUE); fprintf(dump, "%s --> ", szDur); tx3g_format_time(end, ts, szDur, GF_TRUE); fprintf(dump, "%s\n", szDur); bs = gf_bs_new(s->data, s->dataLength, GF_BITSTREAM_READ); txt = gf_isom_parse_texte_sample(bs); gf_bs_del(bs); txtd = (GF_Tx3gSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, di-1); if (!txt->len) { fprintf(dump, "\n"); } else { u32 styles, char_num, new_styles, color, new_color; u16 utf16Line[10000]; /*UTF16*/ if ((txt->len>2) && ((unsigned char) txt->text[0] == (unsigned char) 0xFE) && ((unsigned char) txt->text[1] == (unsigned char) 0xFF)) { memcpy(utf16Line, txt->text+2, sizeof(char)*txt->len); ( ((char *)utf16Line)[txt->len] ) = 0; len = txt->len; } else { u8 *str = (u8 *) (txt->text); size_t res = gf_utf8_mbstowcs(utf16Line, 10000, (const char **) &str); if (res==(size_t)-1) return GF_NON_COMPLIANT_BITSTREAM; len = (u32) res; utf16Line[len] = 0; } char_num = 0; 
/* Per-character styling loop: resolves the style record covering the
 * current character (falling back to the description's default style),
 * opens/closes <b>/<i>/<u> tags on style transitions and <font> tags on
 * color changes, and converts line breaks; per-char UTF-16 -> UTF-8
 * conversion uses gf_utf8_wcstombs on a one-character buffer. */
styles = 0; new_styles = txtd->default_style.style_flags; color = new_color = txtd->default_style.text_color; for (j=0; j<len; j++) { Bool is_new_line; if (txt->styles) { new_styles = txtd->default_style.style_flags; new_color = txtd->default_style.text_color; for (k=0; k<txt->styles->entry_count; k++) { if (txt->styles->styles[k].startCharOffset>char_num) continue; if (txt->styles->styles[k].endCharOffset<char_num+1) continue; if (txt->styles->styles[k].style_flags & (GF_TXT_STYLE_ITALIC | GF_TXT_STYLE_BOLD | GF_TXT_STYLE_UNDERLINED)) { new_styles = txt->styles->styles[k].style_flags; new_color = txt->styles->styles[k].text_color; break; } } } if (new_styles != styles) { if ((new_styles & GF_TXT_STYLE_BOLD) && !(styles & GF_TXT_STYLE_BOLD)) fprintf(dump, "<b>"); if ((new_styles & GF_TXT_STYLE_ITALIC) && !(styles & GF_TXT_STYLE_ITALIC)) fprintf(dump, "<i>"); if ((new_styles & GF_TXT_STYLE_UNDERLINED) && !(styles & GF_TXT_STYLE_UNDERLINED)) fprintf(dump, "<u>"); if ((styles & GF_TXT_STYLE_UNDERLINED) && !(new_styles & GF_TXT_STYLE_UNDERLINED)) fprintf(dump, "</u>"); if ((styles & GF_TXT_STYLE_ITALIC) && !(new_styles & GF_TXT_STYLE_ITALIC)) fprintf(dump, "</i>"); if ((styles & GF_TXT_STYLE_BOLD) && !(new_styles & GF_TXT_STYLE_BOLD)) fprintf(dump, "</b>"); styles = new_styles; } if (new_color != color) { if (new_color ==txtd->default_style.text_color) { fprintf(dump, "</font>"); } else { fprintf(dump, "<font color=\"%s\">", gf_color_get_name(new_color) ); } color = new_color; } /*not sure if styles must be reseted at line breaks in srt...*/ is_new_line = GF_FALSE; if ((utf16Line[j]=='\n') || (utf16Line[j]=='\r') ) { if ((utf16Line[j]=='\r') && (utf16Line[j+1]=='\n')) j++; fprintf(dump, "\n"); is_new_line = GF_TRUE; } if (!is_new_line) { size_t sl; char szChar[30]; s16 swT[2], *swz; swT[0] = utf16Line[j]; swT[1] = 0; swz= (s16 *)swT; sl = gf_utf8_wcstombs(szChar, 30, (const unsigned short **) &swz); if (sl == (size_t)-1) sl=0; szChar[(u32) sl]=0; fprintf(dump, "%s", 
/* SRT cue tail: close any still-open style/font tags, finish the cue, and
 * report progress. Then gf_isom_dump_svg_track() begins: it writes an SVG
 * document plus a companion NHNT (.nhml) stream description next to it.
 * NOTE(review): gf_fopen(nhmlFileName, "wt") is used without a NULL check
 * before the first fprintf — verify the output directory is always
 * writable or add a guard upstream. */
szChar); } char_num++; } new_styles = 0; if (new_styles != styles) { if (styles & GF_TXT_STYLE_UNDERLINED) fprintf(dump, "</u>"); if (styles & GF_TXT_STYLE_ITALIC) fprintf(dump, "</i>"); if (styles & GF_TXT_STYLE_BOLD) fprintf(dump, "</b>"); // styles = 0; } if (color != txtd->default_style.text_color) { fprintf(dump, "</font>"); // color = txtd->default_style.text_color; } fprintf(dump, "\n"); } gf_isom_sample_del(&s); gf_isom_delete_text_sample(txt); fprintf(dump, "\n"); gf_set_progress("SRT Extract", i, count); } if (count) gf_set_progress("SRT Extract", i, count); return GF_OK; } static GF_Err gf_isom_dump_svg_track(GF_ISOFile *the_file, u32 track, FILE *dump) { char nhmlFileName[1024]; FILE *nhmlFile; u32 i, count, di, ts, cur_frame; u64 start, end; GF_BitStream *bs; GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, track); if (!trak) return GF_BAD_PARAM; switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: return GF_BAD_PARAM; } strcpy(nhmlFileName, the_file->fileName); strcat(nhmlFileName, ".nhml"); nhmlFile = gf_fopen(nhmlFileName, "wt"); fprintf(nhmlFile, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(nhmlFile, "<NHNTStream streamType=\"3\" objectTypeIndication=\"10\" timeScale=\"%d\" baseMediaFile=\"file.svg\" inRootOD=\"yes\">\n", trak->Media->mediaHeader->timeScale); fprintf(nhmlFile, "<NHNTSample isRAP=\"yes\" DTS=\"0\" xmlFrom=\"doc.start\" xmlTo=\"text_1.start\"/>\n"); ts = trak->Media->mediaHeader->timeScale; cur_frame = 0; end = 0; fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(dump, "<svg version=\"1.2\" baseProfile=\"tiny\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%d\" height=\"%d\" fill=\"black\">\n", trak->Header->width >> 16 , trak->Header->height >> 16); fprintf(dump, "<g transform=\"translate(%d, %d)\" text-anchor=\"middle\">\n", (trak->Header->width >> 16)/2 , (trak->Header->height >> 16)/2); 
/* gf_isom_dump_svg_track() continued: per-sample loop writes each text
 * sample as a hidden <text> element with <set>/<discard> timing animations
 * and mirrors an NHNTSample entry into the .nhml file; closes both files'
 * root elements at the end. */
count = gf_isom_get_sample_count(the_file, track); for (i=0; i<count; i++) { GF_TextSample *txt; GF_ISOSample *s = gf_isom_get_sample(the_file, track, i+1, &di); if (!s) continue; start = s->DTS; if (s->dataLength==2) { gf_isom_sample_del(&s); continue; } if (i+1<count) { GF_ISOSample *next = gf_isom_get_sample_info(the_file, track, i+2, NULL, NULL); if (next) { end = next->DTS; gf_isom_sample_del(&next); } } cur_frame++; bs = gf_bs_new(s->data, s->dataLength, GF_BITSTREAM_READ); txt = gf_isom_parse_texte_sample(bs); gf_bs_del(bs); if (!txt->len) continue; fprintf(dump, " <text id=\"text_%d\" display=\"none\">%s\n", cur_frame, txt->text); fprintf(dump, " <set attributeName=\"display\" to=\"inline\" begin=\"%g\" end=\"%g\"/>\n", ((s64)start*1.0)/ts, ((s64)end*1.0)/ts); fprintf(dump, " <discard begin=\"%g\"/>\n", ((s64)end*1.0)/ts); fprintf(dump, " </text>\n"); gf_isom_sample_del(&s); gf_isom_delete_text_sample(txt); fprintf(dump, "\n"); gf_set_progress("SRT Extract", i, count); if (i == count - 2) { fprintf(nhmlFile, "<NHNTSample isRAP=\"no\" DTS=\"%f\" xmlFrom=\"text_%d.start\" xmlTo=\"doc.end\"/>\n", ((s64)start*1.0), cur_frame); } else { fprintf(nhmlFile, "<NHNTSample isRAP=\"no\" DTS=\"%f\" xmlFrom=\"text_%d.start\" xmlTo=\"text_%d.start\"/>\n", ((s64)start*1.0), cur_frame, cur_frame+1); } } fprintf(dump, "</g>\n"); fprintf(dump, "</svg>\n"); fprintf(nhmlFile, "</NHNTStream>\n"); gf_fclose(nhmlFile); if (count) gf_set_progress("SRT Extract", i, count); return GF_OK; } GF_EXPORT GF_Err gf_isom_text_dump(GF_ISOFile *the_file, u32 track, FILE *dump, GF_TextDumpType dump_type) { switch (dump_type) { case GF_TEXTDUMPTYPE_SVG: return gf_isom_dump_svg_track(the_file, track, dump); case GF_TEXTDUMPTYPE_SRT: return gf_isom_dump_srt_track(the_file, track, dump); case GF_TEXTDUMPTYPE_TTXT: case GF_TEXTDUMPTYPE_TTXT_BOXES: return gf_isom_dump_ttxt_track(the_file, track, dump, (dump_type==GF_TEXTDUMPTYPE_TTXT_BOXES) ? 
/* Completes gf_isom_text_dump()'s dispatch, then the ISMA/common
 * protection box dumpers: sinf (protection scheme info container; the
 * children are dumped with their expected-type fallbacks), frma (original
 * format 4CC), schm (scheme type/version/URI) and schi (scheme information
 * children: ikms/isfm/islt/odkm/tenc/adkm). iKMS_dump()'s body is cut at
 * the end of this chunk. */
GF_TRUE : GF_FALSE); default: return GF_BAD_PARAM; } } /* ISMA 1.0 Encryption and Authentication V 1.0 dump */ GF_Err sinf_dump(GF_Box *a, FILE * trace) { GF_ProtectionSchemeInfoBox *p; p = (GF_ProtectionSchemeInfoBox *)a; gf_isom_box_dump_start(a, "ProtectionSchemeInfoBox", trace); fprintf(trace, ">\n"); if (p->size) gf_isom_box_dump_ex(p->original_format, trace, GF_ISOM_BOX_TYPE_FRMA); if (p->size) gf_isom_box_dump_ex(p->scheme_type, trace, GF_ISOM_BOX_TYPE_SCHM); if (p->size) gf_isom_box_dump_ex(p->info, trace, GF_ISOM_BOX_TYPE_SCHI); gf_isom_box_dump_done("ProtectionSchemeInfoBox", a, trace); return GF_OK; } GF_Err frma_dump(GF_Box *a, FILE * trace) { GF_OriginalFormatBox *p; p = (GF_OriginalFormatBox *)a; gf_isom_box_dump_start(a, "OriginalFormatBox", trace); fprintf(trace, "data_format=\"%s\">\n", gf_4cc_to_str(p->data_format)); gf_isom_box_dump_done("OriginalFormatBox", a, trace); return GF_OK; } GF_Err schm_dump(GF_Box *a, FILE * trace) { GF_SchemeTypeBox *p; p = (GF_SchemeTypeBox *)a; gf_isom_box_dump_start(a, "SchemeTypeBox", trace); fprintf(trace, "scheme_type=\"%s\" scheme_version=\"%d\" ", gf_4cc_to_str(p->scheme_type), p->scheme_version); if (p->URI) fprintf(trace, "scheme_uri=\"%s\"", p->URI); fprintf(trace, ">\n"); gf_isom_box_dump_done("SchemeTypeBox", a, trace); return GF_OK; } GF_Err schi_dump(GF_Box *a, FILE * trace) { GF_SchemeInformationBox *p; p = (GF_SchemeInformationBox *)a; gf_isom_box_dump_start(a, "SchemeInformationBox", trace); fprintf(trace, ">\n"); if (p->ikms) gf_isom_box_dump(p->ikms, trace); if (p->isfm) gf_isom_box_dump(p->isfm, trace); if (p->islt) gf_isom_box_dump(p->islt, trace); if (p->odkm) gf_isom_box_dump(p->odkm, trace); if (p->tenc) gf_isom_box_dump(p->tenc, trace); if (p->adkm) gf_isom_box_dump(p->adkm, trace); gf_isom_box_dump_done("SchemeInformationBox", a, trace); return GF_OK; } GF_Err iKMS_dump(GF_Box *a, FILE * trace) { GF_ISMAKMSBox *p; p = (GF_ISMAKMSBox *)a; gf_isom_box_dump_start(a, "KMSBox", trace); 
fprintf(trace, "kms_URI=\"%s\">\n", p->URI); gf_isom_box_dump_done("KMSBox", a, trace); return GF_OK; } GF_Err iSFM_dump(GF_Box *a, FILE * trace) { GF_ISMASampleFormatBox *p; const char *name = (a->type==GF_ISOM_BOX_TYPE_ISFM) ? "ISMASampleFormat" : "OMADRMAUFormatBox"; p = (GF_ISMASampleFormatBox *)a; gf_isom_box_dump_start(a, name, trace); fprintf(trace, "selective_encryption=\"%d\" key_indicator_length=\"%d\" IV_length=\"%d\">\n", p->selective_encryption, p->key_indicator_length, p->IV_length); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err iSLT_dump(GF_Box *a, FILE * trace) { GF_ISMACrypSaltBox *p = (GF_ISMACrypSaltBox *)a; gf_isom_box_dump_start(a, "ISMACrypSaltBox", trace); fprintf(trace, "salt=\""LLU"\">\n", p->salt); gf_isom_box_dump_done("ISMACrypSaltBox", a, trace); return GF_OK; } GF_EXPORT GF_Err gf_isom_dump_ismacryp_protection(GF_ISOFile *the_file, u32 trackNumber, FILE * trace) { u32 i, count; GF_SampleEntryBox *entry; GF_Err e; GF_TrackBox *trak; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak) return GF_BAD_PARAM; fprintf(trace, "<ISMACrypSampleDescriptions>\n"); count = gf_isom_get_sample_description_count(the_file, trackNumber); for (i=0; i<count; i++) { e = Media_GetSampleDesc(trak->Media, i+1, (GF_SampleEntryBox **) &entry, NULL); if (e) return e; switch (entry->type) { case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_ENCT: case GF_ISOM_BOX_TYPE_ENCS: break; default: continue; } gf_isom_box_dump(entry, trace); } fprintf(trace, "</ISMACrypSampleDescriptions>\n"); return GF_OK; } GF_EXPORT GF_Err gf_isom_dump_ismacryp_sample(GF_ISOFile *the_file, u32 trackNumber, u32 SampleNum, FILE * trace) { GF_ISOSample *samp; GF_ISMASample *isma_samp; u32 descIndex; samp = gf_isom_get_sample(the_file, trackNumber, SampleNum, &descIndex); if (!samp) return GF_BAD_PARAM; isma_samp = gf_isom_get_ismacryp_sample(the_file, trackNumber, samp, descIndex); if (!isma_samp) { gf_isom_sample_del(&samp); 
return GF_NOT_SUPPORTED; } fprintf(trace, "<ISMACrypSample SampleNumber=\"%d\" DataSize=\"%d\" CompositionTime=\""LLD"\" ", SampleNum, isma_samp->dataLength, LLD_CAST (samp->DTS+samp->CTS_Offset) ); if (samp->CTS_Offset) fprintf(trace, "DecodingTime=\""LLD"\" ", LLD_CAST samp->DTS); if (gf_isom_has_sync_points(the_file, trackNumber)) fprintf(trace, "RandomAccessPoint=\"%s\" ", samp->IsRAP ? "Yes" : "No"); fprintf(trace, "IsEncrypted=\"%s\" ", (isma_samp->flags & GF_ISOM_ISMA_IS_ENCRYPTED) ? "Yes" : "No"); if (isma_samp->flags & GF_ISOM_ISMA_IS_ENCRYPTED) { fprintf(trace, "IV=\""LLD"\" ", LLD_CAST isma_samp->IV); if (isma_samp->key_indicator) dump_data_attribute(trace, "KeyIndicator", (char*)isma_samp->key_indicator, isma_samp->KI_length); } fprintf(trace, "/>\n"); gf_isom_sample_del(&samp); gf_isom_ismacryp_delete_sample(isma_samp); return GF_OK; } /* end of ISMA 1.0 Encryption and Authentication V 1.0 */ /* Apple extensions */ GF_Err ilst_item_dump(GF_Box *a, FILE * trace) { GF_BitStream *bs; u32 val; Bool no_dump = GF_FALSE; char *name = "UnknownBox"; GF_ListItemBox *itune = (GF_ListItemBox *)a; switch (itune->type) { case GF_ISOM_BOX_TYPE_0xA9NAM: name = "NameBox"; break; case GF_ISOM_BOX_TYPE_0xA9CMT: name = "CommentBox"; break; case GF_ISOM_BOX_TYPE_0xA9DAY: name = "CreatedBox"; break; case GF_ISOM_BOX_TYPE_0xA9ART: name = "ArtistBox"; break; case GF_ISOM_BOX_TYPE_0xA9TRK: name = "TrackBox"; break; case GF_ISOM_BOX_TYPE_0xA9ALB: name = "AlbumBox"; break; case GF_ISOM_BOX_TYPE_0xA9COM: name = "CompositorBox"; break; case GF_ISOM_BOX_TYPE_0xA9WRT: name = "WriterBox"; break; case GF_ISOM_BOX_TYPE_0xA9TOO: name = "ToolBox"; break; case GF_ISOM_BOX_TYPE_0xA9CPY: name = "CopyrightBox"; break; case GF_ISOM_BOX_TYPE_0xA9DES: name = "DescriptionBox"; break; case GF_ISOM_BOX_TYPE_0xA9GEN: case GF_ISOM_BOX_TYPE_GNRE: name = "GenreBox"; break; case GF_ISOM_BOX_TYPE_aART: name = "AlbumArtistBox"; break; case GF_ISOM_BOX_TYPE_PGAP: name = "GapelessBox"; break; case 
GF_ISOM_BOX_TYPE_DISK: name = "DiskBox"; break; case GF_ISOM_BOX_TYPE_TRKN: name = "TrackNumberBox"; break; case GF_ISOM_BOX_TYPE_TMPO: name = "TempoBox"; break; case GF_ISOM_BOX_TYPE_CPIL: name = "CompilationBox"; break; case GF_ISOM_BOX_TYPE_COVR: name = "CoverArtBox"; no_dump = GF_TRUE; break; case GF_ISOM_BOX_TYPE_iTunesSpecificInfo: name = "iTunesSpecificBox"; no_dump = GF_TRUE; break; case GF_ISOM_BOX_TYPE_0xA9GRP: name = "GroupBox"; break; case GF_ISOM_ITUNE_ENCODER: name = "EncoderBox"; break; } gf_isom_box_dump_start(a, name, trace); if (!no_dump) { switch (itune->type) { case GF_ISOM_BOX_TYPE_DISK: case GF_ISOM_BOX_TYPE_TRKN: bs = gf_bs_new(itune->data->data, itune->data->dataSize, GF_BITSTREAM_READ); gf_bs_read_int(bs, 16); val = gf_bs_read_int(bs, 16); if (itune->type==GF_ISOM_BOX_TYPE_DISK) { fprintf(trace, " DiskNumber=\"%d\" NbDisks=\"%d\" ", val, gf_bs_read_int(bs, 16) ); } else { fprintf(trace, " TrackNumber=\"%d\" NbTracks=\"%d\" ", val, gf_bs_read_int(bs, 16) ); } gf_bs_del(bs); break; case GF_ISOM_BOX_TYPE_TMPO: bs = gf_bs_new(itune->data->data, itune->data->dataSize, GF_BITSTREAM_READ); fprintf(trace, " BPM=\"%d\" ", gf_bs_read_int(bs, 16) ); gf_bs_del(bs); break; case GF_ISOM_BOX_TYPE_CPIL: fprintf(trace, " IsCompilation=\"%s\" ", (itune->data && itune->data->data && itune->data->data[0]) ? "yes" : "no"); break; case GF_ISOM_BOX_TYPE_PGAP: fprintf(trace, " IsGapeless=\"%s\" ", (itune->data && itune->data->data && itune->data->data[0]) ? 
"yes" : "no"); break; default: if (strcmp(name, "UnknownBox") && itune->data && itune->data->data) { fprintf(trace, " value=\""); if (itune->data && itune->data->data[0]) { dump_data_string(trace, itune->data->data, itune->data->dataSize); } else { dump_data(trace, itune->data->data, itune->data->dataSize); } fprintf(trace, "\" "); } break; } } fprintf(trace, ">\n"); gf_isom_box_dump_done(name, a, trace); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_ADOBE GF_Err abst_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeBootstrapInfoBox *p = (GF_AdobeBootstrapInfoBox*)a; gf_isom_box_dump_start(a, "AdobeBootstrapBox", trace); fprintf(trace, "BootstrapinfoVersion=\"%u\" Profile=\"%u\" Live=\"%u\" Update=\"%u\" TimeScale=\"%u\" CurrentMediaTime=\""LLU"\" SmpteTimeCodeOffset=\""LLU"\" ", p->bootstrapinfo_version, p->profile, p->live, p->update, p->time_scale, p->current_media_time, p->smpte_time_code_offset); if (p->movie_identifier) fprintf(trace, "MovieIdentifier=\"%s\" ", p->movie_identifier); if (p->drm_data) fprintf(trace, "DrmData=\"%s\" ", p->drm_data); if (p->meta_data) fprintf(trace, "MetaData=\"%s\" ", p->meta_data); fprintf(trace, ">\n"); for (i=0; i<p->server_entry_count; i++) { char *str = (char*)gf_list_get(p->server_entry_table, i); fprintf(trace, "<ServerEntry>%s</ServerEntry>\n", str); } for (i=0; i<p->quality_entry_count; i++) { char *str = (char*)gf_list_get(p->quality_entry_table, i); fprintf(trace, "<QualityEntry>%s</QualityEntry>\n", str); } for (i=0; i<p->segment_run_table_count; i++) gf_isom_box_dump(gf_list_get(p->segment_run_table_entries, i), trace); for (i=0; i<p->fragment_run_table_count; i++) gf_isom_box_dump(gf_list_get(p->fragment_run_table_entries, i), trace); gf_isom_box_dump_done("AdobeBootstrapBox", a, trace); return GF_OK; } GF_Err afra_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeFragRandomAccessBox *p = (GF_AdobeFragRandomAccessBox*)a; gf_isom_box_dump_start(a, "AdobeFragmentRandomAccessBox", trace); fprintf(trace, "LongIDs=\"%u\" 
LongOffsets=\"%u\" TimeScale=\"%u\">\n", p->long_ids, p->long_offsets, p->time_scale); for (i=0; i<p->entry_count; i++) { GF_AfraEntry *ae = (GF_AfraEntry *)gf_list_get(p->local_access_entries, i); fprintf(trace, "<LocalAccessEntry Time=\""LLU"\" Offset=\""LLU"\"/>\n", ae->time, ae->offset); } for (i=0; i<p->global_entry_count; i++) { GF_GlobalAfraEntry *gae = (GF_GlobalAfraEntry *)gf_list_get(p->global_access_entries, i); fprintf(trace, "<GlobalAccessEntry Time=\""LLU"\" Segment=\"%u\" Fragment=\"%u\" AfraOffset=\""LLU"\" OffsetFromAfra=\""LLU"\"/>\n", gae->time, gae->segment, gae->fragment, gae->afra_offset, gae->offset_from_afra); } gf_isom_box_dump_done("AdobeFragmentRandomAccessBox", a, trace); return GF_OK; } GF_Err afrt_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeFragmentRunTableBox *p = (GF_AdobeFragmentRunTableBox*)a; gf_isom_box_dump_start(a, "AdobeFragmentRunTableBox", trace); fprintf(trace, "TimeScale=\"%u\">\n", p->timescale); for (i=0; i<p->quality_entry_count; i++) { char *str = (char*)gf_list_get(p->quality_segment_url_modifiers, i); fprintf(trace, "<QualityEntry>%s</QualityEntry>\n", str); } for (i=0; i<p->fragment_run_entry_count; i++) { GF_AdobeFragmentRunEntry *fre = (GF_AdobeFragmentRunEntry *)gf_list_get(p->fragment_run_entry_table, i); fprintf(trace, "<FragmentRunEntry FirstFragment=\"%u\" FirstFragmentTimestamp=\""LLU"\" FirstFragmentDuration=\"%u\"", fre->first_fragment, fre->first_fragment_timestamp, fre->fragment_duration); if (!fre->fragment_duration) fprintf(trace, " DiscontinuityIndicator=\"%u\"", fre->discontinuity_indicator); fprintf(trace, "/>\n"); } gf_isom_box_dump_done("AdobeFragmentRunTableBox", a, trace); return GF_OK; } GF_Err asrt_dump(GF_Box *a, FILE * trace) { u32 i; GF_AdobeSegmentRunTableBox *p = (GF_AdobeSegmentRunTableBox*)a; gf_isom_box_dump_start(a, "AdobeSegmentRunTableBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->quality_entry_count; i++) { char *str = (char*)gf_list_get(p->quality_segment_url_modifiers, 
i); fprintf(trace, "<QualityEntry>%s</QualityEntry>\n", str); } for (i=0; i<p->segment_run_entry_count; i++) { GF_AdobeSegmentRunEntry *sre = (GF_AdobeSegmentRunEntry *)gf_list_get(p->segment_run_entry_table, i); fprintf(trace, "<SegmentRunEntry FirstSegment=\"%u\" FragmentsPerSegment=\"%u\"/>\n", sre->first_segment, sre->fragment_per_segment); } gf_isom_box_dump_done("AdobeSegmentRunTableBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_ADOBE*/ GF_Err ilst_dump(GF_Box *a, FILE * trace) { u32 i; GF_Box *tag; GF_Err e; GF_ItemListBox *ptr; ptr = (GF_ItemListBox *)a; gf_isom_box_dump_start(a, "ItemListBox", trace); fprintf(trace, ">\n"); i=0; while ( (tag = (GF_Box*)gf_list_enum(ptr->other_boxes, &i))) { e = ilst_item_dump(tag, trace); if(e) return e; } gf_isom_box_dump_done("ItemListBox", NULL, trace); return GF_OK; } GF_Err databox_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "data", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("data", a, trace); return GF_OK; } GF_Err ohdr_dump(GF_Box *a, FILE * trace) { GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox *)a; gf_isom_box_dump_start(a, "OMADRMCommonHeaderBox", trace); fprintf(trace, "EncryptionMethod=\"%d\" PaddingScheme=\"%d\" PlaintextLength=\""LLD"\" ", ptr->EncryptionMethod, ptr->PaddingScheme, ptr->PlaintextLength); if (ptr->RightsIssuerURL) fprintf(trace, "RightsIssuerURL=\"%s\" ", ptr->RightsIssuerURL); if (ptr->ContentID) fprintf(trace, "ContentID=\"%s\" ", ptr->ContentID); if (ptr->TextualHeaders) { u32 i, offset; char *start = ptr->TextualHeaders; fprintf(trace, "TextualHeaders=\""); i=offset=0; while (i<ptr->TextualHeadersLen) { if (start[i]==0) { fprintf(trace, "%s ", start+offset); offset=i+1; } i++; } fprintf(trace, "%s\" ", start+offset); } fprintf(trace, ">\n"); gf_isom_box_dump_done("OMADRMCommonHeaderBox", a, trace); return GF_OK; } GF_Err grpi_dump(GF_Box *a, FILE * trace) { GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)a; gf_isom_box_dump_start(a, 
"OMADRMGroupIDBox", trace); fprintf(trace, "GroupID=\"%s\" EncryptionMethod=\"%d\" GroupKey=\" ", ptr->GroupID, ptr->GKEncryptionMethod); if (ptr->GroupKey) dump_data(trace, ptr->GroupKey, ptr->GKLength); fprintf(trace, "\">\n"); gf_isom_box_dump_done("OMADRMGroupIDBox", a, trace); return GF_OK; } GF_Err mdri_dump(GF_Box *a, FILE * trace) { //GF_OMADRMMutableInformationBox *ptr = (GF_OMADRMMutableInformationBox*)a; gf_isom_box_dump_start(a, "OMADRMMutableInformationBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("OMADRMMutableInformationBox", a, trace); return GF_OK; } GF_Err odtt_dump(GF_Box *a, FILE * trace) { GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox *)a; gf_isom_box_dump_start(a, "OMADRMTransactionTrackingBox", trace); fprintf(trace, "TransactionID=\""); dump_data(trace, ptr->TransactionID, 16); fprintf(trace, "\">\n"); gf_isom_box_dump_done("OMADRMTransactionTrackingBox", a, trace); return GF_OK; } GF_Err odrb_dump(GF_Box *a, FILE * trace) { GF_OMADRMRightsObjectBox*ptr = (GF_OMADRMRightsObjectBox*)a; gf_isom_box_dump_start(a, "OMADRMRightsObjectBox", trace); fprintf(trace, "OMARightsObject=\""); dump_data(trace, ptr->oma_ro, ptr->oma_ro_size); fprintf(trace, "\">\n"); gf_isom_box_dump_done("OMADRMRightsObjectBox", a, trace); return GF_OK; } GF_Err odkm_dump(GF_Box *a, FILE * trace) { GF_OMADRMKMSBox *ptr = (GF_OMADRMKMSBox*)a; gf_isom_box_dump_start(a, "OMADRMKMSBox", trace); fprintf(trace, ">\n"); if (ptr->hdr) gf_isom_box_dump((GF_Box *)ptr->hdr, trace); if (ptr->fmt) gf_isom_box_dump((GF_Box *)ptr->fmt, trace); gf_isom_box_dump_done("OMADRMKMSBox", a, trace); return GF_OK; } GF_Err pasp_dump(GF_Box *a, FILE * trace) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)a; gf_isom_box_dump_start(a, "PixelAspectRatioBox", trace); fprintf(trace, "hSpacing=\"%d\" vSpacing=\"%d\" >\n", ptr->hSpacing, ptr->vSpacing); gf_isom_box_dump_done("PixelAspectRatioBox", a, trace); return GF_OK; } GF_Err clap_dump(GF_Box *a, 
FILE * trace) { GF_CleanAppertureBox *ptr = (GF_CleanAppertureBox*)a; gf_isom_box_dump_start(a, "CleanAppertureBox", trace); fprintf(trace, "cleanApertureWidthN=\"%d\" cleanApertureWidthD=\"%d\" ", ptr->cleanApertureWidthN, ptr->cleanApertureWidthD); fprintf(trace, "cleanApertureHeightN=\"%d\" cleanApertureHeightD=\"%d\" ", ptr->cleanApertureHeightN, ptr->cleanApertureHeightD); fprintf(trace, "horizOffN=\"%d\" horizOffD=\"%d\" ", ptr->horizOffN, ptr->horizOffD); fprintf(trace, "vertOffN=\"%d\" vertOffD=\"%d\"", ptr->vertOffN, ptr->vertOffD); fprintf(trace, ">\n"); gf_isom_box_dump_done("CleanAppertureBox", a, trace); return GF_OK; } GF_Err tsel_dump(GF_Box *a, FILE * trace) { u32 i; GF_TrackSelectionBox *ptr = (GF_TrackSelectionBox *)a; gf_isom_box_dump_start(a, "TrackSelectionBox", trace); fprintf(trace, "switchGroup=\"%d\" >\n", ptr->switchGroup); for (i=0; i<ptr->attributeListCount; i++) { fprintf(trace, "<TrackSelectionCriteria value=\"%s\"/>\n", gf_4cc_to_str(ptr->attributeList[i]) ); } if (!ptr->size) fprintf(trace, "<TrackSelectionCriteria value=\"\"/>\n"); gf_isom_box_dump_done("TrackSelectionBox", a, trace); return GF_OK; } GF_Err metx_dump(GF_Box *a, FILE * trace) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)a; const char *name; switch (ptr->type) { case GF_ISOM_BOX_TYPE_METX: name = "XMLMetaDataSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_METT: name = "TextMetaDataSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_SBTT: name = "SubtitleSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_STXT: name = "SimpleTextSampleEntryBox"; break; case GF_ISOM_BOX_TYPE_STPP: name = "XMLSubtitleSampleEntryBox"; break; default: name = "UnknownTextSampleEntryBox"; break; } gf_isom_box_dump_start(a, name, trace); if (ptr->type==GF_ISOM_BOX_TYPE_METX) { fprintf(trace, "namespace=\"%s\" ", ptr->xml_namespace); if (ptr->xml_schema_loc) fprintf(trace, "schema_location=\"%s\" ", ptr->xml_schema_loc); if (ptr->content_encoding) fprintf(trace, "content_encoding=\"%s\" ", 
ptr->content_encoding); } else if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { fprintf(trace, "namespace=\"%s\" ", ptr->xml_namespace); if (ptr->xml_schema_loc) fprintf(trace, "schema_location=\"%s\" ", ptr->xml_schema_loc); if (ptr->mime_type) fprintf(trace, "auxiliary_mime_types=\"%s\" ", ptr->mime_type); } //mett, sbtt, stxt else { fprintf(trace, "mime_type=\"%s\" ", ptr->mime_type); if (ptr->content_encoding) fprintf(trace, "content_encoding=\"%s\" ", ptr->content_encoding); } fprintf(trace, ">\n"); if ((ptr->type!=GF_ISOM_BOX_TYPE_METX) && (ptr->type!=GF_ISOM_BOX_TYPE_STPP) ) { if (ptr->config) gf_isom_box_dump(ptr->config, trace); } gf_isom_box_array_dump(ptr->protections, trace); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err txtc_dump(GF_Box *a, FILE * trace) { GF_TextConfigBox *ptr = (GF_TextConfigBox*)a; const char *name = "TextConfigBox"; gf_isom_box_dump_start(a, name, trace); fprintf(trace, ">\n"); if (ptr->config) fprintf(trace, "<![CDATA[%s]]>", ptr->config); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err dims_dump(GF_Box *a, FILE * trace) { GF_DIMSSampleEntryBox *p = (GF_DIMSSampleEntryBox*)a; gf_isom_box_dump_start(a, "DIMSSampleEntryBox", trace); fprintf(trace, "dataReferenceIndex=\"%d\">\n", p->dataReferenceIndex); if (p->config) gf_isom_box_dump(p->config, trace); if (p->scripts) gf_isom_box_dump(p->scripts, trace); gf_isom_box_array_dump(p->protections, trace); gf_isom_box_dump_done("DIMSSampleEntryBox", a, trace); return GF_OK; } GF_Err diST_dump(GF_Box *a, FILE * trace) { GF_DIMSScriptTypesBox *p = (GF_DIMSScriptTypesBox*)a; gf_isom_box_dump_start(a, "DIMSScriptTypesBox", trace); fprintf(trace, "types=\"%s\">\n", p->content_script_types); gf_isom_box_dump_done("DIMSScriptTypesBox", a, trace); return GF_OK; } GF_Err dimC_dump(GF_Box *a, FILE * trace) { GF_DIMSSceneConfigBox *p = (GF_DIMSSceneConfigBox *)a; gf_isom_box_dump_start(a, "DIMSSceneConfigBox", trace); fprintf(trace, "profile=\"%d\" level=\"%d\" 
pathComponents=\"%d\" useFullRequestHosts=\"%d\" streamType=\"%d\" containsRedundant=\"%d\" textEncoding=\"%s\" contentEncoding=\"%s\" >\n", p->profile, p->level, p->pathComponents, p->fullRequestHost, p->streamType, p->containsRedundant, p->textEncoding, p->contentEncoding); gf_isom_box_dump_done("DIMSSceneConfigBox", a, trace); return GF_OK; } GF_Err dac3_dump(GF_Box *a, FILE * trace) { GF_AC3ConfigBox *p = (GF_AC3ConfigBox *)a; if (p->cfg.is_ec3) { u32 i; a->type = GF_ISOM_BOX_TYPE_DEC3; gf_isom_box_dump_start(a, "EC3SpecificBox", trace); a->type = GF_ISOM_BOX_TYPE_DAC3; fprintf(trace, "nb_streams=\"%d\" data_rate=\"%d\">\n", p->cfg.nb_streams, p->cfg.brcode); for (i=0; i<p->cfg.nb_streams; i++) { fprintf(trace, "<EC3StreamConfig fscod=\"%d\" bsid=\"%d\" bsmod=\"%d\" acmod=\"%d\" lfon=\"%d\" num_sub_dep=\"%d\" chan_loc=\"%d\"/>\n", p->cfg.streams[i].fscod, p->cfg.streams[i].bsid, p->cfg.streams[i].bsmod, p->cfg.streams[i].acmod, p->cfg.streams[i].lfon, p->cfg.streams[i].nb_dep_sub, p->cfg.streams[i].chan_loc); } gf_isom_box_dump_done("EC3SpecificBox", a, trace); } else { gf_isom_box_dump_start(a, "AC3SpecificBox", trace); fprintf(trace, "fscod=\"%d\" bsid=\"%d\" bsmod=\"%d\" acmod=\"%d\" lfon=\"%d\" bit_rate_code=\"%d\">\n", p->cfg.streams[0].fscod, p->cfg.streams[0].bsid, p->cfg.streams[0].bsmod, p->cfg.streams[0].acmod, p->cfg.streams[0].lfon, p->cfg.brcode); gf_isom_box_dump_done("AC3SpecificBox", a, trace); } return GF_OK; } GF_Err lsrc_dump(GF_Box *a, FILE * trace) { GF_LASERConfigurationBox *p = (GF_LASERConfigurationBox *)a; gf_isom_box_dump_start(a, "LASeRConfigurationBox", trace); dump_data_attribute(trace, "LASeRHeader", p->hdr, p->hdr_size); fprintf(trace, ">"); gf_isom_box_dump_done("LASeRConfigurationBox", a, trace); return GF_OK; } GF_Err lsr1_dump(GF_Box *a, FILE * trace) { GF_LASeRSampleEntryBox *p = (GF_LASeRSampleEntryBox*)a; gf_isom_box_dump_start(a, "LASeRSampleEntryBox", trace); fprintf(trace, "DataReferenceIndex=\"%d\">\n", 
p->dataReferenceIndex); if (p->lsr_config) gf_isom_box_dump(p->lsr_config, trace); if (p->descr) gf_isom_box_dump(p->descr, trace); gf_isom_box_dump_done("LASeRSampleEntryBox", a, trace); return GF_OK; } GF_Err sidx_dump(GF_Box *a, FILE * trace) { u32 i; GF_SegmentIndexBox *p = (GF_SegmentIndexBox *)a; gf_isom_box_dump_start(a, "SegmentIndexBox", trace); fprintf(trace, "reference_ID=\"%d\" timescale=\"%d\" earliest_presentation_time=\""LLD"\" first_offset=\""LLD"\" ", p->reference_ID, p->timescale, p->earliest_presentation_time, p->first_offset); fprintf(trace, ">\n"); for (i=0; i<p->nb_refs; i++) { fprintf(trace, "<Reference type=\"%d\" size=\"%d\" duration=\"%d\" startsWithSAP=\"%d\" SAP_type=\"%d\" SAPDeltaTime=\"%d\"/>\n", p->refs[i].reference_type, p->refs[i].reference_size, p->refs[i].subsegment_duration, p->refs[i].starts_with_SAP, p->refs[i].SAP_type, p->refs[i].SAP_delta_time); } if (!p->size) { fprintf(trace, "<Reference type=\"\" size=\"\" duration=\"\" startsWithSAP=\"\" SAP_type=\"\" SAPDeltaTime=\"\"/>\n"); } gf_isom_box_dump_done("SegmentIndexBox", a, trace); return GF_OK; } GF_Err ssix_dump(GF_Box *a, FILE * trace) { u32 i, j; GF_SubsegmentIndexBox *p = (GF_SubsegmentIndexBox *)a; gf_isom_box_dump_start(a, "SubsegmentIndexBox", trace); fprintf(trace, "subsegment_count=\"%d\" >\n", p->subsegment_count); for (i = 0; i < p->subsegment_count; i++) { fprintf(trace, "<Subsegment range_count=\"%d\">\n", p->subsegments[i].range_count); for (j = 0; j < p->subsegments[i].range_count; j++) { fprintf(trace, "<Range level=\"%d\" range_size=\"%d\"/>\n", p->subsegments[i].levels[j], p->subsegments[i].range_sizes[j]); } fprintf(trace, "</Subsegment>\n"); } if (!p->size) { fprintf(trace, "<Subsegment range_count=\"\">\n"); fprintf(trace, "<Range level=\"\" range_size=\"\"/>\n"); fprintf(trace, "</Subsegment>\n"); } gf_isom_box_dump_done("SubsegmentIndexBox", a, trace); return GF_OK; } GF_Err leva_dump(GF_Box *a, FILE * trace) { u32 i; GF_LevelAssignmentBox *p = 
(GF_LevelAssignmentBox *)a; gf_isom_box_dump_start(a, "LevelAssignmentBox", trace); fprintf(trace, "level_count=\"%d\" >\n", p->level_count); for (i = 0; i < p->level_count; i++) { fprintf(trace, "<Assignement track_id=\"%d\" padding_flag=\"%d\" assignement_type=\"%d\" grouping_type=\"%s\" grouping_type_parameter=\"%d\" sub_track_id=\"%d\" />\n", p->levels[i].track_id, p->levels[i].padding_flag, p->levels[i].type, gf_4cc_to_str(p->levels[i].grouping_type) , p->levels[i].grouping_type_parameter, p->levels[i].sub_track_id); } if (!p->size) { fprintf(trace, "<Assignement track_id=\"\" padding_flag=\"\" assignement_type=\"\" grouping_type=\"\" grouping_type_parameter=\"\" sub_track_id=\"\" />\n"); } gf_isom_box_dump_done("LevelAssignmentBox", a, trace); return GF_OK; } GF_Err strk_dump(GF_Box *a, FILE * trace) { GF_SubTrackBox *p = (GF_SubTrackBox *)a; gf_isom_box_dump_start(a, "SubTrackBox", trace); fprintf(trace, ">\n"); if (p->info) { gf_isom_box_dump(p->info, trace); } gf_isom_box_dump_done("SubTrackBox", a, trace); return GF_OK; } GF_Err stri_dump(GF_Box *a, FILE * trace) { u32 i; GF_SubTrackInformationBox *p = (GF_SubTrackInformationBox *)a; gf_isom_box_dump_start(a, "SubTrackInformationBox", trace); fprintf(trace, "switch_group=\"%d\" alternate_group=\"%d\" sub_track_id=\"%d\">\n", p->switch_group, p->alternate_group, p->sub_track_id); for (i = 0; i < p->attribute_count; i++) { fprintf(trace, "<SubTrackInformationAttribute value=\"%s\"/>\n", gf_4cc_to_str(p->attribute_list[i]) ); } if (!p->size) fprintf(trace, "<SubTrackInformationAttribute value=\"\"/>\n"); gf_isom_box_dump_done("SubTrackInformationBox", a, trace); return GF_OK; } GF_Err stsg_dump(GF_Box *a, FILE * trace) { u32 i; GF_SubTrackSampleGroupBox *p = (GF_SubTrackSampleGroupBox *)a; gf_isom_box_dump_start(a, "SubTrackSampleGroupBox", trace); if (p->grouping_type) fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(p->grouping_type) ); fprintf(trace, ">\n"); for (i = 0; i < p->nb_groups; i++) { 
fprintf(trace, "<SubTrackSampleGroupBoxEntry group_description_index=\"%d\"/>\n", p->group_description_index[i]); } if (!p->size) fprintf(trace, "<SubTrackSampleGroupBoxEntry group_description_index=\"\"/>\n"); gf_isom_box_dump_done("SubTrackSampleGroupBox", a, trace); return GF_OK; } GF_Err pcrb_dump(GF_Box *a, FILE * trace) { u32 i; GF_PcrInfoBox *p = (GF_PcrInfoBox *)a; gf_isom_box_dump_start(a, "MPEG2TSPCRInfoBox", trace); fprintf(trace, "subsegment_count=\"%d\">\n", p->subsegment_count); for (i=0; i<p->subsegment_count; i++) { fprintf(trace, "<PCRInfo PCR=\""LLU"\" />\n", p->pcr_values[i]); } if (!p->size) { fprintf(trace, "<PCRInfo PCR=\"\" />\n"); } gf_isom_box_dump_done("MPEG2TSPCRInfoBox", a, trace); return GF_OK; } GF_Err subs_dump(GF_Box *a, FILE * trace) { u32 entry_count, i, j; u16 subsample_count; GF_SubSampleInfoEntry *pSamp; GF_SubSampleEntry *pSubSamp; GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) a; if (!a) return GF_BAD_PARAM; entry_count = gf_list_count(ptr->Samples); gf_isom_box_dump_start(a, "SubSampleInformationBox", trace); fprintf(trace, "EntryCount=\"%d\">\n", entry_count); for (i=0; i<entry_count; i++) { pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); fprintf(trace, "<SampleEntry SampleDelta=\"%d\" SubSampleCount=\"%d\">\n", pSamp->sample_delta, subsample_count); for (j=0; j<subsample_count; j++) { pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, j); fprintf(trace, "<SubSample Size=\"%u\" Priority=\"%u\" Discardable=\"%d\" Reserved=\"%08X\"/>\n", pSubSamp->subsample_size, pSubSamp->subsample_priority, pSubSamp->discardable, pSubSamp->reserved); } fprintf(trace, "</SampleEntry>\n"); } if (!ptr->size) { fprintf(trace, "<SampleEntry SampleDelta=\"\" SubSampleCount=\"\">\n"); fprintf(trace, "<SubSample Size=\"\" Priority=\"\" Discardable=\"\" Reserved=\"\"/>\n"); fprintf(trace, "</SampleEntry>\n"); } 
gf_isom_box_dump_done("SubSampleInformationBox", a, trace); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Err tfdt_dump(GF_Box *a, FILE * trace) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "TrackFragmentBaseMediaDecodeTimeBox", trace); fprintf(trace, "baseMediaDecodeTime=\""LLD"\">\n", ptr->baseMediaDecodeTime); gf_isom_box_dump_done("TrackFragmentBaseMediaDecodeTimeBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ GF_Err rvcc_dump(GF_Box *a, FILE * trace) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "RVCConfigurationBox", trace); fprintf(trace, "predefined=\"%d\"", ptr->predefined_rvc_config); if (! ptr->predefined_rvc_config) fprintf(trace, " rvc_meta_idx=\"%d\"", ptr->rvc_meta_idx); fprintf(trace, ">\n"); gf_isom_box_dump_done("RVCConfigurationBox", a, trace); return GF_OK; } GF_Err sbgp_dump(GF_Box *a, FILE * trace) { u32 i; GF_SampleGroupBox *ptr = (GF_SampleGroupBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "SampleGroupBox", trace); if (ptr->grouping_type) fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(ptr->grouping_type) ); if (ptr->version==1) { if (isalnum(ptr->grouping_type_parameter&0xFF)) { fprintf(trace, " grouping_type_parameter=\"%s\"", gf_4cc_to_str(ptr->grouping_type_parameter) ); } else { fprintf(trace, " grouping_type_parameter=\"%d\"", ptr->grouping_type_parameter); } } fprintf(trace, ">\n"); for (i=0; i<ptr->entry_count; i++) { fprintf(trace, "<SampleGroupBoxEntry sample_count=\"%d\" group_description_index=\"%d\"/>\n", ptr->sample_entries[i].sample_count, ptr->sample_entries[i].group_description_index ); } if (!ptr->size) { fprintf(trace, "<SampleGroupBoxEntry sample_count=\"\" group_description_index=\"\"/>\n"); } gf_isom_box_dump_done("SampleGroupBox", a, trace); return GF_OK; } static void oinf_entry_dump(GF_OperatingPointsInformation 
*ptr, FILE * trace) { u32 i, count; if (!ptr) { fprintf(trace, "<OperatingPointsInformation scalability_mask=\"Multiview|Spatial scalability|Auxilary|unknown\" num_profile_tier_level=\"\" num_operating_points=\"\" dependency_layers=\"\">\n"); fprintf(trace, " <ProfileTierLevel general_profile_space=\"\" general_tier_flag=\"\" general_profile_idc=\"\" general_profile_compatibility_flags=\"\" general_constraint_indicator_flags=\"\" />\n"); fprintf(trace, "<OperatingPoint output_layer_set_idx=\"\" max_temporal_id=\"\" layer_count=\"\" minPicWidth=\"\" minPicHeight=\"\" maxPicWidth=\"\" maxPicHeight=\"\" maxChromaFormat=\"\" maxBitDepth=\"\" frame_rate_info_flag=\"\" bit_rate_info_flag=\"\" avgFrameRate=\"\" constantFrameRate=\"\" maxBitRate=\"\" avgBitRate=\"\"/>\n"); fprintf(trace, "<Layer dependent_layerID=\"\" num_layers_dependent_on=\"\" dependent_on_layerID=\"\" dimension_identifier=\"\"/>\n"); fprintf(trace, "</OperatingPointsInformation>\n"); return; } fprintf(trace, "<OperatingPointsInformation"); fprintf(trace, " scalability_mask=\"%u (", ptr->scalability_mask); switch (ptr->scalability_mask) { case 2: fprintf(trace, "Multiview"); break; case 4: fprintf(trace, "Spatial scalability"); break; case 8: fprintf(trace, "Auxilary"); break; default: fprintf(trace, "unknown"); } fprintf(trace, ")\" num_profile_tier_level=\"%u\"", gf_list_count(ptr->profile_tier_levels) ); fprintf(trace, " num_operating_points=\"%u\" dependency_layers=\"%u\"", gf_list_count(ptr->operating_points), gf_list_count(ptr->dependency_layers)); fprintf(trace, ">\n"); count=gf_list_count(ptr->profile_tier_levels); for (i = 0; i < count; i++) { LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, i); fprintf(trace, " <ProfileTierLevel general_profile_space=\"%u\" general_tier_flag=\"%u\" general_profile_idc=\"%u\" general_profile_compatibility_flags=\"%X\" general_constraint_indicator_flags=\""LLX"\" />\n", ptl->general_profile_space, 
/* continuation of oinf_entry_dump(): remaining arguments of the profile/tier/level fprintf started above */
	        ptl->general_tier_flag, ptl->general_profile_idc, ptl->general_profile_compatibility_flags, ptl->general_constraint_indicator_flags);
	}
	/* one <OperatingPoint> element per operating point */
	count=gf_list_count(ptr->operating_points);
	for (i = 0; i < count; i++) {
		LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i);
		fprintf(trace, "<OperatingPoint output_layer_set_idx=\"%u\"", op->output_layer_set_idx);
		fprintf(trace, " max_temporal_id=\"%u\" layer_count=\"%u\"", op->max_temporal_id, op->layer_count);
		fprintf(trace, " minPicWidth=\"%u\" minPicHeight=\"%u\"", op->minPicWidth, op->minPicHeight);
		fprintf(trace, " maxPicWidth=\"%u\" maxPicHeight=\"%u\"", op->maxPicWidth, op->maxPicHeight);
		fprintf(trace, " maxChromaFormat=\"%u\" maxBitDepth=\"%u\"", op->maxChromaFormat, op->maxBitDepth);
		fprintf(trace, " frame_rate_info_flag=\"%u\" bit_rate_info_flag=\"%u\"", op->frame_rate_info_flag, op->bit_rate_info_flag);
		/* optional attributes only present when the corresponding flag is set */
		if (op->frame_rate_info_flag)
			fprintf(trace, " avgFrameRate=\"%u\" constantFrameRate=\"%u\"", op->avgFrameRate, op->constantFrameRate);
		if (op->bit_rate_info_flag)
			fprintf(trace, " maxBitRate=\"%u\" avgBitRate=\"%u\"", op->maxBitRate, op->avgBitRate);
		fprintf(trace, "/>\n");
	}
	/* one <Layer> element per dependent layer */
	count=gf_list_count(ptr->dependency_layers);
	for (i = 0; i < count; i++) {
		u32 j;
		LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i);
		fprintf(trace, "<Layer dependent_layerID=\"%u\" num_layers_dependent_on=\"%u\"", dep->dependent_layerID, dep->num_layers_dependent_on);
		if (dep->num_layers_dependent_on) {
			fprintf(trace, " dependent_on_layerID=\"");
			for (j = 0; j < dep->num_layers_dependent_on; j++)
				fprintf(trace, "%d ", dep->dependent_on_layerID[j]);
			fprintf(trace, "\"");
		}
		fprintf(trace, " dimension_identifier=\"");
		/* only the dimensions flagged in the 16-bit scalability mask are listed */
		for (j = 0; j < 16; j++)
			if (ptr->scalability_mask & (1 << j))
				fprintf(trace, "%d ", dep->dimension_identifier[j]);
		fprintf(trace, "\"/>\n");
	}
	fprintf(trace, "</OperatingPointsInformation>\n");
	return;
}

/* Dumps an LHVC layer-information ('linf') sample group entry as XML.
 * A NULL ptr prints the empty attribute template used for schema dumps. */
static void linf_dump(GF_LHVCLayerInformation *ptr, FILE * trace)
{
	u32 i, count;
	if (!ptr) {
		fprintf(trace, "<LayerInformation num_layers=\"\">\n");
		fprintf(trace, "<LayerInfoItem layer_id=\"\" min_temporalId=\"\" max_temporalId=\"\" sub_layer_presence_flags=\"\"/>\n");
		fprintf(trace, "</LayerInformation>\n");
		return;
	}
	count = gf_list_count(ptr->num_layers_in_track);
	fprintf(trace, "<LayerInformation num_layers=\"%d\">\n", count );
	for (i = 0; i < count; i++) {
		LHVCLayerInfoItem *li = (LHVCLayerInfoItem *)gf_list_get(ptr->num_layers_in_track, i);
		fprintf(trace, "<LayerInfoItem layer_id=\"%d\" min_temporalId=\"%d\" max_temporalId=\"%d\" sub_layer_presence_flags=\"%d\"/>\n", li->layer_id, li->min_TemporalId, li->max_TemporalId, li->sub_layer_presence_flags);
	}
	fprintf(trace, "</LayerInformation>\n");
	return;
}

/* Dumps a tile-region ('trif') sample group entry parsed bit-by-bit from its raw payload.
 * NULL data prints the empty attribute template. */
static void trif_dump(FILE * trace, char *data, u32 data_size)
{
	GF_BitStream *bs;
	u32 id, independent, filter_disabled;
	Bool full_picture, has_dep, tile_group;
	if (!data) {
		fprintf(trace, "<TileRegionGroupEntry ID=\"\" tileGroup=\"\" independent=\"\" full_picture=\"\" filter_disabled=\"\" x=\"\" y=\"\" w=\"\" h=\"\">\n");
		fprintf(trace, "<TileRegionDependency tileID=\"\"/>\n");
		fprintf(trace, "</TileRegionGroupEntry>\n");
		return;
	}
	bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
	id = gf_bs_read_u16(bs);
	tile_group = gf_bs_read_int(bs, 1);
	fprintf(trace, "<TileRegionGroupEntry ID=\"%d\" tileGroup=\"%d\" ", id, tile_group);
	/* NOTE(review): when tile_group is 0 the opened element is never closed below - verify against the dumper's expected output */
	if (tile_group) {
		independent = gf_bs_read_int(bs, 2);
		full_picture = (Bool)gf_bs_read_int(bs, 1);
		filter_disabled = gf_bs_read_int(bs, 1);
		has_dep = gf_bs_read_int(bs, 1);
		gf_bs_read_int(bs, 2); /* reserved bits, skipped */
		fprintf(trace, "independent=\"%d\" full_picture=\"%d\" filter_disabled=\"%d\" ", independent, full_picture, filter_disabled);
		if (!full_picture) {
			fprintf(trace, "x=\"%d\" y=\"%d\" ", gf_bs_read_u16(bs), gf_bs_read_u16(bs));
		}
		fprintf(trace, "w=\"%d\" h=\"%d\" ", gf_bs_read_u16(bs), gf_bs_read_u16(bs));
		if (!has_dep) {
			fprintf(trace, "/>\n");
		} else {
			u32 count = gf_bs_read_u16(bs);
			fprintf(trace, ">\n");
			while (count) {
				count--;
				fprintf(trace, "<TileRegionDependency tileID=\"%d\"/>\n", gf_bs_read_u16(bs) );
			}
			fprintf(trace, "</TileRegionGroupEntry>\n");
		}
	}
	gf_bs_del(bs);
}

/* Dumps a NALU map ('nalm') sample group entry; entry and start numbers use 8 or 16 bits
 * depending on the large_size flag. NULL data prints the empty template. */
static void nalm_dump(FILE * trace, char *data, u32 data_size)
{
	GF_BitStream *bs;
	Bool rle, large_size;
	u32 entry_count;
	if (!data) {
		fprintf(trace, "<NALUMap rle=\"\" large_size=\"\">\n");
		fprintf(trace, "<NALUMapEntry NALU_startNumber=\"\" groupID=\"\"/>\n");
		fprintf(trace, "</NALUMap>\n");
		return;
	}
	bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
	gf_bs_read_int(bs, 6); /* reserved */
	large_size = gf_bs_read_int(bs, 1);
	rle = gf_bs_read_int(bs, 1);
	entry_count = gf_bs_read_int(bs, large_size ? 16 : 8);
	fprintf(trace, "<NALUMap rle=\"%d\" large_size=\"%d\">\n", rle, large_size);
	while (entry_count) {
		u32 ID;
		fprintf(trace, "<NALUMapEntry ");
		if (rle) {
			u32 start_num = gf_bs_read_int(bs, large_size ? 16 : 8);
			fprintf(trace, "NALU_startNumber=\"%d\" ", start_num);
		}
		ID = gf_bs_read_u16(bs);
		fprintf(trace, "groupID=\"%d\"/>\n", ID);
		entry_count--;
	}
	gf_bs_del(bs);
	fprintf(trace, "</NALUMap>\n");
	return;
}

/* Dumps a SampleGroupDescriptionBox ('sgpd'); each entry is formatted per grouping_type. */
GF_Err sgpd_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_SampleGroupDescriptionBox *ptr = (GF_SampleGroupDescriptionBox*) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "SampleGroupDescriptionBox", trace);
	if (ptr->grouping_type)
		fprintf(trace, "grouping_type=\"%s\"", gf_4cc_to_str(ptr->grouping_type) );
	if (ptr->version==1) fprintf(trace, " default_length=\"%d\"", ptr->default_length);
	if ((ptr->version>=2) && ptr->default_description_index) fprintf(trace, " default_group_index=\"%d\"", ptr->default_description_index);
	fprintf(trace, ">\n");
	for (i=0; i<gf_list_count(ptr->group_descriptions); i++) {
		void *entry = gf_list_get(ptr->group_descriptions, i);
		switch (ptr->grouping_type) {
		case GF_ISOM_SAMPLE_GROUP_ROLL:
			fprintf(trace, "<RollRecoveryEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance );
			break;
		case GF_ISOM_SAMPLE_GROUP_PROL:
/* continuation of sgpd_dump(): per-grouping-type entry dumps, then the empty-template branch */
			fprintf(trace, "<AudioPreRollEntry roll_distance=\"%d\" />\n", ((GF_RollRecoveryEntry*)entry)->roll_distance );
			break;
		case GF_ISOM_SAMPLE_GROUP_TELE:
			fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"%d\"/>\n", ((GF_TemporalLevelEntry*)entry)->level_independently_decodable);
			break;
		case GF_ISOM_SAMPLE_GROUP_RAP:
			fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"%s\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known ? "yes" : "no");
			if (((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known)
				fprintf(trace, " num_leading_samples=\"%d\"", ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples);
			fprintf(trace, "/>\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_SYNC:
			fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"%d\"/>\n", ((GF_SYNCEntry*)entry)->NALU_type);
			break;
		case GF_ISOM_SAMPLE_GROUP_SEIG:
			/* CENC group entry: KID in hex; a constant IV is only present for protected entries without per-sample IVs */
			fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"%d\" IV_size=\"%d\" KID=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected, ((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size);
			dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->KID, 16);
			if ((((GF_CENCSampleEncryptionGroupEntry*)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry*)entry)->Per_Sample_IV_size) {
				fprintf(trace, "\" constant_IV_size=\"%d\"  constant_IV=\"", ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size);
				dump_data_hex(trace, (char *)((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV, ((GF_CENCSampleEncryptionGroupEntry*)entry)->constant_IV_size);
			}
			fprintf(trace, "\"/>\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_OINF:
			oinf_entry_dump(entry, trace);
			break;
		case GF_ISOM_SAMPLE_GROUP_LINF:
			linf_dump(entry, trace);
			break;
		case GF_ISOM_SAMPLE_GROUP_TRIF:
			trif_dump(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length);
			break;
		case GF_ISOM_SAMPLE_GROUP_NALM:
			nalm_dump(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length);
			break;
		case GF_ISOM_SAMPLE_GROUP_SAP:
			fprintf(trace, "<SAPEntry dependent_flag=\"%d\" SAP_type=\"%d\" />\n", ((GF_SAPEntry*)entry)->dependent_flag, ((GF_SAPEntry*)entry)->SAP_type);
			break;
		default:
			/* unknown grouping type: dump the raw payload */
			fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"%d\" data=\"", ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length);
			dump_data(trace, (char *) ((GF_DefaultSampleGroupDescriptionEntry*)entry)->data, ((GF_DefaultSampleGroupDescriptionEntry*)entry)->length);
			fprintf(trace, "\"/>\n");
		}
	}
	/* size==0 means schema dump: emit an empty template for the grouping type */
	if (!ptr->size) {
		switch (ptr->grouping_type) {
		case GF_ISOM_SAMPLE_GROUP_ROLL:
			fprintf(trace, "<RollRecoveryEntry roll_distance=\"\"/>\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_PROL:
			fprintf(trace, "<AudioPreRollEntry roll_distance=\"\"/>\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_TELE:
			fprintf(trace, "<TemporalLevelEntry level_independently_decodable=\"\"/>\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_RAP:
			fprintf(trace, "<VisualRandomAccessEntry num_leading_samples_known=\"yes|no\" num_leading_samples=\"\" />\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_SYNC:
			fprintf(trace, "<SyncSampleGroupEntry NAL_unit_type=\"\" />\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_SEIG:
			fprintf(trace, "<CENCSampleEncryptionGroupEntry IsEncrypted=\"\" IV_size=\"\" KID=\"\" constant_IV_size=\"\" constant_IV=\"\"/>\n");
			break;
		case GF_ISOM_SAMPLE_GROUP_OINF:
			oinf_entry_dump(NULL, trace);
			break;
		case GF_ISOM_SAMPLE_GROUP_LINF:
			linf_dump(NULL, trace);
			break;
		case GF_ISOM_SAMPLE_GROUP_TRIF:
			trif_dump(trace, NULL, 0);
			break;
		case GF_ISOM_SAMPLE_GROUP_NALM:
			nalm_dump(trace, NULL, 0);
			break;
		case GF_ISOM_SAMPLE_GROUP_SAP:
			fprintf(trace, "<SAPEntry dependent_flag=\"\" SAP_type=\"\" />\n");
			break;
		default:
			fprintf(trace, "<DefaultSampleGroupDescriptionEntry size=\"\" data=\"\"/>\n");
		}
	}
	gf_isom_box_dump_done("SampleGroupDescriptionBox", a, trace);
	return GF_OK;
}

/* Dumps a SampleAuxiliaryInfoSizeBox ('saiz'). */
GF_Err saiz_dump(GF_Box *a, FILE * trace)
{
	u32 i;
/* continuation of saiz_dump() */
	GF_SampleAuxiliaryInfoSizeBox *ptr = (GF_SampleAuxiliaryInfoSizeBox*) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "SampleAuxiliaryInfoSizeBox", trace);
	fprintf(trace, "default_sample_info_size=\"%d\" sample_count=\"%d\"", ptr->default_sample_info_size, ptr->sample_count);
	if (ptr->flags & 1) {
		/* aux_info_type is printed as a 4CC when it looks like one, as an int otherwise */
		if (isalnum(ptr->aux_info_type>>24)) {
			fprintf(trace, " aux_info_type=\"%s\" aux_info_type_parameter=\"%d\"", gf_4cc_to_str(ptr->aux_info_type), ptr->aux_info_type_parameter);
		} else {
			fprintf(trace, " aux_info_type=\"%d\" aux_info_type_parameter=\"%d\"", ptr->aux_info_type, ptr->aux_info_type_parameter);
		}
	}
	fprintf(trace, ">\n");
	/* per-sample sizes are only stored when there is no default size */
	if (ptr->default_sample_info_size==0) {
		for (i=0; i<ptr->sample_count; i++) {
			fprintf(trace, "<SAISize size=\"%d\" />\n", ptr->sample_info_size[i]);
		}
	}
	if (!ptr->size) {
		fprintf(trace, "<SAISize size=\"\" />\n");
	}
	gf_isom_box_dump_done("SampleAuxiliaryInfoSizeBox", a, trace);
	return GF_OK;
}

/* Dumps a SampleAuxiliaryInfoOffsetBox ('saio'); version selects 32- vs 64-bit offsets. */
GF_Err saio_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "SampleAuxiliaryInfoOffsetBox", trace);
	fprintf(trace, "entry_count=\"%d\"", ptr->entry_count);
	if (ptr->flags & 1) {
		if (isalnum(ptr->aux_info_type>>24)) {
			fprintf(trace, " aux_info_type=\"%s\" aux_info_type_parameter=\"%d\"", gf_4cc_to_str(ptr->aux_info_type), ptr->aux_info_type_parameter);
		} else {
			fprintf(trace, " aux_info_type=\"%d\" aux_info_type_parameter=\"%d\"", ptr->aux_info_type, ptr->aux_info_type_parameter);
		}
	}
	fprintf(trace, ">\n");
	if (ptr->version==0) {
		for (i=0; i<ptr->entry_count; i++) {
			fprintf(trace, "<SAIChunkOffset offset=\"%d\"/>\n", ptr->offsets[i]);
		}
	} else {
		for (i=0; i<ptr->entry_count; i++) {
			fprintf(trace, "<SAIChunkOffset offset=\""LLD"\"/>\n", ptr->offsets_large[i]);
		}
	}
	if (!ptr->size) {
		fprintf(trace, "<SAIChunkOffset offset=\"\"/>\n");
	}
	gf_isom_box_dump_done("SampleAuxiliaryInfoOffsetBox", a, trace);
	return GF_OK;
}

/* Dumps a ProtectionSystemHeaderBox ('pssh'). */
GF_Err
pssh_dump(GF_Box *a, FILE * trace)
{
	GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox*) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "ProtectionSystemHeaderBox", trace);
	fprintf(trace, "SystemID=\"");
	dump_data_hex(trace, (char *) ptr->SystemID, 16);
	fprintf(trace, "\">\n");
	if (ptr->KID_count) {
		u32 i;
		for (i=0; i<ptr->KID_count; i++) {
			fprintf(trace, " <PSSHKey KID=\"");
			dump_data_hex(trace, (char *) ptr->KIDs[i], 16);
			fprintf(trace, "\"/>\n");
		}
	}
	if (ptr->private_data_size) {
		fprintf(trace, " <PSSHData size=\"%d\" value=\"", ptr->private_data_size);
		dump_data_hex(trace, (char *) ptr->private_data, ptr->private_data_size);
		fprintf(trace, "\"/>\n");
	}
	if (!ptr->size) {
		fprintf(trace, " <PSSHKey KID=\"\"/>\n");
		fprintf(trace, " <PSSHData size=\"\" value=\"\"/>\n");
	}
	gf_isom_box_dump_done("ProtectionSystemHeaderBox", a, trace);
	return GF_OK;
}

/* Dumps a TrackEncryptionBox ('tenc'); constant IV is used when there is no per-sample IV. */
GF_Err tenc_dump(GF_Box *a, FILE * trace)
{
	GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox*) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "TrackEncryptionBox", trace);
	fprintf(trace, "isEncrypted=\"%d\"", ptr->isProtected);
	if (ptr->Per_Sample_IV_Size)
		fprintf(trace, " IV_size=\"%d\" KID=\"", ptr->Per_Sample_IV_Size);
	else {
		fprintf(trace, " constant_IV_size=\"%d\" constant_IV=\"", ptr->constant_IV_size);
		dump_data_hex(trace, (char *) ptr->constant_IV, ptr->constant_IV_size);
		fprintf(trace, "\" KID=\"");
	}
	dump_data_hex(trace, (char *) ptr->KID, 16);
	/* version>=1 adds the cbcs pattern-encryption attributes */
	if (ptr->version)
		fprintf(trace, "\" crypt_byte_block=\"%d\" skip_byte_block=\"%d", ptr->crypt_byte_block, ptr->skip_byte_block);
	fprintf(trace, "\">\n");
	gf_isom_box_dump_done("TrackEncryptionBox", a, trace);
	return GF_OK;
}

/* Dumps a PIFF (UUID) protection system header box. */
GF_Err piff_pssh_dump(GF_Box *a, FILE * trace)
{
	GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "PIFFProtectionSystemHeaderBox", trace);
	fprintf(trace, "SystemID=\"");
	dump_data_hex(trace, (char *) ptr->SystemID, 16);
	fprintf(trace, "\" PrivateData=\"");
	dump_data_hex(trace, (char *) ptr->private_data, ptr->private_data_size);
	fprintf(trace, "\">\n");
	gf_isom_box_dump_done("PIFFProtectionSystemHeaderBox", a, trace);
	return GF_OK;
}

/* Dumps a PIFF (UUID) track encryption box. */
GF_Err piff_tenc_dump(GF_Box *a, FILE * trace)
{
	GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox*) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "PIFFTrackEncryptionBox", trace);
	fprintf(trace, "AlgorithmID=\"%d\" IV_size=\"%d\" KID=\"", ptr->AlgorithmID, ptr->IV_size);
	dump_data_hex(trace,(char *) ptr->KID, 16);
	fprintf(trace, "\">\n");
	gf_isom_box_dump_done("PIFFTrackEncryptionBox", a, trace);
	return GF_OK;
}

/* Dumps a PIFF (UUID) sample encryption box and its per-sample CENC auxiliary info. */
GF_Err piff_psec_dump(GF_Box *a, FILE * trace)
{
	u32 i, j, sample_count;
	GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "PIFFSampleEncryptionBox", trace);
	sample_count = gf_list_count(ptr->samp_aux_info);
	fprintf(trace, "sampleCount=\"%d\"", sample_count);
	if (ptr->flags & 1) {
		fprintf(trace, " AlgorithmID=\"%d\" IV_size=\"%d\" KID=\"", ptr->AlgorithmID, ptr->IV_size);
		dump_data(trace, (char *) ptr->KID, 16);
		fprintf(trace, "\"");
	}
	fprintf(trace, ">\n");
	if (sample_count) {
		for (i=0; i<sample_count; i++) {
			GF_CENCSampleAuxInfo *cenc_sample = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i);
			if (cenc_sample) {
				/* NOTE(review): strlen() over the IV assumes a zero byte exists within the buffer;
				 * an all-nonzero IV would read past it - verify IV buffer size/termination upstream */
				if (!strlen((char *)cenc_sample->IV)) continue;
				fprintf(trace, "<PIFFSampleEncryptionEntry IV_size=\"%u\" IV=\"", cenc_sample->IV_size);
				dump_data_hex(trace, (char *) cenc_sample->IV, cenc_sample->IV_size);
				if (ptr->flags & 0x2) {
					fprintf(trace, "\" SubsampleCount=\"%d\"", cenc_sample->subsample_count);
					fprintf(trace, ">\n");
					for (j=0; j<cenc_sample->subsample_count; j++) {
						fprintf(trace, "<PIFFSubSampleEncryptionEntry NumClearBytes=\"%d\" NumEncryptedBytes=\"%d\"/>\n", cenc_sample->subsamples[j].bytes_clear_data, cenc_sample->subsamples[j].bytes_encrypted_data);
					}
				}
				fprintf(trace, "</PIFFSampleEncryptionEntry>\n");
			}
		}
	}
	if (!ptr->size)
	{
		fprintf(trace, "<PIFFSampleEncryptionEntry IV=\"\" SubsampleCount=\"\">\n");
		fprintf(trace, "<PIFFSubSampleEncryptionEntry NumClearBytes=\"\" NumEncryptedBytes=\"\"/>\n");
		fprintf(trace, "</PIFFSampleEncryptionEntry>\n");
	}
	gf_isom_box_dump_done("PIFFSampleEncryptionBox", a, trace);
	return GF_OK;
}

/* Dumps a SampleEncryptionBox ('senc') and its per-sample CENC auxiliary info. */
GF_Err senc_dump(GF_Box *a, FILE * trace)
{
	u32 i, j, sample_count;
	GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "SampleEncryptionBox", trace);
	sample_count = gf_list_count(ptr->samp_aux_info);
	fprintf(trace, "sampleCount=\"%d\">\n", sample_count);
	//WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS
	fprintf(trace, "<FullBoxInfo Version=\"%d\" Flags=\"0x%X\"/>\n", ptr->version, ptr->flags);
	for (i=0; i<sample_count; i++) {
		GF_CENCSampleAuxInfo *cenc_sample = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i);
		if (cenc_sample) {
			fprintf(trace, "<SampleEncryptionEntry sampleNumber=\"%d\" IV_size=\"%u\" IV=\"", i+1, cenc_sample->IV_size);
			dump_data_hex(trace, (char *) cenc_sample->IV, cenc_sample->IV_size);
			fprintf(trace, "\"");
			/* flag 0x2 means subsample information is present */
			if (ptr->flags & 0x2) {
				fprintf(trace, " SubsampleCount=\"%d\"", cenc_sample->subsample_count);
				fprintf(trace, ">\n");
				for (j=0; j<cenc_sample->subsample_count; j++) {
					fprintf(trace, "<SubSampleEncryptionEntry NumClearBytes=\"%d\" NumEncryptedBytes=\"%d\"/>\n", cenc_sample->subsamples[j].bytes_clear_data, cenc_sample->subsamples[j].bytes_encrypted_data);
				}
			} else {
				fprintf(trace, ">\n");
			}
			fprintf(trace, "</SampleEncryptionEntry>\n");
		}
	}
	if (!ptr->size) {
		fprintf(trace, "<SampleEncryptionEntry sampleCount=\"\" IV=\"\" SubsampleCount=\"\">\n");
		fprintf(trace, "<SubSampleEncryptionEntry NumClearBytes=\"\" NumEncryptedBytes=\"\"/>\n");
		fprintf(trace, "</SampleEncryptionEntry>\n");
	}
	gf_isom_box_dump_done("SampleEncryptionBox", a, trace);
	return GF_OK;
}

/* Dumps a ProducerReferenceTimeBox ('prft'); converts the 64-bit NTP timestamp to UTC. */
GF_Err prft_dump(GF_Box *a, FILE * trace)
{
	Double fracs;
/* continuation of prft_dump() */
	GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) a;
	time_t secs;
	struct tm t;
	/* NTP upper 32 bits are seconds since 1900; rebase to the Unix epoch */
	secs = (ptr->ntp >> 32) - GF_NTP_SEC_1900_TO_1970;
	/* NOTE(review): if time_t is unsigned on this platform the (secs < 0) test can never fire - confirm target platforms */
	if (secs < 0) {
		if (ptr->size) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("NTP time is not valid, using value 0\n"));
		}
		secs = 0;
	}
	t = *gmtime(&secs);
	/* NTP lower 32 bits are the fractional second, rescaled to milliseconds */
	fracs = (Double) (ptr->ntp & 0xFFFFFFFFULL);
	fracs /= 0xFFFFFFFF;
	fracs *= 1000;
	gf_isom_box_dump_start(a, "ProducerReferenceTimeBox", trace);
	fprintf(trace, "referenceTrackID=\"%d\" timestamp=\""LLU"\" NTP=\""LLU"\" UTC=\"%d-%02d-%02dT%02d:%02d:%02d.%03dZ\">\n", ptr->refTrackID, ptr->timestamp, ptr->ntp, 1900+t.tm_year, t.tm_mon+1, t.tm_mday, t.tm_hour, t.tm_min, (u32) t.tm_sec, (u32) fracs);
	gf_isom_box_dump_done("ProducerReferenceTimeBox", a, trace);
	return GF_OK;
}

/* Dumps an Adobe DRM key management system box and its children. */
GF_Err adkm_dump(GF_Box *a, FILE * trace)
{
	GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "AdobeDRMKeyManagementSystemBox", trace);
	fprintf(trace, ">\n");
	if (ptr->header) gf_isom_box_dump((GF_Box *)ptr->header, trace);
	if (ptr->au_format) gf_isom_box_dump((GF_Box *)ptr->au_format, trace);
	gf_isom_box_dump_done("AdobeDRMKeyManagementSystemBox", a, trace);
	return GF_OK;
}

/* Dumps an Adobe DRM header box. */
GF_Err ahdr_dump(GF_Box *a, FILE * trace)
{
	GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "AdobeDRMHeaderBox", trace);
	fprintf(trace, ">\n");
	if (ptr->std_enc_params) gf_isom_box_dump((GF_Box *)ptr->std_enc_params, trace);
	gf_isom_box_dump_done("AdobeDRMHeaderBox", a, trace);
	return GF_OK;
}

/* Dumps Adobe standard encryption parameters. */
GF_Err aprm_dump(GF_Box *a, FILE * trace)
{
	GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "AdobeStdEncryptionParamsBox", trace);
	fprintf(trace, ">\n");
	if (ptr->enc_info) gf_isom_box_dump((GF_Box *)ptr->enc_info, trace);
	if (ptr->key_info) gf_isom_box_dump((GF_Box *)ptr->key_info, trace);
	gf_isom_box_dump_done("AdobeStdEncryptionParamsBox", a, trace);
	return GF_OK;
}

/* Dumps Adobe encryption info (algorithm and key length). */
GF_Err aeib_dump(GF_Box *a, FILE * trace)
{
	GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "AdobeEncryptionInfoBox", trace);
	fprintf(trace, "EncryptionAlgorithm=\"%s\" KeyLength=\"%d\">\n", ptr->enc_algo, ptr->key_length);
	gf_isom_box_dump_done("AdobeEncryptionInfoBox", a, trace);
	return GF_OK;
}

/* Dumps Adobe key info and its child params box. */
GF_Err akey_dump(GF_Box *a, FILE * trace)
{
	GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "AdobeKeyInfoBox", trace);
	fprintf(trace, ">\n");
	if (ptr->params) gf_isom_box_dump((GF_Box *)ptr->params, trace);
	gf_isom_box_dump_done("AdobeKeyInfoBox", a, trace);
	return GF_OK;
}

/* Dumps Adobe Flash Access params (optional metadata string). */
GF_Err flxs_dump(GF_Box *a, FILE * trace)
{
	GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "AdobeFlashAccessParamsBox", trace);
	fprintf(trace, ">\n");
	if (ptr->metadata)
		fprintf(trace, "<FmrmsV2Metadata=\"%s\"/>\n", ptr->metadata);
	gf_isom_box_dump_done("AdobeFlashAccessParamsBox", a, trace);
	return GF_OK;
}

/* Dumps the Adobe DRM AU format box. */
GF_Err adaf_dump(GF_Box *a, FILE * trace)
{
	GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "AdobeDRMAUFormatBox ", trace);
	fprintf(trace, "SelectiveEncryption=\"%d\" IV_length=\"%d\">\n", ptr->selective_enc ? 1 : 0, ptr->IV_length);
	gf_isom_box_dump_done("AdobeDRMAUFormatBox", a, trace);
	return GF_OK;
}

/* Image File Format dump */
GF_Err ispe_dump(GF_Box *a, FILE * trace)
{
	GF_ImageSpatialExtentsPropertyBox *ptr = (GF_ImageSpatialExtentsPropertyBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "ImageSpatialExtentsPropertyBox", trace);
	fprintf(trace, "image_width=\"%d\" image_height=\"%d\">\n", ptr->image_width, ptr->image_height);
	gf_isom_box_dump_done("ImageSpatialExtentsPropertyBox", a, trace);
	return GF_OK;
}

/* Dumps a ColourInformationBox ('colr'). */
GF_Err colr_dump(GF_Box *a, FILE * trace)
{
	GF_ColourInformationBox *ptr = (GF_ColourInformationBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "ColourInformationBox", trace);
	fprintf(trace, "colour_type=\"%s\" colour_primaries=\"%d\" transfer_characteristics=\"%d\" matrix_coefficients=\"%d\" full_range_flag=\"%d\">\n", gf_4cc_to_str(ptr->colour_type), ptr->colour_primaries, ptr->transfer_characteristics, ptr->matrix_coefficients, ptr->full_range_flag);
	gf_isom_box_dump_done("ColourInformationBox", a, trace);
	return GF_OK;
}

/* Dumps a PixelInformationPropertyBox ('pixi'): bits per channel for each channel. */
GF_Err pixi_dump(GF_Box *a, FILE * trace)
{
	u32 i;
	GF_PixelInformationPropertyBox *ptr = (GF_PixelInformationPropertyBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "PixelInformationPropertyBox", trace);
	fprintf(trace, ">\n");
	for (i = 0; i < ptr->num_channels; i++) {
		fprintf(trace, "<BitPerChannel bits_per_channel=\"%d\"/>\n", ptr->bits_per_channel[i]);
	}
	if (!ptr->size)
		fprintf(trace, "<BitPerChannel bits_per_channel=\"\"/>\n");
	gf_isom_box_dump_done("PixelInformationPropertyBox", a, trace);
	return GF_OK;
}

/* Dumps a RelativeLocationPropertyBox ('rloc'). */
GF_Err rloc_dump(GF_Box *a, FILE * trace)
{
	GF_RelativeLocationPropertyBox *ptr = (GF_RelativeLocationPropertyBox *)a;
	if (!a) return GF_BAD_PARAM;
	gf_isom_box_dump_start(a, "RelativeLocationPropertyBox", trace);
	fprintf(trace, "horizontal_offset=\"%d\" vertical_offset=\"%d\">\n", ptr->horizontal_offset, ptr->vertical_offset);
	gf_isom_box_dump_done("RelativeLocationPropertyBox", a, trace);
return GF_OK; } GF_Err irot_dump(GF_Box *a, FILE * trace) { GF_ImageRotationBox *ptr = (GF_ImageRotationBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ImageRotationBox", trace); fprintf(trace, "angle=\"%d\">\n", (ptr->angle*90)); gf_isom_box_dump_done("ImageRotationBox", a, trace); return GF_OK; } GF_Err ipco_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "ItemPropertyContainerBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("ItemPropertyContainerBox", a, trace); return GF_OK; } GF_Err iprp_dump(GF_Box *a, FILE * trace) { GF_ItemPropertiesBox *ptr = (GF_ItemPropertiesBox *)a; gf_isom_box_dump_start(a, "ItemPropertiesBox", trace); fprintf(trace, ">\n"); if (ptr->property_container) gf_isom_box_dump(ptr->property_container, trace); if (ptr->property_association) gf_isom_box_dump(ptr->property_association, trace); gf_isom_box_dump_done("ItemPropertiesBox", a, trace); return GF_OK; } GF_Err ipma_dump(GF_Box *a, FILE * trace) { u32 i, j; GF_ItemPropertyAssociationBox *ptr = (GF_ItemPropertyAssociationBox *)a; u32 entry_count = gf_list_count(ptr->entries); if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ItemPropertyAssociationBox", trace); fprintf(trace, "entry_count=\"%d\">\n", entry_count); for (i = 0; i < entry_count; i++) { GF_ItemPropertyAssociationEntry *entry = (GF_ItemPropertyAssociationEntry *)gf_list_get(ptr->entries, i); u32 association_count = gf_list_count(entry->essential); fprintf(trace, "<AssociationEntry item_ID=\"%d\" association_count=\"%d\">\n", entry->item_id, association_count); for (j = 0; j < association_count; j++) { Bool *ess = (Bool *)gf_list_get(entry->essential, j); u32 *prop_index = (u32 *)gf_list_get(entry->property_index, j); fprintf(trace, "<Property index=\"%d\" essential=\"%d\"/>\n", *prop_index, *ess); } fprintf(trace, "</AssociationEntry>\n"); } if (!ptr->size) { fprintf(trace, "<AssociationEntry item_ID=\"\" association_count=\"\">\n"); fprintf(trace, "<Property index=\"\" 
essential=\"\"/>\n"); fprintf(trace, "</AssociationEntry>\n"); } gf_isom_box_dump_done("ItemPropertyAssociationBox", a, trace); return GF_OK; } GF_Err auxc_dump(GF_Box *a, FILE * trace) { GF_AuxiliaryTypePropertyBox *ptr = (GF_AuxiliaryTypePropertyBox *)a; gf_isom_box_dump_start(a, "AuxiliaryTypePropertyBox", trace); fprintf(trace, "aux_type=\"%s\" ", ptr->aux_urn); dump_data_attribute(trace, "aux_subtype", ptr->data, ptr->data_size); fprintf(trace, ">\n"); gf_isom_box_dump_done("AuxiliaryTypePropertyBox", a, trace); return GF_OK; } GF_Err oinf_dump(GF_Box *a, FILE * trace) { GF_OINFPropertyBox *ptr = (GF_OINFPropertyBox *)a; gf_isom_box_dump_start(a, "OperatingPointsInformationPropertyBox", trace); fprintf(trace, ">\n"); oinf_entry_dump(ptr->oinf, trace); gf_isom_box_dump_done("OperatingPointsInformationPropertyBox", a, trace); return GF_OK; } GF_Err tols_dump(GF_Box *a, FILE * trace) { GF_TargetOLSPropertyBox *ptr = (GF_TargetOLSPropertyBox *)a; gf_isom_box_dump_start(a, "TargetOLSPropertyBox", trace); fprintf(trace, "target_ols_index=\"%d\">\n", ptr->target_ols_index); gf_isom_box_dump_done("TargetOLSPropertyBox", a, trace); return GF_OK; } GF_Err trgr_dump(GF_Box *a, FILE * trace) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *) a; gf_isom_box_dump_start(a, "TrackGroupBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(ptr->groups, trace); gf_isom_box_dump_done("TrackGroupBox", a, trace); return GF_OK; } GF_Err trgt_dump(GF_Box *a, FILE * trace) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *) a; a->type = ptr->group_type; gf_isom_box_dump_start(a, "TrackGroupTypeBox", trace); a->type = GF_ISOM_BOX_TYPE_TRGT; fprintf(trace, "track_group_id=\"%d\">\n", ptr->track_group_id); gf_isom_box_dump_done("TrackGroupTypeBox", a, trace); return GF_OK; } GF_Err grpl_dump(GF_Box *a, FILE * trace) { gf_isom_box_dump_start(a, "GroupListBox", trace); fprintf(trace, ">\n"); gf_isom_box_dump_done("GroupListBox", a, trace); return GF_OK; } GF_Err grptype_dump(GF_Box 
*a, FILE * trace) { u32 i; GF_EntityToGroupTypeBox *ptr = (GF_EntityToGroupTypeBox *) a; a->type = ptr->grouping_type; gf_isom_box_dump_start(a, "EntityToGroupTypeBox", trace); a->type = GF_ISOM_BOX_TYPE_GRPT; fprintf(trace, "group_id=\"%d\">\n", ptr->group_id); for (i=0; i<ptr->entity_id_count ; i++) fprintf(trace, "<EntityToGroupTypeBoxEntry EntityID=\"%d\"/>\n", ptr->entity_ids[i]); if (!ptr->size) fprintf(trace, "<EntityToGroupTypeBoxEntry EntityID=\"\"/>\n"); gf_isom_box_dump_done("EntityToGroupTypeBox", a, trace); return GF_OK; } GF_Err stvi_dump(GF_Box *a, FILE * trace) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *) a; gf_isom_box_dump_start(a, "StereoVideoBox", trace); fprintf(trace, "single_view_allowed=\"%d\" stereo_scheme=\"%d\" ", ptr->single_view_allowed, ptr->stereo_scheme); dump_data_attribute(trace, "stereo_indication_type", ptr->stereo_indication_type, ptr->sit_len); fprintf(trace, ">\n"); gf_isom_box_dump_done("StereoVideoBox", a, trace); return GF_OK; } GF_Err def_cont_box_dump(GF_Box *a, FILE *trace) { char *name = "SubTrackDefinitionBox"; //only one using generic box container for now gf_isom_box_dump_start(a, name, trace); fprintf(trace, ">\n"); gf_isom_box_dump_done(name, a, trace); return GF_OK; } GF_Err fiin_dump(GF_Box *a, FILE * trace) { FDItemInformationBox *ptr = (FDItemInformationBox *) a; gf_isom_box_dump_start(a, "FDItemInformationBox", trace); fprintf(trace, ">\n"); if (ptr->partition_entries) gf_isom_box_array_dump(ptr->partition_entries, trace); if (ptr->session_info) gf_isom_box_dump(ptr->session_info, trace); if (ptr->group_id_to_name) gf_isom_box_dump(ptr->group_id_to_name, trace); gf_isom_box_dump_done("FDItemInformationBox", a, trace); return GF_OK; } GF_Err fecr_dump(GF_Box *a, FILE * trace) { u32 i; char *box_name; FECReservoirBox *ptr = (FECReservoirBox *) a; if (a->type==GF_ISOM_BOX_TYPE_FIRE) { box_name = "FILEReservoirBox"; } else { box_name = "FECReservoirBox"; } gf_isom_box_dump_start(a, box_name, trace); 
fprintf(trace, ">\n"); for (i=0; i<ptr->nb_entries; i++) { fprintf(trace, "<%sEntry itemID=\"%d\" symbol_count=\"%d\"/>\n", box_name, ptr->entries[i].item_id, ptr->entries[i].symbol_count); } if (!ptr->size) { fprintf(trace, "<%sEntry itemID=\"\" symbol_count=\"\"/>\n", box_name); } gf_isom_box_dump_done(box_name, a, trace); return GF_OK; } GF_Err gitn_dump(GF_Box *a, FILE * trace) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *) a; gf_isom_box_dump_start(a, "GroupIdToNameBox", trace); fprintf(trace, ">\n"); for (i=0; i<ptr->nb_entries; i++) { fprintf(trace, "<GroupIdToNameBoxEntry groupID=\"%d\" name=\"%s\"/>\n", ptr->entries[i].group_id, ptr->entries[i].name); } if (!ptr->size) { fprintf(trace, "<GroupIdToNameBoxEntryEntry groupID=\"\" name=\"\"/>\n"); } gf_isom_box_dump_done("GroupIdToNameBox", a, trace); return GF_OK; } GF_Err paen_dump(GF_Box *a, FILE * trace) { FDPartitionEntryBox *ptr = (FDPartitionEntryBox *) a; gf_isom_box_dump_start(a, "FDPartitionEntryBox", trace); fprintf(trace, ">\n"); if (ptr->blocks_and_symbols) gf_isom_box_dump(ptr->blocks_and_symbols, trace); if (ptr->FEC_symbol_locations) gf_isom_box_dump(ptr->FEC_symbol_locations, trace); if (ptr->FEC_symbol_locations) gf_isom_box_dump(ptr->FEC_symbol_locations, trace); gf_isom_box_dump_done("FDPartitionEntryBox", a, trace); return GF_OK; } GF_Err fpar_dump(GF_Box *a, FILE * trace) { u32 i; FilePartitionBox *ptr = (FilePartitionBox *) a; gf_isom_box_dump_start(a, "FilePartitionBox", trace); fprintf(trace, "itemID=\"%d\" FEC_encoding_ID=\"%d\" FEC_instance_ID=\"%d\" max_source_block_length=\"%d\" encoding_symbol_length=\"%d\" max_number_of_encoding_symbols=\"%d\" ", ptr->itemID, ptr->FEC_encoding_ID, ptr->FEC_instance_ID, ptr->max_source_block_length, ptr->encoding_symbol_length, ptr->max_number_of_encoding_symbols); if (ptr->scheme_specific_info) dump_data_attribute(trace, "scheme_specific_info", (char*)ptr->scheme_specific_info, (u32)strlen(ptr->scheme_specific_info) ); fprintf(trace, 
">\n"); for (i=0; i<ptr->nb_entries; i++) { fprintf(trace, "<FilePartitionBoxEntry block_count=\"%d\" block_size=\"%d\"/>\n", ptr->entries[i].block_count, ptr->entries[i].block_size); } if (!ptr->size) { fprintf(trace, "<FilePartitionBoxEntry block_count=\"\" block_size=\"\"/>\n"); } gf_isom_box_dump_done("FilePartitionBox", a, trace); return GF_OK; } GF_Err segr_dump(GF_Box *a, FILE * trace) { u32 i, k; FDSessionGroupBox *ptr = (FDSessionGroupBox *) a; gf_isom_box_dump_start(a, "FDSessionGroupBox", trace); fprintf(trace, ">\n"); for (i=0; i<ptr->num_session_groups; i++) { fprintf(trace, "<FDSessionGroupBoxEntry groupIDs=\""); for (k=0; k<ptr->session_groups[i].nb_groups; k++) { fprintf(trace, "%d ", ptr->session_groups[i].group_ids[k]); } fprintf(trace, "\" channels=\""); for (k=0; k<ptr->session_groups[i].nb_channels; k++) { fprintf(trace, "%d ", ptr->session_groups[i].channels[k]); } fprintf(trace, "\"/>\n"); } if (!ptr->size) { fprintf(trace, "<FDSessionGroupBoxEntry groupIDs=\"\" channels=\"\"/>\n"); } gf_isom_box_dump_done("FDSessionGroupBox", a, trace); return GF_OK; } GF_Err srpp_dump(GF_Box *a, FILE * trace) { GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *) a; gf_isom_box_dump_start(a, "SRTPProcessBox", trace); fprintf(trace, "encryption_algorithm_rtp=\"%d\" encryption_algorithm_rtcp=\"%d\" integrity_algorithm_rtp=\"%d\" integrity_algorithm_rtcp=\"%d\">\n", ptr->encryption_algorithm_rtp, ptr->encryption_algorithm_rtcp, ptr->integrity_algorithm_rtp, ptr->integrity_algorithm_rtcp); if (ptr->info) gf_isom_box_dump(ptr->info, trace); if (ptr->scheme_type) gf_isom_box_dump(ptr->scheme_type, trace); gf_isom_box_dump_done("SRTPProcessBox", a, trace); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_HINTING GF_Err fdpa_dump(GF_Box *a, FILE * trace) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "FDpacketBox", trace); fprintf(trace, "sender_current_time_present=\"%d\" expected_residual_time_present=\"%d\" 
session_close_bit=\"%d\" object_close_bit=\"%d\" transport_object_identifier=\"%d\">\n", ptr->info.sender_current_time_present, ptr->info.expected_residual_time_present, ptr->info.session_close_bit, ptr->info.object_close_bit, ptr->info.transport_object_identifier); for (i=0; i<ptr->header_ext_count; i++) { fprintf(trace, "<FDHeaderExt type=\"%d\"", ptr->headers[i].header_extension_type); if (ptr->headers[i].header_extension_type > 127) { dump_data_attribute(trace, "content", (char *) ptr->headers[i].content, 3); } else if (ptr->headers[i].data_length) { dump_data_attribute(trace, "data", ptr->headers[i].data, ptr->headers[i].data_length); } fprintf(trace, "/>\n"); } if (!ptr->size) { fprintf(trace, "<FDHeaderExt type=\"\" content=\"\" data=\"\"/>\n"); } gf_isom_box_dump_done("FDpacketBox", a, trace); return GF_OK; } GF_Err extr_dump(GF_Box *a, FILE * trace) { GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "ExtraDataBox", trace); dump_data_attribute(trace, "data", ptr->data, ptr->data_length); fprintf(trace, ">\n"); if (ptr->feci) { gf_isom_box_dump((GF_Box *)ptr->feci, trace); } gf_isom_box_dump_done("ExtraDataBox", a, trace); return GF_OK; } GF_Err fdsa_dump(GF_Box *a, FILE * trace) { GF_Err e; GF_HintSample *ptr = (GF_HintSample *) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "FDSampleBox", trace); fprintf(trace, ">\n"); e = gf_isom_box_array_dump(ptr->packetTable, trace); if (e) return e; if (ptr->extra_data) { e = gf_isom_box_dump((GF_Box *)ptr->extra_data, trace); if (e) return e; } gf_isom_box_dump_done("FDSampleBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_HINTING*/ GF_Err trik_dump(GF_Box *a, FILE * trace) { u32 i; GF_TrickPlayBox *p = (GF_TrickPlayBox *) a; gf_isom_box_dump_start(a, "TrickPlayBox", trace); fprintf(trace, ">\n"); for (i=0; i<p->entry_count; i++) { fprintf(trace, "<TrickPlayBoxEntry pic_type=\"%d\" dependency_level=\"%d\"/>\n", p->entries[i].pic_type, 
p->entries[i].dependency_level); } if (!p->size) fprintf(trace, "<TrickPlayBoxEntry pic_type=\"\" dependency_level=\"\"/>\n"); gf_isom_box_dump_done("TrickPlayBox", a, trace); return GF_OK; } GF_Err bloc_dump(GF_Box *a, FILE * trace) { GF_BaseLocationBox *p = (GF_BaseLocationBox *) a; gf_isom_box_dump_start(a, "BaseLocationBox", trace); fprintf(trace, "baseLocation=\"%s\" basePurlLocation=\"%s\">\n", p->baseLocation, p->basePurlLocation); gf_isom_box_dump_done("BaseLocationBox", a, trace); return GF_OK; } GF_Err ainf_dump(GF_Box *a, FILE * trace) { GF_AssetInformationBox *p = (GF_AssetInformationBox *) a; gf_isom_box_dump_start(a, "AssetInformationBox", trace); fprintf(trace, "profile_version=\"%d\" APID=\"%s\">\n", p->profile_version, p->APID); gf_isom_box_dump_done("AssetInformationBox", a, trace); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_DUMP*/
static void nalm_dump(FILE * trace, char *data, u32 data_size) { GF_BitStream *bs; Bool rle, large_size; u32 entry_count; if (!data) { fprintf(trace, "<NALUMap rle=\"\" large_size=\"\">\n"); fprintf(trace, "<NALUMapEntry NALU_startNumber=\"\" groupID=\"\"/>\n"); fprintf(trace, "</NALUMap>\n"); return; } bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); gf_bs_read_int(bs, 6); large_size = gf_bs_read_int(bs, 1); rle = gf_bs_read_int(bs, 1); entry_count = gf_bs_read_int(bs, large_size ? 16 : 8); fprintf(trace, "<NALUMap rle=\"%d\" large_size=\"%d\">\n", rle, large_size); while (entry_count) { u32 ID; fprintf(trace, "<NALUMapEntry "); if (rle) { u32 start_num = gf_bs_read_int(bs, large_size ? 16 : 8); fprintf(trace, "NALU_startNumber=\"%d\" ", start_num); } ID = gf_bs_read_u16(bs); fprintf(trace, "groupID=\"%d\"/>\n", ID); entry_count--; } gf_bs_del(bs); fprintf(trace, "</NALUMap>\n"); return; }
static void nalm_dump(FILE * trace, char *data, u32 data_size) { GF_BitStream *bs; Bool rle, large_size; u32 entry_count; if (!data) { fprintf(trace, "<NALUMap rle=\"\" large_size=\"\">\n"); fprintf(trace, "<NALUMapEntry NALU_startNumber=\"\" groupID=\"\"/>\n"); fprintf(trace, "</NALUMap>\n"); return; } bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); gf_bs_read_int(bs, 6); large_size = gf_bs_read_int(bs, 1); rle = gf_bs_read_int(bs, 1); entry_count = gf_bs_read_int(bs, large_size ? 16 : 8); fprintf(trace, "<NALUMap rle=\"%d\" large_size=\"%d\">\n", rle, large_size); while (entry_count) { u32 ID; fprintf(trace, "<NALUMapEntry "); if (rle) { u32 start_num = gf_bs_read_int(bs, large_size ? 16 : 8); fprintf(trace, "NALU_startNumber=\"%d\" ", start_num); } ID = gf_bs_read_u16(bs); fprintf(trace, "groupID=\"%d\"/>\n", ID); entry_count--; } gf_bs_del(bs); fprintf(trace, "</NALUMap>\n"); return; }
{'added': [(487, '\tif (p->nameUTF8 && (u32) p->nameUTF8[0] == strlen(p->nameUTF8)-1) {'), (4160, '\t\tif (op->frame_rate_info_flag)'), (4162, '\t\tif (op->bit_rate_info_flag)'), (4264, ''), (4271, ''), (4341, ''), (4510, '\tif (ptr->version)')], 'deleted': [(487, '\tif (p->nameUTF8 && (u32) p->nameUTF8[0] == strlen(p->nameUTF8+1)) {'), (4160, '\t\tif (op->frame_rate_info_flag)'), (4162, '\t\tif (op->bit_rate_info_flag)'), (4264, ''), (4271, ''), (4341, ''), (4510, '\tif (ptr->version)')]}
7
7
4,480
34,025
32
195
6
https://github.com/gpac/gpac
CVE-2018-13006
CWE-125
2,667
lua_struct.c
C
optsize
/* ** {====================================================== ** Library for packing/unpacking structures. ** $Id: struct.c,v 1.7 2018/05/11 22:04:31 roberto Exp $ ** See Copyright Notice at the end of this file ** ======================================================= */ /* ** Valid formats: ** > - big endian ** < - little endian ** ![num] - alignment ** x - pading ** b/B - signed/unsigned byte ** h/H - signed/unsigned short ** l/L - signed/unsigned long ** T - size_t ** i/In - signed/unsigned integer with size 'n' (default is size of int) ** cn - sequence of 'n' chars (from/to a string); when packing, n==0 means the whole string; when unpacking, n==0 means use the previous read number as the string length ** s - zero-terminated string ** f - float ** d - double ** ' ' - ignored */ #include <assert.h> #include <ctype.h> #include <limits.h> #include <stddef.h> #include <string.h> #include "lua.h" #include "lauxlib.h" #if (LUA_VERSION_NUM >= 502) #define luaL_register(L,n,f) luaL_newlib(L,f) #endif /* basic integer type */ #if !defined(STRUCT_INT) #define STRUCT_INT long #endif typedef STRUCT_INT Inttype; /* corresponding unsigned version */ typedef unsigned STRUCT_INT Uinttype; /* maximum size (in bytes) for integral types */ #define MAXINTSIZE 32 /* is 'x' a power of 2? */ #define isp2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0) /* dummy structure to get alignment requirements */ struct cD { char c; double d; }; #define PADDING (sizeof(struct cD) - sizeof(double)) #define MAXALIGN (PADDING > sizeof(int) ? PADDING : sizeof(int)) /* endian options */ #define BIG 0 #define LITTLE 1 static union { int dummy; char endian; } const native = {1}; typedef struct Header { int endian; int align; } Header; static int getnum (const char **fmt, int df) { if (!isdigit(**fmt)) /* no number? 
*/ return df; /* return default value */ else { int a = 0; do { a = a*10 + *((*fmt)++) - '0'; } while (isdigit(**fmt)); return a; } } #define defaultoptions(h) ((h)->endian = native.endian, (h)->align = 1) static size_t optsize (lua_State *L, char opt, const char **fmt) { switch (opt) { case 'B': case 'b': return sizeof(char); case 'H': case 'h': return sizeof(short); case 'L': case 'l': return sizeof(long); case 'T': return sizeof(size_t); case 'f': return sizeof(float); case 'd': return sizeof(double); case 'x': return 1; case 'c': return getnum(fmt, 1); case 'i': case 'I': { int sz = getnum(fmt, sizeof(int)); if (sz > MAXINTSIZE) luaL_error(L, "integral size %d is larger than limit of %d", sz, MAXINTSIZE); return sz; } default: return 0; /* other cases do not need alignment */ } } /* ** return number of bytes needed to align an element of size 'size' ** at current position 'len' */ static int gettoalign (size_t len, Header *h, int opt, size_t size) { if (size == 0 || opt == 'c') return 0; if (size > (size_t)h->align) size = h->align; /* respect max. 
alignment */ return (size - (len & (size - 1))) & (size - 1); } /* ** options to control endianess and alignment */ static void controloptions (lua_State *L, int opt, const char **fmt, Header *h) { switch (opt) { case ' ': return; /* ignore white spaces */ case '>': h->endian = BIG; return; case '<': h->endian = LITTLE; return; case '!': { int a = getnum(fmt, MAXALIGN); if (!isp2(a)) luaL_error(L, "alignment %d is not a power of 2", a); h->align = a; return; } default: { const char *msg = lua_pushfstring(L, "invalid format option '%c'", opt); luaL_argerror(L, 1, msg); } } } static void putinteger (lua_State *L, luaL_Buffer *b, int arg, int endian, int size) { lua_Number n = luaL_checknumber(L, arg); Uinttype value; char buff[MAXINTSIZE]; if (n < 0) value = (Uinttype)(Inttype)n; else value = (Uinttype)n; if (endian == LITTLE) { int i; for (i = 0; i < size; i++) { buff[i] = (value & 0xff); value >>= 8; } } else { int i; for (i = size - 1; i >= 0; i--) { buff[i] = (value & 0xff); value >>= 8; } } luaL_addlstring(b, buff, size); } static void correctbytes (char *b, int size, int endian) { if (endian != native.endian) { int i = 0; while (i < --size) { char temp = b[i]; b[i++] = b[size]; b[size] = temp; } } } static int b_pack (lua_State *L) { luaL_Buffer b; const char *fmt = luaL_checkstring(L, 1); Header h; int arg = 2; size_t totalsize = 0; defaultoptions(&h); lua_pushnil(L); /* mark to separate arguments from string buffer */ luaL_buffinit(L, &b); while (*fmt != '\0') { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); int toalign = gettoalign(totalsize, &h, opt, size); totalsize += toalign; while (toalign-- > 0) luaL_addchar(&b, '\0'); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ putinteger(L, &b, arg++, h.endian, size); break; } case 'x': { luaL_addchar(&b, '\0'); break; } case 'f': { float f = (float)luaL_checknumber(L, arg++); correctbytes((char *)&f, size, h.endian); 
luaL_addlstring(&b, (char *)&f, size); break; } case 'd': { double d = luaL_checknumber(L, arg++); correctbytes((char *)&d, size, h.endian); luaL_addlstring(&b, (char *)&d, size); break; } case 'c': case 's': { size_t l; const char *s = luaL_checklstring(L, arg++, &l); if (size == 0) size = l; luaL_argcheck(L, l >= (size_t)size, arg, "string too short"); luaL_addlstring(&b, s, size); if (opt == 's') { luaL_addchar(&b, '\0'); /* add zero at the end */ size++; } break; } default: controloptions(L, opt, &fmt, &h); } totalsize += size; } luaL_pushresult(&b); return 1; } static lua_Number getinteger (const char *buff, int endian, int issigned, int size) { Uinttype l = 0; int i; if (endian == BIG) { for (i = 0; i < size; i++) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } else { for (i = size - 1; i >= 0; i--) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } if (!issigned) return (lua_Number)l; else { /* signed format */ Uinttype mask = (Uinttype)(~((Uinttype)0)) << (size*8 - 1); if (l & mask) /* negative value? */ l |= mask; /* signal extension */ return (lua_Number)(Inttype)l; } } static int b_unpack (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t ld; const char *data = luaL_checklstring(L, 2, &ld); size_t pos = luaL_optinteger(L, 3, 1); luaL_argcheck(L, pos > 0, 3, "offset must be 1 or greater"); pos--; /* Lua indexes are 1-based, but here we want 0-based for C * pointer math. 
*/ int n = 0; /* number of results */ defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); luaL_argcheck(L, size <= ld && pos <= ld - size, 2, "data string too short"); /* stack space for item + next position */ luaL_checkstack(L, 2, "too many results"); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ int issigned = islower(opt); lua_Number res = getinteger(data+pos, h.endian, issigned, size); lua_pushnumber(L, res); n++; break; } case 'x': { break; } case 'f': { float f; memcpy(&f, data+pos, size); correctbytes((char *)&f, sizeof(f), h.endian); lua_pushnumber(L, f); n++; break; } case 'd': { double d; memcpy(&d, data+pos, size); correctbytes((char *)&d, sizeof(d), h.endian); lua_pushnumber(L, d); n++; break; } case 'c': { if (size == 0) { if (n == 0 || !lua_isnumber(L, -1)) luaL_error(L, "format 'c0' needs a previous size"); size = lua_tonumber(L, -1); lua_pop(L, 1); n--; luaL_argcheck(L, size <= ld && pos <= ld - size, 2, "data string too short"); } lua_pushlstring(L, data+pos, size); n++; break; } case 's': { const char *e = (const char *)memchr(data+pos, '\0', ld - pos); if (e == NULL) luaL_error(L, "unfinished string in data"); size = (e - (data+pos)) + 1; lua_pushlstring(L, data+pos, size - 1); n++; break; } default: controloptions(L, opt, &fmt, &h); } pos += size; } lua_pushinteger(L, pos + 1); /* next position */ return n + 1; } static int b_size (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t pos = 0; defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); if (opt == 's') luaL_argerror(L, 1, "option 's' has no fixed size"); else if (opt == 'c' && size == 0) luaL_argerror(L, 1, "option 'c0' has no fixed size"); if (!isalnum(opt)) controloptions(L, opt, &fmt, &h); pos += size; } lua_pushinteger(L, pos); 
return 1; } /* }====================================================== */ static const struct luaL_Reg thislib[] = { {"pack", b_pack}, {"unpack", b_unpack}, {"size", b_size}, {NULL, NULL} }; LUALIB_API int luaopen_struct (lua_State *L); LUALIB_API int luaopen_struct (lua_State *L) { luaL_register(L, "struct", thislib); return 1; } /****************************************************************************** * Copyright (C) 2010-2018 Lua.org, PUC-Rio. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ******************************************************************************/
/* ** {====================================================== ** Library for packing/unpacking structures. ** $Id: struct.c,v 1.7 2018/05/11 22:04:31 roberto Exp $ ** See Copyright Notice at the end of this file ** ======================================================= */ /* ** Valid formats: ** > - big endian ** < - little endian ** ![num] - alignment ** x - pading ** b/B - signed/unsigned byte ** h/H - signed/unsigned short ** l/L - signed/unsigned long ** T - size_t ** i/In - signed/unsigned integer with size 'n' (default is size of int) ** cn - sequence of 'n' chars (from/to a string); when packing, n==0 means the whole string; when unpacking, n==0 means use the previous read number as the string length ** s - zero-terminated string ** f - float ** d - double ** ' ' - ignored */ #include <assert.h> #include <ctype.h> #include <limits.h> #include <stddef.h> #include <string.h> #include "lua.h" #include "lauxlib.h" #if (LUA_VERSION_NUM >= 502) #define luaL_register(L,n,f) luaL_newlib(L,f) #endif /* basic integer type */ #if !defined(STRUCT_INT) #define STRUCT_INT long #endif typedef STRUCT_INT Inttype; /* corresponding unsigned version */ typedef unsigned STRUCT_INT Uinttype; /* maximum size (in bytes) for integral types */ #define MAXINTSIZE 32 /* is 'x' a power of 2? */ #define isp2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0) /* dummy structure to get alignment requirements */ struct cD { char c; double d; }; #define PADDING (sizeof(struct cD) - sizeof(double)) #define MAXALIGN (PADDING > sizeof(int) ? PADDING : sizeof(int)) /* endian options */ #define BIG 0 #define LITTLE 1 static union { int dummy; char endian; } const native = {1}; typedef struct Header { int endian; int align; } Header; static int getnum (lua_State *L, const char **fmt, int df) { if (!isdigit(**fmt)) /* no number? 
*/ return df; /* return default value */ else { int a = 0; do { if (a > (INT_MAX / 10) || a * 10 > (INT_MAX - (**fmt - '0'))) luaL_error(L, "integral size overflow"); a = a*10 + *((*fmt)++) - '0'; } while (isdigit(**fmt)); return a; } } #define defaultoptions(h) ((h)->endian = native.endian, (h)->align = 1) static size_t optsize (lua_State *L, char opt, const char **fmt) { switch (opt) { case 'B': case 'b': return sizeof(char); case 'H': case 'h': return sizeof(short); case 'L': case 'l': return sizeof(long); case 'T': return sizeof(size_t); case 'f': return sizeof(float); case 'd': return sizeof(double); case 'x': return 1; case 'c': return getnum(L, fmt, 1); case 'i': case 'I': { int sz = getnum(L, fmt, sizeof(int)); if (sz > MAXINTSIZE) luaL_error(L, "integral size %d is larger than limit of %d", sz, MAXINTSIZE); return sz; } default: return 0; /* other cases do not need alignment */ } } /* ** return number of bytes needed to align an element of size 'size' ** at current position 'len' */ static int gettoalign (size_t len, Header *h, int opt, size_t size) { if (size == 0 || opt == 'c') return 0; if (size > (size_t)h->align) size = h->align; /* respect max. 
alignment */ return (size - (len & (size - 1))) & (size - 1); } /* ** options to control endianess and alignment */ static void controloptions (lua_State *L, int opt, const char **fmt, Header *h) { switch (opt) { case ' ': return; /* ignore white spaces */ case '>': h->endian = BIG; return; case '<': h->endian = LITTLE; return; case '!': { int a = getnum(L, fmt, MAXALIGN); if (!isp2(a)) luaL_error(L, "alignment %d is not a power of 2", a); h->align = a; return; } default: { const char *msg = lua_pushfstring(L, "invalid format option '%c'", opt); luaL_argerror(L, 1, msg); } } } static void putinteger (lua_State *L, luaL_Buffer *b, int arg, int endian, int size) { lua_Number n = luaL_checknumber(L, arg); Uinttype value; char buff[MAXINTSIZE]; if (n < 0) value = (Uinttype)(Inttype)n; else value = (Uinttype)n; if (endian == LITTLE) { int i; for (i = 0; i < size; i++) { buff[i] = (value & 0xff); value >>= 8; } } else { int i; for (i = size - 1; i >= 0; i--) { buff[i] = (value & 0xff); value >>= 8; } } luaL_addlstring(b, buff, size); } static void correctbytes (char *b, int size, int endian) { if (endian != native.endian) { int i = 0; while (i < --size) { char temp = b[i]; b[i++] = b[size]; b[size] = temp; } } } static int b_pack (lua_State *L) { luaL_Buffer b; const char *fmt = luaL_checkstring(L, 1); Header h; int arg = 2; size_t totalsize = 0; defaultoptions(&h); lua_pushnil(L); /* mark to separate arguments from string buffer */ luaL_buffinit(L, &b); while (*fmt != '\0') { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); int toalign = gettoalign(totalsize, &h, opt, size); totalsize += toalign; while (toalign-- > 0) luaL_addchar(&b, '\0'); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ putinteger(L, &b, arg++, h.endian, size); break; } case 'x': { luaL_addchar(&b, '\0'); break; } case 'f': { float f = (float)luaL_checknumber(L, arg++); correctbytes((char *)&f, size, h.endian); 
luaL_addlstring(&b, (char *)&f, size); break; } case 'd': { double d = luaL_checknumber(L, arg++); correctbytes((char *)&d, size, h.endian); luaL_addlstring(&b, (char *)&d, size); break; } case 'c': case 's': { size_t l; const char *s = luaL_checklstring(L, arg++, &l); if (size == 0) size = l; luaL_argcheck(L, l >= (size_t)size, arg, "string too short"); luaL_addlstring(&b, s, size); if (opt == 's') { luaL_addchar(&b, '\0'); /* add zero at the end */ size++; } break; } default: controloptions(L, opt, &fmt, &h); } totalsize += size; } luaL_pushresult(&b); return 1; } static lua_Number getinteger (const char *buff, int endian, int issigned, int size) { Uinttype l = 0; int i; if (endian == BIG) { for (i = 0; i < size; i++) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } else { for (i = size - 1; i >= 0; i--) { l <<= 8; l |= (Uinttype)(unsigned char)buff[i]; } } if (!issigned) return (lua_Number)l; else { /* signed format */ Uinttype mask = (Uinttype)(~((Uinttype)0)) << (size*8 - 1); if (l & mask) /* negative value? */ l |= mask; /* signal extension */ return (lua_Number)(Inttype)l; } } static int b_unpack (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t ld; const char *data = luaL_checklstring(L, 2, &ld); size_t pos = luaL_optinteger(L, 3, 1); luaL_argcheck(L, pos > 0, 3, "offset must be 1 or greater"); pos--; /* Lua indexes are 1-based, but here we want 0-based for C * pointer math. 
*/ int n = 0; /* number of results */ defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); luaL_argcheck(L, size <= ld && pos <= ld - size, 2, "data string too short"); /* stack space for item + next position */ luaL_checkstack(L, 2, "too many results"); switch (opt) { case 'b': case 'B': case 'h': case 'H': case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ int issigned = islower(opt); lua_Number res = getinteger(data+pos, h.endian, issigned, size); lua_pushnumber(L, res); n++; break; } case 'x': { break; } case 'f': { float f; memcpy(&f, data+pos, size); correctbytes((char *)&f, sizeof(f), h.endian); lua_pushnumber(L, f); n++; break; } case 'd': { double d; memcpy(&d, data+pos, size); correctbytes((char *)&d, sizeof(d), h.endian); lua_pushnumber(L, d); n++; break; } case 'c': { if (size == 0) { if (n == 0 || !lua_isnumber(L, -1)) luaL_error(L, "format 'c0' needs a previous size"); size = lua_tonumber(L, -1); lua_pop(L, 1); n--; luaL_argcheck(L, size <= ld && pos <= ld - size, 2, "data string too short"); } lua_pushlstring(L, data+pos, size); n++; break; } case 's': { const char *e = (const char *)memchr(data+pos, '\0', ld - pos); if (e == NULL) luaL_error(L, "unfinished string in data"); size = (e - (data+pos)) + 1; lua_pushlstring(L, data+pos, size - 1); n++; break; } default: controloptions(L, opt, &fmt, &h); } pos += size; } lua_pushinteger(L, pos + 1); /* next position */ return n + 1; } static int b_size (lua_State *L) { Header h; const char *fmt = luaL_checkstring(L, 1); size_t pos = 0; defaultoptions(&h); while (*fmt) { int opt = *fmt++; size_t size = optsize(L, opt, &fmt); pos += gettoalign(pos, &h, opt, size); if (opt == 's') luaL_argerror(L, 1, "option 's' has no fixed size"); else if (opt == 'c' && size == 0) luaL_argerror(L, 1, "option 'c0' has no fixed size"); if (!isalnum(opt)) controloptions(L, opt, &fmt, &h); pos += size; } lua_pushinteger(L, pos); 
return 1; } /* }====================================================== */ static const struct luaL_Reg thislib[] = { {"pack", b_pack}, {"unpack", b_unpack}, {"size", b_size}, {NULL, NULL} }; LUALIB_API int luaopen_struct (lua_State *L); LUALIB_API int luaopen_struct (lua_State *L) { luaL_register(L, "struct", thislib); return 1; } /****************************************************************************** * Copyright (C) 2010-2018 Lua.org, PUC-Rio. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ******************************************************************************/
static size_t optsize (lua_State *L, char opt, const char **fmt) { switch (opt) { case 'B': case 'b': return sizeof(char); case 'H': case 'h': return sizeof(short); case 'L': case 'l': return sizeof(long); case 'T': return sizeof(size_t); case 'f': return sizeof(float); case 'd': return sizeof(double); case 'x': return 1; case 'c': return getnum(fmt, 1); case 'i': case 'I': { int sz = getnum(fmt, sizeof(int)); if (sz > MAXINTSIZE) luaL_error(L, "integral size %d is larger than limit of %d", sz, MAXINTSIZE); return sz; } default: return 0; /* other cases do not need alignment */ } }
static size_t optsize (lua_State *L, char opt, const char **fmt) { switch (opt) { case 'B': case 'b': return sizeof(char); case 'H': case 'h': return sizeof(short); case 'L': case 'l': return sizeof(long); case 'T': return sizeof(size_t); case 'f': return sizeof(float); case 'd': return sizeof(double); case 'x': return 1; case 'c': return getnum(L, fmt, 1); case 'i': case 'I': { int sz = getnum(L, fmt, sizeof(int)); if (sz > MAXINTSIZE) luaL_error(L, "integral size %d is larger than limit of %d", sz, MAXINTSIZE); return sz; } default: return 0; /* other cases do not need alignment */ } }
{'added': [(92, 'static int getnum (lua_State *L, const char **fmt, int df) {'), (98, " if (a > (INT_MAX / 10) || a * 10 > (INT_MAX - (**fmt - '0')))"), (99, ' luaL_error(L, "integral size overflow");'), (120, " case 'c': return getnum(L, fmt, 1);"), (122, ' int sz = getnum(L, fmt, sizeof(int));'), (155, ' int a = getnum(L, fmt, MAXALIGN);')], 'deleted': [(92, 'static int getnum (const char **fmt, int df) {'), (118, " case 'c': return getnum(fmt, 1);"), (120, ' int sz = getnum(fmt, sizeof(int));'), (153, ' int a = getnum(fmt, MAXALIGN);')]}
6
4
295
2,041
20
148
15
https://github.com/antirez/redis
CVE-2020-14147
CWE-787
799
matrix_diag.cc
C++
tflite::ops::builtin::matrix_diag::Prepare
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace matrix_diag { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. 
output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; } // Fill the tensor to make a diagonal matrix in each batch, i.e., when // row index and column index are the same, fill with the next input value. // All other entries get zero. // TODO(b/128636574) Move to reference_ops. template <typename T> void FillDiagImpl(const T* in, T* out, const int batch_size, const int row_size, const int col_size) { int idx = 0; for (int b = 0; b < batch_size; b++) { for (int i = 0; i < row_size; i++) { for (int j = 0; j < col_size; ++j) { // input values go on the diagonal, 0 elsewhere if (i == j) { out[i * col_size + j] = in[idx]; idx++; } else { out[i * col_size + j] = 0; } } } out += row_size * col_size; } } template <typename T> void FillDiag(const TfLiteTensor* input, TfLiteTensor* output, const int batch_size, const int row_size, const int col_size) { FillDiagImpl<T>(GetTensorData<T>(input), GetTensorData<T>(output), batch_size, row_size, col_size); } // Fill a tensor with given input on the diagonal, zero elsewhere void FillDiagHelper(const TfLiteTensor* input, TfLiteTensor* output) { const int num_output_dims = output->dims->size; int batch_size = 1; for (int i = 0; i < num_output_dims - 2; ++i) { batch_size *= output->dims->data[i]; } const int row_size = output->dims->data[num_output_dims - 2]; const int col_size = output->dims->data[num_output_dims - 1]; switch (output->type) { case kTfLiteInt64: { return FillDiag<int64_t>(input, output, batch_size, row_size, col_size); } case kTfLiteInt32: { return FillDiag<int32_t>(input, output, batch_size, row_size, col_size); } case kTfLiteInt16: { return FillDiag<int16_t>(input, output, batch_size, row_size, col_size); } case kTfLiteInt8: { return FillDiag<int8_t>(input, output, batch_size, row_size, col_size); } case kTfLiteUInt8: { return FillDiag<uint8_t>(input, output, 
batch_size, row_size, col_size); } default: return FillDiag<float>(input, output, batch_size, row_size, col_size); } } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* input = GetInput(context, node, kInputTensor); FillDiagHelper(input, output); return kTfLiteOk; } } // namespace matrix_diag TfLiteRegistration* Register_MATRIX_DIAG() { static TfLiteRegistration r = {nullptr, nullptr, matrix_diag::Prepare, matrix_diag::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace matrix_diag { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. 
output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; } // Fill the tensor to make a diagonal matrix in each batch, i.e., when // row index and column index are the same, fill with the next input value. // All other entries get zero. // TODO(b/128636574) Move to reference_ops. template <typename T> void FillDiagImpl(const T* in, T* out, const int batch_size, const int row_size, const int col_size) { int idx = 0; for (int b = 0; b < batch_size; b++) { for (int i = 0; i < row_size; i++) { for (int j = 0; j < col_size; ++j) { // input values go on the diagonal, 0 elsewhere if (i == j) { out[i * col_size + j] = in[idx]; idx++; } else { out[i * col_size + j] = 0; } } } out += row_size * col_size; } } template <typename T> void FillDiag(const TfLiteTensor* input, TfLiteTensor* output, const int batch_size, const int row_size, const int col_size) { FillDiagImpl<T>(GetTensorData<T>(input), GetTensorData<T>(output), batch_size, row_size, col_size); } // Fill a tensor with given input on the diagonal, zero elsewhere void FillDiagHelper(const TfLiteTensor* input, TfLiteTensor* output) { const int num_output_dims = output->dims->size; int batch_size = 1; for (int i = 0; i < num_output_dims - 2; ++i) { batch_size *= output->dims->data[i]; } const int row_size = output->dims->data[num_output_dims - 2]; const int col_size = output->dims->data[num_output_dims - 1]; switch (output->type) { case kTfLiteInt64: { return FillDiag<int64_t>(input, output, batch_size, row_size, col_size); } case kTfLiteInt32: { return FillDiag<int32_t>(input, output, batch_size, row_size, col_size); } case kTfLiteInt16: { return FillDiag<int16_t>(input, output, batch_size, row_size, col_size); } case kTfLiteInt8: { return FillDiag<int8_t>(input, output, batch_size, row_size, col_size); } case kTfLiteUInt8: { return FillDiag<uint8_t>(input, output, 
batch_size, row_size, col_size); } default: return FillDiag<float>(input, output, batch_size, row_size, col_size); } } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); FillDiagHelper(input, output); return kTfLiteOk; } } // namespace matrix_diag TfLiteRegistration* Register_MATRIX_DIAG() { static TfLiteRegistration r = {nullptr, nullptr, matrix_diag::Prepare, matrix_diag::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
{'added': [(35, ' const TfLiteTensor* input;'), (36, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (41, ' TfLiteTensor* output;'), (42, ' TF_LITE_ENSURE_OK(context,'), (43, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (122, ' TfLiteTensor* output;'), (123, ' TF_LITE_ENSURE_OK(context,'), (124, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (125, ' const TfLiteTensor* input;'), (126, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));')], 'deleted': [(35, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (40, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (119, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (120, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);')]}
10
4
104
739
18
171
2
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,097
grubfs.c
C
grubfs_free
/* radare - LGPL - Copyright 2011 pancake<nopcode.org> */ #include <r_io.h> #include <r_fs.h> #include "grubfs.h" #include <stdio.h> #include <string.h> static RIOBind *bio = NULL; static ut64 delta = 0; static void* empty (int sz) { void *p = malloc (sz); if (p) memset (p, '\0', sz); return p; } static grub_err_t read_foo (struct grub_disk *disk, grub_disk_addr_t sector, grub_size_t size, char *buf) { if (disk != NULL) { const int blocksize = 512; // unhardcode 512 int ret; RIOBind *iob = disk->data; if (bio) iob = bio; //printf ("io %p\n", file->root->iob.io); ret = iob->read_at (iob->io, delta+(blocksize*sector), (ut8*)buf, size*blocksize); if (ret == -1) return 1; //printf ("DISK PTR = %p\n", disk->data); //printf ("\nBUF: %x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]); } else eprintf ("oops. no disk\n"); return 0; // 0 is ok } GrubFS *grubfs_new (struct grub_fs *myfs, void *data) { struct grub_file *file; GrubFS *gfs = empty (sizeof (GrubFS)); // hacky mallocs :D gfs->file = file = empty (sizeof (struct grub_file)); file->device = empty (sizeof (struct grub_device)+1024); file->device->disk = empty (sizeof (struct grub_disk)); file->device->disk->dev = (grub_disk_dev_t)file->device; // hack! file->device->disk->dev->read = read_foo; // grub_disk_dev file->device->disk->data = data; //file->device->disk->read_hook = read_foo; //read_hook; file->fs = myfs; return gfs; } grub_disk_t grubfs_disk (void *data) { struct grub_disk *disk = empty (sizeof (struct grub_disk)); disk->dev = empty (sizeof (struct grub_disk_dev)); disk->dev->read = read_foo; // grub_disk_dev disk->data = data; return disk; } void grubfs_free (GrubFS *gf) { if (gf) { if (gf->file && gf->file->device) free (gf->file->device->disk); //free (gf->file->device); free (gf->file); free (gf); } } void grubfs_bind_io (RIOBind *iob, ut64 _delta) { bio = iob; delta = _delta; }
/* radare - LGPL - Copyright 2011-2017 pancake<nopcode.org> */ #include <r_io.h> #include <r_fs.h> #include "grubfs.h" #include <stdio.h> #include <string.h> static RIOBind *bio = NULL; static ut64 delta = 0; static void* empty (int sz) { void *p = malloc (sz); if (p) memset (p, '\0', sz); return p; } static grub_err_t read_foo (struct grub_disk *disk, grub_disk_addr_t sector, grub_size_t size, char *buf) { if (!disk) { eprintf ("oops. no disk\n"); return 1; } const int blocksize = 512; // TODO unhardcode 512 RIOBind *iob = disk->data; if (bio) { iob = bio; } //printf ("io %p\n", file->root->iob.io); if (iob->read_at (iob->io, delta+(blocksize*sector), (ut8*)buf, size*blocksize) == -1) { return 1; } return 0; } GrubFS *grubfs_new (struct grub_fs *myfs, void *data) { struct grub_file *file; GrubFS *gfs = empty (sizeof (GrubFS)); // hacky mallocs :D gfs->file = file = empty (sizeof (struct grub_file)); file->device = empty (sizeof (struct grub_device)+1024); file->device->disk = empty (sizeof (struct grub_disk)); file->device->disk->dev = (grub_disk_dev_t)file->device; // hack! file->device->disk->dev->read = read_foo; // grub_disk_dev file->device->disk->data = data; //file->device->disk->read_hook = read_foo; //read_hook; file->fs = myfs; return gfs; } grub_disk_t grubfs_disk (void *data) { struct grub_disk *disk = empty (sizeof (struct grub_disk)); disk->dev = empty (sizeof (struct grub_disk_dev)); disk->dev->read = read_foo; // grub_disk_dev disk->data = data; return disk; } void grubfs_free (GrubFS *gf) { if (gf) { if (gf->file && gf->file->device) { free (gf->file->device->disk); } //free (gf->file->device); free (gf->file); free (gf); } } void grubfs_bind_io (RIOBind *iob, ut64 _delta) { bio = iob; delta = _delta; }
void grubfs_free (GrubFS *gf) { if (gf) { if (gf->file && gf->file->device) free (gf->file->device->disk); //free (gf->file->device); free (gf->file); free (gf); } }
void grubfs_free (GrubFS *gf) { if (gf) { if (gf->file && gf->file->device) { free (gf->file->device->disk); } //free (gf->file->device); free (gf->file); free (gf); } }
{'added': [(1, '/* radare - LGPL - Copyright 2011-2017 pancake<nopcode.org> */'), (20, '\tif (!disk) {'), (21, '\t\teprintf ("oops. no disk\\n");'), (22, '\t\treturn 1;'), (23, '\t}'), (24, '\tconst int blocksize = 512; // TODO unhardcode 512'), (25, '\tRIOBind *iob = disk->data;'), (26, '\tif (bio) {'), (27, '\t\tiob = bio;'), (28, '\t}'), (29, '\t//printf ("io %p\\n", file->root->iob.io);'), (30, '\tif (iob->read_at (iob->io, delta+(blocksize*sector), (ut8*)buf, size*blocksize) == -1) {'), (31, '\t\treturn 1;'), (32, '\t}'), (33, '\treturn 0;'), (61, '\t\tif (gf->file && gf->file->device) {'), (63, '\t\t}')], 'deleted': [(1, '/* radare - LGPL - Copyright 2011 pancake<nopcode.org> */'), (20, '\tif (disk != NULL) {'), (21, '\t\tconst int blocksize = 512; // unhardcode 512'), (22, '\t\tint ret;'), (23, '\t\tRIOBind *iob = disk->data;'), (24, '\t\tif (bio) iob = bio;'), (25, '\t\t//printf ("io %p\\n", file->root->iob.io);'), (26, '\t\tret = iob->read_at (iob->io, delta+(blocksize*sector),'), (27, '\t\t\t(ut8*)buf, size*blocksize);'), (28, '\t\tif (ret == -1)'), (29, '\t\t\treturn 1;'), (30, '\t\t//printf ("DISK PTR = %p\\n", disk->data);'), (31, '\t\t//printf ("\\nBUF: %x %x %x %x\\n", buf[0], buf[1], buf[2], buf[3]);'), (32, '\t} else eprintf ("oops. no disk\\n");'), (33, '\treturn 0; // 0 is ok'), (61, '\t\tif (gf->file && gf->file->device)')]}
17
16
59
406
8
49
4
https://github.com/radare/radare2
CVE-2017-9763
CWE-119
3,056
comparisons.cc
C++
tflite::ops::builtin::comparisons::GreaterEval
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/comparisons.h" #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace comparisons { namespace { constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node, bool is_string_allowed) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Don't support string. if (!is_string_allowed) { TF_LITE_ENSURE(context, input1->type != kTfLiteString); } // Currently only support tensors have the same type. 
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = kTfLiteBool; bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); } TfLiteStatus ComparisonPrepare(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, false); } TfLiteStatus ComparisonPrepareStringAllowed(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, true); } template <typename input_dtype, reference_ops::ComparisonFn<int32> opname> void ComparisonQuantized(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { auto input1_offset = -input1->params.zero_point; auto input2_offset = -input2->params.zero_point; const int left_shift = 8; int32 input1_multiplier; int input1_shift; QuantizeMultiplierSmallerThanOneExp(input1->params.scale, &input1_multiplier, &input1_shift); int32 input2_multiplier; int input2_shift; QuantizeMultiplierSmallerThanOneExp(input2->params.scale, &input2_multiplier, &input2_shift); ComparisonParams op_params; op_params.left_shift = left_shift; op_params.input1_offset = input1_offset; op_params.input1_multiplier = input1_multiplier; op_params.input1_shift = input1_shift; op_params.input2_offset = input2_offset; op_params.input2_multiplier = input2_multiplier; op_params.input2_shift = input2_shift; if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } else { 
reference_ops::ComparisonWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } } } template <typename T, reference_ops::ComparisonFn<T> opname> void Comparison(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { ComparisonParams op_params; requires_broadcast ? reference_ops::BroadcastComparison4DSlowImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)) : reference_ops::ComparisonImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } void ComparisonString(bool (*opname)(const StringRef&, const StringRef&), const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { bool* output_data = GetTensorData<bool>(output); if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowStringImpl( opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } else { reference_ops::ComparisonStringImpl(opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } } TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, 
reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, 
input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, 
input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessEqualFn>(input1, input2, output, 
requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } } // namespace } // namespace comparisons TfLiteRegistration* Register_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::EqualEval}; return &r; } TfLiteRegistration* Register_NOT_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::NotEqualEval}; return &r; } TfLiteRegistration* Register_GREATER() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEval}; return &r; } TfLiteRegistration* Register_GREATER_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEqualEval}; return &r; } TfLiteRegistration* Register_LESS() { static TfLiteRegistration r = { nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEval}; return &r; } TfLiteRegistration* Register_LESS_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEqualEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/comparisons.h" #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace comparisons { namespace { constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node, bool is_string_allowed) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Don't support string. 
if (!is_string_allowed) { TF_LITE_ENSURE(context, input1->type != kTfLiteString); } // Currently only support tensors have the same type. TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = kTfLiteBool; bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); } TfLiteStatus ComparisonPrepare(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, false); } TfLiteStatus ComparisonPrepareStringAllowed(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, true); } template <typename input_dtype, reference_ops::ComparisonFn<int32> opname> void ComparisonQuantized(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { auto input1_offset = -input1->params.zero_point; auto input2_offset = -input2->params.zero_point; const int left_shift = 8; int32 input1_multiplier; int input1_shift; QuantizeMultiplierSmallerThanOneExp(input1->params.scale, &input1_multiplier, &input1_shift); int32 input2_multiplier; int input2_shift; QuantizeMultiplierSmallerThanOneExp(input2->params.scale, &input2_multiplier, &input2_shift); ComparisonParams op_params; op_params.left_shift = left_shift; op_params.input1_offset = input1_offset; op_params.input1_multiplier = input1_multiplier; op_params.input1_shift = input1_shift; op_params.input2_offset = input2_offset; op_params.input2_multiplier = input2_multiplier; op_params.input2_shift = input2_shift; if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), 
GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } else { reference_ops::ComparisonWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } } } template <typename T, reference_ops::ComparisonFn<T> opname> void Comparison(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { ComparisonParams op_params; requires_broadcast ? reference_ops::BroadcastComparison4DSlowImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)) : reference_ops::ComparisonImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } void ComparisonString(bool (*opname)(const StringRef&, const StringRef&), const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { bool* output_data = GetTensorData<bool>(output); if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowStringImpl( opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } else { reference_ops::ComparisonStringImpl(opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } } TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, 
GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, 
reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, 
GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, 
reference_ops::LessFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } } // namespace } // namespace comparisons TfLiteRegistration* Register_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::EqualEval}; return &r; } TfLiteRegistration* Register_NOT_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::NotEqualEval}; return &r; } TfLiteRegistration* 
Register_GREATER() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEval}; return &r; } TfLiteRegistration* Register_GREATER_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEqualEval}; return &r; } TfLiteRegistration* Register_LESS() { static TfLiteRegistration r = { nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEval}; return &r; } TfLiteRegistration* Register_LESS_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEqualEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
{'added': [(44, ' const TfLiteTensor* input1;'), (45, ' TF_LITE_ENSURE_OK(context,'), (46, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (47, ' const TfLiteTensor* input2;'), (48, ' TF_LITE_ENSURE_OK(context,'), (49, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (50, ' TfLiteTensor* output;'), (51, ' TF_LITE_ENSURE_OK(context,'), (52, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (154, ' const TfLiteTensor* input1;'), (155, ' TF_LITE_ENSURE_OK(context,'), (156, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (157, ' const TfLiteTensor* input2;'), (158, ' TF_LITE_ENSURE_OK(context,'), (159, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (160, ' TfLiteTensor* output;'), (161, ' TF_LITE_ENSURE_OK(context,'), (162, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (204, ' const TfLiteTensor* input1;'), (205, ' TF_LITE_ENSURE_OK(context,'), (206, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (207, ' const TfLiteTensor* input2;'), (208, ' TF_LITE_ENSURE_OK(context,'), (209, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (210, ' TfLiteTensor* output;'), (211, ' TF_LITE_ENSURE_OK(context,'), (212, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (254, ' const TfLiteTensor* input1;'), (255, ' TF_LITE_ENSURE_OK(context,'), (256, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (257, ' const TfLiteTensor* input2;'), (258, ' TF_LITE_ENSURE_OK(context,'), (259, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (260, ' TfLiteTensor* output;'), (261, ' TF_LITE_ENSURE_OK(context,'), (262, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (295, ' const TfLiteTensor* input1;'), (296, ' TF_LITE_ENSURE_OK(context,'), (297, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (298, ' const TfLiteTensor* input2;'), (299, ' TF_LITE_ENSURE_OK(context,'), (300, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (301, ' TfLiteTensor* 
output;'), (302, ' TF_LITE_ENSURE_OK(context,'), (303, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (336, ' const TfLiteTensor* input1;'), (337, ' TF_LITE_ENSURE_OK(context,'), (338, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (339, ' const TfLiteTensor* input2;'), (340, ' TF_LITE_ENSURE_OK(context,'), (341, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (342, ' TfLiteTensor* output;'), (343, ' TF_LITE_ENSURE_OK(context,'), (344, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (377, ' const TfLiteTensor* input1;'), (378, ' TF_LITE_ENSURE_OK(context,'), (379, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (380, ' const TfLiteTensor* input2;'), (381, ' TF_LITE_ENSURE_OK(context,'), (382, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (383, ' TfLiteTensor* output;'), (384, ' TF_LITE_ENSURE_OK(context,'), (385, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(44, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (45, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (46, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (148, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (149, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (150, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (192, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (193, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (194, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (236, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (237, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (238, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (271, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (272, ' 
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (273, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (306, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (307, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (308, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (341, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (342, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (343, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
63
21
416
2,500
34
208
6
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,166
root.c
C
proc_root_init
/* * linux/fs/proc/root.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc root directory handling functions */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include "internal.h" static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; } static int proc_set_super(struct super_block *sb, void *data) { int err = set_anon_super(sb, NULL); if (!err) { struct pid_namespace *ns = (struct pid_namespace *)data; sb->s_fs_info = get_pid_ns(ns); } return err; } static struct dentry *proc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int err; struct super_block *sb; struct pid_namespace *ns; struct proc_inode *ei; if (flags & MS_KERNMOUNT) ns = (struct pid_namespace *)data; else ns = current->nsproxy->pid_ns; sb = sget(fs_type, proc_test_super, proc_set_super, ns); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { sb->s_flags = flags; err = proc_fill_super(sb); if (err) { deactivate_locked_super(sb); return ERR_PTR(err); } sb->s_flags |= MS_ACTIVE; } ei = PROC_I(sb->s_root->d_inode); if (!ei->pid) { rcu_read_lock(); ei->pid = get_pid(find_pid_ns(1, ns)); rcu_read_unlock(); } return dget(sb->s_root); } static void proc_kill_sb(struct super_block *sb) { struct pid_namespace *ns; ns = (struct pid_namespace *)sb->s_fs_info; kill_anon_super(sb); put_pid_ns(ns); } static struct file_system_type proc_fs_type = { .name = "proc", .mount = proc_mount, .kill_sb = proc_kill_sb, }; void __init proc_root_init(void) { struct vfsmount *mnt; int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); if (IS_ERR(mnt)) { unregister_filesystem(&proc_fs_type); return; } init_pid_ns.proc_mnt = 
mnt; proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat ) { generic_fillattr(dentry->d_inode, stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { if (!proc_lookup(dir, dentry, nd)) { return NULL; } return proc_pid_lookup(dir, dentry, nd); } static int proc_root_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned int nr = filp->f_pos; int ret; if (nr < FIRST_PROCESS_ENTRY) { int error = proc_readdir(filp, dirent, filldir); if (error <= 0) return error; filp->f_pos = FIRST_PROCESS_ENTRY; } ret = proc_pid_readdir(filp, dirent, filldir); return ret; } /* * The root /proc directory is special, as it has the * <pid> directories. Thus we don't use the generic * directory handling functions for that.. */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, .readdir = proc_root_readdir, .llseek = default_llseek, }; /* * proc root can do almost nothing.. */ static const struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, .getattr = proc_root_getattr, }; /* * This is the root "inode" in the /proc tree.. 
*/ struct proc_dir_entry proc_root = { .low_ino = PROC_ROOT_INO, .namelen = 5, .mode = S_IFDIR | S_IRUGO | S_IXUGO, .nlink = 2, .count = ATOMIC_INIT(1), .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, .name = "/proc", }; int pid_ns_prepare_proc(struct pid_namespace *ns) { struct vfsmount *mnt; mnt = kern_mount_data(&proc_fs_type, ns); if (IS_ERR(mnt)) return PTR_ERR(mnt); ns->proc_mnt = mnt; return 0; } void pid_ns_release_proc(struct pid_namespace *ns) { mntput(ns->proc_mnt); }
/* * linux/fs/proc/root.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc root directory handling functions */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include "internal.h" static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; } static int proc_set_super(struct super_block *sb, void *data) { int err = set_anon_super(sb, NULL); if (!err) { struct pid_namespace *ns = (struct pid_namespace *)data; sb->s_fs_info = get_pid_ns(ns); } return err; } static struct dentry *proc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int err; struct super_block *sb; struct pid_namespace *ns; struct proc_inode *ei; if (flags & MS_KERNMOUNT) ns = (struct pid_namespace *)data; else ns = current->nsproxy->pid_ns; sb = sget(fs_type, proc_test_super, proc_set_super, ns); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { sb->s_flags = flags; err = proc_fill_super(sb); if (err) { deactivate_locked_super(sb); return ERR_PTR(err); } sb->s_flags |= MS_ACTIVE; } ei = PROC_I(sb->s_root->d_inode); if (!ei->pid) { rcu_read_lock(); ei->pid = get_pid(find_pid_ns(1, ns)); rcu_read_unlock(); } return dget(sb->s_root); } static void proc_kill_sb(struct super_block *sb) { struct pid_namespace *ns; ns = (struct pid_namespace *)sb->s_fs_info; kill_anon_super(sb); put_pid_ns(ns); } static struct file_system_type proc_fs_type = { .name = "proc", .mount = proc_mount, .kill_sb = proc_kill_sb, }; void __init proc_root_init(void) { int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; err = pid_ns_prepare_proc(&init_pid_ns); if (err) { unregister_filesystem(&proc_fs_type); return; } proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); 
#ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat ) { generic_fillattr(dentry->d_inode, stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { if (!proc_lookup(dir, dentry, nd)) { return NULL; } return proc_pid_lookup(dir, dentry, nd); } static int proc_root_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned int nr = filp->f_pos; int ret; if (nr < FIRST_PROCESS_ENTRY) { int error = proc_readdir(filp, dirent, filldir); if (error <= 0) return error; filp->f_pos = FIRST_PROCESS_ENTRY; } ret = proc_pid_readdir(filp, dirent, filldir); return ret; } /* * The root /proc directory is special, as it has the * <pid> directories. Thus we don't use the generic * directory handling functions for that.. */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, .readdir = proc_root_readdir, .llseek = default_llseek, }; /* * proc root can do almost nothing.. */ static const struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, .getattr = proc_root_getattr, }; /* * This is the root "inode" in the /proc tree.. 
*/ struct proc_dir_entry proc_root = { .low_ino = PROC_ROOT_INO, .namelen = 5, .mode = S_IFDIR | S_IRUGO | S_IXUGO, .nlink = 2, .count = ATOMIC_INIT(1), .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, .name = "/proc", }; int pid_ns_prepare_proc(struct pid_namespace *ns) { struct vfsmount *mnt; mnt = kern_mount_data(&proc_fs_type, ns); if (IS_ERR(mnt)) return PTR_ERR(mnt); ns->proc_mnt = mnt; return 0; } void pid_ns_release_proc(struct pid_namespace *ns) { kern_unmount(ns->proc_mnt); }
void __init proc_root_init(void) { struct vfsmount *mnt; int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); if (IS_ERR(mnt)) { unregister_filesystem(&proc_fs_type); return; } init_pid_ns.proc_mnt = mnt; proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); }
void __init proc_root_init(void) { int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; err = pid_ns_prepare_proc(&init_pid_ns); if (err) { unregister_filesystem(&proc_fs_type); return; } proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); }
{'added': [(100, '\terr = pid_ns_prepare_proc(&init_pid_ns);'), (101, '\tif (err) {'), (210, '\tkern_unmount(ns->proc_mnt);')], 'deleted': [(94, '\tstruct vfsmount *mnt;'), (101, '\tmnt = kern_mount_data(&proc_fs_type, &init_pid_ns);'), (102, '\tif (IS_ERR(mnt)) {'), (107, '\tinit_pid_ns.proc_mnt = mnt;'), (212, '\tmntput(ns->proc_mnt);')]}
3
5
153
810
26
133
6
https://github.com/torvalds/linux
CVE-2012-2127
CWE-119
438
ssl_tls.c
C
ssl_parse_certificate
/* * SSLv3/TLSv1 shared functions * * Copyright (C) 2006-2012, Brainspark B.V. * * This file is part of PolarSSL (http://www.polarssl.org) * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* * The SSL 3.0 specification was drafted by Netscape in 1996, * and became an IETF standard in 1999. 
* * http://wp.netscape.com/eng/ssl3/ * http://www.ietf.org/rfc/rfc2246.txt * http://www.ietf.org/rfc/rfc4346.txt */ #include "polarssl/config.h" #if defined(POLARSSL_SSL_TLS_C) #include "polarssl/aes.h" #include "polarssl/arc4.h" #include "polarssl/camellia.h" #include "polarssl/des.h" #include "polarssl/debug.h" #include "polarssl/ssl.h" #include "polarssl/sha2.h" #if defined(POLARSSL_GCM_C) #include "polarssl/gcm.h" #endif #include <stdlib.h> #include <time.h> #if defined _MSC_VER && !defined strcasecmp #define strcasecmp _stricmp #endif #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) int (*ssl_hw_record_init)(ssl_context *ssl, const unsigned char *key_enc, const unsigned char *key_dec, const unsigned char *iv_enc, const unsigned char *iv_dec, const unsigned char *mac_enc, const unsigned char *mac_dec) = NULL; int (*ssl_hw_record_reset)(ssl_context *ssl) = NULL; int (*ssl_hw_record_write)(ssl_context *ssl) = NULL; int (*ssl_hw_record_read)(ssl_context *ssl) = NULL; int (*ssl_hw_record_finish)(ssl_context *ssl) = NULL; #endif static int ssl_rsa_decrypt( void *ctx, int mode, size_t *olen, const unsigned char *input, unsigned char *output, size_t output_max_len ) { return rsa_pkcs1_decrypt( (rsa_context *) ctx, mode, olen, input, output, output_max_len ); } static int ssl_rsa_sign( void *ctx, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng, int mode, int hash_id, unsigned int hashlen, const unsigned char *hash, unsigned char *sig ) { return rsa_pkcs1_sign( (rsa_context *) ctx, f_rng, p_rng, mode, hash_id, hashlen, hash, sig ); } static size_t ssl_rsa_key_len( void *ctx ) { return ( (rsa_context *) ctx )->len; } /* * Key material generation */ static int ssl3_prf( unsigned char *secret, size_t slen, char *label, unsigned char *random, size_t rlen, unsigned char *dstbuf, size_t dlen ) { size_t i; md5_context md5; sha1_context sha1; unsigned char padding[16]; unsigned char sha1sum[20]; ((void)label); /* * SSLv3: * block = * MD5( secret + SHA1( 'A' + secret + 
random ) ) + * MD5( secret + SHA1( 'BB' + secret + random ) ) + * MD5( secret + SHA1( 'CCC' + secret + random ) ) + * ... */ for( i = 0; i < dlen / 16; i++ ) { memset( padding, 'A' + i, 1 + i ); sha1_starts( &sha1 ); sha1_update( &sha1, padding, 1 + i ); sha1_update( &sha1, secret, slen ); sha1_update( &sha1, random, rlen ); sha1_finish( &sha1, sha1sum ); md5_starts( &md5 ); md5_update( &md5, secret, slen ); md5_update( &md5, sha1sum, 20 ); md5_finish( &md5, dstbuf + i * 16 ); } memset( &md5, 0, sizeof( md5 ) ); memset( &sha1, 0, sizeof( sha1 ) ); memset( padding, 0, sizeof( padding ) ); memset( sha1sum, 0, sizeof( sha1sum ) ); return( 0 ); } static int tls1_prf( unsigned char *secret, size_t slen, char *label, unsigned char *random, size_t rlen, unsigned char *dstbuf, size_t dlen ) { size_t nb, hs; size_t i, j, k; unsigned char *S1, *S2; unsigned char tmp[128]; unsigned char h_i[20]; if( sizeof( tmp ) < 20 + strlen( label ) + rlen ) return( POLARSSL_ERR_SSL_BAD_INPUT_DATA ); hs = ( slen + 1 ) / 2; S1 = secret; S2 = secret + slen - hs; nb = strlen( label ); memcpy( tmp + 20, label, nb ); memcpy( tmp + 20 + nb, random, rlen ); nb += rlen; /* * First compute P_md5(secret,label+random)[0..dlen] */ md5_hmac( S1, hs, tmp + 20, nb, 4 + tmp ); for( i = 0; i < dlen; i += 16 ) { md5_hmac( S1, hs, 4 + tmp, 16 + nb, h_i ); md5_hmac( S1, hs, 4 + tmp, 16, 4 + tmp ); k = ( i + 16 > dlen ) ? dlen % 16 : 16; for( j = 0; j < k; j++ ) dstbuf[i + j] = h_i[j]; } /* * XOR out with P_sha1(secret,label+random)[0..dlen] */ sha1_hmac( S2, hs, tmp + 20, nb, tmp ); for( i = 0; i < dlen; i += 20 ) { sha1_hmac( S2, hs, tmp, 20 + nb, h_i ); sha1_hmac( S2, hs, tmp, 20, tmp ); k = ( i + 20 > dlen ) ? 
dlen % 20 : 20; for( j = 0; j < k; j++ ) dstbuf[i + j] = (unsigned char)( dstbuf[i + j] ^ h_i[j] ); } memset( tmp, 0, sizeof( tmp ) ); memset( h_i, 0, sizeof( h_i ) ); return( 0 ); } static int tls_prf_sha256( unsigned char *secret, size_t slen, char *label, unsigned char *random, size_t rlen, unsigned char *dstbuf, size_t dlen ) { size_t nb; size_t i, j, k; unsigned char tmp[128]; unsigned char h_i[32]; if( sizeof( tmp ) < 32 + strlen( label ) + rlen ) return( POLARSSL_ERR_SSL_BAD_INPUT_DATA ); nb = strlen( label ); memcpy( tmp + 32, label, nb ); memcpy( tmp + 32 + nb, random, rlen ); nb += rlen; /* * Compute P_<hash>(secret, label + random)[0..dlen] */ sha2_hmac( secret, slen, tmp + 32, nb, tmp, 0 ); for( i = 0; i < dlen; i += 32 ) { sha2_hmac( secret, slen, tmp, 32 + nb, h_i, 0 ); sha2_hmac( secret, slen, tmp, 32, tmp, 0 ); k = ( i + 32 > dlen ) ? dlen % 32 : 32; for( j = 0; j < k; j++ ) dstbuf[i + j] = h_i[j]; } memset( tmp, 0, sizeof( tmp ) ); memset( h_i, 0, sizeof( h_i ) ); return( 0 ); } #if defined(POLARSSL_SHA4_C) static int tls_prf_sha384( unsigned char *secret, size_t slen, char *label, unsigned char *random, size_t rlen, unsigned char *dstbuf, size_t dlen ) { size_t nb; size_t i, j, k; unsigned char tmp[128]; unsigned char h_i[48]; if( sizeof( tmp ) < 48 + strlen( label ) + rlen ) return( POLARSSL_ERR_SSL_BAD_INPUT_DATA ); nb = strlen( label ); memcpy( tmp + 48, label, nb ); memcpy( tmp + 48 + nb, random, rlen ); nb += rlen; /* * Compute P_<hash>(secret, label + random)[0..dlen] */ sha4_hmac( secret, slen, tmp + 48, nb, tmp, 1 ); for( i = 0; i < dlen; i += 48 ) { sha4_hmac( secret, slen, tmp, 48 + nb, h_i, 1 ); sha4_hmac( secret, slen, tmp, 48, tmp, 1 ); k = ( i + 48 > dlen ) ? 
dlen % 48 : 48; for( j = 0; j < k; j++ ) dstbuf[i + j] = h_i[j]; } memset( tmp, 0, sizeof( tmp ) ); memset( h_i, 0, sizeof( h_i ) ); return( 0 ); } #endif static void ssl_update_checksum_start(ssl_context *, unsigned char *, size_t); static void ssl_update_checksum_md5sha1(ssl_context *, unsigned char *, size_t); static void ssl_update_checksum_sha256(ssl_context *, unsigned char *, size_t); static void ssl_calc_verify_ssl(ssl_context *,unsigned char *); static void ssl_calc_verify_tls(ssl_context *,unsigned char *); static void ssl_calc_verify_tls_sha256(ssl_context *,unsigned char *); static void ssl_calc_finished_ssl(ssl_context *,unsigned char *,int); static void ssl_calc_finished_tls(ssl_context *,unsigned char *,int); static void ssl_calc_finished_tls_sha256(ssl_context *,unsigned char *,int); #if defined(POLARSSL_SHA4_C) static void ssl_update_checksum_sha384(ssl_context *, unsigned char *, size_t); static void ssl_calc_verify_tls_sha384(ssl_context *,unsigned char *); static void ssl_calc_finished_tls_sha384(ssl_context *,unsigned char *,int); #endif int ssl_derive_keys( ssl_context *ssl ) { unsigned char tmp[64]; unsigned char keyblk[256]; unsigned char *key1; unsigned char *key2; unsigned int iv_copy_len; ssl_session *session = ssl->session_negotiate; ssl_transform *transform = ssl->transform_negotiate; ssl_handshake_params *handshake = ssl->handshake; SSL_DEBUG_MSG( 2, ( "=> derive keys" ) ); /* * Set appropriate PRF function and other SSL / TLS / TLS1.2 functions */ if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { handshake->tls_prf = ssl3_prf; handshake->calc_verify = ssl_calc_verify_ssl; handshake->calc_finished = ssl_calc_finished_ssl; } else if( ssl->minor_ver < SSL_MINOR_VERSION_3 ) { handshake->tls_prf = tls1_prf; handshake->calc_verify = ssl_calc_verify_tls; handshake->calc_finished = ssl_calc_finished_tls; } #if defined(POLARSSL_SHA4_C) else if( session->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 || session->ciphersuite == 
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ) { handshake->tls_prf = tls_prf_sha384; handshake->calc_verify = ssl_calc_verify_tls_sha384; handshake->calc_finished = ssl_calc_finished_tls_sha384; } #endif else { handshake->tls_prf = tls_prf_sha256; handshake->calc_verify = ssl_calc_verify_tls_sha256; handshake->calc_finished = ssl_calc_finished_tls_sha256; } /* * SSLv3: * master = * MD5( premaster + SHA1( 'A' + premaster + randbytes ) ) + * MD5( premaster + SHA1( 'BB' + premaster + randbytes ) ) + * MD5( premaster + SHA1( 'CCC' + premaster + randbytes ) ) * * TLSv1: * master = PRF( premaster, "master secret", randbytes )[0..47] */ if( handshake->resume == 0 ) { SSL_DEBUG_BUF( 3, "premaster secret", handshake->premaster, handshake->pmslen ); handshake->tls_prf( handshake->premaster, handshake->pmslen, "master secret", handshake->randbytes, 64, session->master, 48 ); memset( handshake->premaster, 0, sizeof( handshake->premaster ) ); } else SSL_DEBUG_MSG( 3, ( "no premaster (session resumed)" ) ); /* * Swap the client and server random values. */ memcpy( tmp, handshake->randbytes, 64 ); memcpy( handshake->randbytes, tmp + 32, 32 ); memcpy( handshake->randbytes + 32, tmp, 32 ); memset( tmp, 0, sizeof( tmp ) ); /* * SSLv3: * key block = * MD5( master + SHA1( 'A' + master + randbytes ) ) + * MD5( master + SHA1( 'BB' + master + randbytes ) ) + * MD5( master + SHA1( 'CCC' + master + randbytes ) ) + * MD5( master + SHA1( 'DDDD' + master + randbytes ) ) + * ... 
* * TLSv1: * key block = PRF( master, "key expansion", randbytes ) */ handshake->tls_prf( session->master, 48, "key expansion", handshake->randbytes, 64, keyblk, 256 ); SSL_DEBUG_MSG( 3, ( "ciphersuite = %s", ssl_get_ciphersuite_name( session->ciphersuite ) ) ); SSL_DEBUG_BUF( 3, "master secret", session->master, 48 ); SSL_DEBUG_BUF( 4, "random bytes", handshake->randbytes, 64 ); SSL_DEBUG_BUF( 4, "key block", keyblk, 256 ); memset( handshake->randbytes, 0, sizeof( handshake->randbytes ) ); /* * Determine the appropriate key, IV and MAC length. */ switch( session->ciphersuite ) { #if defined(POLARSSL_ARC4_C) case TLS_RSA_WITH_RC4_128_MD5: transform->keylen = 16; transform->minlen = 16; transform->ivlen = 0; transform->maclen = 16; break; case TLS_RSA_WITH_RC4_128_SHA: transform->keylen = 16; transform->minlen = 20; transform->ivlen = 0; transform->maclen = 20; break; #endif #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_3DES_EDE_CBC_SHA: case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA: transform->keylen = 24; transform->minlen = 24; transform->ivlen = 8; transform->maclen = 20; break; #endif #if defined(POLARSSL_AES_C) case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_DHE_RSA_WITH_AES_128_CBC_SHA: transform->keylen = 16; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 20; break; case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_DHE_RSA_WITH_AES_256_CBC_SHA: transform->keylen = 32; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 20; break; #if defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_AES_128_CBC_SHA256: case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256: transform->keylen = 16; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 32; break; case TLS_RSA_WITH_AES_256_CBC_SHA256: case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256: transform->keylen = 32; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 32; break; #endif #if defined(POLARSSL_GCM_C) case TLS_RSA_WITH_AES_128_GCM_SHA256: case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256: 
transform->keylen = 16; transform->minlen = 1; transform->ivlen = 12; transform->maclen = 0; transform->fixed_ivlen = 4; break; case TLS_RSA_WITH_AES_256_GCM_SHA384: case TLS_DHE_RSA_WITH_AES_256_GCM_SHA384: transform->keylen = 32; transform->minlen = 1; transform->ivlen = 12; transform->maclen = 0; transform->fixed_ivlen = 4; break; #endif #endif #if defined(POLARSSL_CAMELLIA_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA: case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA: transform->keylen = 16; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 20; break; case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA: case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA: transform->keylen = 32; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 20; break; #if defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256: case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256: transform->keylen = 16; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 32; break; case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256: case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256: transform->keylen = 32; transform->minlen = 32; transform->ivlen = 16; transform->maclen = 32; break; #endif #endif #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) #if defined(POLARSSL_CIPHER_NULL_CIPHER) case TLS_RSA_WITH_NULL_MD5: transform->keylen = 0; transform->minlen = 0; transform->ivlen = 0; transform->maclen = 16; break; case TLS_RSA_WITH_NULL_SHA: transform->keylen = 0; transform->minlen = 0; transform->ivlen = 0; transform->maclen = 20; break; case TLS_RSA_WITH_NULL_SHA256: transform->keylen = 0; transform->minlen = 0; transform->ivlen = 0; transform->maclen = 32; break; #endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */ #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_DES_CBC_SHA: case TLS_DHE_RSA_WITH_DES_CBC_SHA: transform->keylen = 8; transform->minlen = 8; transform->ivlen = 8; transform->maclen = 20; break; #endif #endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */ default: SSL_DEBUG_MSG( 1, ( 
"ciphersuite %s is not available", ssl_get_ciphersuite_name( session->ciphersuite ) ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } SSL_DEBUG_MSG( 3, ( "keylen: %d, minlen: %d, ivlen: %d, maclen: %d", transform->keylen, transform->minlen, transform->ivlen, transform->maclen ) ); /* * Finally setup the cipher contexts, IVs and MAC secrets. */ if( ssl->endpoint == SSL_IS_CLIENT ) { key1 = keyblk + transform->maclen * 2; key2 = keyblk + transform->maclen * 2 + transform->keylen; memcpy( transform->mac_enc, keyblk, transform->maclen ); memcpy( transform->mac_dec, keyblk + transform->maclen, transform->maclen ); /* * This is not used in TLS v1.1. */ iv_copy_len = ( transform->fixed_ivlen ) ? transform->fixed_ivlen : transform->ivlen; memcpy( transform->iv_enc, key2 + transform->keylen, iv_copy_len ); memcpy( transform->iv_dec, key2 + transform->keylen + iv_copy_len, iv_copy_len ); } else { key1 = keyblk + transform->maclen * 2 + transform->keylen; key2 = keyblk + transform->maclen * 2; memcpy( transform->mac_dec, keyblk, transform->maclen ); memcpy( transform->mac_enc, keyblk + transform->maclen, transform->maclen ); /* * This is not used in TLS v1.1. */ iv_copy_len = ( transform->fixed_ivlen ) ? 
transform->fixed_ivlen : transform->ivlen; memcpy( transform->iv_dec, key1 + transform->keylen, iv_copy_len ); memcpy( transform->iv_enc, key1 + transform->keylen + iv_copy_len, iv_copy_len ); } #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_init != NULL) { int ret = 0; SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_init()" ) ); if( ( ret = ssl_hw_record_init( ssl, key1, key2, transform->iv_enc, transform->iv_dec, transform->mac_enc, transform->mac_dec ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_hw_record_init", ret ); return POLARSSL_ERR_SSL_HW_ACCEL_FAILED; } } #endif switch( session->ciphersuite ) { #if defined(POLARSSL_ARC4_C) case TLS_RSA_WITH_RC4_128_MD5: case TLS_RSA_WITH_RC4_128_SHA: arc4_setup( (arc4_context *) transform->ctx_enc, key1, transform->keylen ); arc4_setup( (arc4_context *) transform->ctx_dec, key2, transform->keylen ); break; #endif #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_3DES_EDE_CBC_SHA: case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA: des3_set3key_enc( (des3_context *) transform->ctx_enc, key1 ); des3_set3key_dec( (des3_context *) transform->ctx_dec, key2 ); break; #endif #if defined(POLARSSL_AES_C) case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_DHE_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA256: case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256: aes_setkey_enc( (aes_context *) transform->ctx_enc, key1, 128 ); aes_setkey_dec( (aes_context *) transform->ctx_dec, key2, 128 ); break; case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_DHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA256: case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256: aes_setkey_enc( (aes_context *) transform->ctx_enc, key1, 256 ); aes_setkey_dec( (aes_context *) transform->ctx_dec, key2, 256 ); break; #if defined(POLARSSL_GCM_C) case TLS_RSA_WITH_AES_128_GCM_SHA256: case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256: gcm_init( (gcm_context *) transform->ctx_enc, key1, 128 ); gcm_init( (gcm_context *) transform->ctx_dec, key2, 128 ); break; case TLS_RSA_WITH_AES_256_GCM_SHA384: case 
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384: gcm_init( (gcm_context *) transform->ctx_enc, key1, 256 ); gcm_init( (gcm_context *) transform->ctx_dec, key2, 256 ); break; #endif #endif #if defined(POLARSSL_CAMELLIA_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA: case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA: case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256: case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256: camellia_setkey_enc( (camellia_context *) transform->ctx_enc, key1, 128 ); camellia_setkey_dec( (camellia_context *) transform->ctx_dec, key2, 128 ); break; case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA: case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA: case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256: case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256: camellia_setkey_enc( (camellia_context *) transform->ctx_enc, key1, 256 ); camellia_setkey_dec( (camellia_context *) transform->ctx_dec, key2, 256 ); break; #endif #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) #if defined(POLARSSL_CIPHER_NULL_CIPHER) case TLS_RSA_WITH_NULL_MD5: case TLS_RSA_WITH_NULL_SHA: case TLS_RSA_WITH_NULL_SHA256: break; #endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */ #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_DES_CBC_SHA: case TLS_DHE_RSA_WITH_DES_CBC_SHA: des_setkey_enc( (des_context *) transform->ctx_enc, key1 ); des_setkey_dec( (des_context *) transform->ctx_dec, key2 ); break; #endif #endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */ default: return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } memset( keyblk, 0, sizeof( keyblk ) ); #if defined(POLARSSL_ZLIB_SUPPORT) // Initialize compression // if( session->compression == SSL_COMPRESS_DEFLATE ) { SSL_DEBUG_MSG( 3, ( "Initializing zlib states" ) ); memset( &transform->ctx_deflate, 0, sizeof( transform->ctx_deflate ) ); memset( &transform->ctx_inflate, 0, sizeof( transform->ctx_inflate ) ); if( deflateInit( &transform->ctx_deflate, Z_DEFAULT_COMPRESSION ) != Z_OK || inflateInit( &transform->ctx_inflate ) != Z_OK ) { SSL_DEBUG_MSG( 1, ( "Failed to initialize compression" ) ); 
return( POLARSSL_ERR_SSL_COMPRESSION_FAILED ); } } #endif /* POLARSSL_ZLIB_SUPPORT */ SSL_DEBUG_MSG( 2, ( "<= derive keys" ) ); return( 0 ); } void ssl_calc_verify_ssl( ssl_context *ssl, unsigned char hash[36] ) { md5_context md5; sha1_context sha1; unsigned char pad_1[48]; unsigned char pad_2[48]; SSL_DEBUG_MSG( 2, ( "=> calc verify ssl" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); memset( pad_1, 0x36, 48 ); memset( pad_2, 0x5C, 48 ); md5_update( &md5, ssl->session_negotiate->master, 48 ); md5_update( &md5, pad_1, 48 ); md5_finish( &md5, hash ); md5_starts( &md5 ); md5_update( &md5, ssl->session_negotiate->master, 48 ); md5_update( &md5, pad_2, 48 ); md5_update( &md5, hash, 16 ); md5_finish( &md5, hash ); sha1_update( &sha1, ssl->session_negotiate->master, 48 ); sha1_update( &sha1, pad_1, 40 ); sha1_finish( &sha1, hash + 16 ); sha1_starts( &sha1 ); sha1_update( &sha1, ssl->session_negotiate->master, 48 ); sha1_update( &sha1, pad_2, 40 ); sha1_update( &sha1, hash + 16, 20 ); sha1_finish( &sha1, hash + 16 ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 ); SSL_DEBUG_MSG( 2, ( "<= calc verify" ) ); return; } void ssl_calc_verify_tls( ssl_context *ssl, unsigned char hash[36] ) { md5_context md5; sha1_context sha1; SSL_DEBUG_MSG( 2, ( "=> calc verify tls" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); md5_finish( &md5, hash ); sha1_finish( &sha1, hash + 16 ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 ); SSL_DEBUG_MSG( 2, ( "<= calc verify" ) ); return; } void ssl_calc_verify_tls_sha256( ssl_context *ssl, unsigned char hash[32] ) { sha2_context sha2; SSL_DEBUG_MSG( 2, ( "=> calc verify sha256" ) ); memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) ); sha2_finish( &sha2, hash ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 32 ); SSL_DEBUG_MSG( 2, ( "<= 
calc verify" ) ); return; } #if defined(POLARSSL_SHA4_C) void ssl_calc_verify_tls_sha384( ssl_context *ssl, unsigned char hash[48] ) { sha4_context sha4; SSL_DEBUG_MSG( 2, ( "=> calc verify sha384" ) ); memcpy( &sha4, &ssl->handshake->fin_sha4, sizeof(sha4_context) ); sha4_finish( &sha4, hash ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 48 ); SSL_DEBUG_MSG( 2, ( "<= calc verify" ) ); return; } #endif /* * SSLv3.0 MAC functions */ static void ssl_mac_md5( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int type ) { unsigned char header[11]; unsigned char padding[48]; md5_context md5; memcpy( header, ctr, 8 ); header[ 8] = (unsigned char) type; header[ 9] = (unsigned char)( len >> 8 ); header[10] = (unsigned char)( len ); memset( padding, 0x36, 48 ); md5_starts( &md5 ); md5_update( &md5, secret, 16 ); md5_update( &md5, padding, 48 ); md5_update( &md5, header, 11 ); md5_update( &md5, buf, len ); md5_finish( &md5, buf + len ); memset( padding, 0x5C, 48 ); md5_starts( &md5 ); md5_update( &md5, secret, 16 ); md5_update( &md5, padding, 48 ); md5_update( &md5, buf + len, 16 ); md5_finish( &md5, buf + len ); } static void ssl_mac_sha1( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int type ) { unsigned char header[11]; unsigned char padding[40]; sha1_context sha1; memcpy( header, ctr, 8 ); header[ 8] = (unsigned char) type; header[ 9] = (unsigned char)( len >> 8 ); header[10] = (unsigned char)( len ); memset( padding, 0x36, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, header, 11 ); sha1_update( &sha1, buf, len ); sha1_finish( &sha1, buf + len ); memset( padding, 0x5C, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, buf + len, 20 ); sha1_finish( &sha1, buf + len ); } static void ssl_mac_sha2( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int 
type ) { unsigned char header[11]; unsigned char padding[32]; sha2_context sha2; memcpy( header, ctr, 8 ); header[ 8] = (unsigned char) type; header[ 9] = (unsigned char)( len >> 8 ); header[10] = (unsigned char)( len ); memset( padding, 0x36, 32 ); sha2_starts( &sha2, 0 ); sha2_update( &sha2, secret, 32 ); sha2_update( &sha2, padding, 32 ); sha2_update( &sha2, header, 11 ); sha2_update( &sha2, buf, len ); sha2_finish( &sha2, buf + len ); memset( padding, 0x5C, 32 ); sha2_starts( &sha2, 0 ); sha2_update( &sha2, secret, 32 ); sha2_update( &sha2, padding, 32 ); sha2_update( &sha2, buf + len, 32 ); sha2_finish( &sha2, buf + len ); } /* * Encryption/decryption functions */ static int ssl_encrypt_buf( ssl_context *ssl ) { size_t i, padlen; SSL_DEBUG_MSG( 2, ( "=> encrypt buf" ) ); /* * Add MAC then encrypt */ if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { if( ssl->transform_out->maclen == 16 ) ssl_mac_md5( ssl->transform_out->mac_enc, ssl->out_msg, ssl->out_msglen, ssl->out_ctr, ssl->out_msgtype ); else if( ssl->transform_out->maclen == 20 ) ssl_mac_sha1( ssl->transform_out->mac_enc, ssl->out_msg, ssl->out_msglen, ssl->out_ctr, ssl->out_msgtype ); else if( ssl->transform_out->maclen == 32 ) ssl_mac_sha2( ssl->transform_out->mac_enc, ssl->out_msg, ssl->out_msglen, ssl->out_ctr, ssl->out_msgtype ); else if( ssl->transform_out->maclen != 0 ) { SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d", ssl->transform_out->maclen ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } } else { if( ssl->transform_out->maclen == 16 ) { md5_context ctx; md5_hmac_starts( &ctx, ssl->transform_out->mac_enc, 16 ); md5_hmac_update( &ctx, ssl->out_ctr, 13 ); md5_hmac_update( &ctx, ssl->out_msg, ssl->out_msglen ); md5_hmac_finish( &ctx, ssl->out_msg + ssl->out_msglen ); memset( &ctx, 0, sizeof(md5_context)); } else if( ssl->transform_out->maclen == 20 ) { sha1_context ctx; sha1_hmac_starts( &ctx, ssl->transform_out->mac_enc, 20 ); sha1_hmac_update( &ctx, ssl->out_ctr, 13 ); sha1_hmac_update( &ctx, 
ssl->out_msg, ssl->out_msglen ); sha1_hmac_finish( &ctx, ssl->out_msg + ssl->out_msglen ); memset( &ctx, 0, sizeof(sha1_context)); } else if( ssl->transform_out->maclen == 32 ) { sha2_context ctx; sha2_hmac_starts( &ctx, ssl->transform_out->mac_enc, 32, 0 ); sha2_hmac_update( &ctx, ssl->out_ctr, 13 ); sha2_hmac_update( &ctx, ssl->out_msg, ssl->out_msglen ); sha2_hmac_finish( &ctx, ssl->out_msg + ssl->out_msglen ); memset( &ctx, 0, sizeof(sha2_context)); } else if( ssl->transform_out->maclen != 0 ) { SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d", ssl->transform_out->maclen ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } } SSL_DEBUG_BUF( 4, "computed mac", ssl->out_msg + ssl->out_msglen, ssl->transform_out->maclen ); ssl->out_msglen += ssl->transform_out->maclen; if( ssl->transform_out->ivlen == 0 ) { padlen = 0; SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, " "including %d bytes of padding", ssl->out_msglen, 0 ) ); SSL_DEBUG_BUF( 4, "before encrypt: output payload", ssl->out_msg, ssl->out_msglen ); #if defined(POLARSSL_ARC4_C) if( ssl->session_out->ciphersuite == TLS_RSA_WITH_RC4_128_MD5 || ssl->session_out->ciphersuite == TLS_RSA_WITH_RC4_128_SHA ) { arc4_crypt( (arc4_context *) ssl->transform_out->ctx_enc, ssl->out_msglen, ssl->out_msg, ssl->out_msg ); } else #endif #if defined(POLARSSL_CIPHER_NULL_CIPHER) if( ssl->session_out->ciphersuite == TLS_RSA_WITH_NULL_MD5 || ssl->session_out->ciphersuite == TLS_RSA_WITH_NULL_SHA || ssl->session_out->ciphersuite == TLS_RSA_WITH_NULL_SHA256 ) { } else #endif return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } else if( ssl->transform_out->ivlen == 12 ) { size_t enc_msglen; unsigned char *enc_msg; unsigned char add_data[13]; int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE; padlen = 0; enc_msglen = ssl->out_msglen; memcpy( add_data, ssl->out_ctr, 8 ); add_data[8] = ssl->out_msgtype; add_data[9] = ssl->major_ver; add_data[10] = ssl->minor_ver; add_data[11] = ( ssl->out_msglen >> 8 ) & 0xFF; add_data[12] = ssl->out_msglen & 
0xFF; SSL_DEBUG_BUF( 4, "additional data used for AEAD", add_data, 13 ); #if defined(POLARSSL_AES_C) && defined(POLARSSL_GCM_C) if( ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_128_GCM_SHA256 || ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ) { /* * Generate IV */ ret = ssl->f_rng( ssl->p_rng, ssl->transform_out->iv_enc + ssl->transform_out->fixed_ivlen, ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen ); if( ret != 0 ) return( ret ); /* * Shift message for ivlen bytes and prepend IV */ memmove( ssl->out_msg + ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen, ssl->out_msg, ssl->out_msglen ); memcpy( ssl->out_msg, ssl->transform_out->iv_enc + ssl->transform_out->fixed_ivlen, ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen ); /* * Fix pointer positions and message length with added IV */ enc_msg = ssl->out_msg + ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen; enc_msglen = ssl->out_msglen; ssl->out_msglen += ssl->transform_out->ivlen - ssl->transform_out->fixed_ivlen; SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, " "including %d bytes of padding", ssl->out_msglen, 0 ) ); SSL_DEBUG_BUF( 4, "before encrypt: output payload", ssl->out_msg, ssl->out_msglen ); /* * Adjust for tag */ ssl->out_msglen += 16; gcm_crypt_and_tag( (gcm_context *) ssl->transform_out->ctx_enc, GCM_ENCRYPT, enc_msglen, ssl->transform_out->iv_enc, ssl->transform_out->ivlen, add_data, 13, enc_msg, enc_msg, 16, enc_msg + enc_msglen ); SSL_DEBUG_BUF( 4, "after encrypt: tag", enc_msg + enc_msglen, 16 ); } else #endif return( ret ); } else { unsigned char *enc_msg; size_t enc_msglen; padlen = ssl->transform_out->ivlen - ( ssl->out_msglen + 1 ) % ssl->transform_out->ivlen; if( padlen == ssl->transform_out->ivlen ) padlen = 0; for( i = 0; i <= padlen; i++ ) ssl->out_msg[ssl->out_msglen + i] = 
(unsigned char) padlen; ssl->out_msglen += padlen + 1; enc_msglen = ssl->out_msglen; enc_msg = ssl->out_msg; /* * Prepend per-record IV for block cipher in TLS v1.1 and up as per * Method 1 (6.2.3.2. in RFC4346 and RFC5246) */ if( ssl->minor_ver >= SSL_MINOR_VERSION_2 ) { /* * Generate IV */ int ret = ssl->f_rng( ssl->p_rng, ssl->transform_out->iv_enc, ssl->transform_out->ivlen ); if( ret != 0 ) return( ret ); /* * Shift message for ivlen bytes and prepend IV */ memmove( ssl->out_msg + ssl->transform_out->ivlen, ssl->out_msg, ssl->out_msglen ); memcpy( ssl->out_msg, ssl->transform_out->iv_enc, ssl->transform_out->ivlen ); /* * Fix pointer positions and message length with added IV */ enc_msg = ssl->out_msg + ssl->transform_out->ivlen; enc_msglen = ssl->out_msglen; ssl->out_msglen += ssl->transform_out->ivlen; } SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, " "including %d bytes of IV and %d bytes of padding", ssl->out_msglen, ssl->transform_out->ivlen, padlen + 1 ) ); SSL_DEBUG_BUF( 4, "before encrypt: output payload", ssl->out_msg, ssl->out_msglen ); switch( ssl->transform_out->ivlen ) { #if defined(POLARSSL_DES_C) case 8: #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) if( ssl->session_out->ciphersuite == TLS_RSA_WITH_DES_CBC_SHA || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_DES_CBC_SHA ) { des_crypt_cbc( (des_context *) ssl->transform_out->ctx_enc, DES_ENCRYPT, enc_msglen, ssl->transform_out->iv_enc, enc_msg, enc_msg ); } else #endif des3_crypt_cbc( (des3_context *) ssl->transform_out->ctx_enc, DES_ENCRYPT, enc_msglen, ssl->transform_out->iv_enc, enc_msg, enc_msg ); break; #endif case 16: #if defined(POLARSSL_AES_C) if ( ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA || ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA || ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA256 || 
ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA256 || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 ) { aes_crypt_cbc( (aes_context *) ssl->transform_out->ctx_enc, AES_ENCRYPT, enc_msglen, ssl->transform_out->iv_enc, enc_msg, enc_msg); break; } #endif #if defined(POLARSSL_CAMELLIA_C) if ( ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA || ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA || ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 || ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 || ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 ) { camellia_crypt_cbc( (camellia_context *) ssl->transform_out->ctx_enc, CAMELLIA_ENCRYPT, enc_msglen, ssl->transform_out->iv_enc, enc_msg, enc_msg ); break; } #endif default: return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } } for( i = 8; i > 0; i-- ) if( ++ssl->out_ctr[i - 1] != 0 ) break; SSL_DEBUG_MSG( 2, ( "<= encrypt buf" ) ); return( 0 ); } /* * TODO: Use digest version when integrated! 
*/ #define POLARSSL_SSL_MAX_MAC_SIZE 32 static int ssl_decrypt_buf( ssl_context *ssl ) { size_t i, padlen = 0, correct = 1; unsigned char tmp[POLARSSL_SSL_MAX_MAC_SIZE]; SSL_DEBUG_MSG( 2, ( "=> decrypt buf" ) ); if( ssl->in_msglen < ssl->transform_in->minlen ) { SSL_DEBUG_MSG( 1, ( "in_msglen (%d) < minlen (%d)", ssl->in_msglen, ssl->transform_in->minlen ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } if( ssl->transform_in->ivlen == 0 ) { #if defined(POLARSSL_ARC4_C) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_MD5 || ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_SHA ) { arc4_crypt( (arc4_context *) ssl->transform_in->ctx_dec, ssl->in_msglen, ssl->in_msg, ssl->in_msg ); } else #endif #if defined(POLARSSL_CIPHER_NULL_CIPHER) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_MD5 || ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA256 ) { } else #endif return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } else if( ssl->transform_in->ivlen == 12 ) { unsigned char *dec_msg; unsigned char *dec_msg_result; size_t dec_msglen; unsigned char add_data[13]; int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE; #if defined(POLARSSL_AES_C) && defined(POLARSSL_GCM_C) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_GCM_SHA256 || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ) { dec_msglen = ssl->in_msglen - ( ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); dec_msglen -= 16; dec_msg = ssl->in_msg + ( ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); dec_msg_result = ssl->in_msg; ssl->in_msglen = dec_msglen; memcpy( add_data, ssl->in_ctr, 8 ); add_data[8] = ssl->in_msgtype; add_data[9] = ssl->major_ver; add_data[10] = ssl->minor_ver; add_data[11] = ( ssl->in_msglen >> 8 ) & 0xFF; add_data[12] = ssl->in_msglen & 0xFF; 
SSL_DEBUG_BUF( 4, "additional data used for AEAD", add_data, 13 ); memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen, ssl->in_msg, ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); SSL_DEBUG_BUF( 4, "IV used", ssl->transform_in->iv_dec, ssl->transform_in->ivlen ); SSL_DEBUG_BUF( 4, "TAG used", dec_msg + dec_msglen, 16 ); memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen, ssl->in_msg, ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen ); ret = gcm_auth_decrypt( (gcm_context *) ssl->transform_in->ctx_dec, dec_msglen, ssl->transform_in->iv_dec, ssl->transform_in->ivlen, add_data, 13, dec_msg + dec_msglen, 16, dec_msg, dec_msg_result ); if( ret != 0 ) { SSL_DEBUG_MSG( 1, ( "AEAD decrypt failed on validation (ret = -0x%02x)", -ret ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } } else #endif return( ret ); } else { /* * Decrypt and check the padding */ unsigned char *dec_msg; unsigned char *dec_msg_result; size_t dec_msglen; size_t minlen = 0; /* * Check immediate ciphertext sanity */ if( ssl->in_msglen % ssl->transform_in->ivlen != 0 ) { SSL_DEBUG_MSG( 1, ( "msglen (%d) %% ivlen (%d) != 0", ssl->in_msglen, ssl->transform_in->ivlen ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } if( ssl->minor_ver >= SSL_MINOR_VERSION_2 ) minlen += ssl->transform_in->ivlen; if( ssl->in_msglen < minlen + ssl->transform_in->ivlen || ssl->in_msglen < minlen + ssl->transform_in->maclen + 1 ) { SSL_DEBUG_MSG( 1, ( "msglen (%d) < max( ivlen(%d), maclen (%d) + 1 ) ( + expl IV )", ssl->in_msglen, ssl->transform_in->ivlen, ssl->transform_in->maclen ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } dec_msglen = ssl->in_msglen; dec_msg = ssl->in_msg; dec_msg_result = ssl->in_msg; /* * Initialize for prepended IV for block cipher in TLS v1.1 and up */ if( ssl->minor_ver >= SSL_MINOR_VERSION_2 ) { dec_msg += ssl->transform_in->ivlen; dec_msglen -= ssl->transform_in->ivlen; ssl->in_msglen -= ssl->transform_in->ivlen; for( i = 0; i < 
ssl->transform_in->ivlen; i++ ) ssl->transform_in->iv_dec[i] = ssl->in_msg[i]; } switch( ssl->transform_in->ivlen ) { #if defined(POLARSSL_DES_C) case 8: #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) if( ssl->session_in->ciphersuite == TLS_RSA_WITH_DES_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_DES_CBC_SHA ) { des_crypt_cbc( (des_context *) ssl->transform_in->ctx_dec, DES_DECRYPT, dec_msglen, ssl->transform_in->iv_dec, dec_msg, dec_msg_result ); } else #endif des3_crypt_cbc( (des3_context *) ssl->transform_in->ctx_dec, DES_DECRYPT, dec_msglen, ssl->transform_in->iv_dec, dec_msg, dec_msg_result ); break; #endif case 16: #if defined(POLARSSL_AES_C) if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 ) { aes_crypt_cbc( (aes_context *) ssl->transform_in->ctx_dec, AES_DECRYPT, dec_msglen, ssl->transform_in->iv_dec, dec_msg, dec_msg_result ); break; } #endif #if defined(POLARSSL_CAMELLIA_C) if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA || ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 || ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 ) { 
camellia_crypt_cbc( (camellia_context *) ssl->transform_in->ctx_dec,
                                    CAMELLIA_DECRYPT, dec_msglen,
                                    ssl->transform_in->iv_dec,
                                    dec_msg, dec_msg_result );
                break;
            }
#endif

            default:
                return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }

        /* Last plaintext byte holds (padding length - 1) */
        padlen = 1 + ssl->in_msg[ssl->in_msglen - 1];

        if( ssl->in_msglen < ssl->transform_in->maclen + padlen )
        {
#if defined(POLARSSL_SSL_DEBUG_ALL)
            SSL_DEBUG_MSG( 1, ( "msglen (%d) < maclen (%d) + padlen (%d)",
                           ssl->in_msglen, ssl->transform_in->maclen, padlen ) );
#endif
            /* Do not return yet: record the failure and keep going so that
             * the amount of work done does not depend on the padding value */
            padlen = 0;
            correct = 0;
        }

        if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
        {
            /* SSLv3: padding byte values are unspecified; only require the
             * padding length not to exceed one cipher block */
            if( padlen > ssl->transform_in->ivlen )
            {
#if defined(POLARSSL_SSL_DEBUG_ALL)
                SSL_DEBUG_MSG( 1, ( "bad padding length: is %d, "
                                    "should be no more than %d",
                               padlen, ssl->transform_in->ivlen ) );
#endif
                correct = 0;
            }
        }
        else
        {
            /*
             * TLSv1+: always check the padding up to the first failure
             * and fake check up to 256 bytes of padding
             *
             * (Timing countermeasure: both loops together always examine
             * 256 bytes, independent of the real padding length, so the
             * padding check itself leaks no timing information.)
             */
            size_t pad_count = 0, fake_pad_count = 0;
            size_t padding_idx = ssl->in_msglen - padlen - 1;

            /* Real check: each of the padlen padding bytes must equal
             * padlen - 1 */
            for( i = 1; i <= padlen; i++ )
                pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            /* Dummy check on the remaining bytes up to 256; its result is
             * neutralized by the "Always 1" mask below */
            for( ; i <= 256; i++ )
                fake_pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            correct &= ( pad_count == padlen ); /* Only 1 on correct padding */
            correct &= ( pad_count + fake_pad_count < 512 ); /* Always 1 */

#if defined(POLARSSL_SSL_DEBUG_ALL)
            if( padlen > 0 && correct == 0)
                SSL_DEBUG_MSG( 1, ( "bad padding byte detected" ) );
#endif
            /* Constant-time zeroing of padlen on failure: correct is 0 or 1,
             * and 0x1FF covers the maximum possible padlen value of 256 */
            padlen &= correct * 0x1FF;
        }
    }

    SSL_DEBUG_BUF( 4, "raw buffer after decryption",
                   ssl->in_msg, ssl->in_msglen );

    /*
     * Always compute the MAC (RFC4346, CBCTIME).
*/ ssl->in_msglen -= ( ssl->transform_in->maclen + padlen ); ssl->in_hdr[3] = (unsigned char)( ssl->in_msglen >> 8 ); ssl->in_hdr[4] = (unsigned char)( ssl->in_msglen ); memcpy( tmp, ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen ); if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { if( ssl->transform_in->maclen == 16 ) ssl_mac_md5( ssl->transform_in->mac_dec, ssl->in_msg, ssl->in_msglen, ssl->in_ctr, ssl->in_msgtype ); else if( ssl->transform_in->maclen == 20 ) ssl_mac_sha1( ssl->transform_in->mac_dec, ssl->in_msg, ssl->in_msglen, ssl->in_ctr, ssl->in_msgtype ); else if( ssl->transform_in->maclen == 32 ) ssl_mac_sha2( ssl->transform_in->mac_dec, ssl->in_msg, ssl->in_msglen, ssl->in_ctr, ssl->in_msgtype ); else if( ssl->transform_in->maclen != 0 ) { SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d", ssl->transform_in->maclen ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } } else { /* * Process MAC and always update for padlen afterwards to make * total time independent of padlen * * extra_run compensates MAC check for padlen * * Known timing attacks: * - Lucky Thirteen (http://www.isg.rhul.ac.uk/tls/TLStiming.pdf) * * We use ( ( Lx + 8 ) / 64 ) to handle 'negative Lx' values * correctly. 
(We round down instead of up, so -56 is the correct * value for our calculations instead of -55) */ int j, extra_run = 0; extra_run = ( 13 + ssl->in_msglen + padlen + 8 ) / 64 - ( 13 + ssl->in_msglen + 8 ) / 64; extra_run &= correct * 0xFF; if( ssl->transform_in->maclen == 16 ) { md5_context ctx; md5_hmac_starts( &ctx, ssl->transform_in->mac_dec, 16 ); md5_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 ); md5_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen ); for( j = 0; j < extra_run; j++ ) md5_process( &ctx, ssl->in_msg ); } else if( ssl->transform_in->maclen == 20 ) { sha1_context ctx; sha1_hmac_starts( &ctx, ssl->transform_in->mac_dec, 20 ); sha1_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 ); sha1_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen ); for( j = 0; j < extra_run; j++ ) sha1_process( &ctx, ssl->in_msg ); } else if( ssl->transform_in->maclen == 32 ) { sha2_context ctx; sha2_hmac_starts( &ctx, ssl->transform_in->mac_dec, 32, 0 ); sha2_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 ); sha2_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen ); for( j = 0; j < extra_run; j++ ) sha2_process( &ctx, ssl->in_msg ); } else if( ssl->transform_in->maclen != 0 ) { SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d", ssl->transform_in->maclen ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } } SSL_DEBUG_BUF( 4, "message mac", tmp, ssl->transform_in->maclen ); SSL_DEBUG_BUF( 4, "computed mac", ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen ); if( memcmp( tmp, ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen ) != 0 ) { #if defined(POLARSSL_SSL_DEBUG_ALL) SSL_DEBUG_MSG( 1, ( "message mac does not match" ) ); #endif correct = 0; } /* * Finally check the correct flag */ if( correct == 0 ) return( POLARSSL_ERR_SSL_INVALID_MAC ); if( ssl->in_msglen == 0 ) { ssl->nb_zero++; /* * Three or more empty messages may be a DoS attack * (excessive CPU consumption). 
*/ if( ssl->nb_zero > 3 ) { SSL_DEBUG_MSG( 1, ( "received four consecutive empty " "messages, possible DoS attack" ) ); return( POLARSSL_ERR_SSL_INVALID_MAC ); } } else ssl->nb_zero = 0; for( i = 8; i > 0; i-- ) if( ++ssl->in_ctr[i - 1] != 0 ) break; SSL_DEBUG_MSG( 2, ( "<= decrypt buf" ) ); return( 0 ); } #if defined(POLARSSL_ZLIB_SUPPORT) /* * Compression/decompression functions */ static int ssl_compress_buf( ssl_context *ssl ) { int ret; unsigned char *msg_post = ssl->out_msg; size_t len_pre = ssl->out_msglen; unsigned char *msg_pre; SSL_DEBUG_MSG( 2, ( "=> compress buf" ) ); msg_pre = (unsigned char*) malloc( len_pre ); if( msg_pre == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len_pre ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memcpy( msg_pre, ssl->out_msg, len_pre ); SSL_DEBUG_MSG( 3, ( "before compression: msglen = %d, ", ssl->out_msglen ) ); SSL_DEBUG_BUF( 4, "before compression: output payload", ssl->out_msg, ssl->out_msglen ); ssl->transform_out->ctx_deflate.next_in = msg_pre; ssl->transform_out->ctx_deflate.avail_in = len_pre; ssl->transform_out->ctx_deflate.next_out = msg_post; ssl->transform_out->ctx_deflate.avail_out = SSL_BUFFER_LEN; ret = deflate( &ssl->transform_out->ctx_deflate, Z_SYNC_FLUSH ); if( ret != Z_OK ) { SSL_DEBUG_MSG( 1, ( "failed to perform compression (%d)", ret ) ); return( POLARSSL_ERR_SSL_COMPRESSION_FAILED ); } ssl->out_msglen = SSL_BUFFER_LEN - ssl->transform_out->ctx_deflate.avail_out; free( msg_pre ); SSL_DEBUG_MSG( 3, ( "after compression: msglen = %d, ", ssl->out_msglen ) ); SSL_DEBUG_BUF( 4, "after compression: output payload", ssl->out_msg, ssl->out_msglen ); SSL_DEBUG_MSG( 2, ( "<= compress buf" ) ); return( 0 ); } static int ssl_decompress_buf( ssl_context *ssl ) { int ret; unsigned char *msg_post = ssl->in_msg; size_t len_pre = ssl->in_msglen; unsigned char *msg_pre; SSL_DEBUG_MSG( 2, ( "=> decompress buf" ) ); msg_pre = (unsigned char*) malloc( len_pre ); if( msg_pre == NULL ) { SSL_DEBUG_MSG( 1, ( 
"malloc(%d bytes) failed", len_pre ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memcpy( msg_pre, ssl->in_msg, len_pre ); SSL_DEBUG_MSG( 3, ( "before decompression: msglen = %d, ", ssl->in_msglen ) ); SSL_DEBUG_BUF( 4, "before decompression: input payload", ssl->in_msg, ssl->in_msglen ); ssl->transform_in->ctx_inflate.next_in = msg_pre; ssl->transform_in->ctx_inflate.avail_in = len_pre; ssl->transform_in->ctx_inflate.next_out = msg_post; ssl->transform_in->ctx_inflate.avail_out = SSL_MAX_CONTENT_LEN; ret = inflate( &ssl->transform_in->ctx_inflate, Z_SYNC_FLUSH ); if( ret != Z_OK ) { SSL_DEBUG_MSG( 1, ( "failed to perform decompression (%d)", ret ) ); return( POLARSSL_ERR_SSL_COMPRESSION_FAILED ); } ssl->in_msglen = SSL_MAX_CONTENT_LEN - ssl->transform_in->ctx_inflate.avail_out; free( msg_pre ); SSL_DEBUG_MSG( 3, ( "after decompression: msglen = %d, ", ssl->in_msglen ) ); SSL_DEBUG_BUF( 4, "after decompression: input payload", ssl->in_msg, ssl->in_msglen ); SSL_DEBUG_MSG( 2, ( "<= decompress buf" ) ); return( 0 ); } #endif /* POLARSSL_ZLIB_SUPPORT */ /* * Fill the input message buffer */ int ssl_fetch_input( ssl_context *ssl, size_t nb_want ) { int ret; size_t len; SSL_DEBUG_MSG( 2, ( "=> fetch input" ) ); while( ssl->in_left < nb_want ) { len = nb_want - ssl->in_left; ret = ssl->f_recv( ssl->p_recv, ssl->in_hdr + ssl->in_left, len ); SSL_DEBUG_MSG( 2, ( "in_left: %d, nb_want: %d", ssl->in_left, nb_want ) ); SSL_DEBUG_RET( 2, "ssl->f_recv", ret ); if( ret == 0 ) return( POLARSSL_ERR_SSL_CONN_EOF ); if( ret < 0 ) return( ret ); ssl->in_left += ret; } SSL_DEBUG_MSG( 2, ( "<= fetch input" ) ); return( 0 ); } /* * Flush any data not yet written */ int ssl_flush_output( ssl_context *ssl ) { int ret; unsigned char *buf; SSL_DEBUG_MSG( 2, ( "=> flush output" ) ); while( ssl->out_left > 0 ) { SSL_DEBUG_MSG( 2, ( "message length: %d, out_left: %d", 5 + ssl->out_msglen, ssl->out_left ) ); if( ssl->out_msglen < ssl->out_left ) { size_t header_left = ssl->out_left - 
/*
 * Write the record currently staged in ssl->out_msg to the peer.
 *
 * Steps, in order: patch the handshake-message length and update the
 * handshake transcript hash; optionally compress; optionally hand the
 * whole record to a hardware offload; otherwise build the 5-byte
 * record header, encrypt/MAC if a transform is active (which changes
 * the length, so the header length field is rewritten), and finally
 * flush everything through ssl_flush_output().
 */
int ssl_write_record( ssl_context *ssl )
{
    int ret, done = 0;
    size_t len = ssl->out_msglen;

    SSL_DEBUG_MSG( 2, ( "=> write record" ) );

    if( ssl->out_msgtype == SSL_MSG_HANDSHAKE )
    {
        /* Handshake body length is msglen minus the 4-byte
         * handshake header, stored as a 24-bit big-endian value. */
        ssl->out_msg[1] = (unsigned char)( ( len - 4 ) >> 16 );
        ssl->out_msg[2] = (unsigned char)( ( len - 4 ) >> 8 );
        ssl->out_msg[3] = (unsigned char)( ( len - 4 ) );

        ssl->handshake->update_checksum( ssl, ssl->out_msg, len );
    }

#if defined(POLARSSL_ZLIB_SUPPORT)
    if( ssl->transform_out != NULL &&
        ssl->session_out->compression == SSL_COMPRESS_DEFLATE )
    {
        if( ( ret = ssl_compress_buf( ssl ) ) != 0 )
        {
            SSL_DEBUG_RET( 1, "ssl_compress_buf", ret );
            return( ret );
        }

        /* Compression changed the payload length. */
        len = ssl->out_msglen;
    }
#endif /*POLARSSL_ZLIB_SUPPORT */

#if defined(POLARSSL_SSL_HW_RECORD_ACCEL)
    if( ssl_hw_record_write != NULL)
    {
        SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_write()" ) );

        ret = ssl_hw_record_write( ssl );
        if( ret != 0 && ret != POLARSSL_ERR_SSL_HW_ACCEL_FALLTHROUGH )
        {
            SSL_DEBUG_RET( 1, "ssl_hw_record_write", ret );
            return POLARSSL_ERR_SSL_HW_ACCEL_FAILED;
        }
        /* FALLTHROUGH means the accelerator declined and the
         * software path below must run. */
        done = 1;
    }
#endif
    if( !done )
    {
        /* 5-byte record header: type, version, 16-bit length. */
        ssl->out_hdr[0] = (unsigned char) ssl->out_msgtype;
        ssl->out_hdr[1] = (unsigned char) ssl->major_ver;
        ssl->out_hdr[2] = (unsigned char) ssl->minor_ver;
        ssl->out_hdr[3] = (unsigned char)( len >> 8 );
        ssl->out_hdr[4] = (unsigned char)( len );

        if( ssl->transform_out != NULL )
        {
            if( ( ret = ssl_encrypt_buf( ssl ) ) != 0 )
            {
                SSL_DEBUG_RET( 1, "ssl_encrypt_buf", ret );
                return( ret );
            }

            /* Encryption added MAC/padding/IV: rewrite the
             * length field with the ciphertext length. */
            len = ssl->out_msglen;
            ssl->out_hdr[3] = (unsigned char)( len >> 8 );
            ssl->out_hdr[4] = (unsigned char)( len );
        }

        ssl->out_left = 5 + ssl->out_msglen;

        SSL_DEBUG_MSG( 3, ( "output record: msgtype = %d, "
                            "version = [%d:%d], msglen = %d",
                       ssl->out_hdr[0], ssl->out_hdr[1], ssl->out_hdr[2],
                     ( ssl->out_hdr[3] << 8 ) | ssl->out_hdr[4] ) );

        SSL_DEBUG_BUF( 4, "output record header sent to network",
                       ssl->out_hdr, 5 );
        /* NOTE(review): out_hdr + 32 appears to be the start of
         * out_msg (out_hdr = out_ctr + 8, out_msg = out_ctr + 40)
         * -- confirm against ssl_init()'s buffer layout. */
        SSL_DEBUG_BUF( 4, "output record sent to network",
                       ssl->out_hdr + 32, ssl->out_msglen );
    }

    if( ( ret = ssl_flush_output( ssl ) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "ssl_flush_output", ret );
        return( ret );
    }

    SSL_DEBUG_MSG( 2, ( "<= write record" ) );

    return( 0 );
}
SSL_DEBUG_MSG( 1, ( "major version mismatch" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->in_hdr[2] > ssl->max_minor_ver ) { SSL_DEBUG_MSG( 1, ( "minor version mismatch" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } /* * Make sure the message length is acceptable */ if( ssl->transform_in == NULL ) { if( ssl->in_msglen < 1 || ssl->in_msglen > SSL_MAX_CONTENT_LEN ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } } else { if( ssl->in_msglen < ssl->transform_in->minlen ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->minor_ver == SSL_MINOR_VERSION_0 && ssl->in_msglen > ssl->transform_in->minlen + SSL_MAX_CONTENT_LEN ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } /* * TLS encrypted messages can have up to 256 bytes of padding */ if( ssl->minor_ver >= SSL_MINOR_VERSION_1 && ssl->in_msglen > ssl->transform_in->minlen + SSL_MAX_CONTENT_LEN + 256 ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } } /* * Read and optionally decrypt the message contents */ if( ( ret = ssl_fetch_input( ssl, 5 + ssl->in_msglen ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_fetch_input", ret ); return( ret ); } SSL_DEBUG_BUF( 4, "input record from network", ssl->in_hdr, 5 + ssl->in_msglen ); #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_read != NULL) { SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_read()" ) ); ret = ssl_hw_record_read( ssl ); if( ret != 0 && ret != POLARSSL_ERR_SSL_HW_ACCEL_FALLTHROUGH ) { SSL_DEBUG_RET( 1, "ssl_hw_record_read", ret ); return POLARSSL_ERR_SSL_HW_ACCEL_FAILED; } done = 1; } #endif if( !done && ssl->transform_in != NULL ) { if( ( ret = ssl_decrypt_buf( ssl ) ) != 0 ) { #if defined(POLARSSL_SSL_ALERT_MESSAGES) if( ret == POLARSSL_ERR_SSL_INVALID_MAC ) { ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_FATAL, SSL_ALERT_MSG_BAD_RECORD_MAC ); } #endif 
SSL_DEBUG_RET( 1, "ssl_decrypt_buf", ret ); return( ret ); } SSL_DEBUG_BUF( 4, "input payload after decrypt", ssl->in_msg, ssl->in_msglen ); if( ssl->in_msglen > SSL_MAX_CONTENT_LEN ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } } #if defined(POLARSSL_ZLIB_SUPPORT) if( ssl->transform_in != NULL && ssl->session_in->compression == SSL_COMPRESS_DEFLATE ) { if( ( ret = ssl_decompress_buf( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_decompress_buf", ret ); return( ret ); } ssl->in_hdr[3] = (unsigned char)( ssl->in_msglen >> 8 ); ssl->in_hdr[4] = (unsigned char)( ssl->in_msglen ); } #endif /* POLARSSL_ZLIB_SUPPORT */ if( ssl->in_msgtype != SSL_MSG_HANDSHAKE && ssl->in_msgtype != SSL_MSG_ALERT && ssl->in_msgtype != SSL_MSG_CHANGE_CIPHER_SPEC && ssl->in_msgtype != SSL_MSG_APPLICATION_DATA ) { SSL_DEBUG_MSG( 1, ( "unknown record type" ) ); if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_FATAL, SSL_ALERT_MSG_UNEXPECTED_MESSAGE ) ) != 0 ) { return( ret ); } return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->in_msgtype == SSL_MSG_HANDSHAKE ) { ssl->in_hslen = 4; ssl->in_hslen += ( ssl->in_msg[2] << 8 ) | ssl->in_msg[3]; SSL_DEBUG_MSG( 3, ( "handshake message: msglen =" " %d, type = %d, hslen = %d", ssl->in_msglen, ssl->in_msg[0], ssl->in_hslen ) ); /* * Additional checks to validate the handshake header */ if( ssl->in_msglen < 4 || ssl->in_msg[1] != 0 ) { SSL_DEBUG_MSG( 1, ( "bad handshake length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->in_msglen < ssl->in_hslen ) { SSL_DEBUG_MSG( 1, ( "bad handshake length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->state != SSL_HANDSHAKE_OVER ) ssl->handshake->update_checksum( ssl, ssl->in_msg, ssl->in_hslen ); } if( ssl->in_msgtype == SSL_MSG_ALERT ) { SSL_DEBUG_MSG( 2, ( "got an alert message, type: [%d:%d]", ssl->in_msg[0], ssl->in_msg[1] ) ); /* * Ignore non-fatal alerts, except close_notify */ if( ssl->in_msg[0] == SSL_ALERT_LEVEL_FATAL ) { 
SSL_DEBUG_MSG( 1, ( "is a fatal alert message (msg %d)", ssl->in_msg[1] ) ); /** * Subtract from error code as ssl->in_msg[1] is 7-bit positive * error identifier. */ return( POLARSSL_ERR_SSL_FATAL_ALERT_MESSAGE ); } if( ssl->in_msg[0] == SSL_ALERT_LEVEL_WARNING && ssl->in_msg[1] == SSL_ALERT_MSG_CLOSE_NOTIFY ) { SSL_DEBUG_MSG( 2, ( "is a close notify message" ) ); return( POLARSSL_ERR_SSL_PEER_CLOSE_NOTIFY ); } } ssl->in_left = 0; SSL_DEBUG_MSG( 2, ( "<= read record" ) ); return( 0 ); } int ssl_send_fatal_handshake_failure( ssl_context *ssl ) { int ret; if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_FATAL, SSL_ALERT_MSG_HANDSHAKE_FAILURE ) ) != 0 ) { return( ret ); } return( 0 ); } int ssl_send_alert_message( ssl_context *ssl, unsigned char level, unsigned char message ) { int ret; SSL_DEBUG_MSG( 2, ( "=> send alert message" ) ); ssl->out_msgtype = SSL_MSG_ALERT; ssl->out_msglen = 2; ssl->out_msg[0] = level; ssl->out_msg[1] = message; if( ( ret = ssl_write_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_write_record", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= send alert message" ) ); return( 0 ); } /* * Handshake functions */ int ssl_write_certificate( ssl_context *ssl ) { int ret; size_t i, n; const x509_cert *crt; SSL_DEBUG_MSG( 2, ( "=> write certificate" ) ); if( ssl->endpoint == SSL_IS_CLIENT ) { if( ssl->client_auth == 0 ) { SSL_DEBUG_MSG( 2, ( "<= skip write certificate" ) ); ssl->state++; return( 0 ); } /* * If using SSLv3 and got no cert, send an Alert message * (otherwise an empty Certificate message will be sent). 
/*
 * Parse the peer's Certificate handshake message.
 *
 * Reads one record, handles the "client has no certificate" special
 * cases (SSLv3 no_cert alert / TLS empty certificate list), then
 * parses the DER certificate chain into session_negotiate->peer_cert
 * and, unless authmode is SSL_VERIFY_NONE, verifies it against the
 * configured CA chain/CRL.  Returns 0, a POLARSSL_ERR_SSL_* protocol
 * error, or the verification error (suppressed when authmode is not
 * SSL_VERIFY_REQUIRED).
 */
int ssl_parse_certificate( ssl_context *ssl )
{
    int ret;
    size_t i, n;

    SSL_DEBUG_MSG( 2, ( "=> parse certificate" ) );

    /* A server that does not verify clients can skip this state. */
    if( ssl->endpoint == SSL_IS_SERVER &&
        ssl->authmode == SSL_VERIFY_NONE )
    {
        ssl->verify_result = BADCERT_SKIP_VERIFY;
        SSL_DEBUG_MSG( 2, ( "<= skip parse certificate" ) );
        ssl->state++;
        return( 0 );
    }

    if( ( ret = ssl_read_record( ssl ) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "ssl_read_record", ret );
        return( ret );
    }

    ssl->state++;

    /*
     * Check if the client sent an empty certificate
     */
    if( ssl->endpoint == SSL_IS_SERVER &&
        ssl->minor_ver == SSL_MINOR_VERSION_0 )
    {
        /* SSLv3 clients signal "no certificate" with a warning alert. */
        if( ssl->in_msglen  == 2                        &&
            ssl->in_msgtype == SSL_MSG_ALERT            &&
            ssl->in_msg[0]  == SSL_ALERT_LEVEL_WARNING  &&
            ssl->in_msg[1]  == SSL_ALERT_MSG_NO_CERT )
        {
            SSL_DEBUG_MSG( 1, ( "SSLv3 client has no certificate" ) );

            ssl->verify_result = BADCERT_MISSING;
            if( ssl->authmode == SSL_VERIFY_OPTIONAL )
                return( 0 );
            else
                return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE );
        }
    }

    if( ssl->endpoint == SSL_IS_SERVER &&
        ssl->minor_ver != SSL_MINOR_VERSION_0 )
    {
        /* TLS clients send a Certificate message with a zero-length
         * list: 4-byte header + 3-byte list length of 0 = 7 bytes. */
        if( ssl->in_hslen   == 7                    &&
            ssl->in_msgtype == SSL_MSG_HANDSHAKE    &&
            ssl->in_msg[0]  == SSL_HS_CERTIFICATE   &&
            memcmp( ssl->in_msg + 4, "\0\0\0", 3 ) == 0 )
        {
            SSL_DEBUG_MSG( 1, ( "TLSv1 client has no certificate" ) );

            ssl->verify_result = BADCERT_MISSING;
            if( ssl->authmode == SSL_VERIFY_REQUIRED )
                return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE );
            else
                return( 0 );
        }
    }

    if( ssl->in_msgtype != SSL_MSG_HANDSHAKE )
    {
        SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
        return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE );
    }

    /* Minimum plausible size: 4 header + 3 list len + 3 cert len. */
    if( ssl->in_msg[0] != SSL_HS_CERTIFICATE || ssl->in_hslen < 10 )
    {
        SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
    }

    /*
     * Same message structure as in ssl_write_certificate()
     */
    n = ( ssl->in_msg[5] << 8 ) | ssl->in_msg[6];

    /* Total list length must exactly fill the handshake message. */
    if( ssl->in_msg[4] != 0 ||
        ssl->in_hslen != 7 + n )
    {
        SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
        return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
    }

    if( ( ssl->session_negotiate->peer_cert = (x509_cert *) malloc(
                    sizeof( x509_cert ) ) ) == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed",
                       sizeof( x509_cert ) ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memset( ssl->session_negotiate->peer_cert, 0,
            sizeof( x509_cert ) );

    i = 7;

    /* Walk the chain: each entry is a 3-byte length then DER bytes;
     * x509parse_crt appends each parsed cert to peer_cert's list. */
    while( i < ssl->in_hslen )
    {
        if( ssl->in_msg[i] != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
            return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
        }

        n = ( (unsigned int) ssl->in_msg[i + 1] << 8 )
            | (unsigned int) ssl->in_msg[i + 2];
        i += 3;

        /* n < 128 rejects anything too small to be a real DER cert;
         * the second clause prevents reading past the message. */
        if( n < 128 || i + n > ssl->in_hslen )
        {
            SSL_DEBUG_MSG( 1, ( "bad certificate message" ) );
            return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
        }

        ret = x509parse_crt( ssl->session_negotiate->peer_cert,
                             ssl->in_msg + i, n );
        if( ret != 0 )
        {
            SSL_DEBUG_RET( 1, " x509parse_crt", ret );
            return( ret );
        }

        i += n;
    }

    SSL_DEBUG_CRT( 3, "peer certificate",
                   ssl->session_negotiate->peer_cert );

    if( ssl->authmode != SSL_VERIFY_NONE )
    {
        if( ssl->ca_chain == NULL )
        {
            SSL_DEBUG_MSG( 1, ( "got no CA chain" ) );
            return( POLARSSL_ERR_SSL_CA_CHAIN_REQUIRED );
        }

        ret = x509parse_verify( ssl->session_negotiate->peer_cert,
                                ssl->ca_chain, ssl->ca_crl, ssl->peer_cn,
                               &ssl->verify_result, ssl->f_vrfy, ssl->p_vrfy );

        if( ret != 0 )
            SSL_DEBUG_RET( 1, "x509_verify_cert", ret );

        /* In OPTIONAL mode the verification result is reported via
         * verify_result but does not abort the handshake. */
        if( ssl->authmode != SSL_VERIFY_REQUIRED )
            ret = 0;
    }

    SSL_DEBUG_MSG( 2, ( "<= parse certificate" ) );

    return( ret );
}
parse change cipher spec" ) ); return( 0 ); } void ssl_optimize_checksum( ssl_context *ssl, int ciphersuite ) { #if !defined(POLARSSL_SHA4_C) ((void) ciphersuite); #endif if( ssl->minor_ver < SSL_MINOR_VERSION_3 ) ssl->handshake->update_checksum = ssl_update_checksum_md5sha1; #if defined(POLARSSL_SHA4_C) else if ( ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 || ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ) { ssl->handshake->update_checksum = ssl_update_checksum_sha384; } #endif else ssl->handshake->update_checksum = ssl_update_checksum_sha256; } static void ssl_update_checksum_start( ssl_context *ssl, unsigned char *buf, size_t len ) { md5_update( &ssl->handshake->fin_md5 , buf, len ); sha1_update( &ssl->handshake->fin_sha1, buf, len ); sha2_update( &ssl->handshake->fin_sha2, buf, len ); #if defined(POLARSSL_SHA4_C) sha4_update( &ssl->handshake->fin_sha4, buf, len ); #endif } static void ssl_update_checksum_md5sha1( ssl_context *ssl, unsigned char *buf, size_t len ) { md5_update( &ssl->handshake->fin_md5 , buf, len ); sha1_update( &ssl->handshake->fin_sha1, buf, len ); } static void ssl_update_checksum_sha256( ssl_context *ssl, unsigned char *buf, size_t len ) { sha2_update( &ssl->handshake->fin_sha2, buf, len ); } #if defined(POLARSSL_SHA4_C) static void ssl_update_checksum_sha384( ssl_context *ssl, unsigned char *buf, size_t len ) { sha4_update( &ssl->handshake->fin_sha4, buf, len ); } #endif static void ssl_calc_finished_ssl( ssl_context *ssl, unsigned char *buf, int from ) { const char *sender; md5_context md5; sha1_context sha1; unsigned char padbuf[48]; unsigned char md5sum[16]; unsigned char sha1sum[20]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished ssl" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); /* * SSLv3: * hash = * MD5( master + pad2 + * MD5( handshake + sender + master + pad1 
) ) * + SHA1( master + pad2 + * SHA1( handshake + sender + master + pad1 ) ) */ SSL_DEBUG_BUF( 4, "finished md5 state", (unsigned char *) md5.state, sizeof( md5.state ) ); SSL_DEBUG_BUF( 4, "finished sha1 state", (unsigned char *) sha1.state, sizeof( sha1.state ) ); sender = ( from == SSL_IS_CLIENT ) ? "CLNT" : "SRVR"; memset( padbuf, 0x36, 48 ); md5_update( &md5, (const unsigned char *) sender, 4 ); md5_update( &md5, session->master, 48 ); md5_update( &md5, padbuf, 48 ); md5_finish( &md5, md5sum ); sha1_update( &sha1, (const unsigned char *) sender, 4 ); sha1_update( &sha1, session->master, 48 ); sha1_update( &sha1, padbuf, 40 ); sha1_finish( &sha1, sha1sum ); memset( padbuf, 0x5C, 48 ); md5_starts( &md5 ); md5_update( &md5, session->master, 48 ); md5_update( &md5, padbuf, 48 ); md5_update( &md5, md5sum, 16 ); md5_finish( &md5, buf ); sha1_starts( &sha1 ); sha1_update( &sha1, session->master, 48 ); sha1_update( &sha1, padbuf , 40 ); sha1_update( &sha1, sha1sum, 20 ); sha1_finish( &sha1, buf + 16 ); SSL_DEBUG_BUF( 3, "calc finished result", buf, 36 ); memset( &md5, 0, sizeof( md5_context ) ); memset( &sha1, 0, sizeof( sha1_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); memset( md5sum, 0, sizeof( md5sum ) ); memset( sha1sum, 0, sizeof( sha1sum ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } static void ssl_calc_finished_tls( ssl_context *ssl, unsigned char *buf, int from ) { int len = 12; const char *sender; md5_context md5; sha1_context sha1; unsigned char padbuf[36]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished tls" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); /* * TLSv1: * hash = PRF( master, finished_label, * MD5( handshake ) + SHA1( handshake ) )[0..11] */ SSL_DEBUG_BUF( 4, "finished md5 state", (unsigned char *) md5.state, sizeof( md5.state ) ); SSL_DEBUG_BUF( 4, "finished sha1 state", 
(unsigned char *) sha1.state, sizeof( sha1.state ) ); sender = ( from == SSL_IS_CLIENT ) ? "client finished" : "server finished"; md5_finish( &md5, padbuf ); sha1_finish( &sha1, padbuf + 16 ); ssl->handshake->tls_prf( session->master, 48, (char *) sender, padbuf, 36, buf, len ); SSL_DEBUG_BUF( 3, "calc finished result", buf, len ); memset( &md5, 0, sizeof( md5_context ) ); memset( &sha1, 0, sizeof( sha1_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } static void ssl_calc_finished_tls_sha256( ssl_context *ssl, unsigned char *buf, int from ) { int len = 12; const char *sender; sha2_context sha2; unsigned char padbuf[32]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha256" ) ); memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) ); /* * TLSv1.2: * hash = PRF( master, finished_label, * Hash( handshake ) )[0.11] */ SSL_DEBUG_BUF( 4, "finished sha2 state", (unsigned char *) sha2.state, sizeof( sha2.state ) ); sender = ( from == SSL_IS_CLIENT ) ? 
"client finished" : "server finished"; sha2_finish( &sha2, padbuf ); ssl->handshake->tls_prf( session->master, 48, (char *) sender, padbuf, 32, buf, len ); SSL_DEBUG_BUF( 3, "calc finished result", buf, len ); memset( &sha2, 0, sizeof( sha2_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } #if defined(POLARSSL_SHA4_C) static void ssl_calc_finished_tls_sha384( ssl_context *ssl, unsigned char *buf, int from ) { int len = 12; const char *sender; sha4_context sha4; unsigned char padbuf[48]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha384" ) ); memcpy( &sha4, &ssl->handshake->fin_sha4, sizeof(sha4_context) ); /* * TLSv1.2: * hash = PRF( master, finished_label, * Hash( handshake ) )[0.11] */ SSL_DEBUG_BUF( 4, "finished sha4 state", (unsigned char *) sha4.state, sizeof( sha4.state ) ); sender = ( from == SSL_IS_CLIENT ) ? "client finished" : "server finished"; sha4_finish( &sha4, padbuf ); ssl->handshake->tls_prf( session->master, 48, (char *) sender, padbuf, 48, buf, len ); SSL_DEBUG_BUF( 3, "calc finished result", buf, len ); memset( &sha4, 0, sizeof( sha4_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } #endif void ssl_handshake_wrapup( ssl_context *ssl ) { SSL_DEBUG_MSG( 3, ( "=> handshake wrapup" ) ); /* * Free our handshake params */ ssl_handshake_free( ssl->handshake ); free( ssl->handshake ); ssl->handshake = NULL; /* * Switch in our now active transform context */ if( ssl->transform ) { ssl_transform_free( ssl->transform ); free( ssl->transform ); } ssl->transform = ssl->transform_negotiate; ssl->transform_negotiate = NULL; if( ssl->session ) { ssl_session_free( ssl->session ); free( ssl->session ); } ssl->session = ssl->session_negotiate; ssl->session_negotiate = NULL; /* * Add cache entry */ if( ssl->f_set_cache != NULL ) if( ssl->f_set_cache( ssl->p_set_cache, ssl->session ) != 0 
/*
 * Build and send the Finished handshake message.
 *
 * Computes the verify_data over the handshake transcript, stores a
 * copy for secure renegotiation, switches the outbound direction to
 * the newly negotiated transform/session (Finished is the first
 * message protected by them), and writes the record.
 */
int ssl_write_finished( ssl_context *ssl )
{
    int ret, hash_len;

    SSL_DEBUG_MSG( 2, ( "=> write finished" ) );

    ssl->handshake->calc_finished( ssl, ssl->out_msg + 4, ssl->endpoint );

    // TODO TLS/1.2 Hash length is determined by cipher suite (Page 63)
    hash_len = ( ssl->minor_ver == SSL_MINOR_VERSION_0 ) ? 36 : 12;

    /* Keep our verify_data for the renegotiation_info extension. */
    ssl->verify_data_len = hash_len;
    memcpy( ssl->own_verify_data, ssl->out_msg + 4, hash_len );

    ssl->out_msglen  = 4 + hash_len;
    ssl->out_msgtype = SSL_MSG_HANDSHAKE;
    ssl->out_msg[0]  = SSL_HS_FINISHED;

    /*
     * In case of session resuming, invert the client and server
     * ChangeCipherSpec messages order.
     */
    if( ssl->handshake->resume != 0 )
    {
        if( ssl->endpoint == SSL_IS_CLIENT )
            ssl->state = SSL_HANDSHAKE_WRAPUP;
        else
            ssl->state = SSL_CLIENT_CHANGE_CIPHER_SPEC;
    }
    else
        ssl->state++;

    /*
     * Switch to our negotiated transform and session parameters for outbound data.
     */
    SSL_DEBUG_MSG( 3, ( "switching to new transform spec for outbound data" ) );
    ssl->transform_out = ssl->transform_negotiate;
    ssl->session_out = ssl->session_negotiate;
    /* Record sequence number restarts at zero under the new keys. */
    memset( ssl->out_ctr, 0, 8 );

    if( ( ret = ssl_write_record( ssl ) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "ssl_write_record", ret );
        return( ret );
    }

    SSL_DEBUG_MSG( 2, ( "<= write finished" ) );

    return( 0 );
}
*/ SSL_DEBUG_MSG( 3, ( "switching to new transform spec for inbound data" ) ); ssl->transform_in = ssl->transform_negotiate; ssl->session_in = ssl->session_negotiate; memset( ssl->in_ctr, 0, 8 ); if( ( ret = ssl_read_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } if( ssl->in_msgtype != SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "bad finished message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } // TODO TLS/1.2 Hash length is determined by cipher suite (Page 63) hash_len = ( ssl->minor_ver == SSL_MINOR_VERSION_0 ) ? 36 : 12; if( ssl->in_msg[0] != SSL_HS_FINISHED || ssl->in_hslen != 4 + hash_len ) { SSL_DEBUG_MSG( 1, ( "bad finished message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_FINISHED ); } if( memcmp( ssl->in_msg + 4, buf, hash_len ) != 0 ) { SSL_DEBUG_MSG( 1, ( "bad finished message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_FINISHED ); } ssl->verify_data_len = hash_len; memcpy( ssl->peer_verify_data, buf, hash_len ); if( ssl->handshake->resume != 0 ) { if( ssl->endpoint == SSL_IS_CLIENT ) ssl->state = SSL_CLIENT_CHANGE_CIPHER_SPEC; if( ssl->endpoint == SSL_IS_SERVER ) ssl->state = SSL_HANDSHAKE_WRAPUP; } else ssl->state++; SSL_DEBUG_MSG( 2, ( "<= parse finished" ) ); return( 0 ); } int ssl_handshake_init( ssl_context *ssl ) { if( ssl->transform_negotiate ) ssl_transform_free( ssl->transform_negotiate ); else ssl->transform_negotiate = malloc( sizeof(ssl_transform) ); if( ssl->session_negotiate ) ssl_session_free( ssl->session_negotiate ); else ssl->session_negotiate = malloc( sizeof(ssl_session) ); if( ssl->handshake ) ssl_handshake_free( ssl->handshake ); else ssl->handshake = malloc( sizeof(ssl_handshake_params) ); if( ssl->handshake == NULL || ssl->transform_negotiate == NULL || ssl->session_negotiate == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc() of ssl sub-contexts failed" ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memset( ssl->handshake, 0, sizeof(ssl_handshake_params) ); memset( ssl->transform_negotiate, 0, 
sizeof(ssl_transform) ); memset( ssl->session_negotiate, 0, sizeof(ssl_session) ); md5_starts( &ssl->handshake->fin_md5 ); sha1_starts( &ssl->handshake->fin_sha1 ); sha2_starts( &ssl->handshake->fin_sha2, 0 ); #if defined(POLARSSL_SHA4_C) sha4_starts( &ssl->handshake->fin_sha4, 1 ); #endif ssl->handshake->update_checksum = ssl_update_checksum_start; ssl->handshake->sig_alg = SSL_HASH_SHA1; return( 0 ); } /* * Initialize an SSL context */ int ssl_init( ssl_context *ssl ) { int ret; int len = SSL_BUFFER_LEN; memset( ssl, 0, sizeof( ssl_context ) ); /* * Sane defaults */ ssl->rsa_decrypt = ssl_rsa_decrypt; ssl->rsa_sign = ssl_rsa_sign; ssl->rsa_key_len = ssl_rsa_key_len; ssl->min_major_ver = SSL_MAJOR_VERSION_3; ssl->min_minor_ver = SSL_MINOR_VERSION_0; ssl->ciphersuites = malloc( sizeof(int *) * 4 ); ssl_set_ciphersuites( ssl, ssl_default_ciphersuites ); #if defined(POLARSSL_DHM_C) if( ( ret = mpi_read_string( &ssl->dhm_P, 16, POLARSSL_DHM_RFC5114_MODP_1024_P) ) != 0 || ( ret = mpi_read_string( &ssl->dhm_G, 16, POLARSSL_DHM_RFC5114_MODP_1024_G) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_read_string", ret ); return( ret ); } #endif /* * Prepare base structures */ ssl->in_ctr = (unsigned char *) malloc( len ); ssl->in_hdr = ssl->in_ctr + 8; ssl->in_msg = ssl->in_ctr + 13; if( ssl->in_ctr == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } ssl->out_ctr = (unsigned char *) malloc( len ); ssl->out_hdr = ssl->out_ctr + 8; ssl->out_msg = ssl->out_ctr + 40; if( ssl->out_ctr == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) ); free( ssl-> in_ctr ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memset( ssl-> in_ctr, 0, SSL_BUFFER_LEN ); memset( ssl->out_ctr, 0, SSL_BUFFER_LEN ); ssl->hostname = NULL; ssl->hostname_len = 0; if( ( ret = ssl_handshake_init( ssl ) ) != 0 ) return( ret ); return( 0 ); } /* * Reset an initialized and used SSL context for re-use while retaining * all application-set variables, function 
pointers and data. */ int ssl_session_reset( ssl_context *ssl ) { int ret; ssl->state = SSL_HELLO_REQUEST; ssl->renegotiation = SSL_INITIAL_HANDSHAKE; ssl->secure_renegotiation = SSL_LEGACY_RENEGOTIATION; ssl->verify_data_len = 0; memset( ssl->own_verify_data, 0, 36 ); memset( ssl->peer_verify_data, 0, 36 ); ssl->in_offt = NULL; ssl->in_msgtype = 0; ssl->in_msglen = 0; ssl->in_left = 0; ssl->in_hslen = 0; ssl->nb_zero = 0; ssl->out_msgtype = 0; ssl->out_msglen = 0; ssl->out_left = 0; ssl->transform_in = NULL; ssl->transform_out = NULL; memset( ssl->out_ctr, 0, SSL_BUFFER_LEN ); memset( ssl->in_ctr, 0, SSL_BUFFER_LEN ); #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_reset != NULL) { SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_reset()" ) ); if( ssl_hw_record_reset( ssl ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_hw_record_reset", ret ); return( POLARSSL_ERR_SSL_HW_ACCEL_FAILED ); } } #endif if( ssl->transform ) { ssl_transform_free( ssl->transform ); free( ssl->transform ); ssl->transform = NULL; } if( ssl->session ) { ssl_session_free( ssl->session ); free( ssl->session ); ssl->session = NULL; } if( ( ret = ssl_handshake_init( ssl ) ) != 0 ) return( ret ); return( 0 ); } /* * SSL set accessors */ void ssl_set_endpoint( ssl_context *ssl, int endpoint ) { ssl->endpoint = endpoint; } void ssl_set_authmode( ssl_context *ssl, int authmode ) { ssl->authmode = authmode; } void ssl_set_verify( ssl_context *ssl, int (*f_vrfy)(void *, x509_cert *, int, int *), void *p_vrfy ) { ssl->f_vrfy = f_vrfy; ssl->p_vrfy = p_vrfy; } void ssl_set_rng( ssl_context *ssl, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { ssl->f_rng = f_rng; ssl->p_rng = p_rng; } void ssl_set_dbg( ssl_context *ssl, void (*f_dbg)(void *, int, const char *), void *p_dbg ) { ssl->f_dbg = f_dbg; ssl->p_dbg = p_dbg; } void ssl_set_bio( ssl_context *ssl, int (*f_recv)(void *, unsigned char *, size_t), void *p_recv, int (*f_send)(void *, const unsigned char *, size_t), void *p_send ) { ssl->f_recv 
= f_recv; ssl->f_send = f_send; ssl->p_recv = p_recv; ssl->p_send = p_send; } void ssl_set_session_cache( ssl_context *ssl, int (*f_get_cache)(void *, ssl_session *), void *p_get_cache, int (*f_set_cache)(void *, const ssl_session *), void *p_set_cache ) { ssl->f_get_cache = f_get_cache; ssl->p_get_cache = p_get_cache; ssl->f_set_cache = f_set_cache; ssl->p_set_cache = p_set_cache; } void ssl_set_session( ssl_context *ssl, const ssl_session *session ) { memcpy( ssl->session_negotiate, session, sizeof(ssl_session) ); ssl->handshake->resume = 1; } void ssl_set_ciphersuites( ssl_context *ssl, const int *ciphersuites ) { ssl->ciphersuites[SSL_MINOR_VERSION_0] = ciphersuites; ssl->ciphersuites[SSL_MINOR_VERSION_1] = ciphersuites; ssl->ciphersuites[SSL_MINOR_VERSION_2] = ciphersuites; ssl->ciphersuites[SSL_MINOR_VERSION_3] = ciphersuites; } void ssl_set_ciphersuites_for_version( ssl_context *ssl, const int *ciphersuites, int major, int minor ) { if( major != SSL_MAJOR_VERSION_3 ) return; if( minor < SSL_MINOR_VERSION_0 || minor > SSL_MINOR_VERSION_3 ) return; ssl->ciphersuites[minor] = ciphersuites; } void ssl_set_ca_chain( ssl_context *ssl, x509_cert *ca_chain, x509_crl *ca_crl, const char *peer_cn ) { ssl->ca_chain = ca_chain; ssl->ca_crl = ca_crl; ssl->peer_cn = peer_cn; } void ssl_set_own_cert( ssl_context *ssl, x509_cert *own_cert, rsa_context *rsa_key ) { ssl->own_cert = own_cert; ssl->rsa_key = rsa_key; } void ssl_set_own_cert_alt( ssl_context *ssl, x509_cert *own_cert, void *rsa_key, rsa_decrypt_func rsa_decrypt, rsa_sign_func rsa_sign, rsa_key_len_func rsa_key_len ) { ssl->own_cert = own_cert; ssl->rsa_key = rsa_key; ssl->rsa_decrypt = rsa_decrypt; ssl->rsa_sign = rsa_sign; ssl->rsa_key_len = rsa_key_len; } #if defined(POLARSSL_DHM_C) int ssl_set_dh_param( ssl_context *ssl, const char *dhm_P, const char *dhm_G ) { int ret; if( ( ret = mpi_read_string( &ssl->dhm_P, 16, dhm_P ) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_read_string", ret ); return( ret ); } if( ( ret = 
mpi_read_string( &ssl->dhm_G, 16, dhm_G ) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_read_string", ret ); return( ret ); } return( 0 ); } int ssl_set_dh_param_ctx( ssl_context *ssl, dhm_context *dhm_ctx ) { int ret; if( ( ret = mpi_copy(&ssl->dhm_P, &dhm_ctx->P) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_copy", ret ); return( ret ); } if( ( ret = mpi_copy(&ssl->dhm_G, &dhm_ctx->G) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_copy", ret ); return( ret ); } return( 0 ); } #endif /* POLARSSL_DHM_C */ int ssl_set_hostname( ssl_context *ssl, const char *hostname ) { if( hostname == NULL ) return( POLARSSL_ERR_SSL_BAD_INPUT_DATA ); ssl->hostname_len = strlen( hostname ); ssl->hostname = (unsigned char *) malloc( ssl->hostname_len + 1 ); if( ssl->hostname == NULL ) return( POLARSSL_ERR_SSL_MALLOC_FAILED ); memcpy( ssl->hostname, (const unsigned char *) hostname, ssl->hostname_len ); ssl->hostname[ssl->hostname_len] = '\0'; return( 0 ); } void ssl_set_sni( ssl_context *ssl, int (*f_sni)(void *, ssl_context *, const unsigned char *, size_t), void *p_sni ) { ssl->f_sni = f_sni; ssl->p_sni = p_sni; } void ssl_set_max_version( ssl_context *ssl, int major, int minor ) { ssl->max_major_ver = major; ssl->max_minor_ver = minor; } void ssl_set_min_version( ssl_context *ssl, int major, int minor ) { ssl->min_major_ver = major; ssl->min_minor_ver = minor; } void ssl_set_renegotiation( ssl_context *ssl, int renegotiation ) { ssl->disable_renegotiation = renegotiation; } void ssl_legacy_renegotiation( ssl_context *ssl, int allow_legacy ) { ssl->allow_legacy_renegotiation = allow_legacy; } /* * SSL get accessors */ size_t ssl_get_bytes_avail( const ssl_context *ssl ) { return( ssl->in_offt == NULL ? 
0 : ssl->in_msglen ); } int ssl_get_verify_result( const ssl_context *ssl ) { return( ssl->verify_result ); } const char *ssl_get_ciphersuite_name( const int ciphersuite_id ) { switch( ciphersuite_id ) { #if defined(POLARSSL_ARC4_C) case TLS_RSA_WITH_RC4_128_MD5: return( "TLS-RSA-WITH-RC4-128-MD5" ); case TLS_RSA_WITH_RC4_128_SHA: return( "TLS-RSA-WITH-RC4-128-SHA" ); #endif #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_3DES_EDE_CBC_SHA: return( "TLS-RSA-WITH-3DES-EDE-CBC-SHA" ); case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA: return( "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA" ); #endif #if defined(POLARSSL_AES_C) case TLS_RSA_WITH_AES_128_CBC_SHA: return( "TLS-RSA-WITH-AES-128-CBC-SHA" ); case TLS_DHE_RSA_WITH_AES_128_CBC_SHA: return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA" ); case TLS_RSA_WITH_AES_256_CBC_SHA: return( "TLS-RSA-WITH-AES-256-CBC-SHA" ); case TLS_DHE_RSA_WITH_AES_256_CBC_SHA: return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA" ); #if defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_AES_128_CBC_SHA256: return( "TLS-RSA-WITH-AES-128-CBC-SHA256" ); case TLS_RSA_WITH_AES_256_CBC_SHA256: return( "TLS-RSA-WITH-AES-256-CBC-SHA256" ); case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256: return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256" ); case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256: return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256" ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_AES_128_GCM_SHA256: return( "TLS-RSA-WITH-AES-128-GCM-SHA256" ); case TLS_RSA_WITH_AES_256_GCM_SHA384: return( "TLS-RSA-WITH-AES-256-GCM-SHA384" ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C) case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256: return( "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256" ); case TLS_DHE_RSA_WITH_AES_256_GCM_SHA384: return( "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384" ); #endif #endif /* POLARSSL_AES_C */ #if defined(POLARSSL_CAMELLIA_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA: return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA" ); case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA: return( 
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA" ); case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA: return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA" ); case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA: return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA" ); #if defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256: return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256" ); case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256: return( "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256" ); case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256: return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256" ); case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256: return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256" ); #endif #endif #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) #if defined(POLARSSL_CIPHER_NULL_CIPHER) case TLS_RSA_WITH_NULL_MD5: return( "TLS-RSA-WITH-NULL-MD5" ); case TLS_RSA_WITH_NULL_SHA: return( "TLS-RSA-WITH-NULL-SHA" ); case TLS_RSA_WITH_NULL_SHA256: return( "TLS-RSA-WITH-NULL-SHA256" ); #endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */ #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_DES_CBC_SHA: return( "TLS-RSA-WITH-DES-CBC-SHA" ); case TLS_DHE_RSA_WITH_DES_CBC_SHA: return( "TLS-DHE-RSA-WITH-DES-CBC-SHA" ); #endif #endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */ default: break; } return( "unknown" ); } int ssl_get_ciphersuite_id( const char *ciphersuite_name ) { #if defined(POLARSSL_ARC4_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-RC4-128-MD5")) return( TLS_RSA_WITH_RC4_128_MD5 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-RC4-128-SHA")) return( TLS_RSA_WITH_RC4_128_SHA ); #endif #if defined(POLARSSL_DES_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-3DES-EDE-CBC-SHA")) return( TLS_RSA_WITH_3DES_EDE_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA")) return( TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA ); #endif #if defined(POLARSSL_AES_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-128-CBC-SHA")) return( TLS_RSA_WITH_AES_128_CBC_SHA ); if (0 == 
strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-128-CBC-SHA")) return( TLS_DHE_RSA_WITH_AES_128_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-256-CBC-SHA")) return( TLS_RSA_WITH_AES_256_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-256-CBC-SHA")) return( TLS_DHE_RSA_WITH_AES_256_CBC_SHA ); #if defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-128-CBC-SHA256")) return( TLS_RSA_WITH_AES_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-256-CBC-SHA256")) return( TLS_RSA_WITH_AES_256_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256")) return( TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256")) return( TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-128-GCM-SHA256")) return( TLS_RSA_WITH_AES_128_GCM_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-256-GCM-SHA384")) return( TLS_RSA_WITH_AES_256_GCM_SHA384 ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256")) return( TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384")) return( TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ); #endif #endif #if defined(POLARSSL_CAMELLIA_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA")) return( TLS_RSA_WITH_CAMELLIA_128_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA")) return( TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA")) return( TLS_RSA_WITH_CAMELLIA_256_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA")) return( 
TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA ); #if defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256")) return( TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256")) return( TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256")) return( TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256")) return( TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 ); #endif #endif #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) #if defined(POLARSSL_CIPHER_NULL_CIPHER) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-NULL-MD5")) return( TLS_RSA_WITH_NULL_MD5 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-NULL-SHA")) return( TLS_RSA_WITH_NULL_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-NULL-SHA256")) return( TLS_RSA_WITH_NULL_SHA256 ); #endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */ #if defined(POLARSSL_DES_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-DES-CBC-SHA")) return( TLS_RSA_WITH_DES_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-DES-CBC-SHA")) return( TLS_DHE_RSA_WITH_DES_CBC_SHA ); #endif #endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */ return( 0 ); } const char *ssl_get_ciphersuite( const ssl_context *ssl ) { if( ssl == NULL || ssl->session == NULL ) return NULL; return ssl_get_ciphersuite_name( ssl->session->ciphersuite ); } const char *ssl_get_version( const ssl_context *ssl ) { switch( ssl->minor_ver ) { case SSL_MINOR_VERSION_0: return( "SSLv3.0" ); case SSL_MINOR_VERSION_1: return( "TLSv1.0" ); case SSL_MINOR_VERSION_2: return( "TLSv1.1" ); case SSL_MINOR_VERSION_3: return( "TLSv1.2" ); default: break; } return( "unknown" ); } const x509_cert *ssl_get_peer_cert( const ssl_context *ssl ) { if( ssl == NULL || ssl->session == NULL ) return NULL; return 
ssl->session->peer_cert; } const int ssl_default_ciphersuites[] = { #if defined(POLARSSL_DHM_C) #if defined(POLARSSL_AES_C) #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C) TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, #endif TLS_DHE_RSA_WITH_AES_256_CBC_SHA, #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, #endif TLS_DHE_RSA_WITH_AES_128_CBC_SHA, #endif #if defined(POLARSSL_CAMELLIA_C) #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, #endif #if defined(POLARSSL_DES_C) TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, #endif #endif #if defined(POLARSSL_AES_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_AES_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C) TLS_RSA_WITH_AES_256_GCM_SHA384, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_AES_256_CBC_SHA, #endif #if defined(POLARSSL_CAMELLIA_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, #endif #if defined(POLARSSL_AES_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_AES_128_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) TLS_RSA_WITH_AES_128_GCM_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_AES_128_CBC_SHA, #endif #if defined(POLARSSL_CAMELLIA_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, #endif #if defined(POLARSSL_DES_C) TLS_RSA_WITH_3DES_EDE_CBC_SHA, #endif #if defined(POLARSSL_ARC4_C) TLS_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_RC4_128_MD5, 
#endif 0 }; /* * Perform a single step of the SSL handshake */ int ssl_handshake_step( ssl_context *ssl ) { int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE; #if defined(POLARSSL_SSL_CLI_C) if( ssl->endpoint == SSL_IS_CLIENT ) ret = ssl_handshake_client_step( ssl ); #endif #if defined(POLARSSL_SSL_SRV_C) if( ssl->endpoint == SSL_IS_SERVER ) ret = ssl_handshake_server_step( ssl ); #endif return( ret ); } /* * Perform the SSL handshake */ int ssl_handshake( ssl_context *ssl ) { int ret = 0; SSL_DEBUG_MSG( 2, ( "=> handshake" ) ); while( ssl->state != SSL_HANDSHAKE_OVER ) { ret = ssl_handshake_step( ssl ); if( ret != 0 ) break; } SSL_DEBUG_MSG( 2, ( "<= handshake" ) ); return( ret ); } /* * Renegotiate current connection */ int ssl_renegotiate( ssl_context *ssl ) { int ret; SSL_DEBUG_MSG( 2, ( "=> renegotiate" ) ); if( ssl->state != SSL_HANDSHAKE_OVER ) return( POLARSSL_ERR_SSL_BAD_INPUT_DATA ); ssl->state = SSL_HELLO_REQUEST; ssl->renegotiation = SSL_RENEGOTIATION; if( ( ret = ssl_handshake_init( ssl ) ) != 0 ) return( ret ); if( ( ret = ssl_handshake( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_handshake", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= renegotiate" ) ); return( 0 ); } /* * Receive application data decrypted from the SSL layer */ int ssl_read( ssl_context *ssl, unsigned char *buf, size_t len ) { int ret; size_t n; SSL_DEBUG_MSG( 2, ( "=> read" ) ); if( ssl->state != SSL_HANDSHAKE_OVER ) { if( ( ret = ssl_handshake( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_handshake", ret ); return( ret ); } } if( ssl->in_offt == NULL ) { if( ( ret = ssl_read_record( ssl ) ) != 0 ) { if( ret == POLARSSL_ERR_SSL_CONN_EOF ) return( 0 ); SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } if( ssl->in_msglen == 0 && ssl->in_msgtype == SSL_MSG_APPLICATION_DATA ) { /* * OpenSSL sends empty messages to randomize the IV */ if( ( ret = ssl_read_record( ssl ) ) != 0 ) { if( ret == POLARSSL_ERR_SSL_CONN_EOF ) return( 0 ); SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( 
ret ); } } if( ssl->in_msgtype == SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "received handshake message" ) ); if( ssl->endpoint == SSL_IS_CLIENT && ( ssl->in_msg[0] != SSL_HS_HELLO_REQUEST || ssl->in_hslen != 4 ) ) { SSL_DEBUG_MSG( 1, ( "handshake received (not HelloRequest)" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } if( ssl->disable_renegotiation == SSL_RENEGOTIATION_DISABLED || ( ssl->secure_renegotiation == SSL_LEGACY_RENEGOTIATION && ssl->allow_legacy_renegotiation == SSL_LEGACY_NO_RENEGOTIATION ) ) { SSL_DEBUG_MSG( 3, ( "ignoring renegotiation, sending alert" ) ); if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { /* * SSLv3 does not have a "no_renegotiation" alert */ if( ( ret = ssl_send_fatal_handshake_failure( ssl ) ) != 0 ) return( ret ); } else { if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_WARNING, SSL_ALERT_MSG_NO_RENEGOTIATION ) ) != 0 ) { return( ret ); } } } else { if( ( ret = ssl_renegotiate( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_renegotiate", ret ); return( ret ); } return( POLARSSL_ERR_NET_WANT_READ ); } } else if( ssl->in_msgtype != SSL_MSG_APPLICATION_DATA ) { SSL_DEBUG_MSG( 1, ( "bad application data message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } ssl->in_offt = ssl->in_msg; } n = ( len < ssl->in_msglen ) ? len : ssl->in_msglen; memcpy( buf, ssl->in_offt, n ); ssl->in_msglen -= n; if( ssl->in_msglen == 0 ) /* all bytes consumed */ ssl->in_offt = NULL; else /* more data available */ ssl->in_offt += n; SSL_DEBUG_MSG( 2, ( "<= read" ) ); return( (int) n ); } /* * Send application data to be encrypted by the SSL layer */ int ssl_write( ssl_context *ssl, const unsigned char *buf, size_t len ) { int ret; size_t n; SSL_DEBUG_MSG( 2, ( "=> write" ) ); if( ssl->state != SSL_HANDSHAKE_OVER ) { if( ( ret = ssl_handshake( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_handshake", ret ); return( ret ); } } n = ( len < SSL_MAX_CONTENT_LEN ) ? 
len : SSL_MAX_CONTENT_LEN; if( ssl->out_left != 0 ) { if( ( ret = ssl_flush_output( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_flush_output", ret ); return( ret ); } } else { ssl->out_msglen = n; ssl->out_msgtype = SSL_MSG_APPLICATION_DATA; memcpy( ssl->out_msg, buf, n ); if( ( ret = ssl_write_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_write_record", ret ); return( ret ); } } SSL_DEBUG_MSG( 2, ( "<= write" ) ); return( (int) n ); } /* * Notify the peer that the connection is being closed */ int ssl_close_notify( ssl_context *ssl ) { int ret; SSL_DEBUG_MSG( 2, ( "=> write close notify" ) ); if( ( ret = ssl_flush_output( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_flush_output", ret ); return( ret ); } if( ssl->state == SSL_HANDSHAKE_OVER ) { if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_WARNING, SSL_ALERT_MSG_CLOSE_NOTIFY ) ) != 0 ) { return( ret ); } } SSL_DEBUG_MSG( 2, ( "<= write close notify" ) ); return( ret ); } void ssl_transform_free( ssl_transform *transform ) { #if defined(POLARSSL_ZLIB_SUPPORT) deflateEnd( &transform->ctx_deflate ); inflateEnd( &transform->ctx_inflate ); #endif memset( transform, 0, sizeof( ssl_transform ) ); } void ssl_handshake_free( ssl_handshake_params *handshake ) { #if defined(POLARSSL_DHM_C) dhm_free( &handshake->dhm_ctx ); #endif memset( handshake, 0, sizeof( ssl_handshake_params ) ); } void ssl_session_free( ssl_session *session ) { if( session->peer_cert != NULL ) { x509_free( session->peer_cert ); free( session->peer_cert ); } memset( session, 0, sizeof( ssl_session ) ); } /* * Free an SSL context */ void ssl_free( ssl_context *ssl ) { SSL_DEBUG_MSG( 2, ( "=> free" ) ); free( ssl->ciphersuites ); if( ssl->out_ctr != NULL ) { memset( ssl->out_ctr, 0, SSL_BUFFER_LEN ); free( ssl->out_ctr ); } if( ssl->in_ctr != NULL ) { memset( ssl->in_ctr, 0, SSL_BUFFER_LEN ); free( ssl->in_ctr ); } #if defined(POLARSSL_DHM_C) mpi_free( &ssl->dhm_P ); mpi_free( &ssl->dhm_G ); #endif if( ssl->transform ) { ssl_transform_free( 
ssl->transform ); free( ssl->transform ); } if( ssl->handshake ) { ssl_handshake_free( ssl->handshake ); ssl_transform_free( ssl->transform_negotiate ); ssl_session_free( ssl->session_negotiate ); free( ssl->handshake ); free( ssl->transform_negotiate ); free( ssl->session_negotiate ); } if( ssl->session ) { ssl_session_free( ssl->session ); free( ssl->session ); } if ( ssl->hostname != NULL) { memset( ssl->hostname, 0, ssl->hostname_len ); free( ssl->hostname ); ssl->hostname_len = 0; } #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_finish != NULL ) { SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_finish()" ) ); ssl_hw_record_finish( ssl ); } #endif SSL_DEBUG_MSG( 2, ( "<= free" ) ); /* Actually clear after last debug message */ memset( ssl, 0, sizeof( ssl_context ) ); } #endif
/*
 *  SSLv3/TLSv1 shared functions
 *
 *  Copyright (C) 2006-2012, Brainspark B.V.
 *
 *  This file is part of PolarSSL (http://www.polarssl.org)
 *  Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
 *
 *  All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/*
 *  The SSL 3.0 specification was drafted by Netscape in 1996,
 *  and became an IETF standard in 1999.
 *
 *  http://wp.netscape.com/eng/ssl3/
 *  http://www.ietf.org/rfc/rfc2246.txt
 *  http://www.ietf.org/rfc/rfc4346.txt
 */

#include "polarssl/config.h"

#if defined(POLARSSL_SSL_TLS_C)

#include "polarssl/aes.h"
#include "polarssl/arc4.h"
#include "polarssl/camellia.h"
#include "polarssl/des.h"
#include "polarssl/debug.h"
#include "polarssl/ssl.h"
#include "polarssl/sha2.h"

#if defined(POLARSSL_GCM_C)
#include "polarssl/gcm.h"
#endif

#include <stdlib.h>
#include <time.h>

#if defined _MSC_VER && !defined strcasecmp
#define strcasecmp _stricmp
#endif

/* Optional hardware-acceleration hooks; applications may install these. */
#if defined(POLARSSL_SSL_HW_RECORD_ACCEL)
int (*ssl_hw_record_init)(ssl_context *ssl,
                     const unsigned char *key_enc, const unsigned char *key_dec,
                     const unsigned char *iv_enc,  const unsigned char *iv_dec,
                     const unsigned char *mac_enc, const unsigned char *mac_dec) = NULL;
int (*ssl_hw_record_reset)(ssl_context *ssl) = NULL;
int (*ssl_hw_record_write)(ssl_context *ssl) = NULL;
int (*ssl_hw_record_read)(ssl_context *ssl) = NULL;
int (*ssl_hw_record_finish)(ssl_context *ssl) = NULL;
#endif

/* Default RSA decrypt callback: thin wrapper over rsa_pkcs1_decrypt(). */
static int ssl_rsa_decrypt( void *ctx, int mode, size_t *olen,
                        const unsigned char *input, unsigned char *output,
                        size_t output_max_len )
{
    return rsa_pkcs1_decrypt( (rsa_context *) ctx, mode, olen, input, output,
                              output_max_len );
}

/* Default RSA sign callback: thin wrapper over rsa_pkcs1_sign(). */
static int ssl_rsa_sign( void *ctx,
                    int (*f_rng)(void *, unsigned char *, size_t), void *p_rng,
                    int mode, int hash_id, unsigned int hashlen,
                    const unsigned char *hash, unsigned char *sig )
{
    return rsa_pkcs1_sign( (rsa_context *) ctx, f_rng, p_rng, mode, hash_id,
                           hashlen, hash, sig );
}

/* Default RSA key-length callback: returns the modulus length in bytes. */
static size_t ssl_rsa_key_len( void *ctx )
{
    return ( (rsa_context *) ctx )->len;
}

/*
 * Key material generation
 */
/* SSLv3 pseudo-random function; 'label' is unused in SSLv3. */
static int ssl3_prf( unsigned char *secret, size_t slen, char *label,
                     unsigned char *random, size_t rlen,
                     unsigned char *dstbuf, size_t dlen )
{
    size_t i;
    md5_context md5;
    sha1_context sha1;
    unsigned char padding[16];
    unsigned char sha1sum[20];
    ((void)label);

    /*
     * SSLv3:
     *   block =
     *     MD5( secret + SHA1( 'A'   + secret + random ) ) +
     *     MD5( secret + SHA1( 'BB'  + secret + random ) ) +
     *     MD5( secret + SHA1( 'CCC' + secret + random ) ) +
     *     ...
     */
    for( i = 0; i < dlen / 16; i++ )
    {
        memset( padding, 'A' + i, 1 + i );

        sha1_starts( &sha1 );
        sha1_update( &sha1, padding, 1 + i );
        sha1_update( &sha1, secret, slen );
        sha1_update( &sha1, random, rlen );
        sha1_finish( &sha1, sha1sum );

        md5_starts( &md5 );
        md5_update( &md5, secret, slen );
        md5_update( &md5, sha1sum, 20 );
        md5_finish( &md5, dstbuf + i * 16 );
    }

    /* Zeroize local state holding secret-derived material. */
    memset( &md5, 0, sizeof( md5 ) );
    memset( &sha1, 0, sizeof( sha1 ) );
    memset( padding, 0, sizeof( padding ) );
    memset( sha1sum, 0, sizeof( sha1sum ) );

    return( 0 );
}

/* TLS 1.0/1.1 PRF: P_MD5 over the first half of the secret XORed with
 * P_SHA1 over the second half. tmp[20..] holds label+random; the first
 * bytes of tmp carry the A(i) chaining values. */
static int tls1_prf( unsigned char *secret, size_t slen, char *label,
                     unsigned char *random, size_t rlen,
                     unsigned char *dstbuf, size_t dlen )
{
    size_t nb, hs;
    size_t i, j, k;
    unsigned char *S1, *S2;
    unsigned char tmp[128];
    unsigned char h_i[20];

    if( sizeof( tmp ) < 20 + strlen( label ) + rlen )
        return( POLARSSL_ERR_SSL_BAD_INPUT_DATA );

    hs = ( slen + 1 ) / 2;
    S1 = secret;
    S2 = secret + slen - hs;

    nb = strlen( label );
    memcpy( tmp + 20, label, nb );
    memcpy( tmp + 20 + nb, random, rlen );
    nb += rlen;

    /*
     * First compute P_md5(secret,label+random)[0..dlen]
     */
    md5_hmac( S1, hs, tmp + 20, nb, 4 + tmp );

    for( i = 0; i < dlen; i += 16 )
    {
        md5_hmac( S1, hs, 4 + tmp, 16 + nb, h_i );
        md5_hmac( S1, hs, 4 + tmp, 16, 4 + tmp );

        k = ( i + 16 > dlen ) ? dlen % 16 : 16;

        for( j = 0; j < k; j++ )
            dstbuf[i + j] = h_i[j];
    }

    /*
     * XOR out with P_sha1(secret,label+random)[0..dlen]
     */
    sha1_hmac( S2, hs, tmp + 20, nb, tmp );

    for( i = 0; i < dlen; i += 20 )
    {
        sha1_hmac( S2, hs, tmp, 20 + nb, h_i );
        sha1_hmac( S2, hs, tmp, 20, tmp );

        k = ( i + 20 > dlen ) ? dlen % 20 : 20;

        for( j = 0; j < k; j++ )
            dstbuf[i + j] = (unsigned char)( dstbuf[i + j] ^ h_i[j] );
    }

    memset( tmp, 0, sizeof( tmp ) );
    memset( h_i, 0, sizeof( h_i ) );

    return( 0 );
}

/* TLS 1.2 PRF based on HMAC-SHA-256. */
static int tls_prf_sha256( unsigned char *secret, size_t slen, char *label,
                           unsigned char *random, size_t rlen,
                           unsigned char *dstbuf, size_t dlen )
{
    size_t nb;
    size_t i, j, k;
    unsigned char tmp[128];
    unsigned char h_i[32];

    if( sizeof( tmp ) < 32 + strlen( label ) + rlen )
        return( POLARSSL_ERR_SSL_BAD_INPUT_DATA );

    nb = strlen( label );
    memcpy( tmp + 32, label, nb );
    memcpy( tmp + 32 + nb, random, rlen );
    nb += rlen;

    /*
     * Compute P_<hash>(secret, label + random)[0..dlen]
     */
    sha2_hmac( secret, slen, tmp + 32, nb, tmp, 0 );

    for( i = 0; i < dlen; i += 32 )
    {
        sha2_hmac( secret, slen, tmp, 32 + nb, h_i, 0 );
        sha2_hmac( secret, slen, tmp, 32, tmp, 0 );

        k = ( i + 32 > dlen ) ? dlen % 32 : 32;

        for( j = 0; j < k; j++ )
            dstbuf[i + j] = h_i[j];
    }

    memset( tmp, 0, sizeof( tmp ) );
    memset( h_i, 0, sizeof( h_i ) );

    return( 0 );
}

#if defined(POLARSSL_SHA4_C)
/* TLS 1.2 PRF based on HMAC-SHA-384 (used by the *_SHA384 GCM suites). */
static int tls_prf_sha384( unsigned char *secret, size_t slen, char *label,
                           unsigned char *random, size_t rlen,
                           unsigned char *dstbuf, size_t dlen )
{
    size_t nb;
    size_t i, j, k;
    unsigned char tmp[128];
    unsigned char h_i[48];

    if( sizeof( tmp ) < 48 + strlen( label ) + rlen )
        return( POLARSSL_ERR_SSL_BAD_INPUT_DATA );

    nb = strlen( label );
    memcpy( tmp + 48, label, nb );
    memcpy( tmp + 48 + nb, random, rlen );
    nb += rlen;

    /*
     * Compute P_<hash>(secret, label + random)[0..dlen]
     */
    sha4_hmac( secret, slen, tmp + 48, nb, tmp, 1 );

    for( i = 0; i < dlen; i += 48 )
    {
        sha4_hmac( secret, slen, tmp, 48 + nb, h_i, 1 );
        sha4_hmac( secret, slen, tmp, 48, tmp, 1 );

        k = ( i + 48 > dlen ) ? dlen % 48 : 48;

        for( j = 0; j < k; j++ )
            dstbuf[i + j] = h_i[j];
    }

    memset( tmp, 0, sizeof( tmp ) );
    memset( h_i, 0, sizeof( h_i ) );

    return( 0 );
}
#endif

/* Forward declarations of the per-version checksum / verify / Finished
 * helpers selected in ssl_derive_keys() below. */
static void ssl_update_checksum_start(ssl_context *, unsigned char *, size_t);
static void ssl_update_checksum_md5sha1(ssl_context *, unsigned char *, size_t);
static void ssl_update_checksum_sha256(ssl_context *, unsigned char *, size_t);

static void ssl_calc_verify_ssl(ssl_context *,unsigned char *);
static void ssl_calc_verify_tls(ssl_context *,unsigned char *);
static void ssl_calc_verify_tls_sha256(ssl_context *,unsigned char *);

static void ssl_calc_finished_ssl(ssl_context *,unsigned char *,int);
static void ssl_calc_finished_tls(ssl_context *,unsigned char *,int);
static void ssl_calc_finished_tls_sha256(ssl_context *,unsigned char *,int);

#if defined(POLARSSL_SHA4_C)
static void ssl_update_checksum_sha384(ssl_context *, unsigned char *, size_t);
static void ssl_calc_verify_tls_sha384(ssl_context *,unsigned char *);
static void ssl_calc_finished_tls_sha384(ssl_context *,unsigned char *,int);
#endif

/* Derive the master secret and key block for the negotiated session and
 * configure the version-specific PRF / verify / Finished functions. */
int ssl_derive_keys( ssl_context *ssl )
{
    unsigned char tmp[64];
    unsigned char keyblk[256];
    unsigned char *key1;
    unsigned char *key2;
    unsigned int iv_copy_len;
    ssl_session *session = ssl->session_negotiate;
    ssl_transform *transform = ssl->transform_negotiate;
    ssl_handshake_params *handshake = ssl->handshake;

    SSL_DEBUG_MSG( 2, ( "=> derive keys" ) );

    /*
     * Set appropriate PRF function and other SSL / TLS / TLS1.2 functions
     */
    if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
    {
        handshake->tls_prf = ssl3_prf;
        handshake->calc_verify = ssl_calc_verify_ssl;
        handshake->calc_finished = ssl_calc_finished_ssl;
    }
    else if( ssl->minor_ver < SSL_MINOR_VERSION_3 )
    {
        handshake->tls_prf = tls1_prf;
        handshake->calc_verify = ssl_calc_verify_tls;
        handshake->calc_finished = ssl_calc_finished_tls;
    }
#if defined(POLARSSL_SHA4_C)
    else if( session->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 ||
             session->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 )
    {
        handshake->tls_prf = tls_prf_sha384;
        handshake->calc_verify = ssl_calc_verify_tls_sha384;
        handshake->calc_finished = ssl_calc_finished_tls_sha384;
    }
#endif
    else
    {
        handshake->tls_prf = tls_prf_sha256;
        handshake->calc_verify = ssl_calc_verify_tls_sha256;
        handshake->calc_finished = ssl_calc_finished_tls_sha256;
    }

    /*
     * SSLv3:
     *   master =
     *     MD5( premaster + SHA1( 'A'   + premaster + randbytes ) ) +
     *     MD5( premaster + SHA1( 'BB'  + premaster + randbytes ) ) +
     *     MD5( premaster + SHA1( 'CCC' + premaster + randbytes ) )
     *
     * TLSv1:
     *   master = PRF( premaster, "master secret", randbytes )[0..47]
     */
    if( handshake->resume == 0 )
    {
        SSL_DEBUG_BUF( 3, "premaster secret", handshake->premaster,
                       handshake->pmslen );

        handshake->tls_prf( handshake->premaster, handshake->pmslen,
                            "master secret",
                            handshake->randbytes, 64, session->master, 48 );

        /* The premaster is no longer needed once the master is derived. */
        memset( handshake->premaster, 0, sizeof( handshake->premaster ) );
    }
    else
        SSL_DEBUG_MSG( 3, ( "no premaster (session resumed)" ) );

    /*
     * Swap the client and server random values.
     */
    memcpy( tmp, handshake->randbytes, 64 );
    memcpy( handshake->randbytes, tmp + 32, 32 );
    memcpy( handshake->randbytes + 32, tmp, 32 );
    memset( tmp, 0, sizeof( tmp ) );

    /*
     * SSLv3:
     *   key block =
     *     MD5( master + SHA1( 'A'    + master + randbytes ) ) +
     *     MD5( master + SHA1( 'BB'   + master + randbytes ) ) +
     *     MD5( master + SHA1( 'CCC'  + master + randbytes ) ) +
     *     MD5( master + SHA1( 'DDDD' + master + randbytes ) ) +
     *     ...
     *
     * TLSv1:
     *   key block = PRF( master, "key expansion", randbytes )
     */
    handshake->tls_prf( session->master, 48, "key expansion",
                        handshake->randbytes, 64, keyblk, 256 );

    SSL_DEBUG_MSG( 3, ( "ciphersuite = %s",
                   ssl_get_ciphersuite_name( session->ciphersuite ) ) );
    SSL_DEBUG_BUF( 3, "master secret", session->master, 48 );
    SSL_DEBUG_BUF( 4, "random bytes", handshake->randbytes, 64 );
    SSL_DEBUG_BUF( 4, "key block", keyblk, 256 );

    memset( handshake->randbytes, 0, sizeof( handshake->randbytes ) );

    /*
     * Determine the appropriate key, IV and MAC length.
     */
    switch( session->ciphersuite )
    {
#if defined(POLARSSL_ARC4_C)
        case TLS_RSA_WITH_RC4_128_MD5:
            transform->keylen = 16; transform->minlen = 16;
            transform->ivlen  =  0; transform->maclen = 16;
            break;

        case TLS_RSA_WITH_RC4_128_SHA:
            transform->keylen = 16; transform->minlen = 20;
            transform->ivlen  =  0; transform->maclen = 20;
            break;
#endif

#if defined(POLARSSL_DES_C)
        case TLS_RSA_WITH_3DES_EDE_CBC_SHA:
        case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA:
            transform->keylen = 24; transform->minlen = 24;
            transform->ivlen  =  8; transform->maclen = 20;
            break;
#endif

#if defined(POLARSSL_AES_C)
        case TLS_RSA_WITH_AES_128_CBC_SHA:
        case TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
            transform->keylen = 16; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 20;
            break;

        case TLS_RSA_WITH_AES_256_CBC_SHA:
        case TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
            transform->keylen = 32; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 20;
            break;

#if defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_AES_128_CBC_SHA256:
        case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256:
            transform->keylen = 16; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 32;
            break;

        case TLS_RSA_WITH_AES_256_CBC_SHA256:
        case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256:
            transform->keylen = 32; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 32;
            break;
#endif

#if defined(POLARSSL_GCM_C)
        case TLS_RSA_WITH_AES_128_GCM_SHA256:
        case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256:
            transform->keylen = 16; transform->minlen = 1;
            transform->ivlen  = 12; transform->maclen = 0;
            transform->fixed_ivlen = 4;
            break;

        case TLS_RSA_WITH_AES_256_GCM_SHA384:
        case TLS_DHE_RSA_WITH_AES_256_GCM_SHA384:
            transform->keylen = 32; transform->minlen = 1;
            transform->ivlen  = 12; transform->maclen = 0;
            transform->fixed_ivlen = 4;
            break;
#endif
#endif

#if defined(POLARSSL_CAMELLIA_C)
        case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA:
        case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA:
            transform->keylen = 16; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 20;
            break;

        case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA:
        case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA:
            transform->keylen = 32; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 20;
            break;

#if defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256:
        case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256:
            transform->keylen = 16; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 32;
            break;

        case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256:
        case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256:
            transform->keylen = 32; transform->minlen = 32;
            transform->ivlen  = 16; transform->maclen = 32;
            break;
#endif
#endif

#if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES)
#if defined(POLARSSL_CIPHER_NULL_CIPHER)
        case TLS_RSA_WITH_NULL_MD5:
            transform->keylen = 0; transform->minlen = 0;
            transform->ivlen  = 0; transform->maclen = 16;
            break;

        case TLS_RSA_WITH_NULL_SHA:
            transform->keylen = 0; transform->minlen = 0;
            transform->ivlen  = 0; transform->maclen = 20;
            break;

        case TLS_RSA_WITH_NULL_SHA256:
            transform->keylen = 0; transform->minlen = 0;
            transform->ivlen  = 0; transform->maclen = 32;
            break;
#endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */

#if defined(POLARSSL_DES_C)
        case TLS_RSA_WITH_DES_CBC_SHA:
        case TLS_DHE_RSA_WITH_DES_CBC_SHA:
            transform->keylen = 8; transform->minlen = 8;
            transform->ivlen  = 8; transform->maclen = 20;
            break;
#endif
#endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */

        default:
            SSL_DEBUG_MSG( 1, ( 
"ciphersuite %s is not available", ssl_get_ciphersuite_name( session->ciphersuite ) ) ); return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } SSL_DEBUG_MSG( 3, ( "keylen: %d, minlen: %d, ivlen: %d, maclen: %d", transform->keylen, transform->minlen, transform->ivlen, transform->maclen ) ); /* * Finally setup the cipher contexts, IVs and MAC secrets. */ if( ssl->endpoint == SSL_IS_CLIENT ) { key1 = keyblk + transform->maclen * 2; key2 = keyblk + transform->maclen * 2 + transform->keylen; memcpy( transform->mac_enc, keyblk, transform->maclen ); memcpy( transform->mac_dec, keyblk + transform->maclen, transform->maclen ); /* * This is not used in TLS v1.1. */ iv_copy_len = ( transform->fixed_ivlen ) ? transform->fixed_ivlen : transform->ivlen; memcpy( transform->iv_enc, key2 + transform->keylen, iv_copy_len ); memcpy( transform->iv_dec, key2 + transform->keylen + iv_copy_len, iv_copy_len ); } else { key1 = keyblk + transform->maclen * 2 + transform->keylen; key2 = keyblk + transform->maclen * 2; memcpy( transform->mac_dec, keyblk, transform->maclen ); memcpy( transform->mac_enc, keyblk + transform->maclen, transform->maclen ); /* * This is not used in TLS v1.1. */ iv_copy_len = ( transform->fixed_ivlen ) ? 
transform->fixed_ivlen : transform->ivlen; memcpy( transform->iv_dec, key1 + transform->keylen, iv_copy_len ); memcpy( transform->iv_enc, key1 + transform->keylen + iv_copy_len, iv_copy_len ); } #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_init != NULL) { int ret = 0; SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_init()" ) ); if( ( ret = ssl_hw_record_init( ssl, key1, key2, transform->iv_enc, transform->iv_dec, transform->mac_enc, transform->mac_dec ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_hw_record_init", ret ); return POLARSSL_ERR_SSL_HW_ACCEL_FAILED; } } #endif switch( session->ciphersuite ) { #if defined(POLARSSL_ARC4_C) case TLS_RSA_WITH_RC4_128_MD5: case TLS_RSA_WITH_RC4_128_SHA: arc4_setup( (arc4_context *) transform->ctx_enc, key1, transform->keylen ); arc4_setup( (arc4_context *) transform->ctx_dec, key2, transform->keylen ); break; #endif #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_3DES_EDE_CBC_SHA: case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA: des3_set3key_enc( (des3_context *) transform->ctx_enc, key1 ); des3_set3key_dec( (des3_context *) transform->ctx_dec, key2 ); break; #endif #if defined(POLARSSL_AES_C) case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_DHE_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA256: case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256: aes_setkey_enc( (aes_context *) transform->ctx_enc, key1, 128 ); aes_setkey_dec( (aes_context *) transform->ctx_dec, key2, 128 ); break; case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_DHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA256: case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256: aes_setkey_enc( (aes_context *) transform->ctx_enc, key1, 256 ); aes_setkey_dec( (aes_context *) transform->ctx_dec, key2, 256 ); break; #if defined(POLARSSL_GCM_C) case TLS_RSA_WITH_AES_128_GCM_SHA256: case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256: gcm_init( (gcm_context *) transform->ctx_enc, key1, 128 ); gcm_init( (gcm_context *) transform->ctx_dec, key2, 128 ); break; case TLS_RSA_WITH_AES_256_GCM_SHA384: case 
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384: gcm_init( (gcm_context *) transform->ctx_enc, key1, 256 ); gcm_init( (gcm_context *) transform->ctx_dec, key2, 256 ); break; #endif #endif #if defined(POLARSSL_CAMELLIA_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA: case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA: case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256: case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256: camellia_setkey_enc( (camellia_context *) transform->ctx_enc, key1, 128 ); camellia_setkey_dec( (camellia_context *) transform->ctx_dec, key2, 128 ); break; case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA: case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA: case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256: case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256: camellia_setkey_enc( (camellia_context *) transform->ctx_enc, key1, 256 ); camellia_setkey_dec( (camellia_context *) transform->ctx_dec, key2, 256 ); break; #endif #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) #if defined(POLARSSL_CIPHER_NULL_CIPHER) case TLS_RSA_WITH_NULL_MD5: case TLS_RSA_WITH_NULL_SHA: case TLS_RSA_WITH_NULL_SHA256: break; #endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */ #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_DES_CBC_SHA: case TLS_DHE_RSA_WITH_DES_CBC_SHA: des_setkey_enc( (des_context *) transform->ctx_enc, key1 ); des_setkey_dec( (des_context *) transform->ctx_dec, key2 ); break; #endif #endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */ default: return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE ); } memset( keyblk, 0, sizeof( keyblk ) ); #if defined(POLARSSL_ZLIB_SUPPORT) // Initialize compression // if( session->compression == SSL_COMPRESS_DEFLATE ) { SSL_DEBUG_MSG( 3, ( "Initializing zlib states" ) ); memset( &transform->ctx_deflate, 0, sizeof( transform->ctx_deflate ) ); memset( &transform->ctx_inflate, 0, sizeof( transform->ctx_inflate ) ); if( deflateInit( &transform->ctx_deflate, Z_DEFAULT_COMPRESSION ) != Z_OK || inflateInit( &transform->ctx_inflate ) != Z_OK ) { SSL_DEBUG_MSG( 1, ( "Failed to initialize compression" ) ); 
return( POLARSSL_ERR_SSL_COMPRESSION_FAILED ); } } #endif /* POLARSSL_ZLIB_SUPPORT */ SSL_DEBUG_MSG( 2, ( "<= derive keys" ) ); return( 0 ); } void ssl_calc_verify_ssl( ssl_context *ssl, unsigned char hash[36] ) { md5_context md5; sha1_context sha1; unsigned char pad_1[48]; unsigned char pad_2[48]; SSL_DEBUG_MSG( 2, ( "=> calc verify ssl" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); memset( pad_1, 0x36, 48 ); memset( pad_2, 0x5C, 48 ); md5_update( &md5, ssl->session_negotiate->master, 48 ); md5_update( &md5, pad_1, 48 ); md5_finish( &md5, hash ); md5_starts( &md5 ); md5_update( &md5, ssl->session_negotiate->master, 48 ); md5_update( &md5, pad_2, 48 ); md5_update( &md5, hash, 16 ); md5_finish( &md5, hash ); sha1_update( &sha1, ssl->session_negotiate->master, 48 ); sha1_update( &sha1, pad_1, 40 ); sha1_finish( &sha1, hash + 16 ); sha1_starts( &sha1 ); sha1_update( &sha1, ssl->session_negotiate->master, 48 ); sha1_update( &sha1, pad_2, 40 ); sha1_update( &sha1, hash + 16, 20 ); sha1_finish( &sha1, hash + 16 ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 ); SSL_DEBUG_MSG( 2, ( "<= calc verify" ) ); return; } void ssl_calc_verify_tls( ssl_context *ssl, unsigned char hash[36] ) { md5_context md5; sha1_context sha1; SSL_DEBUG_MSG( 2, ( "=> calc verify tls" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); md5_finish( &md5, hash ); sha1_finish( &sha1, hash + 16 ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 ); SSL_DEBUG_MSG( 2, ( "<= calc verify" ) ); return; } void ssl_calc_verify_tls_sha256( ssl_context *ssl, unsigned char hash[32] ) { sha2_context sha2; SSL_DEBUG_MSG( 2, ( "=> calc verify sha256" ) ); memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) ); sha2_finish( &sha2, hash ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 32 ); SSL_DEBUG_MSG( 2, ( "<= 
calc verify" ) ); return; } #if defined(POLARSSL_SHA4_C) void ssl_calc_verify_tls_sha384( ssl_context *ssl, unsigned char hash[48] ) { sha4_context sha4; SSL_DEBUG_MSG( 2, ( "=> calc verify sha384" ) ); memcpy( &sha4, &ssl->handshake->fin_sha4, sizeof(sha4_context) ); sha4_finish( &sha4, hash ); SSL_DEBUG_BUF( 3, "calculated verify result", hash, 48 ); SSL_DEBUG_MSG( 2, ( "<= calc verify" ) ); return; } #endif /* * SSLv3.0 MAC functions */ static void ssl_mac_md5( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int type ) { unsigned char header[11]; unsigned char padding[48]; md5_context md5; memcpy( header, ctr, 8 ); header[ 8] = (unsigned char) type; header[ 9] = (unsigned char)( len >> 8 ); header[10] = (unsigned char)( len ); memset( padding, 0x36, 48 ); md5_starts( &md5 ); md5_update( &md5, secret, 16 ); md5_update( &md5, padding, 48 ); md5_update( &md5, header, 11 ); md5_update( &md5, buf, len ); md5_finish( &md5, buf + len ); memset( padding, 0x5C, 48 ); md5_starts( &md5 ); md5_update( &md5, secret, 16 ); md5_update( &md5, padding, 48 ); md5_update( &md5, buf + len, 16 ); md5_finish( &md5, buf + len ); } static void ssl_mac_sha1( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int type ) { unsigned char header[11]; unsigned char padding[40]; sha1_context sha1; memcpy( header, ctr, 8 ); header[ 8] = (unsigned char) type; header[ 9] = (unsigned char)( len >> 8 ); header[10] = (unsigned char)( len ); memset( padding, 0x36, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, header, 11 ); sha1_update( &sha1, buf, len ); sha1_finish( &sha1, buf + len ); memset( padding, 0x5C, 40 ); sha1_starts( &sha1 ); sha1_update( &sha1, secret, 20 ); sha1_update( &sha1, padding, 40 ); sha1_update( &sha1, buf + len, 20 ); sha1_finish( &sha1, buf + len ); } static void ssl_mac_sha2( unsigned char *secret, unsigned char *buf, size_t len, unsigned char *ctr, int 
type )
{
    unsigned char header[11];
    unsigned char padding[32];
    sha2_context sha2;

    memcpy( header, ctr, 8 );
    header[ 8] = (unsigned char) type;
    header[ 9] = (unsigned char)( len >> 8 );
    header[10] = (unsigned char)( len );

    memset( padding, 0x36, 32 );
    sha2_starts( &sha2, 0 );
    sha2_update( &sha2, secret, 32 );
    sha2_update( &sha2, padding, 32 );
    sha2_update( &sha2, header, 11 );
    sha2_update( &sha2, buf, len );
    sha2_finish( &sha2, buf + len );

    memset( padding, 0x5C, 32 );
    sha2_starts( &sha2, 0 );
    sha2_update( &sha2, secret, 32 );
    sha2_update( &sha2, padding, 32 );
    sha2_update( &sha2, buf + len, 32 );
    sha2_finish( &sha2, buf + len );
}

/*
 * Encryption/decryption functions
 */

/*
 * MAC-then-encrypt the pending outgoing record in ssl->out_msg.
 * Handles stream (RC4/NULL), AEAD (GCM, ivlen == 12) and CBC block
 * ciphers, updating out_msglen for MAC, IV and padding expansion and
 * incrementing the outgoing sequence counter on success.
 */
static int ssl_encrypt_buf( ssl_context *ssl )
{
    size_t i, padlen;

    SSL_DEBUG_MSG( 2, ( "=> encrypt buf" ) );

    /*
     * Add MAC then encrypt
     */
    if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
    {
        /* SSLv3 uses the hand-rolled pad-based MAC constructions. */
        if( ssl->transform_out->maclen == 16 )
            ssl_mac_md5( ssl->transform_out->mac_enc,
                         ssl->out_msg, ssl->out_msglen,
                         ssl->out_ctr, ssl->out_msgtype );
        else if( ssl->transform_out->maclen == 20 )
            ssl_mac_sha1( ssl->transform_out->mac_enc,
                          ssl->out_msg, ssl->out_msglen,
                          ssl->out_ctr, ssl->out_msgtype );
        else if( ssl->transform_out->maclen == 32 )
            ssl_mac_sha2( ssl->transform_out->mac_enc,
                          ssl->out_msg, ssl->out_msglen,
                          ssl->out_ctr, ssl->out_msgtype );
        else if( ssl->transform_out->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_out->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }
    else
    {
        /* TLS uses real HMAC over the 13-byte implicit header + payload. */
        if( ssl->transform_out->maclen == 16 )
        {
            md5_context ctx;
            md5_hmac_starts( &ctx, ssl->transform_out->mac_enc, 16 );
            md5_hmac_update( &ctx, ssl->out_ctr, 13 );
            md5_hmac_update( &ctx, ssl->out_msg, ssl->out_msglen );
            md5_hmac_finish( &ctx, ssl->out_msg + ssl->out_msglen );
            memset( &ctx, 0, sizeof(md5_context));
        }
        else if( ssl->transform_out->maclen == 20 )
        {
            sha1_context ctx;
            sha1_hmac_starts( &ctx, ssl->transform_out->mac_enc, 20 );
            sha1_hmac_update( &ctx, ssl->out_ctr, 13 );
            sha1_hmac_update( &ctx, ssl->out_msg, ssl->out_msglen );
            sha1_hmac_finish( &ctx, ssl->out_msg + ssl->out_msglen );
            memset( &ctx, 0, sizeof(sha1_context));
        }
        else if( ssl->transform_out->maclen == 32 )
        {
            sha2_context ctx;
            sha2_hmac_starts( &ctx, ssl->transform_out->mac_enc, 32, 0 );
            sha2_hmac_update( &ctx, ssl->out_ctr, 13 );
            sha2_hmac_update( &ctx, ssl->out_msg, ssl->out_msglen );
            sha2_hmac_finish( &ctx, ssl->out_msg + ssl->out_msglen );
            memset( &ctx, 0, sizeof(sha2_context));
        }
        else if( ssl->transform_out->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_out->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }

    SSL_DEBUG_BUF( 4, "computed mac",
                   ssl->out_msg + ssl->out_msglen,
                   ssl->transform_out->maclen );

    ssl->out_msglen += ssl->transform_out->maclen;

    if( ssl->transform_out->ivlen == 0 )
    {
        /* Stream cipher (or NULL cipher): no IV, no padding. */
        padlen = 0;

        SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, "
                            "including %d bytes of padding",
                       ssl->out_msglen, 0 ) );

        SSL_DEBUG_BUF( 4, "before encrypt: output payload",
                       ssl->out_msg, ssl->out_msglen );

#if defined(POLARSSL_ARC4_C)
        if( ssl->session_out->ciphersuite == TLS_RSA_WITH_RC4_128_MD5 ||
            ssl->session_out->ciphersuite == TLS_RSA_WITH_RC4_128_SHA )
        {
            arc4_crypt( (arc4_context *) ssl->transform_out->ctx_enc,
                        ssl->out_msglen, ssl->out_msg, ssl->out_msg );
        } else
#endif
#if defined(POLARSSL_CIPHER_NULL_CIPHER)
        if( ssl->session_out->ciphersuite == TLS_RSA_WITH_NULL_MD5 ||
            ssl->session_out->ciphersuite == TLS_RSA_WITH_NULL_SHA ||
            ssl->session_out->ciphersuite == TLS_RSA_WITH_NULL_SHA256 )
        {
        } else
#endif
        return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
    }
    else if( ssl->transform_out->ivlen == 12 )
    {
        /* AEAD (GCM): explicit 8-byte nonce prefix + 16-byte tag suffix. */
        size_t enc_msglen;
        unsigned char *enc_msg;
        unsigned char add_data[13];
        int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE;

        padlen = 0;
        enc_msglen = ssl->out_msglen;

        memcpy( add_data, ssl->out_ctr, 8 );
        add_data[8]  = ssl->out_msgtype;
        add_data[9]  = ssl->major_ver;
        add_data[10] = ssl->minor_ver;
        add_data[11] = ( ssl->out_msglen >> 8 ) & 0xFF;
        add_data[12] = ssl->out_msglen & 0xFF;

        SSL_DEBUG_BUF( 4, "additional data used for AEAD",
                       add_data, 13 );

#if defined(POLARSSL_AES_C) && defined(POLARSSL_GCM_C)
        if( ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 ||
            ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 )
        {
            /*
             * Generate IV
             */
            ret = ssl->f_rng( ssl->p_rng,
                              ssl->transform_out->iv_enc +
                              ssl->transform_out->fixed_ivlen,
                              ssl->transform_out->ivlen -
                              ssl->transform_out->fixed_ivlen );
            if( ret != 0 )
                return( ret );

            /*
             * Shift message for ivlen bytes and prepend IV
             */
            memmove( ssl->out_msg + ssl->transform_out->ivlen -
                     ssl->transform_out->fixed_ivlen,
                     ssl->out_msg, ssl->out_msglen );
            memcpy( ssl->out_msg,
                    ssl->transform_out->iv_enc +
                    ssl->transform_out->fixed_ivlen,
                    ssl->transform_out->ivlen -
                    ssl->transform_out->fixed_ivlen );

            /*
             * Fix pointer positions and message length with added IV
             */
            enc_msg = ssl->out_msg + ssl->transform_out->ivlen -
                      ssl->transform_out->fixed_ivlen;
            enc_msglen = ssl->out_msglen;
            ssl->out_msglen += ssl->transform_out->ivlen -
                               ssl->transform_out->fixed_ivlen;

            SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, "
                                "including %d bytes of padding",
                           ssl->out_msglen, 0 ) );

            SSL_DEBUG_BUF( 4, "before encrypt: output payload",
                           ssl->out_msg, ssl->out_msglen );

            /*
             * Adjust for tag
             */
            ssl->out_msglen += 16;

            gcm_crypt_and_tag( (gcm_context *) ssl->transform_out->ctx_enc,
                               GCM_ENCRYPT, enc_msglen,
                               ssl->transform_out->iv_enc,
                               ssl->transform_out->ivlen,
                               add_data, 13,
                               enc_msg, enc_msg,
                               16, enc_msg + enc_msglen );

            SSL_DEBUG_BUF( 4, "after encrypt: tag",
                           enc_msg + enc_msglen, 16 );
        } else
#endif
        return( ret );
    }
    else
    {
        /* CBC block cipher: pad to a multiple of the block size. */
        unsigned char *enc_msg;
        size_t enc_msglen;

        padlen = ssl->transform_out->ivlen -
                 ( ssl->out_msglen + 1 ) % ssl->transform_out->ivlen;
        if( padlen == ssl->transform_out->ivlen )
            padlen = 0;

        for( i = 0; i <= padlen; i++ )
            ssl->out_msg[ssl->out_msglen + i] = (unsigned char) padlen;

        ssl->out_msglen += padlen + 1;

        enc_msglen = ssl->out_msglen;
        enc_msg = ssl->out_msg;

        /*
         * Prepend per-record IV for block cipher in TLS v1.1 and up as per
         * Method 1 (6.2.3.2. in RFC4346 and RFC5246)
         */
        if( ssl->minor_ver >= SSL_MINOR_VERSION_2 )
        {
            /*
             * Generate IV
             */
            int ret = ssl->f_rng( ssl->p_rng, ssl->transform_out->iv_enc,
                                  ssl->transform_out->ivlen );
            if( ret != 0 )
                return( ret );

            /*
             * Shift message for ivlen bytes and prepend IV
             */
            memmove( ssl->out_msg + ssl->transform_out->ivlen,
                     ssl->out_msg, ssl->out_msglen );
            memcpy( ssl->out_msg, ssl->transform_out->iv_enc,
                    ssl->transform_out->ivlen );

            /*
             * Fix pointer positions and message length with added IV
             */
            enc_msg = ssl->out_msg + ssl->transform_out->ivlen;
            enc_msglen = ssl->out_msglen;
            ssl->out_msglen += ssl->transform_out->ivlen;
        }

        SSL_DEBUG_MSG( 3, ( "before encrypt: msglen = %d, "
                            "including %d bytes of IV and %d bytes of padding",
                       ssl->out_msglen, ssl->transform_out->ivlen,
                       padlen + 1 ) );

        SSL_DEBUG_BUF( 4, "before encrypt: output payload",
                       ssl->out_msg, ssl->out_msglen );

        switch( ssl->transform_out->ivlen )
        {
#if defined(POLARSSL_DES_C)
            case 8:
#if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES)
                if( ssl->session_out->ciphersuite == TLS_RSA_WITH_DES_CBC_SHA ||
                    ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_DES_CBC_SHA )
                {
                    des_crypt_cbc( (des_context *) ssl->transform_out->ctx_enc,
                                   DES_ENCRYPT, enc_msglen,
                                   ssl->transform_out->iv_enc,
                                   enc_msg, enc_msg );
                } else
#endif
                des3_crypt_cbc( (des3_context *) ssl->transform_out->ctx_enc,
                                DES_ENCRYPT, enc_msglen,
                                ssl->transform_out->iv_enc,
                                enc_msg, enc_msg );
                break;
#endif

            case 16:
#if defined(POLARSSL_AES_C)
                if ( ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_out->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA256 ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 )
                {
                    aes_crypt_cbc( (aes_context *) ssl->transform_out->ctx_enc,
                                   AES_ENCRYPT, enc_msglen,
                                   ssl->transform_out->iv_enc,
                                   enc_msg, enc_msg);
                    break;
                }
#endif
#if defined(POLARSSL_CAMELLIA_C)
                if ( ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_out->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 ||
                     ssl->session_out->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 )
                {
                    camellia_crypt_cbc( (camellia_context *) ssl->transform_out->ctx_enc,
                                        CAMELLIA_ENCRYPT, enc_msglen,
                                        ssl->transform_out->iv_enc,
                                        enc_msg, enc_msg );
                    break;
                }
#endif

            default:
                return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }

    /* Increment the 64-bit big-endian outgoing sequence counter. */
    for( i = 8; i > 0; i-- )
        if( ++ssl->out_ctr[i - 1] != 0 )
            break;

    SSL_DEBUG_MSG( 2, ( "<= encrypt buf" ) );

    return( 0 );
}

/*
 * TODO: Use digest version when integrated!
 */
#define POLARSSL_SSL_MAX_MAC_SIZE 32

/*
 * Decrypt and authenticate the incoming record in ssl->in_msg.
 * Handles stream, AEAD (GCM) and CBC ciphers.  For CBC, the padding and
 * MAC checks are deliberately structured to take time independent of the
 * padding value (countermeasure against Lucky-Thirteen-style timing
 * attacks), so the statement order here is significant.
 */
static int ssl_decrypt_buf( ssl_context *ssl )
{
    size_t i, padlen = 0, correct = 1;
    unsigned char tmp[POLARSSL_SSL_MAX_MAC_SIZE];

    SSL_DEBUG_MSG( 2, ( "=> decrypt buf" ) );

    if( ssl->in_msglen < ssl->transform_in->minlen )
    {
        SSL_DEBUG_MSG( 1, ( "in_msglen (%d) < minlen (%d)",
                       ssl->in_msglen, ssl->transform_in->minlen ) );
        return( POLARSSL_ERR_SSL_INVALID_MAC );
    }

    if( ssl->transform_in->ivlen == 0 )
    {
#if defined(POLARSSL_ARC4_C)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_MD5 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_SHA )
        {
            arc4_crypt( (arc4_context *) ssl->transform_in->ctx_dec,
                        ssl->in_msglen, ssl->in_msg, ssl->in_msg );
        } else
#endif
#if defined(POLARSSL_CIPHER_NULL_CIPHER)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_MD5 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA256 )
        {
        } else
#endif
        return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
    }
    else if( ssl->transform_in->ivlen == 12 )
    {
        /* AEAD (GCM): explicit nonce prefix + 16-byte tag suffix. */
        unsigned char *dec_msg;
        unsigned char *dec_msg_result;
        size_t dec_msglen;
        unsigned char add_data[13];
        int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE;

#if defined(POLARSSL_AES_C) && defined(POLARSSL_GCM_C)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 ||
            ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 )
        {
            dec_msglen = ssl->in_msglen - ( ssl->transform_in->ivlen -
                                            ssl->transform_in->fixed_ivlen );
            dec_msglen -= 16;
            dec_msg = ssl->in_msg + ( ssl->transform_in->ivlen -
                                      ssl->transform_in->fixed_ivlen );
            dec_msg_result = ssl->in_msg;
            ssl->in_msglen = dec_msglen;

            memcpy( add_data, ssl->in_ctr, 8 );
            add_data[8]  = ssl->in_msgtype;
            add_data[9]  = ssl->major_ver;
            add_data[10] = ssl->minor_ver;
            add_data[11] = ( ssl->in_msglen >> 8 ) & 0xFF;
            add_data[12] = ssl->in_msglen & 0xFF;

            SSL_DEBUG_BUF( 4, "additional data used for AEAD",
                           add_data, 13 );

            memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen,
                    ssl->in_msg,
                    ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen );

            SSL_DEBUG_BUF( 4, "IV used", ssl->transform_in->iv_dec,
                           ssl->transform_in->ivlen );
            SSL_DEBUG_BUF( 4, "TAG used", dec_msg + dec_msglen, 16 );

            /* NOTE(review): this memcpy repeats the one above verbatim;
             * it is harmless but looks redundant — confirm upstream. */
            memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen,
                    ssl->in_msg,
                    ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen );

            ret = gcm_auth_decrypt( (gcm_context *) ssl->transform_in->ctx_dec,
                                    dec_msglen,
                                    ssl->transform_in->iv_dec,
                                    ssl->transform_in->ivlen,
                                    add_data, 13,
                                    dec_msg + dec_msglen, 16,
                                    dec_msg, dec_msg_result );

            if( ret != 0 )
            {
                SSL_DEBUG_MSG( 1, ( "AEAD decrypt failed on validation (ret = -0x%02x)",
                                    -ret ) );
                return( POLARSSL_ERR_SSL_INVALID_MAC );
            }
        } else
#endif
        return( ret );
    }
    else
    {
        /*
         * Decrypt and check the padding
         */
        unsigned char *dec_msg;
        unsigned char *dec_msg_result;
        size_t dec_msglen;
        size_t minlen = 0;

        /*
         * Check immediate ciphertext sanity
         */
        if( ssl->in_msglen % ssl->transform_in->ivlen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "msglen (%d) %% ivlen (%d) != 0",
                           ssl->in_msglen, ssl->transform_in->ivlen ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }

        if( ssl->minor_ver >= SSL_MINOR_VERSION_2 )
            minlen += ssl->transform_in->ivlen;

        if( ssl->in_msglen < minlen + ssl->transform_in->ivlen ||
            ssl->in_msglen < minlen + ssl->transform_in->maclen + 1 )
        {
            SSL_DEBUG_MSG( 1, ( "msglen (%d) < max( ivlen(%d), maclen (%d) + 1 ) ( + expl IV )",
                           ssl->in_msglen, ssl->transform_in->ivlen,
                           ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }

        dec_msglen = ssl->in_msglen;
        dec_msg = ssl->in_msg;
        dec_msg_result = ssl->in_msg;

        /*
         * Initialize for prepended IV for block cipher in TLS v1.1 and up
         */
        if( ssl->minor_ver >= SSL_MINOR_VERSION_2 )
        {
            dec_msg += ssl->transform_in->ivlen;
            dec_msglen -= ssl->transform_in->ivlen;
            ssl->in_msglen -= ssl->transform_in->ivlen;

            for( i = 0; i < ssl->transform_in->ivlen; i++ )
                ssl->transform_in->iv_dec[i] = ssl->in_msg[i];
        }

        switch( ssl->transform_in->ivlen )
        {
#if defined(POLARSSL_DES_C)
            case 8:
#if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES)
                if( ssl->session_in->ciphersuite == TLS_RSA_WITH_DES_CBC_SHA ||
                    ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_DES_CBC_SHA )
                {
                    des_crypt_cbc( (des_context *) ssl->transform_in->ctx_dec,
                                   DES_DECRYPT, dec_msglen,
                                   ssl->transform_in->iv_dec,
                                   dec_msg, dec_msg_result );
                } else
#endif
                des3_crypt_cbc( (des3_context *) ssl->transform_in->ctx_dec,
                                DES_DECRYPT, dec_msglen,
                                ssl->transform_in->iv_dec,
                                dec_msg, dec_msg_result );
                break;
#endif

            case 16:
#if defined(POLARSSL_AES_C)
                if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 )
                {
                    aes_crypt_cbc( (aes_context *) ssl->transform_in->ctx_dec,
                                   AES_DECRYPT, dec_msglen,
                                   ssl->transform_in->iv_dec,
                                   dec_msg, dec_msg_result );
                    break;
                }
#endif
#if defined(POLARSSL_CAMELLIA_C)
                if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 )
                {
                    camellia_crypt_cbc( (camellia_context *) ssl->transform_in->ctx_dec,
                                        CAMELLIA_DECRYPT, dec_msglen,
                                        ssl->transform_in->iv_dec,
                                        dec_msg, dec_msg_result );
                    break;
                }
#endif

            default:
                return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }

        padlen = 1 + ssl->in_msg[ssl->in_msglen - 1];

        if( ssl->in_msglen < ssl->transform_in->maclen + padlen )
        {
#if defined(POLARSSL_SSL_DEBUG_ALL)
            SSL_DEBUG_MSG( 1, ( "msglen (%d) < maclen (%d) + padlen (%d)",
                           ssl->in_msglen, ssl->transform_in->maclen, padlen ) );
#endif
            padlen = 0;
            correct = 0;
        }

        if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
        {
            if( padlen > ssl->transform_in->ivlen )
            {
#if defined(POLARSSL_SSL_DEBUG_ALL)
                SSL_DEBUG_MSG( 1, ( "bad padding length: is %d, "
                                    "should be no more than %d",
                               padlen, ssl->transform_in->ivlen ) );
#endif
                correct = 0;
            }
        }
        else
        {
            /*
             * TLSv1+: always check the padding up to the first failure
             * and fake check up to 256 bytes of padding
             *
             * NOTE(review): the fake loop indexes in_msg up to
             * padding_idx + 256, which can read past in_msglen when the
             * record is short — verify the buffer always has that much
             * slack behind it.
             */
            size_t pad_count = 0, fake_pad_count = 0;
            size_t padding_idx = ssl->in_msglen - padlen - 1;

            for( i = 1; i <= padlen; i++ )
                pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            for( ; i <= 256; i++ )
                fake_pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            correct &= ( pad_count == padlen ); /* Only 1 on correct padding */
            correct &= ( pad_count + fake_pad_count < 512 ); /* Always 1 */

#if defined(POLARSSL_SSL_DEBUG_ALL)
            if( padlen > 0 && correct == 0)
                SSL_DEBUG_MSG( 1, ( "bad padding byte detected" ) );
#endif
            padlen &= correct * 0x1FF;
        }
    }

    SSL_DEBUG_BUF( 4, "raw buffer after decryption",
                   ssl->in_msg, ssl->in_msglen );

    /*
     * Always compute the MAC (RFC4346, CBCTIME).
     */
    ssl->in_msglen -= ( ssl->transform_in->maclen + padlen );

    ssl->in_hdr[3] = (unsigned char)( ssl->in_msglen >> 8 );
    ssl->in_hdr[4] = (unsigned char)( ssl->in_msglen );

    memcpy( tmp, ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen );

    if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
    {
        if( ssl->transform_in->maclen == 16 )
            ssl_mac_md5( ssl->transform_in->mac_dec,
                         ssl->in_msg, ssl->in_msglen,
                         ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen == 20 )
            ssl_mac_sha1( ssl->transform_in->mac_dec,
                          ssl->in_msg, ssl->in_msglen,
                          ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen == 32 )
            ssl_mac_sha2( ssl->transform_in->mac_dec,
                          ssl->in_msg, ssl->in_msglen,
                          ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }
    else
    {
        /*
         * Process MAC and always update for padlen afterwards to make
         * total time independent of padlen
         *
         * extra_run compensates MAC check for padlen
         *
         * Known timing attacks:
         *  - Lucky Thirteen (http://www.isg.rhul.ac.uk/tls/TLStiming.pdf)
         *
         * We use ( ( Lx + 8 ) / 64 ) to handle 'negative Lx' values
         * correctly. (We round down instead of up, so -56 is the correct
         * value for our calculations instead of -55)
         */
        int j, extra_run = 0;
        extra_run = ( 13 + ssl->in_msglen + padlen + 8 ) / 64 -
                    ( 13 + ssl->in_msglen          + 8 ) / 64;

        extra_run &= correct * 0xFF;

        if( ssl->transform_in->maclen == 16 )
        {
            md5_context ctx;
            md5_hmac_starts( &ctx, ssl->transform_in->mac_dec, 16 );
            md5_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            md5_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                md5_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen == 20 )
        {
            sha1_context ctx;
            sha1_hmac_starts( &ctx, ssl->transform_in->mac_dec, 20 );
            sha1_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            sha1_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                sha1_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen == 32 )
        {
            sha2_context ctx;
            sha2_hmac_starts( &ctx, ssl->transform_in->mac_dec, 32, 0 );
            sha2_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            sha2_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                sha2_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }

    SSL_DEBUG_BUF( 4, "message  mac", tmp, ssl->transform_in->maclen );
    SSL_DEBUG_BUF( 4, "computed mac", ssl->in_msg + ssl->in_msglen,
                   ssl->transform_in->maclen );

    if( memcmp( tmp, ssl->in_msg + ssl->in_msglen,
                     ssl->transform_in->maclen ) != 0 )
    {
#if defined(POLARSSL_SSL_DEBUG_ALL)
        SSL_DEBUG_MSG( 1, ( "message mac does not match" ) );
#endif
        correct = 0;
    }

    /*
     * Finally check the correct flag
     */
    if( correct == 0 )
        return( POLARSSL_ERR_SSL_INVALID_MAC );

    if( ssl->in_msglen == 0 )
    {
        ssl->nb_zero++;

        /*
         * Three or more empty messages may be a DoS attack
         * (excessive CPU consumption).
         */
        if( ssl->nb_zero > 3 )
        {
            SSL_DEBUG_MSG( 1, ( "received four consecutive empty "
                                "messages, possible DoS attack" ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }
    }
    else
        ssl->nb_zero = 0;

    /* Increment the 64-bit big-endian incoming sequence counter. */
    for( i = 8; i > 0; i-- )
        if( ++ssl->in_ctr[i - 1] != 0 )
            break;

    SSL_DEBUG_MSG( 2, ( "<= decrypt buf" ) );

    return( 0 );
}

#if defined(POLARSSL_ZLIB_SUPPORT)
/*
 * Compression/decompression functions
 */

/* Deflate the outgoing record payload in place (via a temporary copy). */
static int ssl_compress_buf( ssl_context *ssl )
{
    int ret;
    unsigned char *msg_post = ssl->out_msg;
    size_t len_pre = ssl->out_msglen;
    unsigned char *msg_pre;

    SSL_DEBUG_MSG( 2, ( "=> compress buf" ) );

    msg_pre = (unsigned char*) malloc( len_pre );
    if( msg_pre == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len_pre ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memcpy( msg_pre, ssl->out_msg, len_pre );

    SSL_DEBUG_MSG( 3, ( "before compression: msglen = %d, ",
                   ssl->out_msglen ) );

    SSL_DEBUG_BUF( 4, "before compression: output payload",
                   ssl->out_msg, ssl->out_msglen );

    ssl->transform_out->ctx_deflate.next_in = msg_pre;
    ssl->transform_out->ctx_deflate.avail_in = len_pre;
    ssl->transform_out->ctx_deflate.next_out = msg_post;
    ssl->transform_out->ctx_deflate.avail_out = SSL_BUFFER_LEN;

    ret = deflate( &ssl->transform_out->ctx_deflate, Z_SYNC_FLUSH );
    if( ret != Z_OK )
    {
        SSL_DEBUG_MSG( 1, ( "failed to perform compression (%d)", ret ) );
        return( POLARSSL_ERR_SSL_COMPRESSION_FAILED );
    }

    ssl->out_msglen = SSL_BUFFER_LEN - ssl->transform_out->ctx_deflate.avail_out;

    free( msg_pre );

    SSL_DEBUG_MSG( 3, ( "after compression: msglen = %d, ",
                   ssl->out_msglen ) );

    SSL_DEBUG_BUF( 4, "after compression: output payload",
                   ssl->out_msg, ssl->out_msglen );

    SSL_DEBUG_MSG( 2, ( "<= compress buf" ) );

    return( 0 );
}

/* Inflate the incoming record payload in place (via a temporary copy). */
static int ssl_decompress_buf( ssl_context *ssl )
{
    int ret;
    unsigned char *msg_post = ssl->in_msg;
    size_t len_pre = ssl->in_msglen;
    unsigned char *msg_pre;

    SSL_DEBUG_MSG( 2, ( "=> decompress buf" ) );

    msg_pre = (unsigned char*) malloc( len_pre );
    if( msg_pre == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len_pre ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memcpy( msg_pre, ssl->in_msg, len_pre );

    SSL_DEBUG_MSG( 3, ( "before decompression: msglen = %d, ",
                   ssl->in_msglen ) );

    SSL_DEBUG_BUF( 4, "before decompression: input payload",
                   ssl->in_msg, ssl->in_msglen );

    ssl->transform_in->ctx_inflate.next_in = msg_pre;
    ssl->transform_in->ctx_inflate.avail_in = len_pre;
    ssl->transform_in->ctx_inflate.next_out = msg_post;
    ssl->transform_in->ctx_inflate.avail_out = SSL_MAX_CONTENT_LEN;

    ret = inflate( &ssl->transform_in->ctx_inflate, Z_SYNC_FLUSH );
    if( ret != Z_OK )
    {
        SSL_DEBUG_MSG( 1, ( "failed to perform decompression (%d)", ret ) );
        return( POLARSSL_ERR_SSL_COMPRESSION_FAILED );
    }

    ssl->in_msglen = SSL_MAX_CONTENT_LEN - ssl->transform_in->ctx_inflate.avail_out;

    free( msg_pre );

    SSL_DEBUG_MSG( 3, ( "after decompression: msglen = %d, ",
                   ssl->in_msglen ) );

    SSL_DEBUG_BUF( 4, "after decompression: input payload",
                   ssl->in_msg, ssl->in_msglen );

    SSL_DEBUG_MSG( 2, ( "<= decompress buf" ) );

    return( 0 );
}
#endif /* POLARSSL_ZLIB_SUPPORT */

/*
 * Fill the input message buffer
 */
int ssl_fetch_input( ssl_context *ssl, size_t nb_want )
{
    int ret;
    size_t len;

    SSL_DEBUG_MSG( 2, ( "=> fetch input" ) );

    while( ssl->in_left < nb_want )
    {
        len = nb_want - ssl->in_left;
        ret = ssl->f_recv( ssl->p_recv, ssl->in_hdr + ssl->in_left, len );

        SSL_DEBUG_MSG( 2, ( "in_left: %d, nb_want: %d",
                       ssl->in_left, nb_want ) );
        SSL_DEBUG_RET( 2, "ssl->f_recv", ret );

        /* 0 from the transport means orderly EOF, not "try again". */
        if( ret == 0 )
            return( POLARSSL_ERR_SSL_CONN_EOF );

        if( ret < 0 )
            return( ret );

        ssl->in_left += ret;
    }

    SSL_DEBUG_MSG( 2, ( "<= fetch input" ) );

    return( 0 );
}

/*
 * Flush any data not yet written
 */
int ssl_flush_output( ssl_context *ssl )
{
    int ret;
    unsigned char *buf;

    SSL_DEBUG_MSG( 2, ( "=> flush output" ) );

    while( ssl->out_left > 0 )
    {
        SSL_DEBUG_MSG( 2, ( "message length: %d, out_left: %d",
                       5 + ssl->out_msglen, ssl->out_left ) );

        if( ssl->out_msglen < ssl->out_left )
        {
            size_t header_left = ssl->out_left -
ssl->out_msglen; buf = ssl->out_hdr + 5 - header_left; ret = ssl->f_send( ssl->p_send, buf, header_left ); SSL_DEBUG_RET( 2, "ssl->f_send (header)", ret ); if( ret <= 0 ) return( ret ); ssl->out_left -= ret; } buf = ssl->out_msg + ssl->out_msglen - ssl->out_left; ret = ssl->f_send( ssl->p_send, buf, ssl->out_left ); SSL_DEBUG_RET( 2, "ssl->f_send", ret ); if( ret <= 0 ) return( ret ); ssl->out_left -= ret; } SSL_DEBUG_MSG( 2, ( "<= flush output" ) ); return( 0 ); } /* * Record layer functions */ int ssl_write_record( ssl_context *ssl ) { int ret, done = 0; size_t len = ssl->out_msglen; SSL_DEBUG_MSG( 2, ( "=> write record" ) ); if( ssl->out_msgtype == SSL_MSG_HANDSHAKE ) { ssl->out_msg[1] = (unsigned char)( ( len - 4 ) >> 16 ); ssl->out_msg[2] = (unsigned char)( ( len - 4 ) >> 8 ); ssl->out_msg[3] = (unsigned char)( ( len - 4 ) ); ssl->handshake->update_checksum( ssl, ssl->out_msg, len ); } #if defined(POLARSSL_ZLIB_SUPPORT) if( ssl->transform_out != NULL && ssl->session_out->compression == SSL_COMPRESS_DEFLATE ) { if( ( ret = ssl_compress_buf( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_compress_buf", ret ); return( ret ); } len = ssl->out_msglen; } #endif /*POLARSSL_ZLIB_SUPPORT */ #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_write != NULL) { SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_write()" ) ); ret = ssl_hw_record_write( ssl ); if( ret != 0 && ret != POLARSSL_ERR_SSL_HW_ACCEL_FALLTHROUGH ) { SSL_DEBUG_RET( 1, "ssl_hw_record_write", ret ); return POLARSSL_ERR_SSL_HW_ACCEL_FAILED; } done = 1; } #endif if( !done ) { ssl->out_hdr[0] = (unsigned char) ssl->out_msgtype; ssl->out_hdr[1] = (unsigned char) ssl->major_ver; ssl->out_hdr[2] = (unsigned char) ssl->minor_ver; ssl->out_hdr[3] = (unsigned char)( len >> 8 ); ssl->out_hdr[4] = (unsigned char)( len ); if( ssl->transform_out != NULL ) { if( ( ret = ssl_encrypt_buf( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_encrypt_buf", ret ); return( ret ); } len = ssl->out_msglen; ssl->out_hdr[3] = (unsigned char)( 
len >> 8 ); ssl->out_hdr[4] = (unsigned char)( len ); } ssl->out_left = 5 + ssl->out_msglen; SSL_DEBUG_MSG( 3, ( "output record: msgtype = %d, " "version = [%d:%d], msglen = %d", ssl->out_hdr[0], ssl->out_hdr[1], ssl->out_hdr[2], ( ssl->out_hdr[3] << 8 ) | ssl->out_hdr[4] ) ); SSL_DEBUG_BUF( 4, "output record header sent to network", ssl->out_hdr, 5 ); SSL_DEBUG_BUF( 4, "output record sent to network", ssl->out_hdr + 32, ssl->out_msglen ); } if( ( ret = ssl_flush_output( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_flush_output", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= write record" ) ); return( 0 ); } int ssl_read_record( ssl_context *ssl ) { int ret, done = 0; SSL_DEBUG_MSG( 2, ( "=> read record" ) ); if( ssl->in_hslen != 0 && ssl->in_hslen < ssl->in_msglen ) { /* * Get next Handshake message in the current record */ ssl->in_msglen -= ssl->in_hslen; memmove( ssl->in_msg, ssl->in_msg + ssl->in_hslen, ssl->in_msglen ); ssl->in_hslen = 4; ssl->in_hslen += ( ssl->in_msg[2] << 8 ) | ssl->in_msg[3]; SSL_DEBUG_MSG( 3, ( "handshake message: msglen =" " %d, type = %d, hslen = %d", ssl->in_msglen, ssl->in_msg[0], ssl->in_hslen ) ); if( ssl->in_msglen < 4 || ssl->in_msg[1] != 0 ) { SSL_DEBUG_MSG( 1, ( "bad handshake length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->in_msglen < ssl->in_hslen ) { SSL_DEBUG_MSG( 1, ( "bad handshake length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } ssl->handshake->update_checksum( ssl, ssl->in_msg, ssl->in_hslen ); return( 0 ); } ssl->in_hslen = 0; /* * Read the record header and validate it */ if( ( ret = ssl_fetch_input( ssl, 5 ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_fetch_input", ret ); return( ret ); } ssl->in_msgtype = ssl->in_hdr[0]; ssl->in_msglen = ( ssl->in_hdr[3] << 8 ) | ssl->in_hdr[4]; SSL_DEBUG_MSG( 3, ( "input record: msgtype = %d, " "version = [%d:%d], msglen = %d", ssl->in_hdr[0], ssl->in_hdr[1], ssl->in_hdr[2], ( ssl->in_hdr[3] << 8 ) | ssl->in_hdr[4] ) ); if( ssl->in_hdr[1] != ssl->major_ver ) { 
SSL_DEBUG_MSG( 1, ( "major version mismatch" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->in_hdr[2] > ssl->max_minor_ver ) { SSL_DEBUG_MSG( 1, ( "minor version mismatch" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } /* * Make sure the message length is acceptable */ if( ssl->transform_in == NULL ) { if( ssl->in_msglen < 1 || ssl->in_msglen > SSL_MAX_CONTENT_LEN ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } } else { if( ssl->in_msglen < ssl->transform_in->minlen ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->minor_ver == SSL_MINOR_VERSION_0 && ssl->in_msglen > ssl->transform_in->minlen + SSL_MAX_CONTENT_LEN ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } /* * TLS encrypted messages can have up to 256 bytes of padding */ if( ssl->minor_ver >= SSL_MINOR_VERSION_1 && ssl->in_msglen > ssl->transform_in->minlen + SSL_MAX_CONTENT_LEN + 256 ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } } /* * Read and optionally decrypt the message contents */ if( ( ret = ssl_fetch_input( ssl, 5 + ssl->in_msglen ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_fetch_input", ret ); return( ret ); } SSL_DEBUG_BUF( 4, "input record from network", ssl->in_hdr, 5 + ssl->in_msglen ); #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_read != NULL) { SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_read()" ) ); ret = ssl_hw_record_read( ssl ); if( ret != 0 && ret != POLARSSL_ERR_SSL_HW_ACCEL_FALLTHROUGH ) { SSL_DEBUG_RET( 1, "ssl_hw_record_read", ret ); return POLARSSL_ERR_SSL_HW_ACCEL_FAILED; } done = 1; } #endif if( !done && ssl->transform_in != NULL ) { if( ( ret = ssl_decrypt_buf( ssl ) ) != 0 ) { #if defined(POLARSSL_SSL_ALERT_MESSAGES) if( ret == POLARSSL_ERR_SSL_INVALID_MAC ) { ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_FATAL, SSL_ALERT_MSG_BAD_RECORD_MAC ); } #endif 
SSL_DEBUG_RET( 1, "ssl_decrypt_buf", ret ); return( ret ); } SSL_DEBUG_BUF( 4, "input payload after decrypt", ssl->in_msg, ssl->in_msglen ); if( ssl->in_msglen > SSL_MAX_CONTENT_LEN ) { SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } } #if defined(POLARSSL_ZLIB_SUPPORT) if( ssl->transform_in != NULL && ssl->session_in->compression == SSL_COMPRESS_DEFLATE ) { if( ( ret = ssl_decompress_buf( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_decompress_buf", ret ); return( ret ); } ssl->in_hdr[3] = (unsigned char)( ssl->in_msglen >> 8 ); ssl->in_hdr[4] = (unsigned char)( ssl->in_msglen ); } #endif /* POLARSSL_ZLIB_SUPPORT */ if( ssl->in_msgtype != SSL_MSG_HANDSHAKE && ssl->in_msgtype != SSL_MSG_ALERT && ssl->in_msgtype != SSL_MSG_CHANGE_CIPHER_SPEC && ssl->in_msgtype != SSL_MSG_APPLICATION_DATA ) { SSL_DEBUG_MSG( 1, ( "unknown record type" ) ); if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_FATAL, SSL_ALERT_MSG_UNEXPECTED_MESSAGE ) ) != 0 ) { return( ret ); } return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->in_msgtype == SSL_MSG_HANDSHAKE ) { ssl->in_hslen = 4; ssl->in_hslen += ( ssl->in_msg[2] << 8 ) | ssl->in_msg[3]; SSL_DEBUG_MSG( 3, ( "handshake message: msglen =" " %d, type = %d, hslen = %d", ssl->in_msglen, ssl->in_msg[0], ssl->in_hslen ) ); /* * Additional checks to validate the handshake header */ if( ssl->in_msglen < 4 || ssl->in_msg[1] != 0 ) { SSL_DEBUG_MSG( 1, ( "bad handshake length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->in_msglen < ssl->in_hslen ) { SSL_DEBUG_MSG( 1, ( "bad handshake length" ) ); return( POLARSSL_ERR_SSL_INVALID_RECORD ); } if( ssl->state != SSL_HANDSHAKE_OVER ) ssl->handshake->update_checksum( ssl, ssl->in_msg, ssl->in_hslen ); } if( ssl->in_msgtype == SSL_MSG_ALERT ) { SSL_DEBUG_MSG( 2, ( "got an alert message, type: [%d:%d]", ssl->in_msg[0], ssl->in_msg[1] ) ); /* * Ignore non-fatal alerts, except close_notify */ if( ssl->in_msg[0] == SSL_ALERT_LEVEL_FATAL ) { 
SSL_DEBUG_MSG( 1, ( "is a fatal alert message (msg %d)", ssl->in_msg[1] ) ); /** * Subtract from error code as ssl->in_msg[1] is 7-bit positive * error identifier. */ return( POLARSSL_ERR_SSL_FATAL_ALERT_MESSAGE ); } if( ssl->in_msg[0] == SSL_ALERT_LEVEL_WARNING && ssl->in_msg[1] == SSL_ALERT_MSG_CLOSE_NOTIFY ) { SSL_DEBUG_MSG( 2, ( "is a close notify message" ) ); return( POLARSSL_ERR_SSL_PEER_CLOSE_NOTIFY ); } } ssl->in_left = 0; SSL_DEBUG_MSG( 2, ( "<= read record" ) ); return( 0 ); } int ssl_send_fatal_handshake_failure( ssl_context *ssl ) { int ret; if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_FATAL, SSL_ALERT_MSG_HANDSHAKE_FAILURE ) ) != 0 ) { return( ret ); } return( 0 ); } int ssl_send_alert_message( ssl_context *ssl, unsigned char level, unsigned char message ) { int ret; SSL_DEBUG_MSG( 2, ( "=> send alert message" ) ); ssl->out_msgtype = SSL_MSG_ALERT; ssl->out_msglen = 2; ssl->out_msg[0] = level; ssl->out_msg[1] = message; if( ( ret = ssl_write_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_write_record", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= send alert message" ) ); return( 0 ); } /* * Handshake functions */ int ssl_write_certificate( ssl_context *ssl ) { int ret; size_t i, n; const x509_cert *crt; SSL_DEBUG_MSG( 2, ( "=> write certificate" ) ); if( ssl->endpoint == SSL_IS_CLIENT ) { if( ssl->client_auth == 0 ) { SSL_DEBUG_MSG( 2, ( "<= skip write certificate" ) ); ssl->state++; return( 0 ); } /* * If using SSLv3 and got no cert, send an Alert message * (otherwise an empty Certificate message will be sent). 
*/ if( ssl->own_cert == NULL && ssl->minor_ver == SSL_MINOR_VERSION_0 ) { ssl->out_msglen = 2; ssl->out_msgtype = SSL_MSG_ALERT; ssl->out_msg[0] = SSL_ALERT_LEVEL_WARNING; ssl->out_msg[1] = SSL_ALERT_MSG_NO_CERT; SSL_DEBUG_MSG( 2, ( "got no certificate to send" ) ); goto write_msg; } } else /* SSL_IS_SERVER */ { if( ssl->own_cert == NULL ) { SSL_DEBUG_MSG( 1, ( "got no certificate to send" ) ); return( POLARSSL_ERR_SSL_CERTIFICATE_REQUIRED ); } } SSL_DEBUG_CRT( 3, "own certificate", ssl->own_cert ); /* * 0 . 0 handshake type * 1 . 3 handshake length * 4 . 6 length of all certs * 7 . 9 length of cert. 1 * 10 . n-1 peer certificate * n . n+2 length of cert. 2 * n+3 . ... upper level cert, etc. */ i = 7; crt = ssl->own_cert; while( crt != NULL ) { n = crt->raw.len; if( i + 3 + n > SSL_MAX_CONTENT_LEN ) { SSL_DEBUG_MSG( 1, ( "certificate too large, %d > %d", i + 3 + n, SSL_MAX_CONTENT_LEN ) ); return( POLARSSL_ERR_SSL_CERTIFICATE_TOO_LARGE ); } ssl->out_msg[i ] = (unsigned char)( n >> 16 ); ssl->out_msg[i + 1] = (unsigned char)( n >> 8 ); ssl->out_msg[i + 2] = (unsigned char)( n ); i += 3; memcpy( ssl->out_msg + i, crt->raw.p, n ); i += n; crt = crt->next; } ssl->out_msg[4] = (unsigned char)( ( i - 7 ) >> 16 ); ssl->out_msg[5] = (unsigned char)( ( i - 7 ) >> 8 ); ssl->out_msg[6] = (unsigned char)( ( i - 7 ) ); ssl->out_msglen = i; ssl->out_msgtype = SSL_MSG_HANDSHAKE; ssl->out_msg[0] = SSL_HS_CERTIFICATE; write_msg: ssl->state++; if( ( ret = ssl_write_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_write_record", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= write certificate" ) ); return( 0 ); } int ssl_parse_certificate( ssl_context *ssl ) { int ret; size_t i, n; SSL_DEBUG_MSG( 2, ( "=> parse certificate" ) ); if( ssl->endpoint == SSL_IS_SERVER && ssl->authmode == SSL_VERIFY_NONE ) { ssl->verify_result = BADCERT_SKIP_VERIFY; SSL_DEBUG_MSG( 2, ( "<= skip parse certificate" ) ); ssl->state++; return( 0 ); } if( ( ret = ssl_read_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 
1, "ssl_read_record", ret ); return( ret ); } ssl->state++; /* * Check if the client sent an empty certificate */ if( ssl->endpoint == SSL_IS_SERVER && ssl->minor_ver == SSL_MINOR_VERSION_0 ) { if( ssl->in_msglen == 2 && ssl->in_msgtype == SSL_MSG_ALERT && ssl->in_msg[0] == SSL_ALERT_LEVEL_WARNING && ssl->in_msg[1] == SSL_ALERT_MSG_NO_CERT ) { SSL_DEBUG_MSG( 1, ( "SSLv3 client has no certificate" ) ); ssl->verify_result = BADCERT_MISSING; if( ssl->authmode == SSL_VERIFY_OPTIONAL ) return( 0 ); else return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE ); } } if( ssl->endpoint == SSL_IS_SERVER && ssl->minor_ver != SSL_MINOR_VERSION_0 ) { if( ssl->in_hslen == 7 && ssl->in_msgtype == SSL_MSG_HANDSHAKE && ssl->in_msg[0] == SSL_HS_CERTIFICATE && memcmp( ssl->in_msg + 4, "\0\0\0", 3 ) == 0 ) { SSL_DEBUG_MSG( 1, ( "TLSv1 client has no certificate" ) ); ssl->verify_result = BADCERT_MISSING; if( ssl->authmode == SSL_VERIFY_REQUIRED ) return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE ); else return( 0 ); } } if( ssl->in_msgtype != SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } if( ssl->in_msg[0] != SSL_HS_CERTIFICATE || ssl->in_hslen < 10 ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } /* * Same message structure as in ssl_write_certificate() */ n = ( ssl->in_msg[5] << 8 ) | ssl->in_msg[6]; if( ssl->in_msg[4] != 0 || ssl->in_hslen != 7 + n ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } if( ( ssl->session_negotiate->peer_cert = (x509_cert *) malloc( sizeof( x509_cert ) ) ) == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", sizeof( x509_cert ) ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memset( ssl->session_negotiate->peer_cert, 0, sizeof( x509_cert ) ); i = 7; while( i < ssl->in_hslen ) { if( ssl->in_msg[i] != 0 ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( 
POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } n = ( (unsigned int) ssl->in_msg[i + 1] << 8 ) | (unsigned int) ssl->in_msg[i + 2]; i += 3; if( n < 128 || i + n > ssl->in_hslen ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } ret = x509parse_crt_der( ssl->session_negotiate->peer_cert, ssl->in_msg + i, n ); if( ret != 0 ) { SSL_DEBUG_RET( 1, " x509parse_crt", ret ); return( ret ); } i += n; } SSL_DEBUG_CRT( 3, "peer certificate", ssl->session_negotiate->peer_cert ); if( ssl->authmode != SSL_VERIFY_NONE ) { if( ssl->ca_chain == NULL ) { SSL_DEBUG_MSG( 1, ( "got no CA chain" ) ); return( POLARSSL_ERR_SSL_CA_CHAIN_REQUIRED ); } ret = x509parse_verify( ssl->session_negotiate->peer_cert, ssl->ca_chain, ssl->ca_crl, ssl->peer_cn, &ssl->verify_result, ssl->f_vrfy, ssl->p_vrfy ); if( ret != 0 ) SSL_DEBUG_RET( 1, "x509_verify_cert", ret ); if( ssl->authmode != SSL_VERIFY_REQUIRED ) ret = 0; } SSL_DEBUG_MSG( 2, ( "<= parse certificate" ) ); return( ret ); } int ssl_write_change_cipher_spec( ssl_context *ssl ) { int ret; SSL_DEBUG_MSG( 2, ( "=> write change cipher spec" ) ); ssl->out_msgtype = SSL_MSG_CHANGE_CIPHER_SPEC; ssl->out_msglen = 1; ssl->out_msg[0] = 1; ssl->state++; if( ( ret = ssl_write_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_write_record", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= write change cipher spec" ) ); return( 0 ); } int ssl_parse_change_cipher_spec( ssl_context *ssl ) { int ret; SSL_DEBUG_MSG( 2, ( "=> parse change cipher spec" ) ); if( ( ret = ssl_read_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } if( ssl->in_msgtype != SSL_MSG_CHANGE_CIPHER_SPEC ) { SSL_DEBUG_MSG( 1, ( "bad change cipher spec message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } if( ssl->in_msglen != 1 || ssl->in_msg[0] != 1 ) { SSL_DEBUG_MSG( 1, ( "bad change cipher spec message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CHANGE_CIPHER_SPEC ); } ssl->state++; SSL_DEBUG_MSG( 2, ( "<= 
parse change cipher spec" ) ); return( 0 ); } void ssl_optimize_checksum( ssl_context *ssl, int ciphersuite ) { #if !defined(POLARSSL_SHA4_C) ((void) ciphersuite); #endif if( ssl->minor_ver < SSL_MINOR_VERSION_3 ) ssl->handshake->update_checksum = ssl_update_checksum_md5sha1; #if defined(POLARSSL_SHA4_C) else if ( ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 || ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ) { ssl->handshake->update_checksum = ssl_update_checksum_sha384; } #endif else ssl->handshake->update_checksum = ssl_update_checksum_sha256; } static void ssl_update_checksum_start( ssl_context *ssl, unsigned char *buf, size_t len ) { md5_update( &ssl->handshake->fin_md5 , buf, len ); sha1_update( &ssl->handshake->fin_sha1, buf, len ); sha2_update( &ssl->handshake->fin_sha2, buf, len ); #if defined(POLARSSL_SHA4_C) sha4_update( &ssl->handshake->fin_sha4, buf, len ); #endif } static void ssl_update_checksum_md5sha1( ssl_context *ssl, unsigned char *buf, size_t len ) { md5_update( &ssl->handshake->fin_md5 , buf, len ); sha1_update( &ssl->handshake->fin_sha1, buf, len ); } static void ssl_update_checksum_sha256( ssl_context *ssl, unsigned char *buf, size_t len ) { sha2_update( &ssl->handshake->fin_sha2, buf, len ); } #if defined(POLARSSL_SHA4_C) static void ssl_update_checksum_sha384( ssl_context *ssl, unsigned char *buf, size_t len ) { sha4_update( &ssl->handshake->fin_sha4, buf, len ); } #endif static void ssl_calc_finished_ssl( ssl_context *ssl, unsigned char *buf, int from ) { const char *sender; md5_context md5; sha1_context sha1; unsigned char padbuf[48]; unsigned char md5sum[16]; unsigned char sha1sum[20]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished ssl" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); /* * SSLv3: * hash = * MD5( master + pad2 + * MD5( handshake + sender + master + pad1 
) ) * + SHA1( master + pad2 + * SHA1( handshake + sender + master + pad1 ) ) */ SSL_DEBUG_BUF( 4, "finished md5 state", (unsigned char *) md5.state, sizeof( md5.state ) ); SSL_DEBUG_BUF( 4, "finished sha1 state", (unsigned char *) sha1.state, sizeof( sha1.state ) ); sender = ( from == SSL_IS_CLIENT ) ? "CLNT" : "SRVR"; memset( padbuf, 0x36, 48 ); md5_update( &md5, (const unsigned char *) sender, 4 ); md5_update( &md5, session->master, 48 ); md5_update( &md5, padbuf, 48 ); md5_finish( &md5, md5sum ); sha1_update( &sha1, (const unsigned char *) sender, 4 ); sha1_update( &sha1, session->master, 48 ); sha1_update( &sha1, padbuf, 40 ); sha1_finish( &sha1, sha1sum ); memset( padbuf, 0x5C, 48 ); md5_starts( &md5 ); md5_update( &md5, session->master, 48 ); md5_update( &md5, padbuf, 48 ); md5_update( &md5, md5sum, 16 ); md5_finish( &md5, buf ); sha1_starts( &sha1 ); sha1_update( &sha1, session->master, 48 ); sha1_update( &sha1, padbuf , 40 ); sha1_update( &sha1, sha1sum, 20 ); sha1_finish( &sha1, buf + 16 ); SSL_DEBUG_BUF( 3, "calc finished result", buf, 36 ); memset( &md5, 0, sizeof( md5_context ) ); memset( &sha1, 0, sizeof( sha1_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); memset( md5sum, 0, sizeof( md5sum ) ); memset( sha1sum, 0, sizeof( sha1sum ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } static void ssl_calc_finished_tls( ssl_context *ssl, unsigned char *buf, int from ) { int len = 12; const char *sender; md5_context md5; sha1_context sha1; unsigned char padbuf[36]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished tls" ) ); memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) ); memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) ); /* * TLSv1: * hash = PRF( master, finished_label, * MD5( handshake ) + SHA1( handshake ) )[0..11] */ SSL_DEBUG_BUF( 4, "finished md5 state", (unsigned char *) md5.state, sizeof( md5.state ) ); SSL_DEBUG_BUF( 4, "finished sha1 state", 
(unsigned char *) sha1.state, sizeof( sha1.state ) ); sender = ( from == SSL_IS_CLIENT ) ? "client finished" : "server finished"; md5_finish( &md5, padbuf ); sha1_finish( &sha1, padbuf + 16 ); ssl->handshake->tls_prf( session->master, 48, (char *) sender, padbuf, 36, buf, len ); SSL_DEBUG_BUF( 3, "calc finished result", buf, len ); memset( &md5, 0, sizeof( md5_context ) ); memset( &sha1, 0, sizeof( sha1_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } static void ssl_calc_finished_tls_sha256( ssl_context *ssl, unsigned char *buf, int from ) { int len = 12; const char *sender; sha2_context sha2; unsigned char padbuf[32]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha256" ) ); memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) ); /* * TLSv1.2: * hash = PRF( master, finished_label, * Hash( handshake ) )[0.11] */ SSL_DEBUG_BUF( 4, "finished sha2 state", (unsigned char *) sha2.state, sizeof( sha2.state ) ); sender = ( from == SSL_IS_CLIENT ) ? 
"client finished" : "server finished"; sha2_finish( &sha2, padbuf ); ssl->handshake->tls_prf( session->master, 48, (char *) sender, padbuf, 32, buf, len ); SSL_DEBUG_BUF( 3, "calc finished result", buf, len ); memset( &sha2, 0, sizeof( sha2_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } #if defined(POLARSSL_SHA4_C) static void ssl_calc_finished_tls_sha384( ssl_context *ssl, unsigned char *buf, int from ) { int len = 12; const char *sender; sha4_context sha4; unsigned char padbuf[48]; ssl_session *session = ssl->session_negotiate; if( !session ) session = ssl->session; SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha384" ) ); memcpy( &sha4, &ssl->handshake->fin_sha4, sizeof(sha4_context) ); /* * TLSv1.2: * hash = PRF( master, finished_label, * Hash( handshake ) )[0.11] */ SSL_DEBUG_BUF( 4, "finished sha4 state", (unsigned char *) sha4.state, sizeof( sha4.state ) ); sender = ( from == SSL_IS_CLIENT ) ? "client finished" : "server finished"; sha4_finish( &sha4, padbuf ); ssl->handshake->tls_prf( session->master, 48, (char *) sender, padbuf, 48, buf, len ); SSL_DEBUG_BUF( 3, "calc finished result", buf, len ); memset( &sha4, 0, sizeof( sha4_context ) ); memset( padbuf, 0, sizeof( padbuf ) ); SSL_DEBUG_MSG( 2, ( "<= calc finished" ) ); } #endif void ssl_handshake_wrapup( ssl_context *ssl ) { SSL_DEBUG_MSG( 3, ( "=> handshake wrapup" ) ); /* * Free our handshake params */ ssl_handshake_free( ssl->handshake ); free( ssl->handshake ); ssl->handshake = NULL; /* * Switch in our now active transform context */ if( ssl->transform ) { ssl_transform_free( ssl->transform ); free( ssl->transform ); } ssl->transform = ssl->transform_negotiate; ssl->transform_negotiate = NULL; if( ssl->session ) { ssl_session_free( ssl->session ); free( ssl->session ); } ssl->session = ssl->session_negotiate; ssl->session_negotiate = NULL; /* * Add cache entry */ if( ssl->f_set_cache != NULL ) if( ssl->f_set_cache( ssl->p_set_cache, ssl->session ) != 0 
) SSL_DEBUG_MSG( 1, ( "cache did not store session" ) ); ssl->state++; SSL_DEBUG_MSG( 3, ( "<= handshake wrapup" ) ); } int ssl_write_finished( ssl_context *ssl ) { int ret, hash_len; SSL_DEBUG_MSG( 2, ( "=> write finished" ) ); ssl->handshake->calc_finished( ssl, ssl->out_msg + 4, ssl->endpoint ); // TODO TLS/1.2 Hash length is determined by cipher suite (Page 63) hash_len = ( ssl->minor_ver == SSL_MINOR_VERSION_0 ) ? 36 : 12; ssl->verify_data_len = hash_len; memcpy( ssl->own_verify_data, ssl->out_msg + 4, hash_len ); ssl->out_msglen = 4 + hash_len; ssl->out_msgtype = SSL_MSG_HANDSHAKE; ssl->out_msg[0] = SSL_HS_FINISHED; /* * In case of session resuming, invert the client and server * ChangeCipherSpec messages order. */ if( ssl->handshake->resume != 0 ) { if( ssl->endpoint == SSL_IS_CLIENT ) ssl->state = SSL_HANDSHAKE_WRAPUP; else ssl->state = SSL_CLIENT_CHANGE_CIPHER_SPEC; } else ssl->state++; /* * Switch to our negotiated transform and session parameters for outbound data. */ SSL_DEBUG_MSG( 3, ( "switching to new transform spec for outbound data" ) ); ssl->transform_out = ssl->transform_negotiate; ssl->session_out = ssl->session_negotiate; memset( ssl->out_ctr, 0, 8 ); if( ( ret = ssl_write_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_write_record", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= write finished" ) ); return( 0 ); } int ssl_parse_finished( ssl_context *ssl ) { int ret; unsigned int hash_len; unsigned char buf[36]; SSL_DEBUG_MSG( 2, ( "=> parse finished" ) ); ssl->handshake->calc_finished( ssl, buf, ssl->endpoint ^ 1 ); /* * Switch to our negotiated transform and session parameters for inbound data. 
*/ SSL_DEBUG_MSG( 3, ( "switching to new transform spec for inbound data" ) ); ssl->transform_in = ssl->transform_negotiate; ssl->session_in = ssl->session_negotiate; memset( ssl->in_ctr, 0, 8 ); if( ( ret = ssl_read_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } if( ssl->in_msgtype != SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "bad finished message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } // TODO TLS/1.2 Hash length is determined by cipher suite (Page 63) hash_len = ( ssl->minor_ver == SSL_MINOR_VERSION_0 ) ? 36 : 12; if( ssl->in_msg[0] != SSL_HS_FINISHED || ssl->in_hslen != 4 + hash_len ) { SSL_DEBUG_MSG( 1, ( "bad finished message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_FINISHED ); } if( memcmp( ssl->in_msg + 4, buf, hash_len ) != 0 ) { SSL_DEBUG_MSG( 1, ( "bad finished message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_FINISHED ); } ssl->verify_data_len = hash_len; memcpy( ssl->peer_verify_data, buf, hash_len ); if( ssl->handshake->resume != 0 ) { if( ssl->endpoint == SSL_IS_CLIENT ) ssl->state = SSL_CLIENT_CHANGE_CIPHER_SPEC; if( ssl->endpoint == SSL_IS_SERVER ) ssl->state = SSL_HANDSHAKE_WRAPUP; } else ssl->state++; SSL_DEBUG_MSG( 2, ( "<= parse finished" ) ); return( 0 ); } int ssl_handshake_init( ssl_context *ssl ) { if( ssl->transform_negotiate ) ssl_transform_free( ssl->transform_negotiate ); else ssl->transform_negotiate = malloc( sizeof(ssl_transform) ); if( ssl->session_negotiate ) ssl_session_free( ssl->session_negotiate ); else ssl->session_negotiate = malloc( sizeof(ssl_session) ); if( ssl->handshake ) ssl_handshake_free( ssl->handshake ); else ssl->handshake = malloc( sizeof(ssl_handshake_params) ); if( ssl->handshake == NULL || ssl->transform_negotiate == NULL || ssl->session_negotiate == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc() of ssl sub-contexts failed" ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memset( ssl->handshake, 0, sizeof(ssl_handshake_params) ); memset( ssl->transform_negotiate, 0, 
sizeof(ssl_transform) ); memset( ssl->session_negotiate, 0, sizeof(ssl_session) ); md5_starts( &ssl->handshake->fin_md5 ); sha1_starts( &ssl->handshake->fin_sha1 ); sha2_starts( &ssl->handshake->fin_sha2, 0 ); #if defined(POLARSSL_SHA4_C) sha4_starts( &ssl->handshake->fin_sha4, 1 ); #endif ssl->handshake->update_checksum = ssl_update_checksum_start; ssl->handshake->sig_alg = SSL_HASH_SHA1; return( 0 ); } /* * Initialize an SSL context */ int ssl_init( ssl_context *ssl ) { int ret; int len = SSL_BUFFER_LEN; memset( ssl, 0, sizeof( ssl_context ) ); /* * Sane defaults */ ssl->rsa_decrypt = ssl_rsa_decrypt; ssl->rsa_sign = ssl_rsa_sign; ssl->rsa_key_len = ssl_rsa_key_len; ssl->min_major_ver = SSL_MAJOR_VERSION_3; ssl->min_minor_ver = SSL_MINOR_VERSION_0; ssl->ciphersuites = malloc( sizeof(int *) * 4 ); ssl_set_ciphersuites( ssl, ssl_default_ciphersuites ); #if defined(POLARSSL_DHM_C) if( ( ret = mpi_read_string( &ssl->dhm_P, 16, POLARSSL_DHM_RFC5114_MODP_1024_P) ) != 0 || ( ret = mpi_read_string( &ssl->dhm_G, 16, POLARSSL_DHM_RFC5114_MODP_1024_G) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_read_string", ret ); return( ret ); } #endif /* * Prepare base structures */ ssl->in_ctr = (unsigned char *) malloc( len ); ssl->in_hdr = ssl->in_ctr + 8; ssl->in_msg = ssl->in_ctr + 13; if( ssl->in_ctr == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } ssl->out_ctr = (unsigned char *) malloc( len ); ssl->out_hdr = ssl->out_ctr + 8; ssl->out_msg = ssl->out_ctr + 40; if( ssl->out_ctr == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) ); free( ssl-> in_ctr ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memset( ssl-> in_ctr, 0, SSL_BUFFER_LEN ); memset( ssl->out_ctr, 0, SSL_BUFFER_LEN ); ssl->hostname = NULL; ssl->hostname_len = 0; if( ( ret = ssl_handshake_init( ssl ) ) != 0 ) return( ret ); return( 0 ); } /* * Reset an initialized and used SSL context for re-use while retaining * all application-set variables, function 
pointers and data. */ int ssl_session_reset( ssl_context *ssl ) { int ret; ssl->state = SSL_HELLO_REQUEST; ssl->renegotiation = SSL_INITIAL_HANDSHAKE; ssl->secure_renegotiation = SSL_LEGACY_RENEGOTIATION; ssl->verify_data_len = 0; memset( ssl->own_verify_data, 0, 36 ); memset( ssl->peer_verify_data, 0, 36 ); ssl->in_offt = NULL; ssl->in_msgtype = 0; ssl->in_msglen = 0; ssl->in_left = 0; ssl->in_hslen = 0; ssl->nb_zero = 0; ssl->out_msgtype = 0; ssl->out_msglen = 0; ssl->out_left = 0; ssl->transform_in = NULL; ssl->transform_out = NULL; memset( ssl->out_ctr, 0, SSL_BUFFER_LEN ); memset( ssl->in_ctr, 0, SSL_BUFFER_LEN ); #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_reset != NULL) { SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_reset()" ) ); if( ssl_hw_record_reset( ssl ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_hw_record_reset", ret ); return( POLARSSL_ERR_SSL_HW_ACCEL_FAILED ); } } #endif if( ssl->transform ) { ssl_transform_free( ssl->transform ); free( ssl->transform ); ssl->transform = NULL; } if( ssl->session ) { ssl_session_free( ssl->session ); free( ssl->session ); ssl->session = NULL; } if( ( ret = ssl_handshake_init( ssl ) ) != 0 ) return( ret ); return( 0 ); } /* * SSL set accessors */ void ssl_set_endpoint( ssl_context *ssl, int endpoint ) { ssl->endpoint = endpoint; } void ssl_set_authmode( ssl_context *ssl, int authmode ) { ssl->authmode = authmode; } void ssl_set_verify( ssl_context *ssl, int (*f_vrfy)(void *, x509_cert *, int, int *), void *p_vrfy ) { ssl->f_vrfy = f_vrfy; ssl->p_vrfy = p_vrfy; } void ssl_set_rng( ssl_context *ssl, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { ssl->f_rng = f_rng; ssl->p_rng = p_rng; } void ssl_set_dbg( ssl_context *ssl, void (*f_dbg)(void *, int, const char *), void *p_dbg ) { ssl->f_dbg = f_dbg; ssl->p_dbg = p_dbg; } void ssl_set_bio( ssl_context *ssl, int (*f_recv)(void *, unsigned char *, size_t), void *p_recv, int (*f_send)(void *, const unsigned char *, size_t), void *p_send ) { ssl->f_recv 
= f_recv; ssl->f_send = f_send; ssl->p_recv = p_recv; ssl->p_send = p_send; } void ssl_set_session_cache( ssl_context *ssl, int (*f_get_cache)(void *, ssl_session *), void *p_get_cache, int (*f_set_cache)(void *, const ssl_session *), void *p_set_cache ) { ssl->f_get_cache = f_get_cache; ssl->p_get_cache = p_get_cache; ssl->f_set_cache = f_set_cache; ssl->p_set_cache = p_set_cache; } void ssl_set_session( ssl_context *ssl, const ssl_session *session ) { memcpy( ssl->session_negotiate, session, sizeof(ssl_session) ); ssl->handshake->resume = 1; } void ssl_set_ciphersuites( ssl_context *ssl, const int *ciphersuites ) { ssl->ciphersuites[SSL_MINOR_VERSION_0] = ciphersuites; ssl->ciphersuites[SSL_MINOR_VERSION_1] = ciphersuites; ssl->ciphersuites[SSL_MINOR_VERSION_2] = ciphersuites; ssl->ciphersuites[SSL_MINOR_VERSION_3] = ciphersuites; } void ssl_set_ciphersuites_for_version( ssl_context *ssl, const int *ciphersuites, int major, int minor ) { if( major != SSL_MAJOR_VERSION_3 ) return; if( minor < SSL_MINOR_VERSION_0 || minor > SSL_MINOR_VERSION_3 ) return; ssl->ciphersuites[minor] = ciphersuites; } void ssl_set_ca_chain( ssl_context *ssl, x509_cert *ca_chain, x509_crl *ca_crl, const char *peer_cn ) { ssl->ca_chain = ca_chain; ssl->ca_crl = ca_crl; ssl->peer_cn = peer_cn; } void ssl_set_own_cert( ssl_context *ssl, x509_cert *own_cert, rsa_context *rsa_key ) { ssl->own_cert = own_cert; ssl->rsa_key = rsa_key; } void ssl_set_own_cert_alt( ssl_context *ssl, x509_cert *own_cert, void *rsa_key, rsa_decrypt_func rsa_decrypt, rsa_sign_func rsa_sign, rsa_key_len_func rsa_key_len ) { ssl->own_cert = own_cert; ssl->rsa_key = rsa_key; ssl->rsa_decrypt = rsa_decrypt; ssl->rsa_sign = rsa_sign; ssl->rsa_key_len = rsa_key_len; } #if defined(POLARSSL_DHM_C) int ssl_set_dh_param( ssl_context *ssl, const char *dhm_P, const char *dhm_G ) { int ret; if( ( ret = mpi_read_string( &ssl->dhm_P, 16, dhm_P ) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_read_string", ret ); return( ret ); } if( ( ret = 
mpi_read_string( &ssl->dhm_G, 16, dhm_G ) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_read_string", ret ); return( ret ); } return( 0 ); } int ssl_set_dh_param_ctx( ssl_context *ssl, dhm_context *dhm_ctx ) { int ret; if( ( ret = mpi_copy(&ssl->dhm_P, &dhm_ctx->P) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_copy", ret ); return( ret ); } if( ( ret = mpi_copy(&ssl->dhm_G, &dhm_ctx->G) ) != 0 ) { SSL_DEBUG_RET( 1, "mpi_copy", ret ); return( ret ); } return( 0 ); } #endif /* POLARSSL_DHM_C */ int ssl_set_hostname( ssl_context *ssl, const char *hostname ) { if( hostname == NULL ) return( POLARSSL_ERR_SSL_BAD_INPUT_DATA ); ssl->hostname_len = strlen( hostname ); ssl->hostname = (unsigned char *) malloc( ssl->hostname_len + 1 ); if( ssl->hostname == NULL ) return( POLARSSL_ERR_SSL_MALLOC_FAILED ); memcpy( ssl->hostname, (const unsigned char *) hostname, ssl->hostname_len ); ssl->hostname[ssl->hostname_len] = '\0'; return( 0 ); } void ssl_set_sni( ssl_context *ssl, int (*f_sni)(void *, ssl_context *, const unsigned char *, size_t), void *p_sni ) { ssl->f_sni = f_sni; ssl->p_sni = p_sni; } void ssl_set_max_version( ssl_context *ssl, int major, int minor ) { ssl->max_major_ver = major; ssl->max_minor_ver = minor; } void ssl_set_min_version( ssl_context *ssl, int major, int minor ) { ssl->min_major_ver = major; ssl->min_minor_ver = minor; } void ssl_set_renegotiation( ssl_context *ssl, int renegotiation ) { ssl->disable_renegotiation = renegotiation; } void ssl_legacy_renegotiation( ssl_context *ssl, int allow_legacy ) { ssl->allow_legacy_renegotiation = allow_legacy; } /* * SSL get accessors */ size_t ssl_get_bytes_avail( const ssl_context *ssl ) { return( ssl->in_offt == NULL ? 
0 : ssl->in_msglen ); } int ssl_get_verify_result( const ssl_context *ssl ) { return( ssl->verify_result ); } const char *ssl_get_ciphersuite_name( const int ciphersuite_id ) { switch( ciphersuite_id ) { #if defined(POLARSSL_ARC4_C) case TLS_RSA_WITH_RC4_128_MD5: return( "TLS-RSA-WITH-RC4-128-MD5" ); case TLS_RSA_WITH_RC4_128_SHA: return( "TLS-RSA-WITH-RC4-128-SHA" ); #endif #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_3DES_EDE_CBC_SHA: return( "TLS-RSA-WITH-3DES-EDE-CBC-SHA" ); case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA: return( "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA" ); #endif #if defined(POLARSSL_AES_C) case TLS_RSA_WITH_AES_128_CBC_SHA: return( "TLS-RSA-WITH-AES-128-CBC-SHA" ); case TLS_DHE_RSA_WITH_AES_128_CBC_SHA: return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA" ); case TLS_RSA_WITH_AES_256_CBC_SHA: return( "TLS-RSA-WITH-AES-256-CBC-SHA" ); case TLS_DHE_RSA_WITH_AES_256_CBC_SHA: return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA" ); #if defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_AES_128_CBC_SHA256: return( "TLS-RSA-WITH-AES-128-CBC-SHA256" ); case TLS_RSA_WITH_AES_256_CBC_SHA256: return( "TLS-RSA-WITH-AES-256-CBC-SHA256" ); case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256: return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256" ); case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256: return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256" ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_AES_128_GCM_SHA256: return( "TLS-RSA-WITH-AES-128-GCM-SHA256" ); case TLS_RSA_WITH_AES_256_GCM_SHA384: return( "TLS-RSA-WITH-AES-256-GCM-SHA384" ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C) case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256: return( "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256" ); case TLS_DHE_RSA_WITH_AES_256_GCM_SHA384: return( "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384" ); #endif #endif /* POLARSSL_AES_C */ #if defined(POLARSSL_CAMELLIA_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA: return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA" ); case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA: return( 
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA" ); case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA: return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA" ); case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA: return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA" ); #if defined(POLARSSL_SHA2_C) case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256: return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256" ); case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256: return( "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256" ); case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256: return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256" ); case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256: return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256" ); #endif #endif #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) #if defined(POLARSSL_CIPHER_NULL_CIPHER) case TLS_RSA_WITH_NULL_MD5: return( "TLS-RSA-WITH-NULL-MD5" ); case TLS_RSA_WITH_NULL_SHA: return( "TLS-RSA-WITH-NULL-SHA" ); case TLS_RSA_WITH_NULL_SHA256: return( "TLS-RSA-WITH-NULL-SHA256" ); #endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */ #if defined(POLARSSL_DES_C) case TLS_RSA_WITH_DES_CBC_SHA: return( "TLS-RSA-WITH-DES-CBC-SHA" ); case TLS_DHE_RSA_WITH_DES_CBC_SHA: return( "TLS-DHE-RSA-WITH-DES-CBC-SHA" ); #endif #endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */ default: break; } return( "unknown" ); } int ssl_get_ciphersuite_id( const char *ciphersuite_name ) { #if defined(POLARSSL_ARC4_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-RC4-128-MD5")) return( TLS_RSA_WITH_RC4_128_MD5 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-RC4-128-SHA")) return( TLS_RSA_WITH_RC4_128_SHA ); #endif #if defined(POLARSSL_DES_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-3DES-EDE-CBC-SHA")) return( TLS_RSA_WITH_3DES_EDE_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA")) return( TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA ); #endif #if defined(POLARSSL_AES_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-128-CBC-SHA")) return( TLS_RSA_WITH_AES_128_CBC_SHA ); if (0 == 
strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-128-CBC-SHA")) return( TLS_DHE_RSA_WITH_AES_128_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-256-CBC-SHA")) return( TLS_RSA_WITH_AES_256_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-256-CBC-SHA")) return( TLS_DHE_RSA_WITH_AES_256_CBC_SHA ); #if defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-128-CBC-SHA256")) return( TLS_RSA_WITH_AES_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-256-CBC-SHA256")) return( TLS_RSA_WITH_AES_256_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256")) return( TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256")) return( TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-128-GCM-SHA256")) return( TLS_RSA_WITH_AES_128_GCM_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-AES-256-GCM-SHA384")) return( TLS_RSA_WITH_AES_256_GCM_SHA384 ); #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256")) return( TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384")) return( TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 ); #endif #endif #if defined(POLARSSL_CAMELLIA_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA")) return( TLS_RSA_WITH_CAMELLIA_128_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA")) return( TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA")) return( TLS_RSA_WITH_CAMELLIA_256_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA")) return( 
TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA ); #if defined(POLARSSL_SHA2_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256")) return( TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256")) return( TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256")) return( TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256")) return( TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 ); #endif #endif #if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) #if defined(POLARSSL_CIPHER_NULL_CIPHER) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-NULL-MD5")) return( TLS_RSA_WITH_NULL_MD5 ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-NULL-SHA")) return( TLS_RSA_WITH_NULL_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-NULL-SHA256")) return( TLS_RSA_WITH_NULL_SHA256 ); #endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */ #if defined(POLARSSL_DES_C) if (0 == strcasecmp(ciphersuite_name, "TLS-RSA-WITH-DES-CBC-SHA")) return( TLS_RSA_WITH_DES_CBC_SHA ); if (0 == strcasecmp(ciphersuite_name, "TLS-DHE-RSA-WITH-DES-CBC-SHA")) return( TLS_DHE_RSA_WITH_DES_CBC_SHA ); #endif #endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */ return( 0 ); } const char *ssl_get_ciphersuite( const ssl_context *ssl ) { if( ssl == NULL || ssl->session == NULL ) return NULL; return ssl_get_ciphersuite_name( ssl->session->ciphersuite ); } const char *ssl_get_version( const ssl_context *ssl ) { switch( ssl->minor_ver ) { case SSL_MINOR_VERSION_0: return( "SSLv3.0" ); case SSL_MINOR_VERSION_1: return( "TLSv1.0" ); case SSL_MINOR_VERSION_2: return( "TLSv1.1" ); case SSL_MINOR_VERSION_3: return( "TLSv1.2" ); default: break; } return( "unknown" ); } const x509_cert *ssl_get_peer_cert( const ssl_context *ssl ) { if( ssl == NULL || ssl->session == NULL ) return NULL; return 
ssl->session->peer_cert; } const int ssl_default_ciphersuites[] = { #if defined(POLARSSL_DHM_C) #if defined(POLARSSL_AES_C) #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C) TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, #endif TLS_DHE_RSA_WITH_AES_256_CBC_SHA, #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, #endif #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, #endif TLS_DHE_RSA_WITH_AES_128_CBC_SHA, #endif #if defined(POLARSSL_CAMELLIA_C) #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, #if defined(POLARSSL_SHA2_C) TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, #endif #if defined(POLARSSL_DES_C) TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, #endif #endif #if defined(POLARSSL_AES_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_AES_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C) TLS_RSA_WITH_AES_256_GCM_SHA384, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_AES_256_CBC_SHA, #endif #if defined(POLARSSL_CAMELLIA_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, #endif #if defined(POLARSSL_AES_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_AES_128_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ #if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C) TLS_RSA_WITH_AES_128_GCM_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_AES_128_CBC_SHA, #endif #if defined(POLARSSL_CAMELLIA_C) #if defined(POLARSSL_SHA2_C) TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, #endif /* POLARSSL_SHA2_C */ TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, #endif #if defined(POLARSSL_DES_C) TLS_RSA_WITH_3DES_EDE_CBC_SHA, #endif #if defined(POLARSSL_ARC4_C) TLS_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_RC4_128_MD5, 
#endif 0 }; /* * Perform a single step of the SSL handshake */ int ssl_handshake_step( ssl_context *ssl ) { int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE; #if defined(POLARSSL_SSL_CLI_C) if( ssl->endpoint == SSL_IS_CLIENT ) ret = ssl_handshake_client_step( ssl ); #endif #if defined(POLARSSL_SSL_SRV_C) if( ssl->endpoint == SSL_IS_SERVER ) ret = ssl_handshake_server_step( ssl ); #endif return( ret ); } /* * Perform the SSL handshake */ int ssl_handshake( ssl_context *ssl ) { int ret = 0; SSL_DEBUG_MSG( 2, ( "=> handshake" ) ); while( ssl->state != SSL_HANDSHAKE_OVER ) { ret = ssl_handshake_step( ssl ); if( ret != 0 ) break; } SSL_DEBUG_MSG( 2, ( "<= handshake" ) ); return( ret ); } /* * Renegotiate current connection */ int ssl_renegotiate( ssl_context *ssl ) { int ret; SSL_DEBUG_MSG( 2, ( "=> renegotiate" ) ); if( ssl->state != SSL_HANDSHAKE_OVER ) return( POLARSSL_ERR_SSL_BAD_INPUT_DATA ); ssl->state = SSL_HELLO_REQUEST; ssl->renegotiation = SSL_RENEGOTIATION; if( ( ret = ssl_handshake_init( ssl ) ) != 0 ) return( ret ); if( ( ret = ssl_handshake( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_handshake", ret ); return( ret ); } SSL_DEBUG_MSG( 2, ( "<= renegotiate" ) ); return( 0 ); } /* * Receive application data decrypted from the SSL layer */ int ssl_read( ssl_context *ssl, unsigned char *buf, size_t len ) { int ret; size_t n; SSL_DEBUG_MSG( 2, ( "=> read" ) ); if( ssl->state != SSL_HANDSHAKE_OVER ) { if( ( ret = ssl_handshake( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_handshake", ret ); return( ret ); } } if( ssl->in_offt == NULL ) { if( ( ret = ssl_read_record( ssl ) ) != 0 ) { if( ret == POLARSSL_ERR_SSL_CONN_EOF ) return( 0 ); SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } if( ssl->in_msglen == 0 && ssl->in_msgtype == SSL_MSG_APPLICATION_DATA ) { /* * OpenSSL sends empty messages to randomize the IV */ if( ( ret = ssl_read_record( ssl ) ) != 0 ) { if( ret == POLARSSL_ERR_SSL_CONN_EOF ) return( 0 ); SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( 
ret ); } } if( ssl->in_msgtype == SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "received handshake message" ) ); if( ssl->endpoint == SSL_IS_CLIENT && ( ssl->in_msg[0] != SSL_HS_HELLO_REQUEST || ssl->in_hslen != 4 ) ) { SSL_DEBUG_MSG( 1, ( "handshake received (not HelloRequest)" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } if( ssl->disable_renegotiation == SSL_RENEGOTIATION_DISABLED || ( ssl->secure_renegotiation == SSL_LEGACY_RENEGOTIATION && ssl->allow_legacy_renegotiation == SSL_LEGACY_NO_RENEGOTIATION ) ) { SSL_DEBUG_MSG( 3, ( "ignoring renegotiation, sending alert" ) ); if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { /* * SSLv3 does not have a "no_renegotiation" alert */ if( ( ret = ssl_send_fatal_handshake_failure( ssl ) ) != 0 ) return( ret ); } else { if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_WARNING, SSL_ALERT_MSG_NO_RENEGOTIATION ) ) != 0 ) { return( ret ); } } } else { if( ( ret = ssl_renegotiate( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_renegotiate", ret ); return( ret ); } return( POLARSSL_ERR_NET_WANT_READ ); } } else if( ssl->in_msgtype != SSL_MSG_APPLICATION_DATA ) { SSL_DEBUG_MSG( 1, ( "bad application data message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } ssl->in_offt = ssl->in_msg; } n = ( len < ssl->in_msglen ) ? len : ssl->in_msglen; memcpy( buf, ssl->in_offt, n ); ssl->in_msglen -= n; if( ssl->in_msglen == 0 ) /* all bytes consumed */ ssl->in_offt = NULL; else /* more data available */ ssl->in_offt += n; SSL_DEBUG_MSG( 2, ( "<= read" ) ); return( (int) n ); } /* * Send application data to be encrypted by the SSL layer */ int ssl_write( ssl_context *ssl, const unsigned char *buf, size_t len ) { int ret; size_t n; SSL_DEBUG_MSG( 2, ( "=> write" ) ); if( ssl->state != SSL_HANDSHAKE_OVER ) { if( ( ret = ssl_handshake( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_handshake", ret ); return( ret ); } } n = ( len < SSL_MAX_CONTENT_LEN ) ? 
len : SSL_MAX_CONTENT_LEN; if( ssl->out_left != 0 ) { if( ( ret = ssl_flush_output( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_flush_output", ret ); return( ret ); } } else { ssl->out_msglen = n; ssl->out_msgtype = SSL_MSG_APPLICATION_DATA; memcpy( ssl->out_msg, buf, n ); if( ( ret = ssl_write_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_write_record", ret ); return( ret ); } } SSL_DEBUG_MSG( 2, ( "<= write" ) ); return( (int) n ); } /* * Notify the peer that the connection is being closed */ int ssl_close_notify( ssl_context *ssl ) { int ret; SSL_DEBUG_MSG( 2, ( "=> write close notify" ) ); if( ( ret = ssl_flush_output( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_flush_output", ret ); return( ret ); } if( ssl->state == SSL_HANDSHAKE_OVER ) { if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_WARNING, SSL_ALERT_MSG_CLOSE_NOTIFY ) ) != 0 ) { return( ret ); } } SSL_DEBUG_MSG( 2, ( "<= write close notify" ) ); return( ret ); } void ssl_transform_free( ssl_transform *transform ) { #if defined(POLARSSL_ZLIB_SUPPORT) deflateEnd( &transform->ctx_deflate ); inflateEnd( &transform->ctx_inflate ); #endif memset( transform, 0, sizeof( ssl_transform ) ); } void ssl_handshake_free( ssl_handshake_params *handshake ) { #if defined(POLARSSL_DHM_C) dhm_free( &handshake->dhm_ctx ); #endif memset( handshake, 0, sizeof( ssl_handshake_params ) ); } void ssl_session_free( ssl_session *session ) { if( session->peer_cert != NULL ) { x509_free( session->peer_cert ); free( session->peer_cert ); } memset( session, 0, sizeof( ssl_session ) ); } /* * Free an SSL context */ void ssl_free( ssl_context *ssl ) { SSL_DEBUG_MSG( 2, ( "=> free" ) ); free( ssl->ciphersuites ); if( ssl->out_ctr != NULL ) { memset( ssl->out_ctr, 0, SSL_BUFFER_LEN ); free( ssl->out_ctr ); } if( ssl->in_ctr != NULL ) { memset( ssl->in_ctr, 0, SSL_BUFFER_LEN ); free( ssl->in_ctr ); } #if defined(POLARSSL_DHM_C) mpi_free( &ssl->dhm_P ); mpi_free( &ssl->dhm_G ); #endif if( ssl->transform ) { ssl_transform_free( 
ssl->transform ); free( ssl->transform ); } if( ssl->handshake ) { ssl_handshake_free( ssl->handshake ); ssl_transform_free( ssl->transform_negotiate ); ssl_session_free( ssl->session_negotiate ); free( ssl->handshake ); free( ssl->transform_negotiate ); free( ssl->session_negotiate ); } if( ssl->session ) { ssl_session_free( ssl->session ); free( ssl->session ); } if ( ssl->hostname != NULL) { memset( ssl->hostname, 0, ssl->hostname_len ); free( ssl->hostname ); ssl->hostname_len = 0; } #if defined(POLARSSL_SSL_HW_RECORD_ACCEL) if( ssl_hw_record_finish != NULL ) { SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_finish()" ) ); ssl_hw_record_finish( ssl ); } #endif SSL_DEBUG_MSG( 2, ( "<= free" ) ); /* Actually clear after last debug message */ memset( ssl, 0, sizeof( ssl_context ) ); } #endif
int ssl_parse_certificate( ssl_context *ssl ) { int ret; size_t i, n; SSL_DEBUG_MSG( 2, ( "=> parse certificate" ) ); if( ssl->endpoint == SSL_IS_SERVER && ssl->authmode == SSL_VERIFY_NONE ) { ssl->verify_result = BADCERT_SKIP_VERIFY; SSL_DEBUG_MSG( 2, ( "<= skip parse certificate" ) ); ssl->state++; return( 0 ); } if( ( ret = ssl_read_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } ssl->state++; /* * Check if the client sent an empty certificate */ if( ssl->endpoint == SSL_IS_SERVER && ssl->minor_ver == SSL_MINOR_VERSION_0 ) { if( ssl->in_msglen == 2 && ssl->in_msgtype == SSL_MSG_ALERT && ssl->in_msg[0] == SSL_ALERT_LEVEL_WARNING && ssl->in_msg[1] == SSL_ALERT_MSG_NO_CERT ) { SSL_DEBUG_MSG( 1, ( "SSLv3 client has no certificate" ) ); ssl->verify_result = BADCERT_MISSING; if( ssl->authmode == SSL_VERIFY_OPTIONAL ) return( 0 ); else return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE ); } } if( ssl->endpoint == SSL_IS_SERVER && ssl->minor_ver != SSL_MINOR_VERSION_0 ) { if( ssl->in_hslen == 7 && ssl->in_msgtype == SSL_MSG_HANDSHAKE && ssl->in_msg[0] == SSL_HS_CERTIFICATE && memcmp( ssl->in_msg + 4, "\0\0\0", 3 ) == 0 ) { SSL_DEBUG_MSG( 1, ( "TLSv1 client has no certificate" ) ); ssl->verify_result = BADCERT_MISSING; if( ssl->authmode == SSL_VERIFY_REQUIRED ) return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE ); else return( 0 ); } } if( ssl->in_msgtype != SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } if( ssl->in_msg[0] != SSL_HS_CERTIFICATE || ssl->in_hslen < 10 ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } /* * Same message structure as in ssl_write_certificate() */ n = ( ssl->in_msg[5] << 8 ) | ssl->in_msg[6]; if( ssl->in_msg[4] != 0 || ssl->in_hslen != 7 + n ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } if( ( ssl->session_negotiate->peer_cert = 
(x509_cert *) malloc( sizeof( x509_cert ) ) ) == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", sizeof( x509_cert ) ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memset( ssl->session_negotiate->peer_cert, 0, sizeof( x509_cert ) ); i = 7; while( i < ssl->in_hslen ) { if( ssl->in_msg[i] != 0 ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } n = ( (unsigned int) ssl->in_msg[i + 1] << 8 ) | (unsigned int) ssl->in_msg[i + 2]; i += 3; if( n < 128 || i + n > ssl->in_hslen ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i, n ); if( ret != 0 ) { SSL_DEBUG_RET( 1, " x509parse_crt", ret ); return( ret ); } i += n; } SSL_DEBUG_CRT( 3, "peer certificate", ssl->session_negotiate->peer_cert ); if( ssl->authmode != SSL_VERIFY_NONE ) { if( ssl->ca_chain == NULL ) { SSL_DEBUG_MSG( 1, ( "got no CA chain" ) ); return( POLARSSL_ERR_SSL_CA_CHAIN_REQUIRED ); } ret = x509parse_verify( ssl->session_negotiate->peer_cert, ssl->ca_chain, ssl->ca_crl, ssl->peer_cn, &ssl->verify_result, ssl->f_vrfy, ssl->p_vrfy ); if( ret != 0 ) SSL_DEBUG_RET( 1, "x509_verify_cert", ret ); if( ssl->authmode != SSL_VERIFY_REQUIRED ) ret = 0; } SSL_DEBUG_MSG( 2, ( "<= parse certificate" ) ); return( ret ); }
int ssl_parse_certificate( ssl_context *ssl ) { int ret; size_t i, n; SSL_DEBUG_MSG( 2, ( "=> parse certificate" ) ); if( ssl->endpoint == SSL_IS_SERVER && ssl->authmode == SSL_VERIFY_NONE ) { ssl->verify_result = BADCERT_SKIP_VERIFY; SSL_DEBUG_MSG( 2, ( "<= skip parse certificate" ) ); ssl->state++; return( 0 ); } if( ( ret = ssl_read_record( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } ssl->state++; /* * Check if the client sent an empty certificate */ if( ssl->endpoint == SSL_IS_SERVER && ssl->minor_ver == SSL_MINOR_VERSION_0 ) { if( ssl->in_msglen == 2 && ssl->in_msgtype == SSL_MSG_ALERT && ssl->in_msg[0] == SSL_ALERT_LEVEL_WARNING && ssl->in_msg[1] == SSL_ALERT_MSG_NO_CERT ) { SSL_DEBUG_MSG( 1, ( "SSLv3 client has no certificate" ) ); ssl->verify_result = BADCERT_MISSING; if( ssl->authmode == SSL_VERIFY_OPTIONAL ) return( 0 ); else return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE ); } } if( ssl->endpoint == SSL_IS_SERVER && ssl->minor_ver != SSL_MINOR_VERSION_0 ) { if( ssl->in_hslen == 7 && ssl->in_msgtype == SSL_MSG_HANDSHAKE && ssl->in_msg[0] == SSL_HS_CERTIFICATE && memcmp( ssl->in_msg + 4, "\0\0\0", 3 ) == 0 ) { SSL_DEBUG_MSG( 1, ( "TLSv1 client has no certificate" ) ); ssl->verify_result = BADCERT_MISSING; if( ssl->authmode == SSL_VERIFY_REQUIRED ) return( POLARSSL_ERR_SSL_NO_CLIENT_CERTIFICATE ); else return( 0 ); } } if( ssl->in_msgtype != SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } if( ssl->in_msg[0] != SSL_HS_CERTIFICATE || ssl->in_hslen < 10 ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } /* * Same message structure as in ssl_write_certificate() */ n = ( ssl->in_msg[5] << 8 ) | ssl->in_msg[6]; if( ssl->in_msg[4] != 0 || ssl->in_hslen != 7 + n ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } if( ( ssl->session_negotiate->peer_cert = 
(x509_cert *) malloc( sizeof( x509_cert ) ) ) == NULL ) { SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", sizeof( x509_cert ) ) ); return( POLARSSL_ERR_SSL_MALLOC_FAILED ); } memset( ssl->session_negotiate->peer_cert, 0, sizeof( x509_cert ) ); i = 7; while( i < ssl->in_hslen ) { if( ssl->in_msg[i] != 0 ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } n = ( (unsigned int) ssl->in_msg[i + 1] << 8 ) | (unsigned int) ssl->in_msg[i + 2]; i += 3; if( n < 128 || i + n > ssl->in_hslen ) { SSL_DEBUG_MSG( 1, ( "bad certificate message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE ); } ret = x509parse_crt_der( ssl->session_negotiate->peer_cert, ssl->in_msg + i, n ); if( ret != 0 ) { SSL_DEBUG_RET( 1, " x509parse_crt", ret ); return( ret ); } i += n; } SSL_DEBUG_CRT( 3, "peer certificate", ssl->session_negotiate->peer_cert ); if( ssl->authmode != SSL_VERIFY_NONE ) { if( ssl->ca_chain == NULL ) { SSL_DEBUG_MSG( 1, ( "got no CA chain" ) ); return( POLARSSL_ERR_SSL_CA_CHAIN_REQUIRED ); } ret = x509parse_verify( ssl->session_negotiate->peer_cert, ssl->ca_chain, ssl->ca_crl, ssl->peer_cn, &ssl->verify_result, ssl->f_vrfy, ssl->p_vrfy ); if( ret != 0 ) SSL_DEBUG_RET( 1, "x509_verify_cert", ret ); if( ssl->authmode != SSL_VERIFY_REQUIRED ) ret = 0; } SSL_DEBUG_MSG( 2, ( "<= parse certificate" ) ); return( ret ); }
{'added': [(2378, ' ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,'), (2379, ' ssl->in_msg + i, n );')], 'deleted': [(2378, ' ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,'), (2379, ' n );')]}
2
2
2,736
18,398
120
725
33
https://github.com/polarssl/polarssl
CVE-2013-4623
CWE-20
1,375
hrtimer.c
C
snd_hrtimer_start
/* * ALSA timer back-end using hrtimer * Copyright (C) 2008 Takashi Iwai * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/hrtimer.h> #include <sound/core.h> #include <sound/timer.h> MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA hrtimer backend"); MODULE_LICENSE("GPL"); MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_HRTIMER)); #define NANO_SEC 1000000000UL /* 10^9 in sec */ static unsigned int resolution; struct snd_hrtimer { struct snd_timer *timer; struct hrtimer hrt; atomic_t running; }; static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) { struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt); struct snd_timer *t = stime->timer; unsigned long oruns; if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); snd_timer_interrupt(stime->timer, t->sticks * oruns); if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; return HRTIMER_RESTART; } static int snd_hrtimer_open(struct snd_timer *t) { struct snd_hrtimer *stime; stime = kmalloc(sizeof(*stime), GFP_KERNEL); if (!stime) return -ENOMEM; hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 
stime->timer = t; stime->hrt.function = snd_hrtimer_callback; atomic_set(&stime->running, 0); t->private_data = stime; return 0; } static int snd_hrtimer_close(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; if (stime) { hrtimer_cancel(&stime->hrt); kfree(stime); t->private_data = NULL; } return 0; } static int snd_hrtimer_start(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; atomic_set(&stime->running, 0); hrtimer_cancel(&stime->hrt); hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution), HRTIMER_MODE_REL); atomic_set(&stime->running, 1); return 0; } static int snd_hrtimer_stop(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; atomic_set(&stime->running, 0); return 0; } static struct snd_timer_hardware hrtimer_hw = { .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET, .open = snd_hrtimer_open, .close = snd_hrtimer_close, .start = snd_hrtimer_start, .stop = snd_hrtimer_stop, }; /* * entry functions */ static struct snd_timer *mytimer; static int __init snd_hrtimer_init(void) { struct snd_timer *timer; int err; resolution = hrtimer_resolution; /* Create a new timer and set up the fields */ err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER, &timer); if (err < 0) return err; timer->module = THIS_MODULE; strcpy(timer->name, "HR timer"); timer->hw = hrtimer_hw; timer->hw.resolution = resolution; timer->hw.ticks = NANO_SEC / resolution; err = snd_timer_global_register(timer); if (err < 0) { snd_timer_global_free(timer); return err; } mytimer = timer; /* remember this */ return 0; } static void __exit snd_hrtimer_exit(void) { if (mytimer) { snd_timer_global_free(mytimer); mytimer = NULL; } } module_init(snd_hrtimer_init); module_exit(snd_hrtimer_exit);
/* * ALSA timer back-end using hrtimer * Copyright (C) 2008 Takashi Iwai * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/hrtimer.h> #include <sound/core.h> #include <sound/timer.h> MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA hrtimer backend"); MODULE_LICENSE("GPL"); MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_HRTIMER)); #define NANO_SEC 1000000000UL /* 10^9 in sec */ static unsigned int resolution; struct snd_hrtimer { struct snd_timer *timer; struct hrtimer hrt; atomic_t running; }; static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) { struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt); struct snd_timer *t = stime->timer; unsigned long oruns; if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); snd_timer_interrupt(stime->timer, t->sticks * oruns); if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; return HRTIMER_RESTART; } static int snd_hrtimer_open(struct snd_timer *t) { struct snd_hrtimer *stime; stime = kmalloc(sizeof(*stime), GFP_KERNEL); if (!stime) return -ENOMEM; hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 
stime->timer = t; stime->hrt.function = snd_hrtimer_callback; atomic_set(&stime->running, 0); t->private_data = stime; return 0; } static int snd_hrtimer_close(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; if (stime) { hrtimer_cancel(&stime->hrt); kfree(stime); t->private_data = NULL; } return 0; } static int snd_hrtimer_start(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; atomic_set(&stime->running, 0); hrtimer_try_to_cancel(&stime->hrt); hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution), HRTIMER_MODE_REL); atomic_set(&stime->running, 1); return 0; } static int snd_hrtimer_stop(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; atomic_set(&stime->running, 0); hrtimer_try_to_cancel(&stime->hrt); return 0; } static struct snd_timer_hardware hrtimer_hw = { .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET, .open = snd_hrtimer_open, .close = snd_hrtimer_close, .start = snd_hrtimer_start, .stop = snd_hrtimer_stop, }; /* * entry functions */ static struct snd_timer *mytimer; static int __init snd_hrtimer_init(void) { struct snd_timer *timer; int err; resolution = hrtimer_resolution; /* Create a new timer and set up the fields */ err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER, &timer); if (err < 0) return err; timer->module = THIS_MODULE; strcpy(timer->name, "HR timer"); timer->hw = hrtimer_hw; timer->hw.resolution = resolution; timer->hw.ticks = NANO_SEC / resolution; err = snd_timer_global_register(timer); if (err < 0) { snd_timer_global_free(timer); return err; } mytimer = timer; /* remember this */ return 0; } static void __exit snd_hrtimer_exit(void) { if (mytimer) { snd_timer_global_free(mytimer); mytimer = NULL; } } module_init(snd_hrtimer_init); module_exit(snd_hrtimer_exit);
static int snd_hrtimer_start(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; atomic_set(&stime->running, 0); hrtimer_cancel(&stime->hrt); hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution), HRTIMER_MODE_REL); atomic_set(&stime->running, 1); return 0; }
static int snd_hrtimer_start(struct snd_timer *t) { struct snd_hrtimer *stime = t->private_data; atomic_set(&stime->running, 0); hrtimer_try_to_cancel(&stime->hrt); hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution), HRTIMER_MODE_REL); atomic_set(&stime->running, 1); return 0; }
{'added': [(93, '\thrtimer_try_to_cancel(&stime->hrt);'), (104, '\thrtimer_try_to_cancel(&stime->hrt);')], 'deleted': [(93, '\thrtimer_cancel(&stime->hrt);')]}
2
1
109
588
10
68
1
https://github.com/torvalds/linux
CVE-2016-2549
CWE-20
2,434
activations.cc
C++
tflite::ops::builtin::activations::SigmoidPrepare
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include 
"tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. 
/ 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
constexpr int vectorized_16_loop_step = 16; const int vectorized_16_loop_end = size / vectorized_16_loop_step * vectorized_16_loop_step; for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) { uint8x16_t input = vld1q_u8(input_data + i); uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input); vst1q_u8(output_data + i, output); } // Postamble and non-ARM64 code: simple for loop. #endif for (; i < size; ++i) { output_data[i] = data->table[input_data[i]]; } } template <typename T> void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input, TfLiteTensor* output, const ReluOpData* data) { ReluParams params; params.quantized_activation_min = std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), output->params.zero_point + static_cast<int32>(roundf(act_min / output->params.scale))); params.quantized_activation_max = act_max == std::numeric_limits<float>::infinity() ? static_cast<int32_t>(std::numeric_limits<T>::max()) : std::min( static_cast<int32_t>(std::numeric_limits<T>::max()), output->params.zero_point + static_cast<int32>(roundf(act_max / output->params.scale))); params.input_offset = input->params.zero_point; params.output_offset = output->params.zero_point; params.output_multiplier = data->output_multiplier; params.output_shift = data->output_shift; optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). 
return new OpData; } void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new SoftmaxOpData; } void SoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<SoftmaxOpData*>(buffer); } void* LogSoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new LogSoftmaxOpData; } void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { return new PreluOpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } void LogSoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LogSoftmaxOpData*>(buffer); } void PreluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<PreluOpData*>(buffer); } void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { return new HardSwishData; } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new ReluOpData; } void ReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<ReluOpData*>(buffer); } TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = 
input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new LeakyReluOpData; } void LeakyReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LeakyReluOpData*>(buffer); } void HardSwishFree(TfLiteContext* context, void* buffer) { delete static_cast<HardSwishData*>(buffer); } TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_STATUS(GenericPrepare(context, node)); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); HardSwishParams* params = &data->params; const TfLiteTensor* input = GetInput(context, node, 0); params->input_zero_point = input->params.zero_point; params->output_zero_point = output->params.zero_point; const float input_scale = input->params.scale; const float hires_input_scale = (1.0f / 128.0f) * input_scale; const float reluish_scale = 3.0f / 32768.0f; const float output_scale = output->params.scale; const float output_multiplier = hires_input_scale / output_scale; int32_t output_multiplier_fixedpoint_int32; QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, &params->output_multiplier_exponent); DownScaleInt32ToInt16Multiplier( output_multiplier_fixedpoint_int32, &params->output_multiplier_fixedpoint_int16); TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); const float reluish_multiplier = hires_input_scale / reluish_scale; int32_t reluish_multiplier_fixedpoint_int32; QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_exponent); DownScaleInt32ToInt16Multiplier( reluish_multiplier_fixedpoint_int32, 
&params->reluish_multiplier_fixedpoint_int16); } return kTfLiteOk; } TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = 
static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. 
/ 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. 
data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, 
output->params.scale, 16.0 / 256); static const double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type); output->type = input->type; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { // prelu(x) = x if x >= 0 else x * alpha. 
// So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // ouput_q = (input_q - input_zp) * input_scale / output_scale + output_q // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_q // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale/ output_scale. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parameteric Relu) shares the same alpha value on "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape. 
TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = 
static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); 
      // Clamp each float element to [0, 6] in place over the flat buffer.
      for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f);
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8:
      QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Evaluates tanh. Float picks the reference or optimized kernel from the
// template parameter; int16 uses the reference integer kernel when the
// kernel is kReference or a positive input multiplier was configured in
// Prepare; uint8/int8 use the 16-bit fixed-point kernel under
// kFixedPointOptimized, otherwise the precomputed lookup table.
template <KernelType kernel_type>
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      } else {
        optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      TanhParams params;
      params.input_left_shift = data->input_left_shift;
      if (kernel_type == kReference || (data->input_multiplier > 0)) {
        reference_integer_ops::Tanh(
            data->input_multiplier, data->input_left_shift,
            GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      } else {
        optimized_ops::Tanh(
            params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      if (kernel_type == kFixedPointOptimized) {
        // Fixed-point path: forward the quantization params computed in
        // Prepare to the 16-bit precision kernel.
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input),
GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } // Sigmoid is also know as "Logistic". template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), 
GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus 
SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, 
output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, 
input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? 
input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; 
op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = 
      data->output_shift_alpha;
  op_params.output_multiplier_identity = data->output_multiplier_identity;
  op_params.output_shift_identity = data->output_shift_identity;
  reference_ops::QuantizeLeakyRelu(
      op_params, GetTensorShape(input), GetTensorData<T>(input),
      GetTensorShape(output), GetTensorData<T>(output));
}

// Evaluates LeakyRelu. Float reads alpha from the builtin params and runs
// the optimized kernel; quantized types (uint8/int8/int16) use the
// multipliers/shifts precomputed in Prepare via QuantizeLeakyRelu<T>.
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const auto* params =
      reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
  const LeakyReluOpData* data =
      reinterpret_cast<LeakyReluOpData*>(node->user_data);
  LeakyReluParams op_params;
  switch (input->type) {
    case kTfLiteFloat32: {
      op_params.alpha = params->alpha;
      optimized_ops::LeakyRelu(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizeLeakyRelu<uint8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizeLeakyRelu<int8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      QuantizeLeakyRelu<int16_t>(input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, int8, int16 and uint8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Prepare for ELU: for int8 input, precompute a 256-entry lookup table of
// quantized ELU values (exp(x) - 1 for x < 0, identity otherwise), then
// fall through to the generic shape/type checks.
TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  // Use LUT to handle quantized elu path.
  if (input->type == kTfLiteInt8) {
    PopulateLookupTable<int8_t>(data, input, output, [](float value) {
      return value < 0.0 ?
std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* 
Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
  return Register_LOGISTIC_GENERIC_OPT();
}

// Registration boilerplate: each Register_* returns a static
// TfLiteRegistration wiring {init, free, prepare, eval} for one builtin op.
// *_REF variants select the reference kernels; the default variants select
// the generic optimized kernels.
TfLiteRegistration* Register_SOFTMAX() {
  static TfLiteRegistration r = {
      activations::SoftmaxInit, activations::SoftmaxFree,
      activations::SoftmaxPrepare, activations::SoftmaxEval};
  return &r;
}

TfLiteRegistration* Register_LOG_SOFTMAX_REF() {
  static TfLiteRegistration r = {
      activations::LogSoftmaxInit, activations::LogSoftmaxFree,
      activations::LogSoftmaxPrepare,
      activations::LogSoftmaxEval<activations::kReference>};
  return &r;
}

TfLiteRegistration* Register_LOG_SOFTMAX() {
  static TfLiteRegistration r = {
      activations::LogSoftmaxInit, activations::LogSoftmaxFree,
      activations::LogSoftmaxPrepare,
      activations::LogSoftmaxEval<activations::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_PRELU_REF() {
  static TfLiteRegistration r = {
      activations::PreluInit, activations::PreluFree, activations::PreluPrepare,
      activations::PreluEval<activations::kReference>};
  return &r;
}

TfLiteRegistration* Register_PRELU() {
  static TfLiteRegistration r = {
      activations::PreluInit, activations::PreluFree, activations::PreluPrepare,
      activations::PreluEval<activations::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_LEAKY_RELU() {
  static TfLiteRegistration r = {
      activations::LeakyReluInit, activations::LeakyReluFree,
      activations::LeakyReluPrepare, activations::LeakyReluEval};
  return &r;
}

TfLiteRegistration* Register_HARD_SWISH() {
  static TfLiteRegistration r = {
      activations::HardSwishInit, activations::HardSwishFree,
      activations::HardSwishPrepare,
      activations::HardSwishEval<activations::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_HARD_SWISH_REF() {
  static TfLiteRegistration r = {
      activations::HardSwishInit, activations::HardSwishFree,
      activations::HardSwishPrepare,
      activations::HardSwishEval<activations::kReference>};
  return &r;
}

}  // namespace builtin
}  // namespace ops
}  // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include 
"tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. 
/ 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
  constexpr int vectorized_16_loop_step = 16;
  const int vectorized_16_loop_end =
      size / vectorized_16_loop_step * vectorized_16_loop_step;
  for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) {
    uint8x16_t input = vld1q_u8(input_data + i);
    uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input);
    vst1q_u8(output_data + i, output);
  }
  // Postamble and non-ARM64 code: simple for loop.
#endif
  for (; i < size; ++i) {
    output_data[i] = data->table[input_data[i]];
  }
}

// Quantized ReluX helper: clamps to [act_min, act_max] expressed in real
// values, translated into the output's quantized domain.  An act_max of
// +infinity clamps only at T's numeric maximum.
template <typename T>
void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input,
                    TfLiteTensor* output, const ReluOpData* data) {
  ReluParams params;
  params.quantized_activation_min =
      std::max(static_cast<int32_t>(std::numeric_limits<T>::min()),
               output->params.zero_point +
                   static_cast<int32>(roundf(act_min / output->params.scale)));
  params.quantized_activation_max =
      act_max == std::numeric_limits<float>::infinity()
          ? static_cast<int32_t>(std::numeric_limits<T>::max())
          : std::min(
                static_cast<int32_t>(std::numeric_limits<T>::max()),
                output->params.zero_point +
                    static_cast<int32>(roundf(act_max / output->params.scale)));
  params.input_offset = input->params.zero_point;
  params.output_offset = output->params.zero_point;
  params.output_multiplier = data->output_multiplier;
  params.output_shift = data->output_shift;
  optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input),
                       GetTensorShape(output), GetTensorData<T>(output));
}

}  // namespace

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  // This is a builtin op, so we don't use the contents in 'buffer', if any.
  // Instead, we allocate a new object to carry information from Prepare() to
  // Eval().
  return new OpData;
}

void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) {
  return new SoftmaxOpData;
}

void SoftmaxFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<SoftmaxOpData*>(buffer);
}

void* LogSoftmaxInit(TfLiteContext* context, const char* buffer,
                     size_t length) {
  return new LogSoftmaxOpData;
}

void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) {
  return new PreluOpData;
}

void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

void LogSoftmaxFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<LogSoftmaxOpData*>(buffer);
}

void PreluFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<PreluOpData*>(buffer);
}

void* HardSwishInit(TfLiteContext* context, const char* buffer,
                    size_t length) {
  return new HardSwishData;
}

// Shared Prepare: one input, one output, identical types, output resized to
// the input's shape.
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) {
  return new ReluOpData;
}

void ReluFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<ReluOpData*>(buffer);
}

// Relu Prepare: like GenericPrepare, plus precomputing the quantized rescale
// factor (input_scale / output_scale) for 8-bit types.
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) {
    double real_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                       &data->output_shift);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

void* LeakyReluInit(TfLiteContext* context, const char* buffer,
                    size_t length) {
  return new LeakyReluOpData;
}

void LeakyReluFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<LeakyReluOpData*>(buffer);
}

void HardSwishFree(TfLiteContext* context, void* buffer) {
  delete static_cast<HardSwishData*>(buffer);
}

// HardSwish Prepare: for 8-bit types, precomputes fixed-point multipliers for
// both the output rescale and the internal "reluish" rescale.
TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_STATUS(GenericPrepare(context, node));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
    HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
    HardSwishParams* params = &data->params;
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
    params->input_zero_point = input->params.zero_point;
    params->output_zero_point = output->params.zero_point;
    const float input_scale = input->params.scale;
    // Internal high-resolution representation uses 1/128 of the input scale.
    const float hires_input_scale = (1.0f / 128.0f) * input_scale;
    const float reluish_scale = 3.0f / 32768.0f;
    const float output_scale = output->params.scale;
    const float output_multiplier = hires_input_scale / output_scale;
    int32_t output_multiplier_fixedpoint_int32;
    QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
                       &params->output_multiplier_exponent);
    DownScaleInt32ToInt16Multiplier(
        output_multiplier_fixedpoint_int32,
        &params->output_multiplier_fixedpoint_int16);
    TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);
    const float reluish_multiplier = hires_input_scale / reluish_scale;
    int32_t
        reluish_multiplier_fixedpoint_int32;
    QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32,
                       &params->reluish_multiplier_exponent);
    DownScaleInt32ToInt16Multiplier(
        reluish_multiplier_fixedpoint_int32,
        &params->reluish_multiplier_fixedpoint_int16);
  }
  return kTfLiteOk;
}

// LeakyRelu Prepare: precomputes quantized multipliers for the alpha branch
// (x < 0) and identity branch (x >= 0) when the output is quantized.
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data);

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    const auto* params =
        reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);

    double alpha_multiplier =
        input->params.scale * params->alpha / output->params.scale;
    QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
                       &data->output_shift_alpha);
    double identity_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
                       &data->output_shift_identity);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

// Tanh Prepare: depending on kernel type and input type, sets up either the
// 16-bit fixed-point parameters, the 8-bit lookup table, or the int16
// power-of-two-scale path.
template <KernelType kernel_type>
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8
        || input->type == kTfLiteInt8) {
      static constexpr int kInputIntegerBits = 4;

      // Express the input scale in Q(15 - kInputIntegerBits) fixed point:
      // split into a normalized mantissa (input_multiplier) and an exponent
      // (input_left_shift).
      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));

      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);

      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }

  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    // 8-bit paths precompute a 256-entry tanh lookup table.
    if (input->type == kTfLiteUInt8) {
      PopulateLookupTable<uint8_t>(
          data, input, output, [](float value) { return std::tanh(value); });
    } else if (input->type == kTfLiteInt8) {
      PopulateLookupTable<int8_t>(data, input, output,
                                  [](float value) { return std::tanh(value); });
    }
  }

  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;

    // These operators are implemented in fixed-point arithmetic,
    // which intrinsically wants symmetric ranges (zero_point==0)
    // and power-of-two scales (power-of-two is abbreviated below as POT).
    // While more general support would be possible by means of rescaling,
    // that would add some overhead and some loss of accuracy and wouldn't
    // be used at the moment as current quantized LSTM applications are
    // happy with symmetric, power-of-two-scales quantization. So we just
    // implement that narrow case only for now.
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);

    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &=
        (data->input_left_shift == 0 || data->input_left_shift == 1);

    if (!param_scale_pot) {
      // In case of general scale parameter, we need to do a rescaling.
      // Magic constant 4096:
      // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval
      // from 16-bit (-2^15, 2^15),
      // so we need to multiply by
      // 2^(15 - kInputIntegerBits) = 2^12 = 4096.
      data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096);
    }

    int output_scale_log2_rounded;
    TF_LITE_ENSURE(
        context, CheckedLog2(output->params.scale, &output_scale_log2_rounded));
    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
                      -kOutputFractionalBits);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

// Sigmoid Prepare: mirrors TanhPrepare, with the fixed 1/256 (8-bit) or
// 1/32768 (int16) output scale required by the logistic function.
template <KernelType kernel_type>
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
      if (input->type == kTfLiteUInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<uint8_t>::min());
      }
      if (input->type == kTfLiteInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<int8_t>::min());
      }
      TF_LITE_ENSURE(context, output->params.scale == 1.
                                  / 256);

      static constexpr int kInputIntegerBits = 4;

      // Same mantissa/exponent decomposition as in TanhPrepare.
      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));

      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);

      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }

  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    // 8-bit paths precompute a 256-entry logistic lookup table; the int16
    // path only validates the required fixed quantization.
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      PopulateLookupTable<uint8_t>(data, input, output, [](float value) {
        return 1.0f / (1.0f + std::exp(-value));
      });
    } else if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      PopulateLookupTable<int8_t>(data, input, output, [](float value) {
        return 1.0f / (1.0f + std::exp(-value));
      });
    } else if (input->type == kTfLiteInt16) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 32768);
      TF_LITE_ENSURE(context, output->params.zero_point == 0);
    }
  }

  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;

    // See comments in TanhPrepare about requiring zero_point==0
    // and a power-of-two ("POT") scale.
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);

    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &= (data->input_left_shift == 0);

    if (!param_scale_pot) {
      // In case of general scale parameter, we need to do a rescaling.
// Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. 
        data->params.uint8_table1 = data->uint8_table1;
        data->params.uint8_table2 = data->uint8_table2;
        optimized_ops::PopulateSoftmaxUInt8LookupTable(
            &data->params, input->params.scale, params->beta);
        break;
#endif
      // Without the uint16 LUT build flag, 8-bit outputs deliberately fall
      // through to the generic float lookup table below.
      case kTfLiteInt16:
      default:
        data->params.table = data->table;
        optimized_ops::PopulateSoftmaxLookupTable(
            &data->params, input->params.scale, params->beta);
    }

    data->params.zero_point = output->params.zero_point;
    data->params.scale = output->params.scale;
  }

  if (input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    data->params.exp_lut = data->exp_lut;
    // exp LUT only used on negative values
    // we consider exp(-10.0) is insignificant to accumulation
    gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0,
            data->params.exp_lut, data->kInt16LUTArraySize);
    data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut;
    gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0,
            data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize);
    data->params.zero_point = output->params.zero_point;
    data->params.scale = output->params.scale;

    double input_scale_beta_rescale =
        input->params.scale * params->beta /
        (10.0 / 65535.0);  // scale the input_diff such that [-65535, 0]
                           // correspond to [-10.0, 0.0]
    QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier,
                       &data->params.input_left_shift);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

// LogSoftmax Prepare: output quantization is fixed (scale 16/256), and the
// per-type scaling parameters are precomputed here.
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (input->type == kTfLiteUInt8
      || input->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256);
    static const double kBeta = 1.0;
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255);
      data->params.table = data->f_table;
      optimized_ops::PopulateSoftmaxLookupTable(&data->params,
                                                input->params.scale, kBeta);
      data->params.zero_point = output->params.zero_point;
      data->params.scale = output->params.scale;
    }
    if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127);
      static const int kScaledDiffIntegerBits = 5;
      tflite::PreprocessLogSoftmaxScalingExp(
          kBeta, input->params.scale, kScaledDiffIntegerBits,
          &data->input_multiplier, &data->input_left_shift,
          &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift);
      data->reverse_scaling_right_shift *= -1;
      data->diff_min =
          -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits,
                                              data->input_left_shift);
    }
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

// PRelu Prepare: validates input/alpha types, precomputes the quantized
// multipliers for the positive and negative branches, and resizes the output
// to the broadcast shape of input and alpha.
TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* alpha;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));
  PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);

  TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type);
  output->type = input->type;

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    // prelu(x) = x if x >= 0 else x * alpha.
// So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // ouput_q = (input_q - input_zp) * input_scale / output_scale + output_q // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_q // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale/ output_scale. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parameteric Relu) shares the same alpha value on "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape. 
  TF_LITE_ENSURE(context, HaveSameShapes(input, output));

  return kTfLiteOk;
}

// Relu Eval: float path uses optimized_ops::Relu; quantized paths clamp in
// the quantized domain via QuantizedReluX.
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input),
                          GetTensorShape(output), GetTensorData<float>(output));
    } break;
    // TODO(renjieliu): We may revisit the quantization calculation logic,
    // the unbounded upper limit is actually hard to quantize.
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(),
                              input, output, data);
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(),
                             input, output, data);
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Only float32 & int8/uint8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}

// Relu1 Eval: clamps to [-1, 1].
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input),
                           GetTensorShape(output),
                           GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(-1, 1, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int8 supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// HardSwish Eval: dispatches to the reference or optimized kernel per input
// type.
template <KernelType kernel_type>
TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
  HardSwishData* data = static_cast<HardSwishData*>(node->user_data);

  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Relu6 Eval: clamps to [0, 6].
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, 0, &output));
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      size_t elements = input->bytes / sizeof(float);
      const float* in = GetTensorData<float>(input);
      const float* in_end = in + elements;
      float* out = GetTensorData<float>(output);
      for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f);
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8:
      QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Tanh Eval: float, int16 fixed-point, and 8-bit (lookup-table or 16-bit
// precision fixed-point) paths.
template <KernelType kernel_type>
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      } else {
        optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      TanhParams params;
      params.input_left_shift = data->input_left_shift;
      // A positive input_multiplier means TanhPrepare chose the general
      // (non power-of-two scale) rescaling path.
      if (kernel_type == kReference || (data->input_multiplier > 0)) {
        reference_integer_ops::Tanh(
            data->input_multiplier, data->input_left_shift,
            GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      } else {
        optimized_ops::Tanh(
            params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output),
            GetTensorData<int16_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        // Generic/reference kernels use the 256-entry table built in
        // TanhPrepare.
        EvalUsingLookupTable(data, input, output);
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        EvalUsingLookupTable(data, input, output);
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int16 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Sigmoid is also known as "Logistic".
template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; 
optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else 
optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, 
got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( 
data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); 
} else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), 
GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = data->output_shift_alpha; op_params.output_multiplier_identity = data->output_multiplier_identity; op_params.output_shift_identity = data->output_shift_identity; reference_ops::QuantizeLeakyRelu( op_params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 is supported currently, got %s.", 
TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = 
{activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. 
/ 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. 
/ 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
{'added': [(255, ' const TfLiteTensor* input;'), (256, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (257, ' TfLiteTensor* output;'), (258, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (277, ' const TfLiteTensor* input;'), (278, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (279, ' TfLiteTensor* output;'), (280, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (307, ' TfLiteTensor* output;'), (308, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (313, ' const TfLiteTensor* input;'), (314, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (346, ' const TfLiteTensor* input;'), (347, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (348, ' TfLiteTensor* output;'), (349, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (377, ' const TfLiteTensor* input;'), (378, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (379, ' TfLiteTensor* output;'), (380, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (464, ' const TfLiteTensor* input;'), (465, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (466, ' TfLiteTensor* output;'), (467, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (561, ' const TfLiteTensor* input;'), (562, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (563, ' TfLiteTensor* output;'), (564, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (631, ' const TfLiteTensor* input;'), (632, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (633, ' TfLiteTensor* output;'), (634, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (669, ' const TfLiteTensor* input;'), (670, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (671, ' TfLiteTensor* 
output;'), (672, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (673, ' const TfLiteTensor* alpha;'), (674, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (726, ' const TfLiteTensor* input;'), (727, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (728, ' TfLiteTensor* output;'), (729, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (756, ' const TfLiteTensor* input;'), (757, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (758, ' TfLiteTensor* output;'), (759, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (789, ' const TfLiteTensor* input;'), (790, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (791, ' TfLiteTensor* output;'), (792, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (842, ' const TfLiteTensor* input;'), (843, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (844, ' TfLiteTensor* output;'), (845, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (875, ' const TfLiteTensor* input;'), (876, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (877, ' TfLiteTensor* output;'), (878, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (951, ' const TfLiteTensor* input;'), (952, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (953, ' TfLiteTensor* output;'), (954, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1101, ' const TfLiteTensor* input;'), (1102, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1103, ' TfLiteTensor* output;'), (1104, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1158, ' const TfLiteTensor* input;'), (1159, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1160, ' TfLiteTensor* output;'), (1161, ' 
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1221, ' const TfLiteTensor* input;'), (1222, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1223, ' const TfLiteTensor* alpha;'), (1224, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (1225, ' TfLiteTensor* output;'), (1226, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1335, ' const TfLiteTensor* input;'), (1336, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1337, ' TfLiteTensor* output;'), (1338, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1375, ' const TfLiteTensor* input;'), (1376, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1377, ' TfLiteTensor* output;'), (1378, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1391, ' const TfLiteTensor* input;'), (1392, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1393, ' TfLiteTensor* output;'), (1394, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));')], 'deleted': [(255, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (256, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (275, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (276, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (303, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (308, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (340, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (341, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (369, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (370, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (454, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (455, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (549, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (550, ' 
TfLiteTensor* output = GetOutput(context, node, 0);'), (617, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (618, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (653, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (654, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (655, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (707, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (708, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (735, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (736, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (766, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (767, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (817, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (818, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (848, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (849, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (922, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (923, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1070, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1071, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1125, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1126, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1186, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1187, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (1188, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1297, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1298, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1335, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1336, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1349, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1350, ' TfLiteTensor* output = 
GetOutput(context, node, 0);')]}
88
44
1,316
9,729
70
643
13
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
1,162
ikev2_parent.c
C
ikev2parent_inI2outR2
/* * IKEv2 parent SA creation routines * Copyright (C) 2007-2008 Michael Richardson <mcr@xelerance.com> * Copyright (C) 2008-2011 Paul Wouters <paul@xelerance.com> * Copyright (C) 2008 Antony Antony <antony@xelerance.com> * Copyright (C) 2008-2009 David McCullough <david_mccullough@securecomputing.com> * Copyright (C) 2010,2012 Avesh Agarwal <avagarwa@redhat.com> * Copyright (C) 2010 Tuomo Soini <tis@foobar.fi * Copyright (C) 2012 Paul Wouters <pwouters@redhat.com> * Copyright (C) 2012 Antony Antony <antony@phenome.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* */ #include <stdio.h> #include <string.h> #include <stddef.h> #include <stdlib.h> #include <unistd.h> #include <gmp.h> #include <libreswan.h> #include <libreswan/ipsec_policy.h> #include "sysdep.h" #include "constants.h" #include "defs.h" #include "state.h" #include "id.h" #include "connections.h" #include "crypto.h" /* requires sha1.h and md5.h */ #include "x509.h" #include "x509more.h" #include "ike_alg.h" #include "kernel_alg.h" #include "plutoalg.h" #include "pluto_crypt.h" #include "packet.h" #include "demux.h" #include "ikev2.h" #include "log.h" #include "spdb.h" /* for out_sa */ #include "ipsec_doi.h" #include "vendor.h" #include "timer.h" #include "ike_continuations.h" #include "cookie.h" #include "rnd.h" #include "pending.h" #include "kernel.h" #define SEND_NOTIFICATION_AA(t, d) \ if (st) \ send_v2_notification_from_state(st, st->st_state, t, d); \ else \ send_v2_notification_from_md(md, t, d); #define SEND_NOTIFICATION(t) \ if (st) \ send_v2_notification_from_state(st, st->st_state, t, NULL); \ else \ send_v2_notification_from_md(md, t, NULL); static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_outI1_tail(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); static bool ikev2_get_dcookie(u_char *dcookie, chunk_t st_ni, ip_address *addr, u_int8_t *spiI); static stf_status ikev2_parent_outI1_common(struct msg_digest *md, struct state *st); static int build_ike_version(); /* * *************************************************************** ***** PARENT_OUTI1 ***** *************************************************************** * * * Initiate an Oakley Main Mode exchange. * HDR, SAi1, KEi, Ni --> * * Note: this is not called from demux.c, but from ipsecdoi_initiate(). 
* */ stf_status ikev2parent_outI1(int whack_sock, struct connection *c, struct state *predecessor, lset_t policy, unsigned long try, enum crypto_importance importance #ifdef HAVE_LABELED_IPSEC , struct xfrm_user_sec_ctx_ike * uctx #endif ) { struct state *st = new_state(); struct db_sa *sadb; int groupnum; int policy_index = POLICY_ISAKMP(policy, c->spd.this.xauth_server, c->spd.this.xauth_client); /* set up new state */ get_cookie(TRUE, st->st_icookie, COOKIE_SIZE, &c->spd.that.host_addr); initialize_new_state(st, c, policy, try, whack_sock, importance); st->st_ikev2 = TRUE; change_state(st, STATE_PARENT_I1); st->st_msgid_lastack = INVALID_MSGID; st->st_msgid_nextuse = 0; st->st_try = try; if (HAS_IPSEC_POLICY(policy)) { #ifdef HAVE_LABELED_IPSEC st->sec_ctx = NULL; if ( uctx != NULL) libreswan_log( "Labeled ipsec is not supported with ikev2 yet"); #endif add_pending(dup_any( whack_sock), st, c, policy, 1, predecessor == NULL ? SOS_NOBODY : predecessor->st_serialno #ifdef HAVE_LABELED_IPSEC , st->sec_ctx #endif ); } if (predecessor == NULL) libreswan_log("initiating v2 parent SA"); else libreswan_log("initiating v2 parent SA to replace #%lu", predecessor->st_serialno); if (predecessor != NULL) { update_pending(predecessor, st); whack_log(RC_NEW_STATE + STATE_PARENT_I1, "%s: initiate, replacing #%lu", enum_name(&state_names, st->st_state), predecessor->st_serialno); } else { whack_log(RC_NEW_STATE + STATE_PARENT_I1, "%s: initiate", enum_name(&state_names, st->st_state)); } /* * now, we need to initialize st->st_oakley, specifically, the group * number needs to be initialized. 
*/ groupnum = 0; st->st_sadb = &oakley_sadb[policy_index]; sadb = oakley_alg_makedb(st->st_connection->alg_info_ike, st->st_sadb, 0); if (sadb != NULL) st->st_sadb = sadb; sadb = st->st_sadb = sa_v2_convert(st->st_sadb); { unsigned int pc_cnt; /* look at all the proposals */ if (st->st_sadb->prop_disj != NULL) { for (pc_cnt = 0; pc_cnt < st->st_sadb->prop_disj_cnt && groupnum == 0; pc_cnt++) { struct db_v2_prop *vp = &st->st_sadb->prop_disj[pc_cnt]; unsigned int pr_cnt; /* look at all the proposals */ if (vp->props != NULL) { for (pr_cnt = 0; pr_cnt < vp->prop_cnt && groupnum == 0; pr_cnt++) { unsigned int ts_cnt; struct db_v2_prop_conj *vpc = &vp->props[pr_cnt]; for (ts_cnt = 0; ts_cnt < vpc->trans_cnt && groupnum == 0; ts_cnt++) { struct db_v2_trans *tr = &vpc-> trans[ ts_cnt ]; if (tr != NULL && tr->transform_type == IKEv2_TRANS_TYPE_DH) { groupnum = tr-> transid; } } } } } } } if (groupnum == 0) groupnum = OAKLEY_GROUP_MODP2048; st->st_oakley.group = lookup_group(groupnum); st->st_oakley.groupnum = groupnum; /* now. 
we need to go calculate the nonce, and the KE */ { struct ke_continuation *ke = alloc_thing( struct ke_continuation, "ikev2_outI1 KE"); stf_status e; ke->md = alloc_md(); ke->md->from_state = STATE_IKEv2_BASE; ke->md->svm = ikev2_parent_firststate(); ke->md->st = st; set_suspended(st, ke->md); if (!st->st_sec_in_use) { pcrc_init(&ke->ke_pcrc); ke->ke_pcrc.pcrc_func = ikev2_parent_outI1_continue; e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group, importance); if ( (e != STF_SUSPEND && e != STF_INLINE) || (e == STF_TOOMUCHCRYPTO)) { loglog(RC_CRYPTOFAILED, "system too busy - Enabling dcookies [TODO]"); delete_state(st); } } else { e = ikev2_parent_outI1_tail( (struct pluto_crypto_req_cont *)ke, NULL); } reset_globals(); return e; } } static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent outI1: calculated ke+nonce, sending I1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_outI1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_cur_state(); reset_globals(); passert(GLOBALS_ARE_RESET()); } /* * unpack the calculate KE value, store it in state. 
* used by IKEv2: parent, child (PFS) */ static int unpack_v2KE(struct state *st, struct pluto_crypto_req *r, chunk_t *g) { struct pcr_kenonce *kn = &r->pcr_d.kn; unpack_KE(st, r, g); return kn->oakley_group; } /* * package up the calculate KE value, and emit it as a KE payload. * used by IKEv2: parent, child (PFS) */ static bool justship_v2KE(struct state *st UNUSED, chunk_t *g, unsigned int oakley_group, pb_stream *outs, u_int8_t np) { struct ikev2_ke v2ke; pb_stream kepbs; memset(&v2ke, 0, sizeof(v2ke)); v2ke.isak_np = np; v2ke.isak_group = oakley_group; if (!out_struct(&v2ke, &ikev2_ke_desc, outs, &kepbs)) return FALSE; if (!out_chunk(*g, &kepbs, "ikev2 g^x")) return FALSE; close_output_pbs(&kepbs); return TRUE; } static bool ship_v2KE(struct state *st, struct pluto_crypto_req *r, chunk_t *g, pb_stream *outs, u_int8_t np) { int oakley_group = unpack_v2KE(st, r, g); return justship_v2KE(st, g, oakley_group, outs, np); } static stf_status ikev2_parent_outI1_tail(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; unpack_v2KE(st, r, &st->st_gi); unpack_nonce(&st->st_ni, r); return ikev2_parent_outI1_common(md, st); } static stf_status ikev2_parent_outI1_common(struct msg_digest *md, struct state *st) { struct connection *c = st->st_connection; int numvidtosend = 0; /* set up reply */ init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr hdr; zero(&hdr); /* default to 0 */ /* Impair function will raise major/minor by 1 for testing */ hdr.isa_version = build_ike_version(); if (st->st_dcookie.ptr) hdr.isa_np = ISAKMP_NEXT_v2N; else hdr.isa_np = ISAKMP_NEXT_v2SA; hdr.isa_xchg = ISAKMP_v2_SA_INIT; hdr.isa_flags = ISAKMP_FLAGS_I; memcpy(hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); /* R-cookie, are left zero */ if (!out_struct(&hdr, &isakmp_hdr_desc, &reply_stream, 
&md->rbody)) { reset_cur_state(); return STF_INTERNAL_ERROR; } } /* send an anti DOS cookie, 4306 2.6, if we have received one from the * responder */ if (st->st_dcookie.ptr) { chunk_t child_spi; memset(&child_spi, 0, sizeof(child_spi)); ship_v2N(ISAKMP_NEXT_v2SA, DBGP( IMPAIR_SEND_BOGUS_ISAKMP_FLAG) ? (ISAKMP_PAYLOAD_NONCRITICAL | ISAKMP_PAYLOAD_LIBRESWAN_BOGUS) : ISAKMP_PAYLOAD_NONCRITICAL, PROTO_ISAKMP, &child_spi, v2N_COOKIE, &st->st_dcookie, &md->rbody); } /* SA out */ { u_char *sa_start = md->rbody.cur; if (st->st_sadb->prop_disj_cnt == 0 || st->st_sadb->prop_disj) st->st_sadb = sa_v2_convert(st->st_sadb); if (!ikev2_out_sa(&md->rbody, PROTO_ISAKMP, st->st_sadb, st, TRUE, /* parentSA */ ISAKMP_NEXT_v2KE)) { libreswan_log("outsa fail"); reset_cur_state(); return STF_INTERNAL_ERROR; } /* save initiator SA for later HASH */ if (st->st_p1isa.ptr == NULL) { /* no leak! (MUST be first time) */ clonetochunk(st->st_p1isa, sa_start, md->rbody.cur - sa_start, "sa in main_outI1"); } } /* send KE */ if (!justship_v2KE(st, &st->st_gi, st->st_oakley.groupnum, &md->rbody, ISAKMP_NEXT_v2Ni)) return STF_INTERNAL_ERROR; /* * Check which Vendor ID's we need to send - there will be more soon * In IKEv2, DPD and NAT-T are no longer vendorid's */ if (c->send_vendorid) { numvidtosend++; /* if we need to send Libreswan VID */ } /* send NONCE */ { int np = numvidtosend > 0 ? 
ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; struct ikev2_generic in; pb_stream pb; memset(&in, 0, sizeof(in)); in.isag_np = np; in.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); in.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } if (!out_struct(&in, &ikev2_nonce_desc, &md->rbody, &pb) || !out_raw(st->st_ni.ptr, st->st_ni.len, &pb, "IKEv2 nonce")) return STF_INTERNAL_ERROR; close_output_pbs(&pb); } /* Send Vendor VID if needed */ if (c->send_vendorid) { const char *myvid = ipsec_version_vendorid(); int np = --numvidtosend > 0 ? ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; if (!out_generic_raw(np, &isakmp_vendor_id_desc, &md->rbody, myvid, strlen(myvid), "Vendor ID")) return STF_INTERNAL_ERROR; /* ensure our VID chain was valid */ passert(numvidtosend == 0); } close_message(&md->rbody, st); close_output_pbs(&reply_stream); freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_outI1_tail"); /* save packet for later signing */ freeanychunk(st->st_firstpacket_me); clonetochunk(st->st_firstpacket_me, reply_stream.start, pbs_offset(&reply_stream), "saved first packet"); /* Transmit */ send_ike_msg(st, __FUNCTION__); delete_event(st); event_schedule(EVENT_v2_RETRANSMIT, EVENT_RETRANSMIT_DELAY_0, st); reset_cur_state(); return STF_OK; } /* * *************************************************************** * PARENT_INI1 ***** *************************************************************** * - * * */ static void ikev2_parent_inI1outR1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_inI1outR1_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); stf_status ikev2parent_inI1outR1(struct msg_digest *md) { struct state *st = md->st; lset_t policy = POLICY_IKEV2_ALLOW; struct connection *c = 
find_host_connection(&md->iface->ip_addr, md->iface->port, &md->sender, md->sender_port, POLICY_IKEV2_ALLOW); /* retrieve st->st_gi */ #if 0 if (c == NULL) { /* * make up a policy from the thing that was proposed, and see * if we can find a connection with that policy. */ pb_stream pre_sa_pbs = sa_pd->pbs; policy = preparse_isakmp_sa_body(&pre_sa_pbs); c = find_host_connection(&md->iface->ip_addr, pluto_port, (ip_address*)NULL, md->sender_port, policy); } #endif if (c == NULL) { /* See if a wildcarded connection can be found. * We cannot pick the right connection, so we're making a guess. * All Road Warrior connections are fair game: * we pick the first we come across (if any). * If we don't find any, we pick the first opportunistic * with the smallest subnet that includes the peer. * There is, of course, no necessary relationship between * an Initiator's address and that of its client, * but Food Groups kind of assumes one. */ { struct connection *d; d = find_host_connection(&md->iface->ip_addr, pluto_port, (ip_address*)NULL, md->sender_port, policy); for (; d != NULL; d = d->hp_next) { if (d->kind == CK_GROUP) { /* ignore */ } else { if (d->kind == CK_TEMPLATE && !(d->policy & POLICY_OPPO)) { /* must be Road Warrior: we have a winner */ c = d; break; } /* Opportunistic or Shunt: pick tightest match */ if (addrinsubnet(&md->sender, &d->spd.that.client) && (c == NULL || !subnetinsubnet(&c->spd.that. client, &d->spd.that. client))) c = d; } } } if (c == NULL) { loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u" " but no connection has been authorized%s%s", ip_str( &md->iface->ip_addr), ntohs(portof(&md->iface->ip_addr)), (policy != LEMPTY) ? " with policy=" : "", (policy != LEMPTY) ? 
bitnamesof(sa_policy_bit_names, policy) : ""); return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN; } if (c->kind != CK_TEMPLATE) { loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u" " but \"%s\" forbids connection", ip_str( &md->iface->ip_addr), pluto_port, c->name); return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN; } c = rw_instantiate(c, &md->sender, NULL, NULL); } else { /* we found a non-wildcard conn. double check if it needs instantiation anyway (eg vnet=) */ /* vnet=/vhost= should have set CK_TEMPLATE on connection loading */ if ((c->kind == CK_TEMPLATE) && c->spd.that.virt) { DBG(DBG_CONTROL, DBG_log( "local endpoint has virt (vnet/vhost) set without wildcards - needs instantiation")); c = rw_instantiate(c, &md->sender, NULL, NULL); } else if ((c->kind == CK_TEMPLATE) && (c->policy & POLICY_IKEV2_ALLOW_NARROWING)) { DBG(DBG_CONTROL, DBG_log( "local endpoint has narrowing=yes - needs instantiation")); c = rw_instantiate(c, &md->sender, NULL, NULL); } } DBG_log("found connection: %s\n", c ? c->name : "<none>"); if (!st) { st = new_state(); /* set up new state */ memcpy(st->st_icookie, md->hdr.isa_icookie, COOKIE_SIZE); /* initialize_new_state expects valid icookie/rcookie values, so create it now */ get_cookie(FALSE, st->st_rcookie, COOKIE_SIZE, &md->sender); initialize_new_state(st, c, policy, 0, NULL_FD, pcim_stranger_crypto); st->st_ikev2 = TRUE; change_state(st, STATE_PARENT_R1); st->st_msgid_lastack = INVALID_MSGID; st->st_msgid_nextuse = 0; md->st = st; md->from_state = STATE_IKEv2_BASE; } /* check,as a responder, are we under dos attack or not * if yes go to 6 message exchange mode. it is a config option for now. * TBD set force_busy dynamically * Paul: Can we check for STF_TOOMUCHCRYPTO ? 
*/ if (force_busy == TRUE) { u_char dcookie[SHA1_DIGEST_SIZE]; chunk_t dc; ikev2_get_dcookie( dcookie, st->st_ni, &md->sender, st->st_icookie); dc.ptr = dcookie; dc.len = SHA1_DIGEST_SIZE; /* check if I1 packet contian KE and a v2N payload with type COOKIE */ if ( md->chain[ISAKMP_NEXT_v2KE] && md->chain[ISAKMP_NEXT_v2N] && (md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type == v2N_COOKIE)) { u_int8_t spisize; const pb_stream *dc_pbs; chunk_t blob; DBG(DBG_CONTROLMORE, DBG_log("received a DOS cookie in I1 verify it")); /* we received dcookie we send earlier verify it */ spisize = md->chain[ISAKMP_NEXT_v2N]->payload.v2n. isan_spisize; dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs; blob.ptr = dc_pbs->cur + spisize; blob.len = pbs_left(dc_pbs) - spisize; DBG(DBG_CONTROLMORE, DBG_dump_chunk("dcookie received in I1 Packet", blob); DBG_dump("dcookie computed", dcookie, SHA1_DIGEST_SIZE)); if (memcmp(blob.ptr, dcookie, SHA1_DIGEST_SIZE) != 0) { libreswan_log( "mismatch in DOS v2N_COOKIE,send a new one"); SEND_NOTIFICATION_AA(v2N_COOKIE, &dc); return STF_FAIL + v2N_INVALID_IKE_SPI; } DBG(DBG_CONTROLMORE, DBG_log("dcookie received match with computed one")); } else { /* we are under DOS attack I1 contains no DOS COOKIE */ DBG(DBG_CONTROLMORE, DBG_log( "busy mode on. receieved I1 without a valid dcookie"); DBG_log("send a dcookie and forget this state")); SEND_NOTIFICATION_AA(v2N_COOKIE, &dc); return STF_FAIL; } } else { DBG(DBG_CONTROLMORE, DBG_log("will not send/process a dcookie")); } /* * We have to agree to the DH group before we actually know who * we are talking to. If we support the group, we use it. * * It is really too hard here to go through all the possible policies * that might permit this group. If we think we are being DOS'ed * then we should demand a cookie. 
*/ { struct ikev2_ke *ke; ke = &md->chain[ISAKMP_NEXT_v2KE]->payload.v2ke; st->st_oakley.group = lookup_group(ke->isak_group); if (st->st_oakley.group == NULL) { char fromname[ADDRTOT_BUF]; addrtot(&md->sender, 0, fromname, ADDRTOT_BUF); libreswan_log( "rejecting I1 from %s:%u, invalid DH group=%u", fromname, md->sender_port, ke->isak_group); return v2N_INVALID_KE_PAYLOAD; } } /* now. we need to go calculate the nonce, and the KE */ { struct ke_continuation *ke = alloc_thing( struct ke_continuation, "ikev2_inI1outR1 KE"); stf_status e; ke->md = md; set_suspended(st, ke->md); if (!st->st_sec_in_use) { pcrc_init(&ke->ke_pcrc); ke->ke_pcrc.pcrc_func = ikev2_parent_inI1outR1_continue; e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group, pcim_stranger_crypto); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } } else { e = ikev2_parent_inI1outR1_tail((struct pluto_crypto_req_cont *)ke, NULL); } reset_globals(); return e; } } static void ikev2_parent_inI1outR1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI1outR1: calculated ke+nonce, sending R1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI1outR1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_globals(); passert(GLOBALS_ARE_RESET()); } static stf_status ikev2_parent_inI1outR1_tail( struct pluto_crypto_req_cont 
*pcrc, struct pluto_crypto_req *r) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; struct state *const st = md->st; struct connection *c = st->st_connection; pb_stream *keyex_pbs; int numvidtosend = 0; if (c->send_vendorid) { numvidtosend++; /* we send Libreswan VID */ } /* note that we don't update the state here yet */ /* record first packet for later checking of signature */ clonetochunk(st->st_firstpacket_him, md->message_pbs.start, pbs_offset( &md->message_pbs), "saved first received packet"); /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); r_hdr.isa_np = ISAKMP_NEXT_v2SA; /* major will be same, but their minor might be higher */ r_hdr.isa_version = build_ike_version(); r_hdr.isa_flags &= ~ISAKMP_FLAGS_I; r_hdr.isa_flags |= ISAKMP_FLAGS_R; /* PAUL shouldn't we set r_hdr.isa_msgid = [htonl](st->st_msgid); here? 
*/ if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* start of SA out */ { struct isakmp_sa r_sa = sa_pd->payload.sa; v2_notification_t rn; pb_stream r_sa_pbs; r_sa.isasa_np = ISAKMP_NEXT_v2KE; /* XXX */ if (!out_struct(&r_sa, &ikev2_sa_desc, &md->rbody, &r_sa_pbs)) return STF_INTERNAL_ERROR; /* SA body in and out */ rn = ikev2_parse_parent_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, &r_sa_pbs, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } { v2_notification_t rn; chunk_t dc; keyex_pbs = &md->chain[ISAKMP_NEXT_v2KE]->pbs; /* KE in */ rn = accept_KE(&st->st_gi, "Gi", st->st_oakley.group, keyex_pbs); if (rn != v2N_NOTHING_WRONG) { u_int16_t group_number = htons( st->st_oakley.group->group); dc.ptr = (unsigned char *)&group_number; dc.len = 2; SEND_NOTIFICATION_AA(v2N_INVALID_KE_PAYLOAD, &dc); delete_state(st); return STF_FAIL + rn; } } /* Ni in */ RETURN_STF_FAILURE(accept_v2_nonce(md, &st->st_ni, "Ni")); /* send KE */ if (!ship_v2KE(st, r, &st->st_gr, &md->rbody, ISAKMP_NEXT_v2Nr)) return STF_INTERNAL_ERROR; /* send NONCE */ unpack_nonce(&st->st_nr, r); { int np = numvidtosend > 0 ? ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; struct ikev2_generic in; pb_stream pb; memset(&in, 0, sizeof(in)); in.isag_np = np; in.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); in.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } if (!out_struct(&in, &ikev2_nonce_desc, &md->rbody, &pb) || !out_raw(st->st_nr.ptr, st->st_nr.len, &pb, "IKEv2 nonce")) return STF_INTERNAL_ERROR; close_output_pbs(&pb); } /* Send VendrID if needed VID */ if (c->send_vendorid) { const char *myvid = ipsec_version_vendorid(); int np = --numvidtosend > 0 ? 
ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; if (!out_generic_raw(np, &isakmp_vendor_id_desc, &md->rbody, myvid, strlen(myvid), "Vendor ID")) return STF_INTERNAL_ERROR; } close_message(&md->rbody, st); close_output_pbs(&reply_stream); /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_inI1outR1_tail"); /* save packet for later signing */ freeanychunk(st->st_firstpacket_me); clonetochunk(st->st_firstpacket_me, reply_stream.start, pbs_offset(&reply_stream), "saved first packet"); /* note: retransimission is driven by initiator */ return STF_OK; } /* * *************************************************************** * PARENT_inR1 ***** *************************************************************** * - * * */ static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_inR1outI2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); stf_status ikev2parent_inR1outI2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ pb_stream *keyex_pbs; /* check if the responder replied with v2N with DOS COOKIE */ if ( md->chain[ISAKMP_NEXT_v2N] && md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type == v2N_COOKIE) { u_int8_t spisize; const pb_stream *dc_pbs; DBG(DBG_CONTROLMORE, DBG_log( "inR1OutI2 received a DOS v2N_COOKIE from the responder"); DBG_log("resend the I1 with a cookie payload")); spisize = md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_spisize; dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs; clonetochunk(st->st_dcookie, (dc_pbs->cur + spisize), (pbs_left( dc_pbs) - spisize), "saved received dcookie"); DBG(DBG_CONTROLMORE, DBG_dump_chunk("dcookie received (instead of a R1):", st->st_dcookie); DBG_log("next STATE_PARENT_I1 resend I1 with the dcookie")); md->svm = ikev2_parent_firststate(); change_state(st, 
STATE_PARENT_I1); st->st_msgid_lastack = INVALID_MSGID; md->msgid_received = INVALID_MSGID; /* AAA hack */ st->st_msgid_nextuse = 0; return ikev2_parent_outI1_common(md, st); } /* * If we did not get a KE payload, we cannot continue. There * should be * a Notify telling us why. We inform the user, but continue to try this * connection via regular retransmit intervals. */ if ( md->chain[ISAKMP_NEXT_v2N] && (md->chain[ISAKMP_NEXT_v2KE] == NULL)) { const char *from_state_name = enum_name(&state_names, st->st_state); const u_int16_t isan_type = md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type; libreswan_log("%s: received %s", from_state_name, enum_name(&ikev2_notify_names, isan_type)); return STF_FAIL + isan_type; } else if ( md->chain[ISAKMP_NEXT_v2N]) { DBG(DBG_CONTROL, DBG_log("received a notify..")); } /* * the responder sent us back KE, Gr, Nr, and it's our time to calculate * the shared key values. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inR1: calculating g^{xy} in order to send I2")); /* KE in */ keyex_pbs = &md->chain[ISAKMP_NEXT_v2KE]->pbs; RETURN_STF_FAILURE(accept_KE(&st->st_gr, "Gr", st->st_oakley.group, keyex_pbs)); /* Ni in */ RETURN_STF_FAILURE(accept_v2_nonce(md, &st->st_nr, "Ni")); if (md->chain[ISAKMP_NEXT_v2SA] == NULL) { libreswan_log("No responder SA proposal found"); return v2N_INVALID_SYNTAX; } /* process and confirm the SA selected */ { struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; v2_notification_t rn; /* SA body in and out */ rn = ikev2_parse_parent_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, NULL, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } /* update state */ ikev2_update_counters(md); /* now. 
we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inR1outI2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inR1outI2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, INITIATOR, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } } static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inR1outI2: calculating g^{xy}, sending I2")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (dh->md) release_md(dh->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == dh->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inR1outI2_tail(pcrc, r); if (dh->md != NULL) { complete_v2_state_transition(&dh->md, e); if (dh->md) release_md(dh->md); } reset_globals(); passert(GLOBALS_ARE_RESET()); } static void ikev2_padup_pre_encrypt(struct msg_digest *md, pb_stream *e_pbs_cipher) { struct state *st = md->st; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); /* pads things up to message size boundary */ { size_t blocksize = pst->st_oakley.encrypter->enc_blocksize; char *b = alloca(blocksize); unsigned int i; size_t padding = pad_up(pbs_offset(e_pbs_cipher), blocksize); if (padding == 0) padding = blocksize; for (i = 0; i < padding; i++) b[i] = i; out_raw(b, padding, e_pbs_cipher, "padding and length"); } } static 
/* Reserve room for the integrity checksum (truncated HMAC) at the current
 * output position and return a pointer to it, so the real ICV can be
 * written there after encryption (see ikev2_encrypt_msg below).
 * Returns NULL if the parent (IKE) SA cannot be located or the
 * placeholder zero-fill fails.
 * NOTE(review): reformatted from a collapsed line; tokens unchanged. */
unsigned char *ikev2_authloc(struct msg_digest *md,
			     pb_stream *e_pbs)
{
	unsigned char *b12;
	struct state *st = md->st;
	struct state *pst = st;

	/* for a child SA, the integ algorithm lives on the parent SA */
	if (st->st_clonedfrom != 0) {
		pst = state_with_serialno(st->st_clonedfrom);
		if ( pst == NULL)
			return NULL;
	}

	b12 = e_pbs->cur;
	/* emit placeholder zeros; overwritten later by hmac_final() */
	if (!out_zero(pst->st_oakley.integ_hasher->hash_integ_len,
		      e_pbs, "length of truncated HMAC"))
		return NULL;

	return b12;
}

/* Encrypt the cleartext portion of an IKEv2 SK payload in place and
 * compute the integrity checksum over the message from authstart up to
 * authloc (writing the ICV at authloc).
 * init is OUR role and selects which SK_e/SK_a key halves to use.
 * Keys are always taken from the parent (IKE) SA. */
static stf_status ikev2_encrypt_msg(struct msg_digest *md,
				    enum phase1_role init,
				    unsigned char *authstart,
				    unsigned char *iv,
				    unsigned char *encstart,
				    unsigned char *authloc,
				    pb_stream *e_pbs UNUSED,
				    pb_stream *e_pbs_cipher)
{
	struct state *st = md->st;
	struct state *pst = st;
	chunk_t *cipherkey, *authkey;

	if (st->st_clonedfrom != 0)
		pst = state_with_serialno(st->st_clonedfrom);

	/* sending side's keys: ei/ai for initiator, er/ar for responder */
	if (init == INITIATOR) {
		cipherkey = &pst->st_skey_ei;
		authkey = &pst->st_skey_ai;
	} else {
		cipherkey = &pst->st_skey_er;
		authkey = &pst->st_skey_ar;
	}

	/* encrypt the block */
	{
		size_t blocksize = pst->st_oakley.encrypter->enc_blocksize;
		unsigned char *savediv = alloca(blocksize);
		unsigned int cipherlen = e_pbs_cipher->cur - encstart;

		DBG(DBG_CRYPT,
		    DBG_dump("data before encryption:", encstart,
			     cipherlen));

		/* work on a copy: do_crypt() advances/clobbers the IV */
		memcpy(savediv, iv, blocksize);

		/* now, encrypt */
		(st->st_oakley.encrypter->do_crypt)(encstart,
						    cipherlen,
						    cipherkey->ptr,
						    cipherkey->len,
						    savediv, TRUE);

		DBG(DBG_CRYPT,
		    DBG_dump("data after encryption:", encstart,
			     cipherlen));
	}

	/* okay, authenticate from beginning of IV */
	{
		struct hmac_ctx ctx;

		DBG(DBG_PARSING, DBG_log("Inside authloc"));
		DBG(DBG_CRYPT,
		    DBG_dump("authkey value: ", authkey->ptr, authkey->len));
		hmac_init_chunk(&ctx, pst->st_oakley.integ_hasher, *authkey);
		DBG(DBG_PARSING, DBG_log("Inside authloc after init"));
		hmac_update(&ctx, authstart, authloc - authstart);
		DBG(DBG_PARSING, DBG_log("Inside authloc after update"));
		/* write the real ICV over the zero placeholder */
		hmac_final(authloc, &ctx);
		DBG(DBG_PARSING, DBG_log("Inside authloc after final"));
		DBG(DBG_PARSING, {
			    DBG_dump("data being hmac:", authstart,
				     authloc - authstart);
			    DBG_dump("out calculated auth:", authloc,
				     pst->st_oakley.integ_hasher->
				     hash_integ_len);
		    });
	}

	return STF_OK;
}

/* Verify the integrity checksum of a received IKEv2 SK payload, decrypt
 * its contents in place, strip padding, and hand the cleartext payload
 * chain to ikev2_process_payloads().
 * init is OUR role: as INITIATOR we verify/decrypt with the responder's
 * SK_e/SK_a keys, and vice versa.  Keys come from the parent SA. */
static stf_status ikev2_decrypt_msg(struct msg_digest *md,
				    enum phase1_role init)
{
	struct state *st = md->st;
	unsigned char *encend;
	pb_stream *e_pbs;
	unsigned int np;
	unsigned char *iv;
	chunk_t *cipherkey, *authkey;
	unsigned char *authstart;
	struct state *pst = st;

	if (st->st_clonedfrom != 0)
		pst = state_with_serialno(st->st_clonedfrom);

	/* the PEER's keys — swapped relative to ikev2_encrypt_msg() */
	if (init == INITIATOR) {
		cipherkey = &pst->st_skey_er;
		authkey = &pst->st_skey_ar;
	} else {
		cipherkey = &pst->st_skey_ei;
		authkey = &pst->st_skey_ai;
	}

	e_pbs = &md->chain[ISAKMP_NEXT_v2E]->pbs;
	np = md->chain[ISAKMP_NEXT_v2E]->payload.generic.isag_np;

	authstart = md->packet_pbs.start;
	iv = e_pbs->cur;
	/* the ICV occupies the tail of the encrypted payload */
	encend = e_pbs->roof - pst->st_oakley.integ_hasher->hash_integ_len;

	/* start by checking authenticator */
	{
		unsigned char *b12 = alloca(
			pst->st_oakley.integ_hasher->hash_digest_len);
		struct hmac_ctx ctx;

		hmac_init_chunk(&ctx, pst->st_oakley.integ_hasher, *authkey);
		hmac_update(&ctx, authstart, encend - authstart);
		hmac_final(b12, &ctx);

		DBG(DBG_PARSING, {
			    DBG_dump("data being hmac:", authstart,
				     encend - authstart);
			    DBG_dump("R2 calculated auth:", b12,
				     pst->st_oakley.integ_hasher->
				     hash_integ_len);
			    DBG_dump("R2 provided auth:", encend,
				     pst->st_oakley.integ_hasher->
				     hash_integ_len);
		    });

		/* compare first 96 bits == 12 bytes */
		/* It is not always 96 bytes, it depends upon which integ algo is used*/
		if (memcmp(b12, encend,
			   pst->st_oakley.integ_hasher->hash_integ_len)
		    != 0) {
			libreswan_log("R2 failed to match authenticator");
			return STF_FAIL;
		}
	}

	DBG(DBG_PARSING, DBG_log("authenticator matched"));

	/* decrypt */
	{
		size_t blocksize = pst->st_oakley.encrypter->enc_blocksize;
		unsigned char *encstart = iv + blocksize;
		unsigned int enclen = encend - encstart;
		unsigned int padlen;

		DBG(DBG_CRYPT,
		    DBG_dump("data before decryption:", encstart, enclen));

		/* now, decrypt */
		(pst->st_oakley.encrypter->do_crypt)(encstart,
						     enclen,
						     cipherkey->ptr,
						     cipherkey->len,
						     iv, FALSE);

		/* final octet is the pad length (not counting itself) */
		padlen = encstart[enclen - 1];
		encend = encend - padlen + 1;

		/* NOTE(review): encend is adjusted before this bounds
		 * check, so a hostile padlen briefly yields a pointer
		 * before the buffer; also padlen == enclen+1 passes the
		 * check yet makes enclen - (padlen + 1) wrap — confirm
		 * upstream guarantees rule these out. */
		if (encend < encstart) {
			libreswan_log("invalid pad length: %u", padlen);
			return STF_FAIL;
		}

		DBG(DBG_CRYPT, {
			    DBG_dump("decrypted payload:", encstart, enclen);
			    DBG_log("striping %u bytes as pad",
				    padlen + 1);
		    });

		/* expose only the cleartext, minus pad + pad-length octet */
		init_pbs(&md->clr_pbs, encstart, enclen - (padlen + 1),
			 "cleartext");
	}

	/* parse the decrypted payload chain */
	{
		stf_status ret;

		ret = ikev2_process_payloads(md, &md->clr_pbs,
					     st->st_state, np);
		if (ret != STF_OK)
			return ret;
	}

	return STF_OK;
}

/* Emit the IKEv2 AUTH payload into outpbs.
 * The method (RSA signature vs pre-shared key) follows the connection
 * policy; idhash_out is the prf hash of the sender's ID payload,
 * computed by the caller.  Signing keys come from the parent SA. */
static stf_status ikev2_send_auth(struct connection *c,
				  struct state *st,
				  enum phase1_role role,
				  unsigned int np,
				  unsigned char *idhash_out,
				  pb_stream *outpbs)
{
	struct ikev2_a a;
	pb_stream a_pbs;
	struct state *pst = st;

	if (st->st_clonedfrom != 0)
		pst = state_with_serialno(st->st_clonedfrom);

	a.isaa_critical = ISAKMP_PAYLOAD_NONCRITICAL;
	if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
		libreswan_log(
			" setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload");
		a.isaa_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS;
	}
	a.isaa_np = np;

	if (c->policy & POLICY_RSASIG) {
		a.isaa_type = v2_AUTH_RSA;
	} else if (c->policy & POLICY_PSK) {
		a.isaa_type = v2_AUTH_SHARED;
	} else {
		/* what else is there?... DSS not implemented.
		 */
		return STF_FAIL;
	}

	if (!out_struct(&a, &ikev2_a_desc, outpbs, &a_pbs))
		return STF_INTERNAL_ERROR;

	/* compute and emit the auth data with the parent SA's keys */
	if (c->policy & POLICY_RSASIG) {
		if (!ikev2_calculate_rsa_sha1(pst, role, idhash_out, &a_pbs))
			return STF_FATAL + v2N_AUTHENTICATION_FAILED;
	} else if (c->policy & POLICY_PSK) {
		if (!ikev2_calculate_psk_auth(pst, role, idhash_out, &a_pbs))
			return STF_FAIL + v2N_AUTHENTICATION_FAILED;
	}

	close_output_pbs(&a_pbs);
	return STF_OK;
}

/* Crypto-continuation tail for PARENT I2: the DH computation is done,
 * so assemble and send the encrypted IKE_AUTH request
 * (IDi [CERT] AUTH [SAi2 TSi TSr] ...).
 * A child state is cloned from the parent here and md->st is switched
 * to it for the rest of the exchange. */
static stf_status ikev2_parent_inR1outI2_tail(
	struct pluto_crypto_req_cont *pcrc,
	struct pluto_crypto_req *r)
{
	struct dh_continuation *dh = (struct dh_continuation *)pcrc;
	struct msg_digest *md = dh->md;
	struct state *st = md->st;
	struct connection *c = st->st_connection;
	struct ikev2_generic e;
	unsigned char *encstart;
	pb_stream e_pbs, e_pbs_cipher;
	unsigned char *iv;
	int ivsize;
	stf_status ret;
	unsigned char *idhash;
	unsigned char *authstart;
	struct state *pst = st;
	bool send_cert = FALSE;

	/* extract the computed g^xy etc. from the crypto helper result */
	finish_dh_v2(st, r);

	if (DBGP(DBG_PRIVATE) && DBGP(DBG_CRYPT))
		ikev2_log_parentSA(st);

	/* clone a child state from the parent; md->st now follows it */
	pst = st;
	st = duplicate_state(pst);
	st->st_msgid = htonl(pst->st_msgid_nextuse); /* PAUL: note ordering */
	insert_state(st);
	md->st = st;
	md->pst = pst;

	/* parent had crypto failed, replace it with rekey!
*/ delete_event(pst); event_schedule(EVENT_SA_REPLACE, c->sa_ike_life_seconds, pst); /* need to force parent state to I2 */ change_state(pst, STATE_PARENT_I2); /* record first packet for later checking of signature */ clonetochunk(pst->st_firstpacket_him, md->message_pbs.start, pbs_offset( &md->message_pbs), "saved first received packet"); /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_xchg = ISAKMP_v2_AUTH; r_hdr.isa_flags = ISAKMP_FLAGS_I; r_hdr.isa_msgid = st->st_msgid; memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2IDi; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); e.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* send out the IDi payload */ { struct ikev2_id r_id; pb_stream r_id_pbs; chunk_t id_b; struct hmac_ctx id_ctx; unsigned char *id_start; unsigned int id_len; hmac_init_chunk(&id_ctx, pst->st_oakley.prf_hasher, pst->st_skey_pi); build_id_payload((struct isakmp_ipsec_id 
*)&r_id, &id_b, &c->spd.this); r_id.isai_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); r_id.isai_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } { /* decide to send CERT payload */ send_cert = doi_send_ikev2_cert_thinking(st); if (send_cert) r_id.isai_np = ISAKMP_NEXT_v2CERT; else r_id.isai_np = ISAKMP_NEXT_v2AUTH; } id_start = e_pbs_cipher.cur; if (!out_struct(&r_id, &ikev2_id_desc, &e_pbs_cipher, &r_id_pbs) || !out_chunk(id_b, &r_id_pbs, "my identity")) return STF_INTERNAL_ERROR; /* HASH of ID is not done over common header */ id_start += 4; close_output_pbs(&r_id_pbs); /* calculate hash of IDi for AUTH below */ id_len = e_pbs_cipher.cur - id_start; DBG(DBG_CRYPT, DBG_dump_chunk("idhash calc pi", pst->st_skey_pi)); DBG(DBG_CRYPT, DBG_dump("idhash calc I2", id_start, id_len)); hmac_update(&id_ctx, id_start, id_len); idhash = alloca(pst->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash, &id_ctx); } /* send [CERT,] payload RFC 4306 3.6, 1.2) */ { if (send_cert) { stf_status certstat = ikev2_send_cert( st, md, INITIATOR, ISAKMP_NEXT_v2AUTH, &e_pbs_cipher); if (certstat != STF_OK) return certstat; } } /* send out the AUTH payload */ { lset_t policy; struct connection *c0 = first_pending(pst, &policy, &st->st_whack_sock); unsigned int np = (c0 ? ISAKMP_NEXT_v2SA : ISAKMP_NEXT_v2NONE); DBG(DBG_CONTROL, DBG_log(" payload after AUTH will be %s", (c0) ? "ISAKMP_NEXT_v2SA" : "ISAKMP_NEXT_v2NONE/NOTIFY")); stf_status authstat = ikev2_send_auth(c, st, INITIATOR, np, idhash, &e_pbs_cipher); if (authstat != STF_OK) return authstat; /* * now, find an eligible child SA from the pending list, and emit * SA2i, TSi and TSr and (v2N_USE_TRANSPORT_MODE notification in transport mode) for it . 
*/ if (c0) { chunk_t child_spi, notify_data; st->st_connection = c0; ikev2_emit_ipsec_sa(md, &e_pbs_cipher, ISAKMP_NEXT_v2TSi, c0, policy); st->st_ts_this = ikev2_end_to_ts(&c0->spd.this); st->st_ts_that = ikev2_end_to_ts(&c0->spd.that); ikev2_calc_emit_ts(md, &e_pbs_cipher, INITIATOR, c0, policy); if ( !(st->st_connection->policy & POLICY_TUNNEL) ) { DBG_log( "Initiator child policy is transport mode, sending v2N_USE_TRANSPORT_MODE"); memset(&child_spi, 0, sizeof(child_spi)); memset(&notify_data, 0, sizeof(notify_data)); ship_v2N(ISAKMP_NEXT_v2NONE, ISAKMP_PAYLOAD_NONCRITICAL, 0, &child_spi, v2N_USE_TRANSPORT_MODE, &notify_data, &e_pbs_cipher); } } else { libreswan_log( "no pending SAs found, PARENT SA keyed only"); } } /* * need to extend the packet so that we will know how big it is * since the length is under the integrity check */ ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, INITIATOR, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } /* keep it for a retransmit if necessary, but on initiator * we never do that, but send_ike_msg() uses it. */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_outI1"); /* * Delete previous retransmission event. 
*/ delete_event(st); event_schedule(EVENT_v2_RETRANSMIT, EVENT_RETRANSMIT_DELAY_0, st); return STF_OK; } /* * *************************************************************** * PARENT_inI2 ***** *************************************************************** * - * * */ static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_inI2outR2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); stf_status ikev2parent_inI2outR2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); reset_globals(); return STF_FATAL; } /* now. 
we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inI2outR2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } } static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI2outR2: calculating g^{xy}, sending R2")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (dh->md) release_md(dh->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == dh->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI2outR2_tail(pcrc, r); if ( e > STF_FAIL) { /* we do not send a notify because we are the initiator that could be responding to an error notification */ int v2_notify_num = e - STF_FAIL; DBG_log( "ikev2_parent_inI2outR2_tail returned STF_FAIL with %s", enum_name(&ikev2_notify_names, v2_notify_num)); } else if ( e != STF_OK) { DBG_log("ikev2_parent_inI2outR2_tail returned %s", enum_name(&stfstatus_name, e)); } if (dh->md != NULL) { complete_v2_state_transition(&dh->md, e); if (dh->md) release_md(dh->md); } reset_globals(); passert(GLOBALS_ARE_RESET()); } static stf_status ikev2_parent_inI2outR2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct 
msg_digest *md = dh->md; struct state *const st = md->st; struct connection *c = st->st_connection; unsigned char *idhash_in, *idhash_out; unsigned char *authstart; unsigned int np; int v2_notify_num = 0; /* extract calculated values from r */ finish_dh_v2(st, r); if (DBGP(DBG_PRIVATE) && DBGP(DBG_CRYPT)) ikev2_log_parentSA(st); /* decrypt things. */ { stf_status ret; ret = ikev2_decrypt_msg(md, RESPONDER); if (ret != STF_OK) return ret; } /*Once the message has been decrypted, then only we can check for auth payload*/ /*check the presense of auth payload now so that it does not crash in rehash_state if auth payload has not been received*/ if (!md->chain[ISAKMP_NEXT_v2AUTH]) { libreswan_log("no authentication payload found"); return STF_FAIL; } if (!ikev2_decode_peer_id(md, RESPONDER)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; { struct hmac_ctx id_ctx; const pb_stream *id_pbs = &md->chain[ISAKMP_NEXT_v2IDi]->pbs; unsigned char *idstart = id_pbs->start + 4; unsigned int idlen = pbs_room(id_pbs) - 4; hmac_init_chunk(&id_ctx, st->st_oakley.prf_hasher, st->st_skey_pi); /* calculate hash of IDi for AUTH below */ DBG(DBG_CRYPT, DBG_dump_chunk("idhash verify pi", st->st_skey_pi)); DBG(DBG_CRYPT, DBG_dump("idhash verify I2", idstart, idlen)); hmac_update(&id_ctx, idstart, idlen); idhash_in = alloca(st->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_in, &id_ctx); } /* process CERT payload */ { if (md->chain[ISAKMP_NEXT_v2CERT]) { /* should we check if we should accept a cert payload ? 
* has_preloaded_public_key(st) */ DBG(DBG_CONTROLMORE, DBG_log( "has a v2_CERT payload going to process it ")); ikev2_decode_cert(md); } } /* process CERTREQ payload */ if (md->chain[ISAKMP_NEXT_v2CERTREQ]) { DBG(DBG_CONTROLMORE, DBG_log("has a v2CERTREQ payload going to decode it")); ikev2_decode_cr(md, &st->st_connection->requested_ca); } /* process AUTH payload now */ /* now check signature from RSA key */ switch (md->chain[ISAKMP_NEXT_v2AUTH]->payload.v2a.isaa_type) { case v2_AUTH_RSA: { stf_status authstat = ikev2_verify_rsa_sha1(st, RESPONDER, idhash_in, NULL, /* keys from DNS */ NULL, /* gateways from DNS */ &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("RSA authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FATAL; } break; } case v2_AUTH_SHARED: { stf_status authstat = ikev2_verify_psk_auth(st, RESPONDER, idhash_in, &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log( "PSK authentication failed AUTH mismatch!"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FATAL; } break; } default: libreswan_log("authentication method: %s not supported", enum_name(&ikev2_auth_names, md->chain[ISAKMP_NEXT_v2AUTH]->payload. v2a.isaa_type)); return STF_FATAL; } /* Is there a notify about an error ? */ if (md->chain[ISAKMP_NEXT_v2N] != NULL) { DBG(DBG_CONTROL, DBG_log( " notify payload detected, should be processed....")); } /* good. 
now create child state */ /* note: as we will switch to child state, we force the parent to the * new state now */ change_state(st, STATE_PARENT_R2); c->newest_isakmp_sa = st->st_serialno; delete_event(st); event_schedule(EVENT_SA_REPLACE, c->sa_ike_life_seconds, st); authstart = reply_stream.cur; /* send response */ { unsigned char *encstart; unsigned char *iv; unsigned int ivsize; struct ikev2_generic e; pb_stream e_pbs, e_pbs_cipher; stf_status ret; bool send_cert = FALSE; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_xchg = ISAKMP_v2_AUTH; r_hdr.isa_flags = ISAKMP_FLAGS_R; memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2IDr; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* decide to send CERT payload before we generate IDr */ send_cert = doi_send_ikev2_cert_thinking(st); /* send out the IDr payload */ { struct ikev2_id r_id; pb_stream r_id_pbs; chunk_t id_b; struct hmac_ctx id_ctx; unsigned char *id_start; unsigned int id_len; hmac_init_chunk(&id_ctx, st->st_oakley.prf_hasher, st->st_skey_pr); build_id_payload((struct isakmp_ipsec_id *)&r_id, &id_b, &c->spd.this); 
r_id.isai_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (send_cert) r_id.isai_np = ISAKMP_NEXT_v2CERT; else r_id.isai_np = ISAKMP_NEXT_v2AUTH; id_start = e_pbs_cipher.cur; if (!out_struct(&r_id, &ikev2_id_desc, &e_pbs_cipher, &r_id_pbs) || !out_chunk(id_b, &r_id_pbs, "my identity")) return STF_INTERNAL_ERROR; close_output_pbs(&r_id_pbs); id_start += 4; /* calculate hash of IDi for AUTH below */ id_len = e_pbs_cipher.cur - id_start; DBG(DBG_CRYPT, DBG_dump_chunk("idhash calc pr", st->st_skey_pr)); DBG(DBG_CRYPT, DBG_dump("idhash calc R2", id_start, id_len)); hmac_update(&id_ctx, id_start, id_len); idhash_out = alloca( st->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_out, &id_ctx); } DBG(DBG_CONTROLMORE, DBG_log("assembled IDr payload -- CERT next")); /* send CERT payload RFC 4306 3.6, 1.2:([CERT,] ) */ if (send_cert) { stf_status certstat = ikev2_send_cert(st, md, RESPONDER, ISAKMP_NEXT_v2AUTH, &e_pbs_cipher); if (certstat != STF_OK) return certstat; } /* authentication good, see if there is a child SA being proposed */ if (md->chain[ISAKMP_NEXT_v2SA] == NULL || md->chain[ISAKMP_NEXT_v2TSi] == NULL || md->chain[ISAKMP_NEXT_v2TSr] == NULL) { /* initiator didn't propose anything. Weird. Try unpending out end. 
*/ /* UNPEND XXX */ libreswan_log("No CHILD SA proposals received."); np = ISAKMP_NEXT_v2NONE; } else { DBG_log("CHILD SA proposals received"); libreswan_log( "PAUL: this is where we have to check the TSi/TSr"); np = ISAKMP_NEXT_v2SA; } DBG(DBG_CONTROLMORE, DBG_log("going to assemble AUTH payload")); /* now send AUTH payload */ { stf_status authstat = ikev2_send_auth(c, st, RESPONDER, np, idhash_out, &e_pbs_cipher); if (authstat != STF_OK) return authstat; } if (np == ISAKMP_NEXT_v2SA) { /* must have enough to build an CHILD_SA */ ret = ikev2_child_sa_respond(md, RESPONDER, &e_pbs_cipher); if (ret > STF_FAIL) { v2_notify_num = ret - STF_FAIL; DBG(DBG_CONTROL, DBG_log( "ikev2_child_sa_respond returned STF_FAIL with %s", enum_name(&ikev2_notify_names, v2_notify_num))); np = ISAKMP_NEXT_v2NONE; } else if (ret != STF_OK) { DBG_log("ikev2_child_sa_respond returned %s", enum_name( &stfstatus_name, ret)); np = ISAKMP_NEXT_v2NONE; } } ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, RESPONDER, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } } /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_inI2outR2_tail"); /* note: retransimission is driven by initiator */ /* if the child failed, delete its state here - we sent the packet */ /* PAUL */ return STF_OK; } /* * *************************************************************** * PARENT_inR2 (I3 state) ***** *************************************************************** * - there are no cryptographic continuations, but be certain * that there will have to be DNS continuations, but they * just aren't implemented 
yet. * */ stf_status ikev2parent_inR2(struct msg_digest *md) { struct state *st = md->st; struct connection *c = st->st_connection; unsigned char *idhash_in; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); return STF_FATAL; } /* decrypt things. */ { stf_status ret; ret = ikev2_decrypt_msg(md, INITIATOR); if (ret != STF_OK) return ret; } if (!ikev2_decode_peer_id(md, INITIATOR)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; { struct hmac_ctx id_ctx; const pb_stream *id_pbs = &md->chain[ISAKMP_NEXT_v2IDr]->pbs; unsigned char *idstart = id_pbs->start + 4; unsigned int idlen = pbs_room(id_pbs) - 4; hmac_init_chunk(&id_ctx, pst->st_oakley.prf_hasher, pst->st_skey_pr); /* calculate hash of IDr for AUTH below */ DBG(DBG_CRYPT, DBG_dump_chunk("idhash verify pr", pst->st_skey_pr)); DBG(DBG_CRYPT, DBG_dump("idhash auth R2", idstart, idlen)); hmac_update(&id_ctx, idstart, idlen); idhash_in = alloca(pst->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_in, &id_ctx); } if (md->chain[ISAKMP_NEXT_v2CERT]) { /* should we check if we should accept a cert payload ? 
* has_preloaded_public_key(st) */ /* in v1 code it is decode_cert(struct msg_digest *md) */ DBG(DBG_CONTROLMORE, DBG_log("has a v2_CERT payload going to decode it")); ikev2_decode_cert(md); } /* process AUTH payload */ if (!md->chain[ISAKMP_NEXT_v2AUTH]) { libreswan_log("no authentication payload found"); return STF_FAIL; } /* now check signature from RSA key */ switch (md->chain[ISAKMP_NEXT_v2AUTH]->payload.v2a.isaa_type) { case v2_AUTH_RSA: { stf_status authstat = ikev2_verify_rsa_sha1(pst, INITIATOR, idhash_in, NULL, /* keys from DNS */ NULL, /* gateways from DNS */ &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FAIL; } break; } case v2_AUTH_SHARED: { stf_status authstat = ikev2_verify_psk_auth(pst, INITIATOR, idhash_in, &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("PSK authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FAIL; } break; } default: libreswan_log("authentication method: %s not supported", enum_name(&ikev2_auth_names, md->chain[ISAKMP_NEXT_v2AUTH]->payload. v2a.isaa_type)); return STF_FAIL; } /* * update the parent state to make sure that it knows we have * authenticated properly. */ change_state(pst, STATE_PARENT_I3); c->newest_isakmp_sa = pst->st_serialno; /* authentication good, see if there is a child SA available */ if (md->chain[ISAKMP_NEXT_v2SA] == NULL || md->chain[ISAKMP_NEXT_v2TSi] == NULL || md->chain[ISAKMP_NEXT_v2TSr] == NULL) { /* not really anything to here... but it would be worth unpending again */ DBG(DBG_CONTROLMORE, DBG_log( "no v2SA, v2TSi or v2TSr received, not attempting to setup child SA")); DBG(DBG_CONTROLMORE, DBG_log(" Should we check for some notify?")); /* * Delete previous retransmission event. 
*/ delete_event(st); return STF_OK; } { int bestfit_n, bestfit_p, bestfit_pr; unsigned int best_tsi_i, best_tsr_i; bestfit_n = -1; bestfit_p = -1; bestfit_pr = -1; /* Check TSi/TSr http://tools.ietf.org/html/rfc5996#section-2.9 */ DBG(DBG_CONTROLMORE, DBG_log(" check narrowing - we are responding to I2")); struct payload_digest *const tsi_pd = md->chain[ISAKMP_NEXT_v2TSi]; struct payload_digest *const tsr_pd = md->chain[ISAKMP_NEXT_v2TSr]; struct traffic_selector tsi[16], tsr[16]; #if 0 bool instantiate = FALSE; ip_subnet tsi_subnet, tsr_subnet; const char *oops; #endif unsigned int tsi_n, tsr_n; tsi_n = ikev2_parse_ts(tsi_pd, tsi, 16); tsr_n = ikev2_parse_ts(tsr_pd, tsr, 16); DBG_log( "Checking TSi(%d)/TSr(%d) selectors, looking for exact match", tsi_n, tsr_n); { struct spd_route *sra; sra = &c->spd; int bfit_n = ikev2_evaluate_connection_fit(c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n); if (bfit_n > bestfit_n) { DBG(DBG_CONTROLMORE, DBG_log( "bfit_n=ikev2_evaluate_connection_fit found better fit c %s", c->name)); int bfit_p = ikev2_evaluate_connection_port_fit(c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n, &best_tsi_i, &best_tsr_i); if (bfit_p > bestfit_p) { DBG(DBG_CONTROLMORE, DBG_log( "ikev2_evaluate_connection_port_fit found better fit c %s, tsi[%d],tsr[%d]", c->name, best_tsi_i, best_tsr_i)); int bfit_pr = ikev2_evaluate_connection_protocol_fit( c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n, &best_tsi_i, &best_tsr_i); if (bfit_pr > bestfit_pr ) { DBG(DBG_CONTROLMORE, DBG_log( "ikev2_evaluate_connection_protocol_fit found better fit c %s, tsi[%d],tsr[%d]", c ->name, best_tsi_i, best_tsr_i)); bestfit_p = bfit_p; bestfit_n = bfit_n; } else { DBG(DBG_CONTROLMORE, DBG_log( "protocol range fit c %s c->name was rejected by protocol matching", c ->name)); } } } else { DBG(DBG_CONTROLMORE, DBG_log( "prefix range fit c %s c->name was rejected by port matching", c->name)); } } if ( ( bestfit_n > 0 ) && (bestfit_p > 0)) { DBG(DBG_CONTROLMORE, DBG_log( ( "found an acceptable 
TSi/TSr Traffic Selector"))); memcpy(&st->st_ts_this, &tsi[best_tsi_i], sizeof(struct traffic_selector)); memcpy(&st->st_ts_that, &tsr[best_tsr_i], sizeof(struct traffic_selector)); ikev2_print_ts(&st->st_ts_this); ikev2_print_ts(&st->st_ts_that); ip_subnet tmp_subnet_i; ip_subnet tmp_subnet_r; rangetosubnet(&st->st_ts_this.low, &st->st_ts_this.high, &tmp_subnet_i); rangetosubnet(&st->st_ts_that.low, &st->st_ts_that.high, &tmp_subnet_r); c->spd.this.client = tmp_subnet_i; c->spd.this.port = st->st_ts_this.startport; c->spd.this.protocol = st->st_ts_this.ipprotoid; setportof(htons( c->spd.this.port), &c->spd.this.host_addr); setportof(htons( c->spd.this.port), &c->spd.this.client.addr); if ( subnetishost(&c->spd.this.client) && addrinsubnet(&c->spd.this.host_addr, &c->spd.this.client)) c->spd.this.has_client = FALSE; else c->spd.this.has_client = TRUE; c->spd.that.client = tmp_subnet_r; c->spd.that.port = st->st_ts_that.startport; c->spd.that.protocol = st->st_ts_that.ipprotoid; setportof(htons( c->spd.that.port), &c->spd.that.host_addr); setportof(htons( c->spd.that.port), &c->spd.that.client.addr); if ( subnetishost(&c->spd.that.client) && addrinsubnet(&c->spd.that.host_addr, &c->spd.that.client)) c->spd.that.has_client = FALSE; else c->spd.that.has_client = TRUE; /* AAAA */ } else { DBG(DBG_CONTROLMORE, DBG_log(( "reject responder TSi/TSr Traffic Selector"))); /* prevents parent from going to I3 */ return STF_FAIL + v2N_TS_UNACCEPTABLE; } } /* end of TS check block */ { v2_notification_t rn; struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; rn = ikev2_parse_child_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, NULL, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } { struct payload_digest *p; for (p = md->chain[ISAKMP_NEXT_v2N]; p != NULL; p = p->next) { /* RFC 5996 */ /*Types in the range 0 - 16383 are intended for reporting errors. 
An * implementation receiving a Notify payload with one of these types * that it does not recognize in a response MUST assume that the * corresponding request has failed entirely. Unrecognized error types * in a request and status types in a request or response MUST be * ignored, and they should be logged.*/ if (enum_name(&ikev2_notify_names, p->payload.v2n.isan_type) == NULL) { if (p->payload.v2n.isan_type < v2N_INITIAL_CONTACT) return STF_FAIL + p->payload.v2n.isan_type; } if ( p->payload.v2n.isan_type == v2N_USE_TRANSPORT_MODE ) { if ( st->st_connection->policy & POLICY_TUNNEL) { /*This means we did not send v2N_USE_TRANSPORT, however responder is sending it in now (inR2), seems incorrect*/ DBG(DBG_CONTROLMORE, DBG_log( "Initiator policy is tunnel, responder sends v2N_USE_TRANSPORT_MODE notification in inR2, ignoring it")); } else { DBG(DBG_CONTROLMORE, DBG_log( "Initiator policy is transport, responder sends v2N_USE_TRANSPORT_MODE, setting CHILD SA to transport mode")); if (st->st_esp.present == TRUE) { /*libreswan supports only "esp" with ikev2 it seems, look at ikev2_parse_child_sa_body handling*/ st->st_esp.attrs.encapsulation = ENCAPSULATION_MODE_TRANSPORT; } } } } /* for */ } /* notification block */ ikev2_derive_child_keys(st, md->role); c->newest_ipsec_sa = st->st_serialno; /* now install child SAs */ if (!install_ipsec_sa(st, TRUE)) return STF_FATAL; /* * Delete previous retransmission event. */ delete_event(st); return STF_OK; } /* * Cookie = <VersionIDofSecret> | Hash(Ni | IPi | SPIi | <secret>) * where <secret> is a randomly generated secret known only to the * in LSW implementation <VersionIDofSecret> is not used. 
 */
/* Compute a stateless anti-DDoS cookie:
 *   dcookie = SHA1(Ni | IPi | SPIi | <secret-of-the-day>)
 * (cf. RFC 5996 section 2.6; <VersionIDofSecret> is not used here).
 * Always returns TRUE. */
static bool ikev2_get_dcookie(u_char *dcookie, chunk_t st_ni,
			      ip_address *addr, u_int8_t *spiI)
{
	size_t addr_length;
	SHA1_CTX ctx_sha1;
	unsigned char addr_buff[
		sizeof(union { struct in_addr A; struct in6_addr B; })];

	addr_length = addrbytesof(addr, addr_buff, sizeof(addr_buff));
	SHA1Init(&ctx_sha1);
	SHA1Update(&ctx_sha1, st_ni.ptr, st_ni.len);
	SHA1Update(&ctx_sha1, addr_buff, addr_length);
	/* NOTE(review): sizeof(*spiI) == 1, so only the first SPI octet
	 * is hashed — confirm that is intended. */
	SHA1Update(&ctx_sha1, spiI, sizeof(*spiI));
	SHA1Update(&ctx_sha1, ikev2_secret_of_the_day,
		   SHA1_DIGEST_SIZE);
	SHA1Final(dcookie, &ctx_sha1);
	DBG(DBG_PRIVATE,
	    DBG_log("ikev2 secret_of_the_day used %s, length %d",
		    ikev2_secret_of_the_day,
		    SHA1_DIGEST_SIZE);
	    );
	DBG(DBG_CRYPT,
	    DBG_dump("computed dcookie: HASH(Ni | IPi | SPIi | <secret>)",
		     dcookie, SHA1_DIGEST_SIZE));
#if 0
	ikev2_secrets_recycle++;
	if (ikev2_secrets_recycle >= 32768) {
		/* handed out too many cookies, cycle secrets */
		ikev2_secrets_recycle = 0;

		/* can we call init_secrets() without adding an EVENT? */
		init_secrets();
	}
#endif
	return TRUE;
}

/*
 *
 ***************************************************************
 *                       NOTIFICATION_OUT Complete packet  *****
 ***************************************************************
 *
 */
/* Build and send a (currently always unencrypted, unprotected)
 * IKE_SA_INIT notification of the given type to the peer of p1st. */
void send_v2_notification(struct state *p1st, u_int16_t type,
			  struct state *encst,
			  u_char *icookie,
			  u_char *rcookie,
			  chunk_t *n_data)
{
	u_char buffer[1024];
	pb_stream reply;
	pb_stream rbody;
	chunk_t child_spi, notify_data;

	/* this function is not generic enough yet just enough for 6msg
	 * TBD accept HDR FLAGS as arg. default ISAKMP_FLAGS_R
	 * TBD when there is a child SA use that SPI in the notify paylod.
	 * TBD support encrypted notifications payloads.
	 * TBD accept Critical bit as an argument. default is set.
	 * TBD accept exchange type as an arg, default is ISAKMP_v2_SA_INIT
	 * do we need to send a notify with empty data?
	 * do we need to support more Protocol ID? more than PROTO_ISAKMP
	 */
	libreswan_log("sending %s notification %s to %s:%u",
		      encst ?
"encrypted " : "", enum_name(&ikev2_notify_names, type), ip_str(&p1st->st_remoteaddr), p1st->st_remoteport); #if 0 /* Empty notification data section should be fine? */ if (n_data == NULL) { DBG(DBG_CONTROLMORE, DBG_log("don't send packet when notification data empty")); return; } #endif memset(buffer, 0, sizeof(buffer)); init_pbs(&reply, buffer, sizeof(buffer), "notification msg"); /* HDR out */ { struct isakmp_hdr n_hdr; zero(&n_hdr); /* default to 0 */ /* AAA should we copy from MD? */ /* Impair function will raise major/minor by 1 for testing */ n_hdr.isa_version = build_ike_version(); memcpy(n_hdr.isa_rcookie, rcookie, COOKIE_SIZE); memcpy(n_hdr.isa_icookie, icookie, COOKIE_SIZE); n_hdr.isa_xchg = ISAKMP_v2_SA_INIT; n_hdr.isa_np = ISAKMP_NEXT_v2N; n_hdr.isa_flags &= ~ISAKMP_FLAGS_I; n_hdr.isa_flags |= ISAKMP_FLAGS_R; #warning check msgid code here /* PAUL: shouldn't we set n_hdr.isa_msgid = [htonl](p1st->st_msgid); */ if (!out_struct(&n_hdr, &isakmp_hdr_desc, &reply, &rbody)) { libreswan_log( "error initializing hdr for notify message"); return; } } child_spi.ptr = NULL; child_spi.len = 0; /* build and add v2N payload to the packet */ memset(&child_spi, 0, sizeof(child_spi)); memset(&notify_data, 0, sizeof(notify_data)); ship_v2N(ISAKMP_NEXT_v2NONE, DBGP( IMPAIR_SEND_BOGUS_ISAKMP_FLAG) ? 
		 (ISAKMP_PAYLOAD_NONCRITICAL |
		  ISAKMP_PAYLOAD_LIBRESWAN_BOGUS) :
		 ISAKMP_PAYLOAD_NONCRITICAL,
		 PROTO_ISAKMP, &child_spi, type, n_data, &rbody);

	close_message(&rbody, p1st);
	close_output_pbs(&reply);

	/* keep a copy of the packet on the state for retransmission */
	clonetochunk(p1st->st_tpacket, reply.start, pbs_offset(&reply),
		     "notification packet");

	send_ike_msg(p1st, __FUNCTION__);
}

/* add notify payload to the rbody */
/*
 * Emit an IKEv2 Notify (v2N) payload into an open pb_stream.
 *
 * np       - next-payload type chained after this one
 * critical - critical flag byte for the payload header
 * protoid  - protocol ID (e.g. PROTO_ISAKMP)
 * spi      - SPI to embed; spi->len == 0 means "no SPI"
 * type     - notify message type
 * n_data   - optional notification data, NULL for none
 * rbody    - output stream the payload is written into
 *
 * Returns TRUE on success, FALSE if any emit step fails.
 */
bool ship_v2N(unsigned int np, u_int8_t critical,
	      u_int8_t protoid, chunk_t *spi,
	      u_int16_t type, chunk_t *n_data, pb_stream *rbody)
{
	struct ikev2_notify n;
	pb_stream n_pbs;

	DBG(DBG_CONTROLMORE, DBG_log("Adding a v2N Payload"));
	n.isan_np = np;
	n.isan_critical = critical;
	if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
		/* testing hook: deliberately corrupt the critical flags */
		libreswan_log(
			" setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload");
		n.isan_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS;
	}
	n.isan_protoid = protoid;
	n.isan_spisize = spi->len;
	n.isan_type = type;
	if (!out_struct(&n, &ikev2_notify_desc, rbody, &n_pbs)) {
		libreswan_log(
			"error initializing notify payload for notify message");
		return FALSE;
	}

	/* optional SPI body */
	if (spi->len > 0) {
		if (!out_raw(spi->ptr, spi->len, &n_pbs, "SPI ")) {
			libreswan_log("error writing SPI to notify payload");
			return FALSE;
		}
	}
	/* optional notification data */
	if (n_data != NULL) {
		if (!out_raw(n_data->ptr, n_data->len, &n_pbs,
			     "Notify data")) {
			libreswan_log(
				"error writing notify payload for notify message");
			return FALSE;
		}
	}

	close_output_pbs(&n_pbs);
	return TRUE;
}

/*
 * ***************************************************************
 *                     INFORMATIONAL                       *****
 * ***************************************************************
 *  -
 *
 *
 */
/*
 * Handle an incoming IKEv2 INFORMATIONAL exchange (delete payloads,
 * liveness checks).  The exchange MUST arrive inside an Encrypted
 * payload (RFC 5996 section 1.4); anything else is ignored.
 */
stf_status process_informational_ikev2(struct msg_digest *md)
{
	/* verify that there is in fact an encrypted payload */
	if (!md->chain[ISAKMP_NEXT_v2E]) {
		libreswan_log(
			"Ignoring informational exchange outside encrypted payload (rfc5996 section 1.4)");
		return STF_IGNORE;
	}

	/* decrypt things.
*/ { stf_status ret; if (md->hdr.isa_flags & ISAKMP_FLAGS_I) { DBG(DBG_CONTROLMORE, DBG_log( "received informational exchange request from INITIATOR")); ret = ikev2_decrypt_msg(md, RESPONDER); } else { DBG(DBG_CONTROLMORE, DBG_log( "received informational exchange request from RESPONDER")); ret = ikev2_decrypt_msg(md, INITIATOR); } if (ret != STF_OK) return ret; } { struct payload_digest *p; struct ikev2_delete *v2del = NULL; stf_status ret; struct state *const st = md->st; /* Only send response if it is request*/ if (!(md->hdr.isa_flags & ISAKMP_FLAGS_R)) { unsigned char *authstart; pb_stream e_pbs, e_pbs_cipher; struct ikev2_generic e; unsigned char *iv; int ivsize; unsigned char *encstart; /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "information exchange reply packet"); DBG(DBG_CONTROLMORE | DBG_DPD, DBG_log("Received an INFORMATIONAL request, " "updating liveness, no longer pending")); st->st_last_liveness = now(); st->st_pend_liveness = FALSE; /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); /* default to 0 */ /* AAA should we copy from MD? 
*/ r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_msgid = htonl(md->msgid_received); /*set initiator bit if we are initiator*/ if (md->role == INITIATOR) r_hdr.isa_flags |= ISAKMP_FLAGS_I; r_hdr.isa_flags |= ISAKMP_FLAGS_R; if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) { libreswan_log( "error initializing hdr for informational message"); return STF_INTERNAL_ERROR; } } /*HDR Done*/ /* insert an Encryption payload header */ if (md->chain[ISAKMP_NEXT_v2D]) { bool ikesa_flag = FALSE; /* Search if there is a IKE SA delete payload*/ for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; p = p->next) { if (p->payload.v2delete.isad_protoid == PROTO_ISAKMP) { e.isag_np = ISAKMP_NEXT_v2NONE; ikesa_flag = TRUE; break; } } /* if there is no IKE SA DELETE PAYLOAD*/ /* That means, there are AH OR ESP*/ if (!ikesa_flag) e.isag_np = ISAKMP_NEXT_v2D; } else { e.isag_np = ISAKMP_NEXT_v2NONE; } e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; if (md->chain[ISAKMP_NEXT_v2D]) { for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; p = p->next) { v2del = &p->payload.v2delete; switch (v2del->isad_protoid) { case PROTO_ISAKMP: /* My understanding is that delete payload for IKE SA * should be the only payload in the informational exchange */ break; case PROTO_IPSEC_AH: case PROTO_IPSEC_ESP: { char spi_buf[1024]; pb_stream del_pbs; 
struct ikev2_delete v2del_tmp; u_int16_t i, j = 0; u_char *spi; for (i = 0; i < v2del->isad_nrspi; i++ ) { spi = p->pbs.cur + (i * v2del-> isad_spisize); DBG(DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); struct state *dst = find_state_ikev2_child_to_delete( st->st_icookie, st->st_rcookie, v2del->isad_protoid, *( ipsec_spi_t *)spi); if (dst != NULL) { struct ipsec_proto_info *pr = v2del-> isad_protoid == PROTO_IPSEC_AH ? &dst ->st_ah : &dst -> st_esp; DBG( DBG_CONTROLMORE, DBG_log( "our side spi that needs to be sent: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl( pr -> our_spi))); memcpy( spi_buf + (j * v2del -> isad_spisize), (u_char *)&pr->our_spi, v2del->isad_spisize); j++; } else { DBG( DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx) but local state is not found", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); } } if ( !j ) { DBG(DBG_CONTROLMORE, DBG_log( "This delete payload does not contain a single spi that has any local state, ignoring")); return STF_IGNORE; } else { DBG(DBG_CONTROLMORE, DBG_log( "No. 
of SPIs to be sent %d", j); DBG_dump( " Emit SPIs", spi_buf, j * v2del-> isad_spisize)); } zero(&v2del_tmp); if (p->next != NULL) v2del_tmp.isad_np = ISAKMP_NEXT_v2D; else v2del_tmp.isad_np = ISAKMP_NEXT_v2NONE; v2del_tmp.isad_protoid = v2del->isad_protoid; v2del_tmp.isad_spisize = v2del->isad_spisize; v2del_tmp.isad_nrspi = j; /* Emit delete payload header out*/ if (!out_struct(&v2del_tmp, & ikev2_delete_desc, &e_pbs_cipher, &del_pbs)) { libreswan_log( "error initializing hdr for delete payload"); return STF_INTERNAL_ERROR; } /* Emit values of spi to be sent to the peer*/ if (!out_raw(spi_buf, j * v2del-> isad_spisize, &del_pbs, "local spis")) { libreswan_log( "error sending spi values in delete payload"); return STF_INTERNAL_ERROR; } close_output_pbs(&del_pbs); } break; default: /*Unrecongnized protocol */ return STF_IGNORE; } /* this will break from for loop*/ if (v2del->isad_protoid == PROTO_ISAKMP) break; } } /*If there are no payloads or in other words empty payload in request * that means it is check for liveliness, so send an empty payload message * this will end up sending an empty payload */ ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, md->role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset( &reply_stream), "reply packet for informational exchange"); send_ike_msg(st, __FUNCTION__); } /* Now carry out the actualy task, we can not carry the actual task since * we need to send informational responde using existig SAs */ { if (md->chain[ISAKMP_NEXT_v2D] && st->st_state != STATE_IKESA_DEL) { for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; 
p = p->next) { v2del = &p->payload.v2delete; switch (v2del->isad_protoid) { case PROTO_ISAKMP: { /* My understanding is that delete payload for IKE SA * should be the only payload in the informational * Now delete the IKE SA state and all its child states */ struct state *current_st = st; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st-> st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st-> st_hashchain_next; if (current_st-> st_clonedfrom != 0 ) { change_state( current_st, STATE_CHILDSA_DEL); } else { change_state( current_st, STATE_IKESA_DEL); } delete_state(current_st); current_st = next_st; } } break; case PROTO_IPSEC_AH: case PROTO_IPSEC_ESP: { /* pb_stream del_pbs; */ struct ikev2_delete; u_int16_t i; u_char *spi; for (i = 0; i < v2del->isad_nrspi; i++ ) { spi = p->pbs.cur + (i * v2del-> isad_spisize); DBG(DBG_CONTROLMORE, DBG_log( "Now doing actual deletion for request: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); struct state *dst = find_state_ikev2_child_to_delete( st->st_icookie, st->st_rcookie, v2del->isad_protoid, *( ipsec_spi_t *)spi); if (dst != NULL) { struct ipsec_proto_info *pr = v2del-> isad_protoid == PROTO_IPSEC_AH ? 
&dst ->st_ah : &dst -> st_esp; DBG( DBG_CONTROLMORE, DBG_log( "our side spi that needs to be deleted: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl( pr -> our_spi))); /* now delete the state*/ change_state( dst, STATE_CHILDSA_DEL); delete_state( dst); } else { DBG( DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx) but local state is not found", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); } } } break; default: /*Unrecongnized protocol */ return STF_IGNORE; } /* this will break from for loop*/ if (v2del->isad_protoid == PROTO_ISAKMP) break; } /* for */ } /* if*/ else { /* empty response to our IKESA delete request*/ if ((md->hdr.isa_flags & ISAKMP_FLAGS_R) && st->st_state == STATE_IKESA_DEL) { /* My understanding is that delete payload for IKE SA * should be the only payload in the informational * Now delete the IKE SA state and all its child states */ struct state *current_st = st; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st-> st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st-> st_hashchain_next; if (current_st->st_clonedfrom != 0 ) { change_state( current_st, STATE_CHILDSA_DEL); } else { change_state( current_st, STATE_IKESA_DEL); } delete_state(current_st); current_st = next_st; } /* empty response to our empty INFORMATIONAL * We don't send anything back */ } else if ((md->hdr.isa_flags & ISAKMP_FLAGS_R) && st->st_state != STATE_IKESA_DEL) { DBG(DBG_CONTROLMORE, DBG_log( "Received an INFORMATIONAL response, " "updating liveness, no longer pending.")); st->st_last_liveness = now(); st->st_pend_liveness = FALSE; st->st_msgid_lastrecv = md->msgid_received; } } } } return STF_OK; } stf_status 
ikev2_send_informational(struct state *st) { struct state *pst = NULL; if (st->st_clonedfrom != SOS_NOBODY) { pst = state_with_serialno(st->st_clonedfrom); if (!pst) { DBG(DBG_CONTROL, DBG_log( "IKE SA does not exist for this child SA - should not happen")); DBG(DBG_CONTROL, DBG_log("INFORMATIONAL exchange can not be sent")); return STF_IGNORE; } } else { pst = st; } { unsigned char *authstart; unsigned char *encstart; unsigned char *iv; int ivsize; struct msg_digest md; struct ikev2_generic e; enum phase1_role role; pb_stream e_pbs, e_pbs_cipher; pb_stream rbody; pb_stream request; u_char buffer[1024]; md.st = st; md.pst = pst; memset(buffer, 0, sizeof(buffer)); init_pbs(&request, buffer, sizeof(buffer), "informational exchange request packet"); authstart = request.cur; /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, pst->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, pst->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; if (pst->st_state == STATE_PARENT_I2 || pst->st_state == STATE_PARENT_I3) { r_hdr.isa_flags |= ISAKMP_FLAGS_I; role = INITIATOR; r_hdr.isa_msgid = htonl(pst->st_msgid_nextuse); } else { role = RESPONDER; r_hdr.isa_msgid = htonl( pst->st_msgid_lastrecv + 1); } if (!out_struct(&r_hdr, &isakmp_hdr_desc, &request, &rbody)) { libreswan_log( "error initializing hdr for informational message"); return STF_FATAL; } } /* HDR done*/ /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2NONE; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &rbody, &e_pbs)) return STF_FATAL; /* IV */ iv = e_pbs.cur; ivsize = pst->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_FATAL; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; 
e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* This is an empty informational exchange (A.K.A liveness check) */ ikev2_padup_pre_encrypt(&md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { stf_status ret; unsigned char *authloc = ikev2_authloc(&md, &e_pbs); if (!authloc) return STF_FATAL; close_output_pbs(&e_pbs); close_output_pbs(&rbody); close_output_pbs(&request); ret = ikev2_encrypt_msg(&md, role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return STF_FATAL; } /* keep it for a retransmit if necessary */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, request.start, pbs_offset(&request), "reply packet for informational exchange"); pst->st_pend_liveness = TRUE; /* we should only do this when dpd/liveness is active? */ send_ike_msg(pst, __FUNCTION__); ikev2_update_counters(&md); } return STF_OK; } /* * *************************************************************** * DELETE_OUT ***** *************************************************************** * */ void ikev2_delete_out(struct state *st) { struct state *pst = NULL; if (st->st_clonedfrom != 0) { /*child SA*/ pst = state_with_serialno(st->st_clonedfrom); if (!pst) { DBG(DBG_CONTROL, DBG_log("IKE SA does not exist for this child SA")); DBG(DBG_CONTROL, DBG_log( "INFORMATIONAL exchange can not be sent, deleting state")); goto end; } } else { /* Parent SA*/ pst = st; } { unsigned char *authstart; pb_stream e_pbs, e_pbs_cipher; pb_stream rbody; struct ikev2_generic e; unsigned char *iv; int ivsize; unsigned char *encstart; struct msg_digest md; enum phase1_role role; md.st = st; md.pst = pst; /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "information exchange request packet"); /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); /* default to 0 */ /* AAA should we copy from MD? 
*/ r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, pst->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, pst->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_msgid = htonl(pst->st_msgid_nextuse); /*set initiator bit if we are initiator*/ if (pst->st_state == STATE_PARENT_I2 || pst->st_state == STATE_PARENT_I3) { r_hdr.isa_flags |= ISAKMP_FLAGS_I; role = INITIATOR; } else { role = RESPONDER; } /* r_hdr.isa_flags |= ISAKMP_FLAGS_R; */ if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &rbody)) { libreswan_log( "error initializing hdr for informational message"); goto end; } } /*HDR Done*/ /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2D; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &rbody, &e_pbs)) goto end; /* insert IV */ iv = e_pbs.cur; ivsize = pst->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) goto end; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; { pb_stream del_pbs; struct ikev2_delete v2del_tmp; /* * u_int16_t i, j=0; * u_char *spi; * char spi_buf[1024]; */ zero(&v2del_tmp); v2del_tmp.isad_np = ISAKMP_NEXT_v2NONE; if (st->st_clonedfrom != 0 ) { v2del_tmp.isad_protoid = PROTO_IPSEC_ESP; v2del_tmp.isad_spisize = sizeof(ipsec_spi_t); v2del_tmp.isad_nrspi = 1; } else { v2del_tmp.isad_protoid = PROTO_ISAKMP; v2del_tmp.isad_spisize = 0; v2del_tmp.isad_nrspi = 0; } /* Emit delete payload header out*/ if (!out_struct(&v2del_tmp, &ikev2_delete_desc, &e_pbs_cipher, &del_pbs)) { libreswan_log( "error initializing hdr for delete payload"); goto end; } /* Emit values of spi to be sent to the peer*/ if (st->st_clonedfrom != 0) { if (!out_raw( (u_char *)&st->st_esp.our_spi, sizeof(ipsec_spi_t), &del_pbs, 
"local spis")) { libreswan_log( "error sending spi values in delete payload"); goto end; } } close_output_pbs(&del_pbs); } ikev2_padup_pre_encrypt(&md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { stf_status ret; unsigned char *authloc = ikev2_authloc(&md, &e_pbs); if (authloc == NULL) goto end; close_output_pbs(&e_pbs); close_output_pbs(&rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(&md, role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) goto end; } /* keep it for a retransmit if necessary */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "request packet for informational exchange"); send_ike_msg(pst, __FUNCTION__); /* update state */ ikev2_update_counters(&md); } /* If everything is fine, and we sent packet, goto real_end*/ goto real_end; end: /* If some error occurs above that prevents us sending a request packet*/ /* delete the states right now*/ if (st->st_clonedfrom != SOS_NOBODY) { change_state(st, STATE_CHILDSA_DEL); delete_state(st); } else { struct state *current_st = pst; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st->st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st->st_hashchain_next; if (current_st->st_clonedfrom != 0 ) change_state(current_st, STATE_CHILDSA_DEL); else change_state(current_st, STATE_IKESA_DEL); delete_state(current_st); current_st = next_st; } } real_end:; } /* * Determine the IKE version we will use for the IKE packet * Normally, this is "2.0", but in the future we might need to * change that. Version used is the minimum 2.x version both * sides support. 
So if we support 2.1, and they support 2.0, * we should sent 2.0 (not implemented until we hit 2.1 ourselves) * We also have some impair functions that modify the major/minor * version on purpose - for testing * * rcv_version: the received IKE version, 0 if we don't know * * top 4 bits are major version, lower 4 bits are minor version */ static int build_ike_version() { return ((IKEv2_MAJOR_VERSION + (DBGP(IMPAIR_MAJOR_VERSION_BUMP) ? 1 : 0)) << ISA_MAJ_SHIFT) | (IKEv2_MINOR_VERSION + (DBGP(IMPAIR_MINOR_VERSION_BUMP) ? 1 : 0)); }
/* * IKEv2 parent SA creation routines * Copyright (C) 2007-2008 Michael Richardson <mcr@xelerance.com> * Copyright (C) 2008-2011 Paul Wouters <paul@xelerance.com> * Copyright (C) 2008 Antony Antony <antony@xelerance.com> * Copyright (C) 2008-2009 David McCullough <david_mccullough@securecomputing.com> * Copyright (C) 2010,2012 Avesh Agarwal <avagarwa@redhat.com> * Copyright (C) 2010 Tuomo Soini <tis@foobar.fi * Copyright (C) 2012 Paul Wouters <pwouters@redhat.com> * Copyright (C) 2012 Antony Antony <antony@phenome.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* */ #include <stdio.h> #include <string.h> #include <stddef.h> #include <stdlib.h> #include <unistd.h> #include <gmp.h> #include <libreswan.h> #include <libreswan/ipsec_policy.h> #include "sysdep.h" #include "constants.h" #include "defs.h" #include "state.h" #include "id.h" #include "connections.h" #include "crypto.h" /* requires sha1.h and md5.h */ #include "x509.h" #include "x509more.h" #include "ike_alg.h" #include "kernel_alg.h" #include "plutoalg.h" #include "pluto_crypt.h" #include "packet.h" #include "demux.h" #include "ikev2.h" #include "log.h" #include "spdb.h" /* for out_sa */ #include "ipsec_doi.h" #include "vendor.h" #include "timer.h" #include "ike_continuations.h" #include "cookie.h" #include "rnd.h" #include "pending.h" #include "kernel.h" #define SEND_NOTIFICATION_AA(t, d) \ if (st) \ send_v2_notification_from_state(st, st->st_state, t, d); \ else \ send_v2_notification_from_md(md, t, d); #define SEND_NOTIFICATION(t) \ if (st) \ send_v2_notification_from_state(st, st->st_state, t, NULL); \ else \ send_v2_notification_from_md(md, t, NULL); static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_outI1_tail(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); static bool ikev2_get_dcookie(u_char *dcookie, chunk_t st_ni, ip_address *addr, u_int8_t *spiI); static stf_status ikev2_parent_outI1_common(struct msg_digest *md, struct state *st); static int build_ike_version(); /* * *************************************************************** ***** PARENT_OUTI1 ***** *************************************************************** * * * Initiate an Oakley Main Mode exchange. * HDR, SAi1, KEi, Ni --> * * Note: this is not called from demux.c, but from ipsecdoi_initiate(). 
* */ stf_status ikev2parent_outI1(int whack_sock, struct connection *c, struct state *predecessor, lset_t policy, unsigned long try, enum crypto_importance importance #ifdef HAVE_LABELED_IPSEC , struct xfrm_user_sec_ctx_ike * uctx #endif ) { struct state *st = new_state(); struct db_sa *sadb; int groupnum; int policy_index = POLICY_ISAKMP(policy, c->spd.this.xauth_server, c->spd.this.xauth_client); /* set up new state */ get_cookie(TRUE, st->st_icookie, COOKIE_SIZE, &c->spd.that.host_addr); initialize_new_state(st, c, policy, try, whack_sock, importance); st->st_ikev2 = TRUE; change_state(st, STATE_PARENT_I1); st->st_msgid_lastack = INVALID_MSGID; st->st_msgid_nextuse = 0; st->st_try = try; if (HAS_IPSEC_POLICY(policy)) { #ifdef HAVE_LABELED_IPSEC st->sec_ctx = NULL; if ( uctx != NULL) libreswan_log( "Labeled ipsec is not supported with ikev2 yet"); #endif add_pending(dup_any( whack_sock), st, c, policy, 1, predecessor == NULL ? SOS_NOBODY : predecessor->st_serialno #ifdef HAVE_LABELED_IPSEC , st->sec_ctx #endif ); } if (predecessor == NULL) libreswan_log("initiating v2 parent SA"); else libreswan_log("initiating v2 parent SA to replace #%lu", predecessor->st_serialno); if (predecessor != NULL) { update_pending(predecessor, st); whack_log(RC_NEW_STATE + STATE_PARENT_I1, "%s: initiate, replacing #%lu", enum_name(&state_names, st->st_state), predecessor->st_serialno); } else { whack_log(RC_NEW_STATE + STATE_PARENT_I1, "%s: initiate", enum_name(&state_names, st->st_state)); } /* * now, we need to initialize st->st_oakley, specifically, the group * number needs to be initialized. 
*/ groupnum = 0; st->st_sadb = &oakley_sadb[policy_index]; sadb = oakley_alg_makedb(st->st_connection->alg_info_ike, st->st_sadb, 0); if (sadb != NULL) st->st_sadb = sadb; sadb = st->st_sadb = sa_v2_convert(st->st_sadb); { unsigned int pc_cnt; /* look at all the proposals */ if (st->st_sadb->prop_disj != NULL) { for (pc_cnt = 0; pc_cnt < st->st_sadb->prop_disj_cnt && groupnum == 0; pc_cnt++) { struct db_v2_prop *vp = &st->st_sadb->prop_disj[pc_cnt]; unsigned int pr_cnt; /* look at all the proposals */ if (vp->props != NULL) { for (pr_cnt = 0; pr_cnt < vp->prop_cnt && groupnum == 0; pr_cnt++) { unsigned int ts_cnt; struct db_v2_prop_conj *vpc = &vp->props[pr_cnt]; for (ts_cnt = 0; ts_cnt < vpc->trans_cnt && groupnum == 0; ts_cnt++) { struct db_v2_trans *tr = &vpc-> trans[ ts_cnt ]; if (tr != NULL && tr->transform_type == IKEv2_TRANS_TYPE_DH) { groupnum = tr-> transid; } } } } } } } if (groupnum == 0) groupnum = OAKLEY_GROUP_MODP2048; st->st_oakley.group = lookup_group(groupnum); st->st_oakley.groupnum = groupnum; /* now. 
we need to go calculate the nonce, and the KE */ { struct ke_continuation *ke = alloc_thing( struct ke_continuation, "ikev2_outI1 KE"); stf_status e; ke->md = alloc_md(); ke->md->from_state = STATE_IKEv2_BASE; ke->md->svm = ikev2_parent_firststate(); ke->md->st = st; set_suspended(st, ke->md); if (!st->st_sec_in_use) { pcrc_init(&ke->ke_pcrc); ke->ke_pcrc.pcrc_func = ikev2_parent_outI1_continue; e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group, importance); if ( (e != STF_SUSPEND && e != STF_INLINE) || (e == STF_TOOMUCHCRYPTO)) { loglog(RC_CRYPTOFAILED, "system too busy - Enabling dcookies [TODO]"); delete_state(st); } } else { e = ikev2_parent_outI1_tail( (struct pluto_crypto_req_cont *)ke, NULL); } reset_globals(); return e; } } static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent outI1: calculated ke+nonce, sending I1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_outI1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_cur_state(); reset_globals(); } /* * unpack the calculate KE value, store it in state. * used by IKEv2: parent, child (PFS) */ static int unpack_v2KE(struct state *st, struct pluto_crypto_req *r, chunk_t *g) { struct pcr_kenonce *kn = &r->pcr_d.kn; unpack_KE(st, r, g); return kn->oakley_group; } /* * package up the calculate KE value, and emit it as a KE payload. 
* used by IKEv2: parent, child (PFS) */ static bool justship_v2KE(struct state *st UNUSED, chunk_t *g, unsigned int oakley_group, pb_stream *outs, u_int8_t np) { struct ikev2_ke v2ke; pb_stream kepbs; memset(&v2ke, 0, sizeof(v2ke)); v2ke.isak_np = np; v2ke.isak_group = oakley_group; if (!out_struct(&v2ke, &ikev2_ke_desc, outs, &kepbs)) return FALSE; if (!out_chunk(*g, &kepbs, "ikev2 g^x")) return FALSE; close_output_pbs(&kepbs); return TRUE; } static bool ship_v2KE(struct state *st, struct pluto_crypto_req *r, chunk_t *g, pb_stream *outs, u_int8_t np) { int oakley_group = unpack_v2KE(st, r, g); return justship_v2KE(st, g, oakley_group, outs, np); } static stf_status ikev2_parent_outI1_tail(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; unpack_v2KE(st, r, &st->st_gi); unpack_nonce(&st->st_ni, r); return ikev2_parent_outI1_common(md, st); } static stf_status ikev2_parent_outI1_common(struct msg_digest *md, struct state *st) { struct connection *c = st->st_connection; int numvidtosend = 0; /* set up reply */ init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr hdr; zero(&hdr); /* default to 0 */ /* Impair function will raise major/minor by 1 for testing */ hdr.isa_version = build_ike_version(); if (st->st_dcookie.ptr) hdr.isa_np = ISAKMP_NEXT_v2N; else hdr.isa_np = ISAKMP_NEXT_v2SA; hdr.isa_xchg = ISAKMP_v2_SA_INIT; hdr.isa_flags = ISAKMP_FLAGS_I; memcpy(hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); /* R-cookie, are left zero */ if (!out_struct(&hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) { reset_cur_state(); return STF_INTERNAL_ERROR; } } /* send an anti DOS cookie, 4306 2.6, if we have received one from the * responder */ if (st->st_dcookie.ptr) { chunk_t child_spi; memset(&child_spi, 0, sizeof(child_spi)); ship_v2N(ISAKMP_NEXT_v2SA, DBGP( 
IMPAIR_SEND_BOGUS_ISAKMP_FLAG) ? (ISAKMP_PAYLOAD_NONCRITICAL | ISAKMP_PAYLOAD_LIBRESWAN_BOGUS) : ISAKMP_PAYLOAD_NONCRITICAL, PROTO_ISAKMP, &child_spi, v2N_COOKIE, &st->st_dcookie, &md->rbody); } /* SA out */ { u_char *sa_start = md->rbody.cur; if (st->st_sadb->prop_disj_cnt == 0 || st->st_sadb->prop_disj) st->st_sadb = sa_v2_convert(st->st_sadb); if (!ikev2_out_sa(&md->rbody, PROTO_ISAKMP, st->st_sadb, st, TRUE, /* parentSA */ ISAKMP_NEXT_v2KE)) { libreswan_log("outsa fail"); reset_cur_state(); return STF_INTERNAL_ERROR; } /* save initiator SA for later HASH */ if (st->st_p1isa.ptr == NULL) { /* no leak! (MUST be first time) */ clonetochunk(st->st_p1isa, sa_start, md->rbody.cur - sa_start, "sa in main_outI1"); } } /* send KE */ if (!justship_v2KE(st, &st->st_gi, st->st_oakley.groupnum, &md->rbody, ISAKMP_NEXT_v2Ni)) return STF_INTERNAL_ERROR; /* * Check which Vendor ID's we need to send - there will be more soon * In IKEv2, DPD and NAT-T are no longer vendorid's */ if (c->send_vendorid) { numvidtosend++; /* if we need to send Libreswan VID */ } /* send NONCE */ { int np = numvidtosend > 0 ? ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; struct ikev2_generic in; pb_stream pb; memset(&in, 0, sizeof(in)); in.isag_np = np; in.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); in.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } if (!out_struct(&in, &ikev2_nonce_desc, &md->rbody, &pb) || !out_raw(st->st_ni.ptr, st->st_ni.len, &pb, "IKEv2 nonce")) return STF_INTERNAL_ERROR; close_output_pbs(&pb); } /* Send Vendor VID if needed */ if (c->send_vendorid) { const char *myvid = ipsec_version_vendorid(); int np = --numvidtosend > 0 ? 
ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; if (!out_generic_raw(np, &isakmp_vendor_id_desc, &md->rbody, myvid, strlen(myvid), "Vendor ID")) return STF_INTERNAL_ERROR; /* ensure our VID chain was valid */ passert(numvidtosend == 0); } close_message(&md->rbody, st); close_output_pbs(&reply_stream); freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_outI1_tail"); /* save packet for later signing */ freeanychunk(st->st_firstpacket_me); clonetochunk(st->st_firstpacket_me, reply_stream.start, pbs_offset(&reply_stream), "saved first packet"); /* Transmit */ send_ike_msg(st, __FUNCTION__); delete_event(st); event_schedule(EVENT_v2_RETRANSMIT, EVENT_RETRANSMIT_DELAY_0, st); reset_cur_state(); return STF_OK; } /* * *************************************************************** * PARENT_INI1 ***** *************************************************************** * - * * */ static void ikev2_parent_inI1outR1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_inI1outR1_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); stf_status ikev2parent_inI1outR1(struct msg_digest *md) { struct state *st = md->st; lset_t policy = POLICY_IKEV2_ALLOW; struct connection *c = find_host_connection(&md->iface->ip_addr, md->iface->port, &md->sender, md->sender_port, POLICY_IKEV2_ALLOW); /* retrieve st->st_gi */ #if 0 if (c == NULL) { /* * make up a policy from the thing that was proposed, and see * if we can find a connection with that policy. */ pb_stream pre_sa_pbs = sa_pd->pbs; policy = preparse_isakmp_sa_body(&pre_sa_pbs); c = find_host_connection(&md->iface->ip_addr, pluto_port, (ip_address*)NULL, md->sender_port, policy); } #endif if (c == NULL) { /* See if a wildcarded connection can be found. * We cannot pick the right connection, so we're making a guess. 
* All Road Warrior connections are fair game: * we pick the first we come across (if any). * If we don't find any, we pick the first opportunistic * with the smallest subnet that includes the peer. * There is, of course, no necessary relationship between * an Initiator's address and that of its client, * but Food Groups kind of assumes one. */ { struct connection *d; d = find_host_connection(&md->iface->ip_addr, pluto_port, (ip_address*)NULL, md->sender_port, policy); for (; d != NULL; d = d->hp_next) { if (d->kind == CK_GROUP) { /* ignore */ } else { if (d->kind == CK_TEMPLATE && !(d->policy & POLICY_OPPO)) { /* must be Road Warrior: we have a winner */ c = d; break; } /* Opportunistic or Shunt: pick tightest match */ if (addrinsubnet(&md->sender, &d->spd.that.client) && (c == NULL || !subnetinsubnet(&c->spd.that. client, &d->spd.that. client))) c = d; } } } if (c == NULL) { loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u" " but no connection has been authorized%s%s", ip_str( &md->iface->ip_addr), ntohs(portof(&md->iface->ip_addr)), (policy != LEMPTY) ? " with policy=" : "", (policy != LEMPTY) ? bitnamesof(sa_policy_bit_names, policy) : ""); return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN; } if (c->kind != CK_TEMPLATE) { loglog(RC_LOG_SERIOUS, "initial parent SA message received on %s:%u" " but \"%s\" forbids connection", ip_str( &md->iface->ip_addr), pluto_port, c->name); return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN; } c = rw_instantiate(c, &md->sender, NULL, NULL); } else { /* we found a non-wildcard conn. 
double check if it needs instantiation anyway (eg vnet=) */ /* vnet=/vhost= should have set CK_TEMPLATE on connection loading */ if ((c->kind == CK_TEMPLATE) && c->spd.that.virt) { DBG(DBG_CONTROL, DBG_log( "local endpoint has virt (vnet/vhost) set without wildcards - needs instantiation")); c = rw_instantiate(c, &md->sender, NULL, NULL); } else if ((c->kind == CK_TEMPLATE) && (c->policy & POLICY_IKEV2_ALLOW_NARROWING)) { DBG(DBG_CONTROL, DBG_log( "local endpoint has narrowing=yes - needs instantiation")); c = rw_instantiate(c, &md->sender, NULL, NULL); } } DBG_log("found connection: %s\n", c ? c->name : "<none>"); if (!st) { st = new_state(); /* set up new state */ memcpy(st->st_icookie, md->hdr.isa_icookie, COOKIE_SIZE); /* initialize_new_state expects valid icookie/rcookie values, so create it now */ get_cookie(FALSE, st->st_rcookie, COOKIE_SIZE, &md->sender); initialize_new_state(st, c, policy, 0, NULL_FD, pcim_stranger_crypto); st->st_ikev2 = TRUE; change_state(st, STATE_PARENT_R1); st->st_msgid_lastack = INVALID_MSGID; st->st_msgid_nextuse = 0; md->st = st; md->from_state = STATE_IKEv2_BASE; } /* check,as a responder, are we under dos attack or not * if yes go to 6 message exchange mode. it is a config option for now. * TBD set force_busy dynamically * Paul: Can we check for STF_TOOMUCHCRYPTO ? */ if (force_busy == TRUE) { u_char dcookie[SHA1_DIGEST_SIZE]; chunk_t dc; ikev2_get_dcookie( dcookie, st->st_ni, &md->sender, st->st_icookie); dc.ptr = dcookie; dc.len = SHA1_DIGEST_SIZE; /* check if I1 packet contian KE and a v2N payload with type COOKIE */ if ( md->chain[ISAKMP_NEXT_v2KE] && md->chain[ISAKMP_NEXT_v2N] && (md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type == v2N_COOKIE)) { u_int8_t spisize; const pb_stream *dc_pbs; chunk_t blob; DBG(DBG_CONTROLMORE, DBG_log("received a DOS cookie in I1 verify it")); /* we received dcookie we send earlier verify it */ spisize = md->chain[ISAKMP_NEXT_v2N]->payload.v2n. 
isan_spisize; dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs; blob.ptr = dc_pbs->cur + spisize; blob.len = pbs_left(dc_pbs) - spisize; DBG(DBG_CONTROLMORE, DBG_dump_chunk("dcookie received in I1 Packet", blob); DBG_dump("dcookie computed", dcookie, SHA1_DIGEST_SIZE)); if (memcmp(blob.ptr, dcookie, SHA1_DIGEST_SIZE) != 0) { libreswan_log( "mismatch in DOS v2N_COOKIE,send a new one"); SEND_NOTIFICATION_AA(v2N_COOKIE, &dc); return STF_FAIL + v2N_INVALID_IKE_SPI; } DBG(DBG_CONTROLMORE, DBG_log("dcookie received match with computed one")); } else { /* we are under DOS attack I1 contains no DOS COOKIE */ DBG(DBG_CONTROLMORE, DBG_log( "busy mode on. receieved I1 without a valid dcookie"); DBG_log("send a dcookie and forget this state")); SEND_NOTIFICATION_AA(v2N_COOKIE, &dc); return STF_FAIL; } } else { DBG(DBG_CONTROLMORE, DBG_log("will not send/process a dcookie")); } /* * We have to agree to the DH group before we actually know who * we are talking to. If we support the group, we use it. * * It is really too hard here to go through all the possible policies * that might permit this group. If we think we are being DOS'ed * then we should demand a cookie. */ { struct ikev2_ke *ke; char fromname[ADDRTOT_BUF]; addrtot(&md->sender, 0, fromname, ADDRTOT_BUF); if (!md->chain[ISAKMP_NEXT_v2KE]) { /* is this a notify? If so, log it */ if(md->chain[ISAKMP_NEXT_v2N]) { libreswan_log("Received Notify(%d): %s", md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type, enum_name(&ikev2_notify_names, md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type)); } libreswan_log( "rejecting I1 from %s:%u, no KE payload present", fromname, md->sender_port); return STF_FAIL + v2N_INVALID_KE_PAYLOAD; } ke = &md->chain[ISAKMP_NEXT_v2KE]->payload.v2ke; st->st_oakley.group = lookup_group(ke->isak_group); if (st->st_oakley.group == NULL) { libreswan_log( "rejecting I1 from %s:%u, invalid DH group=%u", fromname, md->sender_port, ke->isak_group); return STF_FAIL + v2N_INVALID_KE_PAYLOAD; } } /* now. 
we need to go calculate the nonce, and the KE */ { struct ke_continuation *ke = alloc_thing( struct ke_continuation, "ikev2_inI1outR1 KE"); stf_status e; ke->md = md; set_suspended(st, ke->md); if (!st->st_sec_in_use) { pcrc_init(&ke->ke_pcrc); ke->ke_pcrc.pcrc_func = ikev2_parent_inI1outR1_continue; e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group, pcim_stranger_crypto); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } } else { e = ikev2_parent_inI1outR1_tail((struct pluto_crypto_req_cont *)ke, NULL); } reset_globals(); return e; } } static void ikev2_parent_inI1outR1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI1outR1: calculated ke+nonce, sending R1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI1outR1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_globals(); } static stf_status ikev2_parent_inI1outR1_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; struct state *const st = md->st; struct connection *c = st->st_connection; pb_stream *keyex_pbs; int numvidtosend = 0; if (c->send_vendorid) { numvidtosend++; /* we send Libreswan VID */ } /* note that we don't update the 
state here yet */ /* record first packet for later checking of signature */ clonetochunk(st->st_firstpacket_him, md->message_pbs.start, pbs_offset( &md->message_pbs), "saved first received packet"); /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); r_hdr.isa_np = ISAKMP_NEXT_v2SA; /* major will be same, but their minor might be higher */ r_hdr.isa_version = build_ike_version(); r_hdr.isa_flags &= ~ISAKMP_FLAGS_I; r_hdr.isa_flags |= ISAKMP_FLAGS_R; /* PAUL shouldn't we set r_hdr.isa_msgid = [htonl](st->st_msgid); here? */ if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* start of SA out */ { struct isakmp_sa r_sa = sa_pd->payload.sa; v2_notification_t rn; pb_stream r_sa_pbs; r_sa.isasa_np = ISAKMP_NEXT_v2KE; /* XXX */ if (!out_struct(&r_sa, &ikev2_sa_desc, &md->rbody, &r_sa_pbs)) return STF_INTERNAL_ERROR; /* SA body in and out */ rn = ikev2_parse_parent_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, &r_sa_pbs, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } { v2_notification_t rn; chunk_t dc; keyex_pbs = &md->chain[ISAKMP_NEXT_v2KE]->pbs; /* KE in */ rn = accept_KE(&st->st_gi, "Gi", st->st_oakley.group, keyex_pbs); if (rn != v2N_NOTHING_WRONG) { u_int16_t group_number = htons( st->st_oakley.group->group); dc.ptr = (unsigned char *)&group_number; dc.len = 2; SEND_NOTIFICATION_AA(v2N_INVALID_KE_PAYLOAD, &dc); delete_state(st); return STF_FAIL + rn; } } /* Ni in */ RETURN_STF_FAILURE(accept_v2_nonce(md, &st->st_ni, "Ni")); /* send KE */ if (!ship_v2KE(st, r, &st->st_gr, &md->rbody, ISAKMP_NEXT_v2Nr)) return STF_INTERNAL_ERROR; /* send NONCE */ unpack_nonce(&st->st_nr, r); { int np = numvidtosend > 0 ? 
ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; struct ikev2_generic in; pb_stream pb; memset(&in, 0, sizeof(in)); in.isag_np = np; in.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); in.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } if (!out_struct(&in, &ikev2_nonce_desc, &md->rbody, &pb) || !out_raw(st->st_nr.ptr, st->st_nr.len, &pb, "IKEv2 nonce")) return STF_INTERNAL_ERROR; close_output_pbs(&pb); } /* Send VendrID if needed VID */ if (c->send_vendorid) { const char *myvid = ipsec_version_vendorid(); int np = --numvidtosend > 0 ? ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE; if (!out_generic_raw(np, &isakmp_vendor_id_desc, &md->rbody, myvid, strlen(myvid), "Vendor ID")) return STF_INTERNAL_ERROR; } close_message(&md->rbody, st); close_output_pbs(&reply_stream); /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_inI1outR1_tail"); /* save packet for later signing */ freeanychunk(st->st_firstpacket_me); clonetochunk(st->st_firstpacket_me, reply_stream.start, pbs_offset(&reply_stream), "saved first packet"); /* note: retransimission is driven by initiator */ return STF_OK; } /* * *************************************************************** * PARENT_inR1 ***** *************************************************************** * - * * */ static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_inR1outI2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); stf_status ikev2parent_inR1outI2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ pb_stream *keyex_pbs; /* check if the responder replied with v2N with DOS COOKIE */ if ( md->chain[ISAKMP_NEXT_v2N] && 
md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type == v2N_COOKIE) { u_int8_t spisize; const pb_stream *dc_pbs; DBG(DBG_CONTROLMORE, DBG_log( "inR1OutI2 received a DOS v2N_COOKIE from the responder"); DBG_log("resend the I1 with a cookie payload")); spisize = md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_spisize; dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs; clonetochunk(st->st_dcookie, (dc_pbs->cur + spisize), (pbs_left( dc_pbs) - spisize), "saved received dcookie"); DBG(DBG_CONTROLMORE, DBG_dump_chunk("dcookie received (instead of a R1):", st->st_dcookie); DBG_log("next STATE_PARENT_I1 resend I1 with the dcookie")); md->svm = ikev2_parent_firststate(); change_state(st, STATE_PARENT_I1); st->st_msgid_lastack = INVALID_MSGID; md->msgid_received = INVALID_MSGID; /* AAA hack */ st->st_msgid_nextuse = 0; return ikev2_parent_outI1_common(md, st); } /* * If we did not get a KE payload, we cannot continue. There * should be * a Notify telling us why. We inform the user, but continue to try this * connection via regular retransmit intervals. */ if ( md->chain[ISAKMP_NEXT_v2N] && (md->chain[ISAKMP_NEXT_v2KE] == NULL)) { const char *from_state_name = enum_name(&state_names, st->st_state); const u_int16_t isan_type = md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type; libreswan_log("%s: received %s", from_state_name, enum_name(&ikev2_notify_names, isan_type)); return STF_FAIL + isan_type; } else if ( md->chain[ISAKMP_NEXT_v2N]) { DBG(DBG_CONTROL, DBG_log("received a notify..")); } /* * the responder sent us back KE, Gr, Nr, and it's our time to calculate * the shared key values. 
*/ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inR1: calculating g^{xy} in order to send I2")); /* KE in */ keyex_pbs = &md->chain[ISAKMP_NEXT_v2KE]->pbs; RETURN_STF_FAILURE(accept_KE(&st->st_gr, "Gr", st->st_oakley.group, keyex_pbs)); /* Ni in */ RETURN_STF_FAILURE(accept_v2_nonce(md, &st->st_nr, "Ni")); if (md->chain[ISAKMP_NEXT_v2SA] == NULL) { libreswan_log("No responder SA proposal found"); return v2N_INVALID_SYNTAX; } /* process and confirm the SA selected */ { struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; v2_notification_t rn; /* SA body in and out */ rn = ikev2_parse_parent_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, NULL, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } /* update state */ ikev2_update_counters(md); /* now. we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inR1outI2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inR1outI2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, INITIATOR, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } } static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inR1outI2: calculating g^{xy}, sending I2")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (dh->md) release_md(dh->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == dh->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = 
FALSE;
	e = ikev2_parent_inR1outI2_tail(pcrc, r);

	if (dh->md != NULL) {
		complete_v2_state_transition(&dh->md, e);
		if (dh->md)
			release_md(dh->md);
	}
	reset_globals();
}

/*
 * Pad the cleartext up to the cipher's block-size boundary before
 * encryption.  Pad bytes carry their index value (0, 1, 2, ...); when
 * the payload is already block-aligned a full extra block of padding
 * is emitted, so at least one pad byte (the length octet) is always
 * present for the decryptor to strip.
 */
static void ikev2_padup_pre_encrypt(struct msg_digest *md,
				    pb_stream *e_pbs_cipher)
{
	struct state *st = md->st;
	struct state *pst = st;

	/* child SAs take their crypto parameters from the parent SA */
	if (st->st_clonedfrom != 0)
		pst = state_with_serialno(st->st_clonedfrom);

	/* pads things up to message size boundary */
	{
		size_t blocksize = pst->st_oakley.encrypter->enc_blocksize;
		char *b = alloca(blocksize);
		unsigned int i;
		size_t padding = pad_up(pbs_offset(e_pbs_cipher), blocksize);

		if (padding == 0)
			padding = blocksize;

		for (i = 0; i < padding; i++)
			b[i] = i;
		out_raw(b, padding, e_pbs_cipher, "padding and length");
	}
}

/*
 * Reserve room for the truncated integrity checksum at the current
 * output position.  Returns a pointer to where the HMAC will later be
 * written by ikev2_encrypt_msg(), or NULL on failure (unknown parent
 * state or output overflow).
 */
static unsigned char *ikev2_authloc(struct msg_digest *md,
				    pb_stream *e_pbs)
{
	unsigned char *b12;
	struct state *st = md->st;
	struct state *pst = st;

	if (st->st_clonedfrom != 0) {
		pst = state_with_serialno(st->st_clonedfrom);
		if ( pst == NULL)
			return NULL;
	}

	b12 = e_pbs->cur;
	if (!out_zero(pst->st_oakley.integ_hasher->hash_integ_len, e_pbs,
		      "length of truncated HMAC"))
		return NULL;

	return b12;
}

/*
 * Encrypt the SK payload in place (from encstart up to the cipher
 * pbs cursor) and then HMAC the whole message from authstart up to
 * authloc, writing the checksum into the slot reserved by
 * ikev2_authloc().  Which key halves are used depends on our role:
 * an INITIATOR encrypts with SK_ei/SK_ai, a responder with
 * SK_er/SK_ar.
 */
static stf_status ikev2_encrypt_msg(struct msg_digest *md,
				    enum phase1_role init,
				    unsigned char *authstart,
				    unsigned char *iv,
				    unsigned char *encstart,
				    unsigned char *authloc,
				    pb_stream *e_pbs UNUSED,
				    pb_stream *e_pbs_cipher)
{
	struct state *st = md->st;
	struct state *pst = st;
	chunk_t *cipherkey, *authkey;

	if (st->st_clonedfrom != 0)
		pst = state_with_serialno(st->st_clonedfrom);

	if (init == INITIATOR) {
		cipherkey = &pst->st_skey_ei;
		authkey = &pst->st_skey_ai;
	} else {
		cipherkey = &pst->st_skey_er;
		authkey = &pst->st_skey_ar;
	}

	/* encrypt the block */
	{
		size_t blocksize = pst->st_oakley.encrypter->enc_blocksize;
		unsigned char *savediv = alloca(blocksize);
		unsigned int cipherlen = e_pbs_cipher->cur - encstart;

		DBG(DBG_CRYPT,
		    DBG_dump("data before encryption:", encstart, cipherlen));

		/* do_crypt mutates its IV argument; work on a copy */
		memcpy(savediv, iv, blocksize);

		/* now, encrypt */
(st->st_oakley.encrypter->do_crypt)(encstart, cipherlen, cipherkey->ptr, cipherkey->len, savediv, TRUE); DBG(DBG_CRYPT, DBG_dump("data after encryption:", encstart, cipherlen)); } /* okay, authenticate from beginning of IV */ { struct hmac_ctx ctx; DBG(DBG_PARSING, DBG_log("Inside authloc")); DBG(DBG_CRYPT, DBG_dump("authkey value: ", authkey->ptr, authkey->len)); hmac_init_chunk(&ctx, pst->st_oakley.integ_hasher, *authkey); DBG(DBG_PARSING, DBG_log("Inside authloc after init")); hmac_update(&ctx, authstart, authloc - authstart); DBG(DBG_PARSING, DBG_log("Inside authloc after update")); hmac_final(authloc, &ctx); DBG(DBG_PARSING, DBG_log("Inside authloc after final")); DBG(DBG_PARSING, { DBG_dump("data being hmac:", authstart, authloc - authstart); DBG_dump("out calculated auth:", authloc, pst->st_oakley.integ_hasher-> hash_integ_len); }); } return STF_OK; } static stf_status ikev2_decrypt_msg(struct msg_digest *md, enum phase1_role init) { struct state *st = md->st; unsigned char *encend; pb_stream *e_pbs; unsigned int np; unsigned char *iv; chunk_t *cipherkey, *authkey; unsigned char *authstart; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); if (init == INITIATOR) { cipherkey = &pst->st_skey_er; authkey = &pst->st_skey_ar; } else { cipherkey = &pst->st_skey_ei; authkey = &pst->st_skey_ai; } e_pbs = &md->chain[ISAKMP_NEXT_v2E]->pbs; np = md->chain[ISAKMP_NEXT_v2E]->payload.generic.isag_np; authstart = md->packet_pbs.start; iv = e_pbs->cur; encend = e_pbs->roof - pst->st_oakley.integ_hasher->hash_integ_len; /* start by checking authenticator */ { unsigned char *b12 = alloca( pst->st_oakley.integ_hasher->hash_digest_len); struct hmac_ctx ctx; hmac_init_chunk(&ctx, pst->st_oakley.integ_hasher, *authkey); hmac_update(&ctx, authstart, encend - authstart); hmac_final(b12, &ctx); DBG(DBG_PARSING, { DBG_dump("data being hmac:", authstart, encend - authstart); DBG_dump("R2 calculated auth:", b12, 
pst->st_oakley.integ_hasher-> hash_integ_len); DBG_dump("R2 provided auth:", encend, pst->st_oakley.integ_hasher-> hash_integ_len); }); /* compare first 96 bits == 12 bytes */ /* It is not always 96 bytes, it depends upon which integ algo is used*/ if (memcmp(b12, encend, pst->st_oakley.integ_hasher->hash_integ_len) != 0) { libreswan_log("R2 failed to match authenticator"); return STF_FAIL; } } DBG(DBG_PARSING, DBG_log("authenticator matched")); /* decrypt */ { size_t blocksize = pst->st_oakley.encrypter->enc_blocksize; unsigned char *encstart = iv + blocksize; unsigned int enclen = encend - encstart; unsigned int padlen; DBG(DBG_CRYPT, DBG_dump("data before decryption:", encstart, enclen)); /* now, decrypt */ (pst->st_oakley.encrypter->do_crypt)(encstart, enclen, cipherkey->ptr, cipherkey->len, iv, FALSE); padlen = encstart[enclen - 1]; encend = encend - padlen + 1; if (encend < encstart) { libreswan_log("invalid pad length: %u", padlen); return STF_FAIL; } DBG(DBG_CRYPT, { DBG_dump("decrypted payload:", encstart, enclen); DBG_log("striping %u bytes as pad", padlen + 1); }); init_pbs(&md->clr_pbs, encstart, enclen - (padlen + 1), "cleartext"); } { stf_status ret; ret = ikev2_process_payloads(md, &md->clr_pbs, st->st_state, np); if (ret != STF_OK) return ret; } return STF_OK; } static stf_status ikev2_send_auth(struct connection *c, struct state *st, enum phase1_role role, unsigned int np, unsigned char *idhash_out, pb_stream *outpbs) { struct ikev2_a a; pb_stream a_pbs; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); a.isaa_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); a.isaa_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } a.isaa_np = np; if (c->policy & POLICY_RSASIG) { a.isaa_type = v2_AUTH_RSA; } else if (c->policy & POLICY_PSK) { a.isaa_type = v2_AUTH_SHARED; } else { /* what else is 
there?... DSS not implemented. */ return STF_FAIL; } if (!out_struct(&a, &ikev2_a_desc, outpbs, &a_pbs)) return STF_INTERNAL_ERROR; if (c->policy & POLICY_RSASIG) { if (!ikev2_calculate_rsa_sha1(pst, role, idhash_out, &a_pbs)) return STF_FATAL + v2N_AUTHENTICATION_FAILED; } else if (c->policy & POLICY_PSK) { if (!ikev2_calculate_psk_auth(pst, role, idhash_out, &a_pbs)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; } close_output_pbs(&a_pbs); return STF_OK; } static stf_status ikev2_parent_inR1outI2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *st = md->st; struct connection *c = st->st_connection; struct ikev2_generic e; unsigned char *encstart; pb_stream e_pbs, e_pbs_cipher; unsigned char *iv; int ivsize; stf_status ret; unsigned char *idhash; unsigned char *authstart; struct state *pst = st; bool send_cert = FALSE; finish_dh_v2(st, r); if (DBGP(DBG_PRIVATE) && DBGP(DBG_CRYPT)) ikev2_log_parentSA(st); pst = st; st = duplicate_state(pst); st->st_msgid = htonl(pst->st_msgid_nextuse); /* PAUL: note ordering */ insert_state(st); md->st = st; md->pst = pst; /* parent had crypto failed, replace it with rekey! 
*/ delete_event(pst); event_schedule(EVENT_SA_REPLACE, c->sa_ike_life_seconds, pst); /* need to force parent state to I2 */ change_state(pst, STATE_PARENT_I2); /* record first packet for later checking of signature */ clonetochunk(pst->st_firstpacket_him, md->message_pbs.start, pbs_offset( &md->message_pbs), "saved first received packet"); /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_xchg = ISAKMP_v2_AUTH; r_hdr.isa_flags = ISAKMP_FLAGS_I; r_hdr.isa_msgid = st->st_msgid; memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2IDi; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); e.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* send out the IDi payload */ { struct ikev2_id r_id; pb_stream r_id_pbs; chunk_t id_b; struct hmac_ctx id_ctx; unsigned char *id_start; unsigned int id_len; hmac_init_chunk(&id_ctx, pst->st_oakley.prf_hasher, pst->st_skey_pi); build_id_payload((struct isakmp_ipsec_id 
*)&r_id, &id_b, &c->spd.this); r_id.isai_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); r_id.isai_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } { /* decide to send CERT payload */ send_cert = doi_send_ikev2_cert_thinking(st); if (send_cert) r_id.isai_np = ISAKMP_NEXT_v2CERT; else r_id.isai_np = ISAKMP_NEXT_v2AUTH; } id_start = e_pbs_cipher.cur; if (!out_struct(&r_id, &ikev2_id_desc, &e_pbs_cipher, &r_id_pbs) || !out_chunk(id_b, &r_id_pbs, "my identity")) return STF_INTERNAL_ERROR; /* HASH of ID is not done over common header */ id_start += 4; close_output_pbs(&r_id_pbs); /* calculate hash of IDi for AUTH below */ id_len = e_pbs_cipher.cur - id_start; DBG(DBG_CRYPT, DBG_dump_chunk("idhash calc pi", pst->st_skey_pi)); DBG(DBG_CRYPT, DBG_dump("idhash calc I2", id_start, id_len)); hmac_update(&id_ctx, id_start, id_len); idhash = alloca(pst->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash, &id_ctx); } /* send [CERT,] payload RFC 4306 3.6, 1.2) */ { if (send_cert) { stf_status certstat = ikev2_send_cert( st, md, INITIATOR, ISAKMP_NEXT_v2AUTH, &e_pbs_cipher); if (certstat != STF_OK) return certstat; } } /* send out the AUTH payload */ { lset_t policy; struct connection *c0 = first_pending(pst, &policy, &st->st_whack_sock); unsigned int np = (c0 ? ISAKMP_NEXT_v2SA : ISAKMP_NEXT_v2NONE); DBG(DBG_CONTROL, DBG_log(" payload after AUTH will be %s", (c0) ? "ISAKMP_NEXT_v2SA" : "ISAKMP_NEXT_v2NONE/NOTIFY")); stf_status authstat = ikev2_send_auth(c, st, INITIATOR, np, idhash, &e_pbs_cipher); if (authstat != STF_OK) return authstat; /* * now, find an eligible child SA from the pending list, and emit * SA2i, TSi and TSr and (v2N_USE_TRANSPORT_MODE notification in transport mode) for it . 
*/ if (c0) { chunk_t child_spi, notify_data; st->st_connection = c0; ikev2_emit_ipsec_sa(md, &e_pbs_cipher, ISAKMP_NEXT_v2TSi, c0, policy); st->st_ts_this = ikev2_end_to_ts(&c0->spd.this); st->st_ts_that = ikev2_end_to_ts(&c0->spd.that); ikev2_calc_emit_ts(md, &e_pbs_cipher, INITIATOR, c0, policy); if ( !(st->st_connection->policy & POLICY_TUNNEL) ) { DBG_log( "Initiator child policy is transport mode, sending v2N_USE_TRANSPORT_MODE"); memset(&child_spi, 0, sizeof(child_spi)); memset(&notify_data, 0, sizeof(notify_data)); ship_v2N(ISAKMP_NEXT_v2NONE, ISAKMP_PAYLOAD_NONCRITICAL, 0, &child_spi, v2N_USE_TRANSPORT_MODE, &notify_data, &e_pbs_cipher); } } else { libreswan_log( "no pending SAs found, PARENT SA keyed only"); } } /* * need to extend the packet so that we will know how big it is * since the length is under the integrity check */ ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, INITIATOR, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } /* keep it for a retransmit if necessary, but on initiator * we never do that, but send_ike_msg() uses it. */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_outI1"); /* * Delete previous retransmission event. 
*/
	delete_event(st);
	event_schedule(EVENT_v2_RETRANSMIT, EVENT_RETRANSMIT_DELAY_0, st);

	return STF_OK;
}

/*
 *
 ***************************************************************
 *                       PARENT_inI2                       *****
 ***************************************************************
 *  -
 *
 *
 */
static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc,
					    struct pluto_crypto_req *r,
					    err_t ugh);

static stf_status ikev2_parent_inI2outR2_tail(
	struct pluto_crypto_req_cont *pcrc,
	struct pluto_crypto_req *r);

/*
 * Responder entry point for the initiator's IKE_AUTH request (I2).
 * The interesting payloads arrive encrypted, so before anything can
 * be decoded we must (asynchronously, via the crypto helper) compute
 * g^xy and SKEYSEED; the real processing happens in the _tail()
 * continuation once the DH result is available.
 */
stf_status ikev2parent_inI2outR2(struct msg_digest *md)
{
	struct state *st = md->st;
	/* struct connection *c = st->st_connection; */

	/*
	 * the initiator sent us an encrypted payload. We need to calculate
	 * our g^xy, and skeyseed values, and then decrypt the payload.
	 */

	DBG(DBG_CONTROLMORE,
	    DBG_log(
		    "ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2"));

	/* verify that there is in fact an encrypted payload */
	if (!md->chain[ISAKMP_NEXT_v2E]) {
		libreswan_log("R2 state should receive an encrypted payload");
		reset_globals(); /* XXX suspicious - why was this deemed neccessary? */
		return STF_FATAL;
	}

	/* now.
we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inI2outR2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } } static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI2outR2: calculating g^{xy}, sending R2")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (dh->md) release_md(dh->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == dh->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI2outR2_tail(pcrc, r); if ( e > STF_FAIL) { /* we do not send a notify because we are the initiator that could be responding to an error notification */ int v2_notify_num = e - STF_FAIL; DBG_log( "ikev2_parent_inI2outR2_tail returned STF_FAIL with %s", enum_name(&ikev2_notify_names, v2_notify_num)); } else if ( e != STF_OK) { DBG_log("ikev2_parent_inI2outR2_tail returned %s", enum_name(&stfstatus_name, e)); } if (dh->md != NULL) { complete_v2_state_transition(&dh->md, e); if (dh->md) release_md(dh->md); } reset_globals(); } static stf_status ikev2_parent_inI2outR2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state 
*const st = md->st; struct connection *c = st->st_connection; unsigned char *idhash_in, *idhash_out; unsigned char *authstart; unsigned int np; int v2_notify_num = 0; /* extract calculated values from r */ finish_dh_v2(st, r); if (DBGP(DBG_PRIVATE) && DBGP(DBG_CRYPT)) ikev2_log_parentSA(st); /* decrypt things. */ { stf_status ret; ret = ikev2_decrypt_msg(md, RESPONDER); if (ret != STF_OK) return ret; } /*Once the message has been decrypted, then only we can check for auth payload*/ /*check the presense of auth payload now so that it does not crash in rehash_state if auth payload has not been received*/ if (!md->chain[ISAKMP_NEXT_v2AUTH]) { libreswan_log("no authentication payload found"); return STF_FAIL; } if (!ikev2_decode_peer_id(md, RESPONDER)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; { struct hmac_ctx id_ctx; const pb_stream *id_pbs = &md->chain[ISAKMP_NEXT_v2IDi]->pbs; unsigned char *idstart = id_pbs->start + 4; unsigned int idlen = pbs_room(id_pbs) - 4; hmac_init_chunk(&id_ctx, st->st_oakley.prf_hasher, st->st_skey_pi); /* calculate hash of IDi for AUTH below */ DBG(DBG_CRYPT, DBG_dump_chunk("idhash verify pi", st->st_skey_pi)); DBG(DBG_CRYPT, DBG_dump("idhash verify I2", idstart, idlen)); hmac_update(&id_ctx, idstart, idlen); idhash_in = alloca(st->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_in, &id_ctx); } /* process CERT payload */ { if (md->chain[ISAKMP_NEXT_v2CERT]) { /* should we check if we should accept a cert payload ? 
* has_preloaded_public_key(st) */ DBG(DBG_CONTROLMORE, DBG_log( "has a v2_CERT payload going to process it ")); ikev2_decode_cert(md); } } /* process CERTREQ payload */ if (md->chain[ISAKMP_NEXT_v2CERTREQ]) { DBG(DBG_CONTROLMORE, DBG_log("has a v2CERTREQ payload going to decode it")); ikev2_decode_cr(md, &st->st_connection->requested_ca); } /* process AUTH payload now */ /* now check signature from RSA key */ switch (md->chain[ISAKMP_NEXT_v2AUTH]->payload.v2a.isaa_type) { case v2_AUTH_RSA: { stf_status authstat = ikev2_verify_rsa_sha1(st, RESPONDER, idhash_in, NULL, /* keys from DNS */ NULL, /* gateways from DNS */ &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("RSA authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FATAL; } break; } case v2_AUTH_SHARED: { stf_status authstat = ikev2_verify_psk_auth(st, RESPONDER, idhash_in, &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log( "PSK authentication failed AUTH mismatch!"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FATAL; } break; } default: libreswan_log("authentication method: %s not supported", enum_name(&ikev2_auth_names, md->chain[ISAKMP_NEXT_v2AUTH]->payload. v2a.isaa_type)); return STF_FATAL; } /* Is there a notify about an error ? */ if (md->chain[ISAKMP_NEXT_v2N] != NULL) { DBG(DBG_CONTROL, DBG_log( " notify payload detected, should be processed....")); } /* good. 
now create child state */ /* note: as we will switch to child state, we force the parent to the * new state now */ change_state(st, STATE_PARENT_R2); c->newest_isakmp_sa = st->st_serialno; delete_event(st); event_schedule(EVENT_SA_REPLACE, c->sa_ike_life_seconds, st); authstart = reply_stream.cur; /* send response */ { unsigned char *encstart; unsigned char *iv; unsigned int ivsize; struct ikev2_generic e; pb_stream e_pbs, e_pbs_cipher; stf_status ret; bool send_cert = FALSE; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_xchg = ISAKMP_v2_AUTH; r_hdr.isa_flags = ISAKMP_FLAGS_R; memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2IDr; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* decide to send CERT payload before we generate IDr */ send_cert = doi_send_ikev2_cert_thinking(st); /* send out the IDr payload */ { struct ikev2_id r_id; pb_stream r_id_pbs; chunk_t id_b; struct hmac_ctx id_ctx; unsigned char *id_start; unsigned int id_len; hmac_init_chunk(&id_ctx, st->st_oakley.prf_hasher, st->st_skey_pr); build_id_payload((struct isakmp_ipsec_id *)&r_id, &id_b, &c->spd.this); 
r_id.isai_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (send_cert) r_id.isai_np = ISAKMP_NEXT_v2CERT; else r_id.isai_np = ISAKMP_NEXT_v2AUTH; id_start = e_pbs_cipher.cur; if (!out_struct(&r_id, &ikev2_id_desc, &e_pbs_cipher, &r_id_pbs) || !out_chunk(id_b, &r_id_pbs, "my identity")) return STF_INTERNAL_ERROR; close_output_pbs(&r_id_pbs); id_start += 4; /* calculate hash of IDi for AUTH below */ id_len = e_pbs_cipher.cur - id_start; DBG(DBG_CRYPT, DBG_dump_chunk("idhash calc pr", st->st_skey_pr)); DBG(DBG_CRYPT, DBG_dump("idhash calc R2", id_start, id_len)); hmac_update(&id_ctx, id_start, id_len); idhash_out = alloca( st->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_out, &id_ctx); } DBG(DBG_CONTROLMORE, DBG_log("assembled IDr payload -- CERT next")); /* send CERT payload RFC 4306 3.6, 1.2:([CERT,] ) */ if (send_cert) { stf_status certstat = ikev2_send_cert(st, md, RESPONDER, ISAKMP_NEXT_v2AUTH, &e_pbs_cipher); if (certstat != STF_OK) return certstat; } /* authentication good, see if there is a child SA being proposed */ if (md->chain[ISAKMP_NEXT_v2SA] == NULL || md->chain[ISAKMP_NEXT_v2TSi] == NULL || md->chain[ISAKMP_NEXT_v2TSr] == NULL) { /* initiator didn't propose anything. Weird. Try unpending out end. 
*/ /* UNPEND XXX */ libreswan_log("No CHILD SA proposals received."); np = ISAKMP_NEXT_v2NONE; } else { DBG_log("CHILD SA proposals received"); libreswan_log( "PAUL: this is where we have to check the TSi/TSr"); np = ISAKMP_NEXT_v2SA; } DBG(DBG_CONTROLMORE, DBG_log("going to assemble AUTH payload")); /* now send AUTH payload */ { stf_status authstat = ikev2_send_auth(c, st, RESPONDER, np, idhash_out, &e_pbs_cipher); if (authstat != STF_OK) return authstat; } if (np == ISAKMP_NEXT_v2SA) { /* must have enough to build an CHILD_SA */ ret = ikev2_child_sa_respond(md, RESPONDER, &e_pbs_cipher); if (ret > STF_FAIL) { v2_notify_num = ret - STF_FAIL; DBG(DBG_CONTROL, DBG_log( "ikev2_child_sa_respond returned STF_FAIL with %s", enum_name(&ikev2_notify_names, v2_notify_num))); np = ISAKMP_NEXT_v2NONE; } else if (ret != STF_OK) { DBG_log("ikev2_child_sa_respond returned %s", enum_name( &stfstatus_name, ret)); np = ISAKMP_NEXT_v2NONE; } } ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, RESPONDER, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } } /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_inI2outR2_tail"); /* note: retransimission is driven by initiator */ /* if the child failed, delete its state here - we sent the packet */ /* PAUL */ return STF_OK; } /* * *************************************************************** * PARENT_inR2 (I3 state) ***** *************************************************************** * - there are no cryptographic continuations, but be certain * that there will have to be DNS continuations, but they * just aren't implemented 
yet. * */ stf_status ikev2parent_inR2(struct msg_digest *md) { struct state *st = md->st; struct connection *c = st->st_connection; unsigned char *idhash_in; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); return STF_FATAL; } /* decrypt things. */ { stf_status ret; ret = ikev2_decrypt_msg(md, INITIATOR); if (ret != STF_OK) return ret; } if (!ikev2_decode_peer_id(md, INITIATOR)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; { struct hmac_ctx id_ctx; const pb_stream *id_pbs = &md->chain[ISAKMP_NEXT_v2IDr]->pbs; unsigned char *idstart = id_pbs->start + 4; unsigned int idlen = pbs_room(id_pbs) - 4; hmac_init_chunk(&id_ctx, pst->st_oakley.prf_hasher, pst->st_skey_pr); /* calculate hash of IDr for AUTH below */ DBG(DBG_CRYPT, DBG_dump_chunk("idhash verify pr", pst->st_skey_pr)); DBG(DBG_CRYPT, DBG_dump("idhash auth R2", idstart, idlen)); hmac_update(&id_ctx, idstart, idlen); idhash_in = alloca(pst->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_in, &id_ctx); } if (md->chain[ISAKMP_NEXT_v2CERT]) { /* should we check if we should accept a cert payload ? 
* has_preloaded_public_key(st) */ /* in v1 code it is decode_cert(struct msg_digest *md) */ DBG(DBG_CONTROLMORE, DBG_log("has a v2_CERT payload going to decode it")); ikev2_decode_cert(md); } /* process AUTH payload */ if (!md->chain[ISAKMP_NEXT_v2AUTH]) { libreswan_log("no authentication payload found"); return STF_FAIL; } /* now check signature from RSA key */ switch (md->chain[ISAKMP_NEXT_v2AUTH]->payload.v2a.isaa_type) { case v2_AUTH_RSA: { stf_status authstat = ikev2_verify_rsa_sha1(pst, INITIATOR, idhash_in, NULL, /* keys from DNS */ NULL, /* gateways from DNS */ &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FAIL; } break; } case v2_AUTH_SHARED: { stf_status authstat = ikev2_verify_psk_auth(pst, INITIATOR, idhash_in, &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("PSK authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FAIL; } break; } default: libreswan_log("authentication method: %s not supported", enum_name(&ikev2_auth_names, md->chain[ISAKMP_NEXT_v2AUTH]->payload. v2a.isaa_type)); return STF_FAIL; } /* * update the parent state to make sure that it knows we have * authenticated properly. */ change_state(pst, STATE_PARENT_I3); c->newest_isakmp_sa = pst->st_serialno; /* authentication good, see if there is a child SA available */ if (md->chain[ISAKMP_NEXT_v2SA] == NULL || md->chain[ISAKMP_NEXT_v2TSi] == NULL || md->chain[ISAKMP_NEXT_v2TSr] == NULL) { /* not really anything to here... but it would be worth unpending again */ DBG(DBG_CONTROLMORE, DBG_log( "no v2SA, v2TSi or v2TSr received, not attempting to setup child SA")); DBG(DBG_CONTROLMORE, DBG_log(" Should we check for some notify?")); /* * Delete previous retransmission event. 
*/ delete_event(st); return STF_OK; } { int bestfit_n, bestfit_p, bestfit_pr; unsigned int best_tsi_i, best_tsr_i; bestfit_n = -1; bestfit_p = -1; bestfit_pr = -1; /* Check TSi/TSr http://tools.ietf.org/html/rfc5996#section-2.9 */ DBG(DBG_CONTROLMORE, DBG_log(" check narrowing - we are responding to I2")); struct payload_digest *const tsi_pd = md->chain[ISAKMP_NEXT_v2TSi]; struct payload_digest *const tsr_pd = md->chain[ISAKMP_NEXT_v2TSr]; struct traffic_selector tsi[16], tsr[16]; #if 0 bool instantiate = FALSE; ip_subnet tsi_subnet, tsr_subnet; const char *oops; #endif unsigned int tsi_n, tsr_n; tsi_n = ikev2_parse_ts(tsi_pd, tsi, 16); tsr_n = ikev2_parse_ts(tsr_pd, tsr, 16); DBG_log( "Checking TSi(%d)/TSr(%d) selectors, looking for exact match", tsi_n, tsr_n); { struct spd_route *sra; sra = &c->spd; int bfit_n = ikev2_evaluate_connection_fit(c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n); if (bfit_n > bestfit_n) { DBG(DBG_CONTROLMORE, DBG_log( "bfit_n=ikev2_evaluate_connection_fit found better fit c %s", c->name)); int bfit_p = ikev2_evaluate_connection_port_fit(c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n, &best_tsi_i, &best_tsr_i); if (bfit_p > bestfit_p) { DBG(DBG_CONTROLMORE, DBG_log( "ikev2_evaluate_connection_port_fit found better fit c %s, tsi[%d],tsr[%d]", c->name, best_tsi_i, best_tsr_i)); int bfit_pr = ikev2_evaluate_connection_protocol_fit( c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n, &best_tsi_i, &best_tsr_i); if (bfit_pr > bestfit_pr ) { DBG(DBG_CONTROLMORE, DBG_log( "ikev2_evaluate_connection_protocol_fit found better fit c %s, tsi[%d],tsr[%d]", c ->name, best_tsi_i, best_tsr_i)); bestfit_p = bfit_p; bestfit_n = bfit_n; } else { DBG(DBG_CONTROLMORE, DBG_log( "protocol range fit c %s c->name was rejected by protocol matching", c ->name)); } } } else { DBG(DBG_CONTROLMORE, DBG_log( "prefix range fit c %s c->name was rejected by port matching", c->name)); } } if ( ( bestfit_n > 0 ) && (bestfit_p > 0)) { DBG(DBG_CONTROLMORE, DBG_log( ( "found an acceptable 
TSi/TSr Traffic Selector"))); memcpy(&st->st_ts_this, &tsi[best_tsi_i], sizeof(struct traffic_selector)); memcpy(&st->st_ts_that, &tsr[best_tsr_i], sizeof(struct traffic_selector)); ikev2_print_ts(&st->st_ts_this); ikev2_print_ts(&st->st_ts_that); ip_subnet tmp_subnet_i; ip_subnet tmp_subnet_r; rangetosubnet(&st->st_ts_this.low, &st->st_ts_this.high, &tmp_subnet_i); rangetosubnet(&st->st_ts_that.low, &st->st_ts_that.high, &tmp_subnet_r); c->spd.this.client = tmp_subnet_i; c->spd.this.port = st->st_ts_this.startport; c->spd.this.protocol = st->st_ts_this.ipprotoid; setportof(htons( c->spd.this.port), &c->spd.this.host_addr); setportof(htons( c->spd.this.port), &c->spd.this.client.addr); if ( subnetishost(&c->spd.this.client) && addrinsubnet(&c->spd.this.host_addr, &c->spd.this.client)) c->spd.this.has_client = FALSE; else c->spd.this.has_client = TRUE; c->spd.that.client = tmp_subnet_r; c->spd.that.port = st->st_ts_that.startport; c->spd.that.protocol = st->st_ts_that.ipprotoid; setportof(htons( c->spd.that.port), &c->spd.that.host_addr); setportof(htons( c->spd.that.port), &c->spd.that.client.addr); if ( subnetishost(&c->spd.that.client) && addrinsubnet(&c->spd.that.host_addr, &c->spd.that.client)) c->spd.that.has_client = FALSE; else c->spd.that.has_client = TRUE; /* AAAA */ } else { DBG(DBG_CONTROLMORE, DBG_log(( "reject responder TSi/TSr Traffic Selector"))); /* prevents parent from going to I3 */ return STF_FAIL + v2N_TS_UNACCEPTABLE; } } /* end of TS check block */ { v2_notification_t rn; struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; rn = ikev2_parse_child_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, NULL, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } { struct payload_digest *p; for (p = md->chain[ISAKMP_NEXT_v2N]; p != NULL; p = p->next) { /* RFC 5996 */ /*Types in the range 0 - 16383 are intended for reporting errors. 
An * implementation receiving a Notify payload with one of these types * that it does not recognize in a response MUST assume that the * corresponding request has failed entirely. Unrecognized error types * in a request and status types in a request or response MUST be * ignored, and they should be logged.*/ if (enum_name(&ikev2_notify_names, p->payload.v2n.isan_type) == NULL) { if (p->payload.v2n.isan_type < v2N_INITIAL_CONTACT) return STF_FAIL + p->payload.v2n.isan_type; } if ( p->payload.v2n.isan_type == v2N_USE_TRANSPORT_MODE ) { if ( st->st_connection->policy & POLICY_TUNNEL) { /*This means we did not send v2N_USE_TRANSPORT, however responder is sending it in now (inR2), seems incorrect*/ DBG(DBG_CONTROLMORE, DBG_log( "Initiator policy is tunnel, responder sends v2N_USE_TRANSPORT_MODE notification in inR2, ignoring it")); } else { DBG(DBG_CONTROLMORE, DBG_log( "Initiator policy is transport, responder sends v2N_USE_TRANSPORT_MODE, setting CHILD SA to transport mode")); if (st->st_esp.present == TRUE) { /*libreswan supports only "esp" with ikev2 it seems, look at ikev2_parse_child_sa_body handling*/ st->st_esp.attrs.encapsulation = ENCAPSULATION_MODE_TRANSPORT; } } } } /* for */ } /* notification block */ ikev2_derive_child_keys(st, md->role); c->newest_ipsec_sa = st->st_serialno; /* now install child SAs */ if (!install_ipsec_sa(st, TRUE)) return STF_FATAL; /* * Delete previous retransmission event. */ delete_event(st); return STF_OK; } /* * Cookie = <VersionIDofSecret> | Hash(Ni | IPi | SPIi | <secret>) * where <secret> is a randomly generated secret known only to the * in LSW implementation <VersionIDofSecret> is not used. 
 */

/*
 * Compute an anti-DoS cookie: SHA1 over Ni | initiator-address | SPIi |
 * the daily secret.  Writes SHA1_DIGEST_SIZE bytes into dcookie.
 * Always returns TRUE.
 */
static bool ikev2_get_dcookie(u_char *dcookie, chunk_t st_ni,
			      ip_address *addr, u_int8_t *spiI)
{
	size_t addr_length;
	SHA1_CTX ctx_sha1;
	/* buffer large enough for either an IPv4 or IPv6 address */
	unsigned char addr_buff[
		sizeof(union { struct in_addr A; struct in6_addr B; })];

	addr_length = addrbytesof(addr, addr_buff, sizeof(addr_buff));
	SHA1Init(&ctx_sha1);
	SHA1Update(&ctx_sha1, st_ni.ptr, st_ni.len);
	SHA1Update(&ctx_sha1, addr_buff, addr_length);
	/* NOTE(review): only the first byte of the SPI is hashed here
	 * (sizeof(*spiI) == 1) — confirm this is intentional. */
	SHA1Update(&ctx_sha1, spiI, sizeof(*spiI));
	SHA1Update(&ctx_sha1, ikev2_secret_of_the_day,
		   SHA1_DIGEST_SIZE);
	SHA1Final(dcookie, &ctx_sha1);
	DBG(DBG_PRIVATE,
	    DBG_log("ikev2 secret_of_the_day used %s, length %d",
		    ikev2_secret_of_the_day,
		    SHA1_DIGEST_SIZE);
	    );
	DBG(DBG_CRYPT,
	    DBG_dump("computed dcookie: HASH(Ni | IPi | SPIi | <secret>)",
		     dcookie, SHA1_DIGEST_SIZE));
#if 0
	ikev2_secrets_recycle++;
	if (ikev2_secrets_recycle >= 32768) {
		/* handed out too many cookies, cycle secrets */
		ikev2_secrets_recycle = 0;
		/* can we call init_secrets() without adding an EVENT? */
		init_secrets();
	}
#endif
	return TRUE;
}

/*
 *
 ***************************************************************
 *                       NOTIFICATION_OUT Complete packet  *****
 ***************************************************************
 *
 */

/*
 * Build and send a complete (unencrypted) IKEv2 notification packet of
 * the given type to the peer of p1st, using the supplied cookies.
 * Errors while marshalling are logged and the function returns early.
 */
void send_v2_notification(struct state *p1st, u_int16_t type,
			  struct state *encst,
			  u_char *icookie,
			  u_char *rcookie,
			  chunk_t *n_data)
{
	u_char buffer[1024];
	pb_stream reply;
	pb_stream rbody;
	chunk_t child_spi, notify_data;
	/* this function is not generic enough yet just enough for 6msg
	 * TBD accept HDR FLAGS as arg. default ISAKMP_FLAGS_R
	 * TBD when there is a child SA use that SPI in the notify paylod.
	 * TBD support encrypted notifications payloads.
	 * TBD accept Critical bit as an argument. default is set.
	 * TBD accept exchange type as an arg, default is ISAKMP_v2_SA_INIT
	 * do we need to send a notify with empty data?
	 * do we need to support more Protocol ID? more than PROTO_ISAKMP
	 */

	/* NOTE: encst only selects the log prefix here; encrypted
	 * notifications are not actually implemented yet (see TBD above). */
	libreswan_log("sending %s notification %s to %s:%u",
		      encst ? "encrypted " : "",
		      enum_name(&ikev2_notify_names, type),
		      ip_str(&p1st->st_remoteaddr),
		      p1st->st_remoteport);
#if 0
	/* Empty notification data section should be fine? */
	if (n_data == NULL) {
		DBG(DBG_CONTROLMORE,
		    DBG_log("don't send packet when notification data empty"));
		return;
	}
#endif

	memset(buffer, 0, sizeof(buffer));
	init_pbs(&reply, buffer, sizeof(buffer), "notification msg");

	/* HDR out */
	{
		struct isakmp_hdr n_hdr;
		zero(&n_hdr); /* default to 0 */  /* AAA should we copy from MD? */
		/* Impair function will raise major/minor by 1 for testing */
		n_hdr.isa_version = build_ike_version();
		memcpy(n_hdr.isa_rcookie, rcookie, COOKIE_SIZE);
		memcpy(n_hdr.isa_icookie, icookie, COOKIE_SIZE);
		n_hdr.isa_xchg = ISAKMP_v2_SA_INIT;
		n_hdr.isa_np = ISAKMP_NEXT_v2N;
		/* we are always the responder in this exchange */
		n_hdr.isa_flags &= ~ISAKMP_FLAGS_I;
		n_hdr.isa_flags |= ISAKMP_FLAGS_R;
#warning check msgid code here
		/* PAUL: shouldn't we set n_hdr.isa_msgid = [htonl](p1st->st_msgid); */
		if (!out_struct(&n_hdr, &isakmp_hdr_desc, &reply, &rbody)) {
			libreswan_log(
				"error initializing hdr for notify message");
			return;
		}
	}
	child_spi.ptr = NULL;
	child_spi.len = 0;

	/* build and add v2N payload to the packet */
	memset(&child_spi, 0, sizeof(child_spi));
	memset(&notify_data, 0, sizeof(notify_data));
	ship_v2N(ISAKMP_NEXT_v2NONE,
		 DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG) ?
		   (ISAKMP_PAYLOAD_NONCRITICAL |
		    ISAKMP_PAYLOAD_LIBRESWAN_BOGUS) :
		   ISAKMP_PAYLOAD_NONCRITICAL,
		 PROTO_ISAKMP,
		 &child_spi,
		 type, n_data, &rbody);

	close_message(&rbody, p1st);
	close_output_pbs(&reply);

	clonetochunk(p1st->st_tpacket, reply.start, pbs_offset(&reply),
		     "notification packet");

	send_ike_msg(p1st, __FUNCTION__);
}

/*
 * add notify payload to the rbody
 *
 * Emits a v2N (Notify) payload: header, optional SPI, optional notify
 * data.  Returns FALSE (after logging) on any marshalling failure,
 * TRUE on success.
 */
bool ship_v2N(unsigned int np, u_int8_t critical,
	      u_int8_t protoid, chunk_t *spi,
	      u_int16_t type, chunk_t *n_data, pb_stream *rbody)
{
	struct ikev2_notify n;
	pb_stream n_pbs;
	DBG(DBG_CONTROLMORE,
	    DBG_log("Adding a v2N Payload"));
	n.isan_np = np;
	n.isan_critical = critical;
	if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
		libreswan_log(
			" setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload");
		n.isan_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS;
	}
	n.isan_protoid = protoid;
	n.isan_spisize = spi->len;
	n.isan_type = type;

	if (!out_struct(&n, &ikev2_notify_desc, rbody, &n_pbs)) {
		libreswan_log(
			"error initializing notify payload for notify message");
		return FALSE;
	}

	/* a zero-length SPI is legal: simply omit the field */
	if (spi->len > 0) {
		if (!out_raw(spi->ptr, spi->len, &n_pbs, "SPI ")) {
			libreswan_log("error writing SPI to notify payload");
			return FALSE;
		}
	}
	if (n_data != NULL) {
		if (!out_raw(n_data->ptr, n_data->len, &n_pbs,
			     "Notify data")) {
			libreswan_log(
				"error writing notify payload for notify message");
			return FALSE;
		}
	}

	close_output_pbs(&n_pbs);
	return TRUE;
}

/*
 *
 ***************************************************************
 *                       INFORMATIONAL                     *****
 ***************************************************************
 *  -
 *
 *
 */

/* Handle an IKEv2 INFORMATIONAL exchange (liveness checks and delete
 * payloads); body continues past this chunk. */
stf_status process_informational_ikev2(struct msg_digest *md)
{
	/* verify that there is in fact an encrypted payload */
	if (!md->chain[ISAKMP_NEXT_v2E]) {
		libreswan_log(
			"Ignoring informational exchange outside encrypted payload (rfc5996 section 1.4)");
		return STF_IGNORE;
	}

	/* decrypt things.
*/ { stf_status ret; if (md->hdr.isa_flags & ISAKMP_FLAGS_I) { DBG(DBG_CONTROLMORE, DBG_log( "received informational exchange request from INITIATOR")); ret = ikev2_decrypt_msg(md, RESPONDER); } else { DBG(DBG_CONTROLMORE, DBG_log( "received informational exchange request from RESPONDER")); ret = ikev2_decrypt_msg(md, INITIATOR); } if (ret != STF_OK) return ret; } { struct payload_digest *p; struct ikev2_delete *v2del = NULL; stf_status ret; struct state *const st = md->st; /* Only send response if it is request*/ if (!(md->hdr.isa_flags & ISAKMP_FLAGS_R)) { unsigned char *authstart; pb_stream e_pbs, e_pbs_cipher; struct ikev2_generic e; unsigned char *iv; int ivsize; unsigned char *encstart; /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "information exchange reply packet"); DBG(DBG_CONTROLMORE | DBG_DPD, DBG_log("Received an INFORMATIONAL request, " "updating liveness, no longer pending")); st->st_last_liveness = now(); st->st_pend_liveness = FALSE; /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); /* default to 0 */ /* AAA should we copy from MD? 
*/ r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_msgid = htonl(md->msgid_received); /*set initiator bit if we are initiator*/ if (md->role == INITIATOR) r_hdr.isa_flags |= ISAKMP_FLAGS_I; r_hdr.isa_flags |= ISAKMP_FLAGS_R; if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) { libreswan_log( "error initializing hdr for informational message"); return STF_INTERNAL_ERROR; } } /*HDR Done*/ /* insert an Encryption payload header */ if (md->chain[ISAKMP_NEXT_v2D]) { bool ikesa_flag = FALSE; /* Search if there is a IKE SA delete payload*/ for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; p = p->next) { if (p->payload.v2delete.isad_protoid == PROTO_ISAKMP) { e.isag_np = ISAKMP_NEXT_v2NONE; ikesa_flag = TRUE; break; } } /* if there is no IKE SA DELETE PAYLOAD*/ /* That means, there are AH OR ESP*/ if (!ikesa_flag) e.isag_np = ISAKMP_NEXT_v2D; } else { e.isag_np = ISAKMP_NEXT_v2NONE; } e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; if (md->chain[ISAKMP_NEXT_v2D]) { for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; p = p->next) { v2del = &p->payload.v2delete; switch (v2del->isad_protoid) { case PROTO_ISAKMP: /* My understanding is that delete payload for IKE SA * should be the only payload in the informational exchange */ break; case PROTO_IPSEC_AH: case PROTO_IPSEC_ESP: { char spi_buf[1024]; pb_stream del_pbs; 
struct ikev2_delete v2del_tmp; u_int16_t i, j = 0; u_char *spi; for (i = 0; i < v2del->isad_nrspi; i++ ) { spi = p->pbs.cur + (i * v2del-> isad_spisize); DBG(DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); struct state *dst = find_state_ikev2_child_to_delete( st->st_icookie, st->st_rcookie, v2del->isad_protoid, *( ipsec_spi_t *)spi); if (dst != NULL) { struct ipsec_proto_info *pr = v2del-> isad_protoid == PROTO_IPSEC_AH ? &dst ->st_ah : &dst -> st_esp; DBG( DBG_CONTROLMORE, DBG_log( "our side spi that needs to be sent: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl( pr -> our_spi))); memcpy( spi_buf + (j * v2del -> isad_spisize), (u_char *)&pr->our_spi, v2del->isad_spisize); j++; } else { DBG( DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx) but local state is not found", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); } } if ( !j ) { DBG(DBG_CONTROLMORE, DBG_log( "This delete payload does not contain a single spi that has any local state, ignoring")); return STF_IGNORE; } else { DBG(DBG_CONTROLMORE, DBG_log( "No. 
of SPIs to be sent %d", j); DBG_dump( " Emit SPIs", spi_buf, j * v2del-> isad_spisize)); } zero(&v2del_tmp); if (p->next != NULL) v2del_tmp.isad_np = ISAKMP_NEXT_v2D; else v2del_tmp.isad_np = ISAKMP_NEXT_v2NONE; v2del_tmp.isad_protoid = v2del->isad_protoid; v2del_tmp.isad_spisize = v2del->isad_spisize; v2del_tmp.isad_nrspi = j; /* Emit delete payload header out*/ if (!out_struct(&v2del_tmp, & ikev2_delete_desc, &e_pbs_cipher, &del_pbs)) { libreswan_log( "error initializing hdr for delete payload"); return STF_INTERNAL_ERROR; } /* Emit values of spi to be sent to the peer*/ if (!out_raw(spi_buf, j * v2del-> isad_spisize, &del_pbs, "local spis")) { libreswan_log( "error sending spi values in delete payload"); return STF_INTERNAL_ERROR; } close_output_pbs(&del_pbs); } break; default: /*Unrecongnized protocol */ return STF_IGNORE; } /* this will break from for loop*/ if (v2del->isad_protoid == PROTO_ISAKMP) break; } } /*If there are no payloads or in other words empty payload in request * that means it is check for liveliness, so send an empty payload message * this will end up sending an empty payload */ ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, md->role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset( &reply_stream), "reply packet for informational exchange"); send_ike_msg(st, __FUNCTION__); } /* Now carry out the actualy task, we can not carry the actual task since * we need to send informational responde using existig SAs */ { if (md->chain[ISAKMP_NEXT_v2D] && st->st_state != STATE_IKESA_DEL) { for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; 
p = p->next) { v2del = &p->payload.v2delete; switch (v2del->isad_protoid) { case PROTO_ISAKMP: { /* My understanding is that delete payload for IKE SA * should be the only payload in the informational * Now delete the IKE SA state and all its child states */ struct state *current_st = st; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st-> st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st-> st_hashchain_next; if (current_st-> st_clonedfrom != 0 ) { change_state( current_st, STATE_CHILDSA_DEL); } else { change_state( current_st, STATE_IKESA_DEL); } delete_state(current_st); current_st = next_st; } } break; case PROTO_IPSEC_AH: case PROTO_IPSEC_ESP: { /* pb_stream del_pbs; */ struct ikev2_delete; u_int16_t i; u_char *spi; for (i = 0; i < v2del->isad_nrspi; i++ ) { spi = p->pbs.cur + (i * v2del-> isad_spisize); DBG(DBG_CONTROLMORE, DBG_log( "Now doing actual deletion for request: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); struct state *dst = find_state_ikev2_child_to_delete( st->st_icookie, st->st_rcookie, v2del->isad_protoid, *( ipsec_spi_t *)spi); if (dst != NULL) { struct ipsec_proto_info *pr = v2del-> isad_protoid == PROTO_IPSEC_AH ? 
&dst ->st_ah : &dst -> st_esp; DBG( DBG_CONTROLMORE, DBG_log( "our side spi that needs to be deleted: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl( pr -> our_spi))); /* now delete the state*/ change_state( dst, STATE_CHILDSA_DEL); delete_state( dst); } else { DBG( DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx) but local state is not found", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); } } } break; default: /*Unrecongnized protocol */ return STF_IGNORE; } /* this will break from for loop*/ if (v2del->isad_protoid == PROTO_ISAKMP) break; } /* for */ } /* if*/ else { /* empty response to our IKESA delete request*/ if ((md->hdr.isa_flags & ISAKMP_FLAGS_R) && st->st_state == STATE_IKESA_DEL) { /* My understanding is that delete payload for IKE SA * should be the only payload in the informational * Now delete the IKE SA state and all its child states */ struct state *current_st = st; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st-> st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st-> st_hashchain_next; if (current_st->st_clonedfrom != 0 ) { change_state( current_st, STATE_CHILDSA_DEL); } else { change_state( current_st, STATE_IKESA_DEL); } delete_state(current_st); current_st = next_st; } /* empty response to our empty INFORMATIONAL * We don't send anything back */ } else if ((md->hdr.isa_flags & ISAKMP_FLAGS_R) && st->st_state != STATE_IKESA_DEL) { DBG(DBG_CONTROLMORE, DBG_log( "Received an INFORMATIONAL response, " "updating liveness, no longer pending.")); st->st_last_liveness = now(); st->st_pend_liveness = FALSE; st->st_msgid_lastrecv = md->msgid_received; } } } } return STF_OK; } stf_status 
ikev2_send_informational(struct state *st) { struct state *pst = NULL; if (st->st_clonedfrom != SOS_NOBODY) { pst = state_with_serialno(st->st_clonedfrom); if (!pst) { DBG(DBG_CONTROL, DBG_log( "IKE SA does not exist for this child SA - should not happen")); DBG(DBG_CONTROL, DBG_log("INFORMATIONAL exchange can not be sent")); return STF_IGNORE; } } else { pst = st; } { unsigned char *authstart; unsigned char *encstart; unsigned char *iv; int ivsize; struct msg_digest md; struct ikev2_generic e; enum phase1_role role; pb_stream e_pbs, e_pbs_cipher; pb_stream rbody; pb_stream request; u_char buffer[1024]; md.st = st; md.pst = pst; memset(buffer, 0, sizeof(buffer)); init_pbs(&request, buffer, sizeof(buffer), "informational exchange request packet"); authstart = request.cur; /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, pst->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, pst->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; if (pst->st_state == STATE_PARENT_I2 || pst->st_state == STATE_PARENT_I3) { r_hdr.isa_flags |= ISAKMP_FLAGS_I; role = INITIATOR; r_hdr.isa_msgid = htonl(pst->st_msgid_nextuse); } else { role = RESPONDER; r_hdr.isa_msgid = htonl( pst->st_msgid_lastrecv + 1); } if (!out_struct(&r_hdr, &isakmp_hdr_desc, &request, &rbody)) { libreswan_log( "error initializing hdr for informational message"); return STF_FATAL; } } /* HDR done*/ /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2NONE; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &rbody, &e_pbs)) return STF_FATAL; /* IV */ iv = e_pbs.cur; ivsize = pst->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_FATAL; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; 
e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* This is an empty informational exchange (A.K.A liveness check) */ ikev2_padup_pre_encrypt(&md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { stf_status ret; unsigned char *authloc = ikev2_authloc(&md, &e_pbs); if (!authloc) return STF_FATAL; close_output_pbs(&e_pbs); close_output_pbs(&rbody); close_output_pbs(&request); ret = ikev2_encrypt_msg(&md, role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return STF_FATAL; } /* keep it for a retransmit if necessary */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, request.start, pbs_offset(&request), "reply packet for informational exchange"); pst->st_pend_liveness = TRUE; /* we should only do this when dpd/liveness is active? */ send_ike_msg(pst, __FUNCTION__); ikev2_update_counters(&md); } return STF_OK; } /* * *************************************************************** * DELETE_OUT ***** *************************************************************** * */ void ikev2_delete_out(struct state *st) { struct state *pst = NULL; if (st->st_clonedfrom != 0) { /*child SA*/ pst = state_with_serialno(st->st_clonedfrom); if (!pst) { DBG(DBG_CONTROL, DBG_log("IKE SA does not exist for this child SA")); DBG(DBG_CONTROL, DBG_log( "INFORMATIONAL exchange can not be sent, deleting state")); goto end; } } else { /* Parent SA*/ pst = st; } { unsigned char *authstart; pb_stream e_pbs, e_pbs_cipher; pb_stream rbody; struct ikev2_generic e; unsigned char *iv; int ivsize; unsigned char *encstart; struct msg_digest md; enum phase1_role role; md.st = st; md.pst = pst; /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "information exchange request packet"); /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); /* default to 0 */ /* AAA should we copy from MD? 
*/ r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, pst->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, pst->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_msgid = htonl(pst->st_msgid_nextuse); /*set initiator bit if we are initiator*/ if (pst->st_state == STATE_PARENT_I2 || pst->st_state == STATE_PARENT_I3) { r_hdr.isa_flags |= ISAKMP_FLAGS_I; role = INITIATOR; } else { role = RESPONDER; } /* r_hdr.isa_flags |= ISAKMP_FLAGS_R; */ if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &rbody)) { libreswan_log( "error initializing hdr for informational message"); goto end; } } /*HDR Done*/ /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2D; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &rbody, &e_pbs)) goto end; /* insert IV */ iv = e_pbs.cur; ivsize = pst->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) goto end; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; { pb_stream del_pbs; struct ikev2_delete v2del_tmp; /* * u_int16_t i, j=0; * u_char *spi; * char spi_buf[1024]; */ zero(&v2del_tmp); v2del_tmp.isad_np = ISAKMP_NEXT_v2NONE; if (st->st_clonedfrom != 0 ) { v2del_tmp.isad_protoid = PROTO_IPSEC_ESP; v2del_tmp.isad_spisize = sizeof(ipsec_spi_t); v2del_tmp.isad_nrspi = 1; } else { v2del_tmp.isad_protoid = PROTO_ISAKMP; v2del_tmp.isad_spisize = 0; v2del_tmp.isad_nrspi = 0; } /* Emit delete payload header out*/ if (!out_struct(&v2del_tmp, &ikev2_delete_desc, &e_pbs_cipher, &del_pbs)) { libreswan_log( "error initializing hdr for delete payload"); goto end; } /* Emit values of spi to be sent to the peer*/ if (st->st_clonedfrom != 0) { if (!out_raw( (u_char *)&st->st_esp.our_spi, sizeof(ipsec_spi_t), &del_pbs, 
"local spis")) { libreswan_log( "error sending spi values in delete payload"); goto end; } } close_output_pbs(&del_pbs); } ikev2_padup_pre_encrypt(&md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { stf_status ret; unsigned char *authloc = ikev2_authloc(&md, &e_pbs); if (authloc == NULL) goto end; close_output_pbs(&e_pbs); close_output_pbs(&rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(&md, role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) goto end; } /* keep it for a retransmit if necessary */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "request packet for informational exchange"); send_ike_msg(pst, __FUNCTION__); /* update state */ ikev2_update_counters(&md); } /* If everything is fine, and we sent packet, goto real_end*/ goto real_end; end: /* If some error occurs above that prevents us sending a request packet*/ /* delete the states right now*/ if (st->st_clonedfrom != SOS_NOBODY) { change_state(st, STATE_CHILDSA_DEL); delete_state(st); } else { struct state *current_st = pst; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st->st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st->st_hashchain_next; if (current_st->st_clonedfrom != 0 ) change_state(current_st, STATE_CHILDSA_DEL); else change_state(current_st, STATE_IKESA_DEL); delete_state(current_st); current_st = next_st; } } real_end:; } /* * Determine the IKE version we will use for the IKE packet * Normally, this is "2.0", but in the future we might need to * change that. Version used is the minimum 2.x version both * sides support. 
So if we support 2.1, and they support 2.0, * we should sent 2.0 (not implemented until we hit 2.1 ourselves) * We also have some impair functions that modify the major/minor * version on purpose - for testing * * rcv_version: the received IKE version, 0 if we don't know * * top 4 bits are major version, lower 4 bits are minor version */ static int build_ike_version() { return ((IKEv2_MAJOR_VERSION + (DBGP(IMPAIR_MAJOR_VERSION_BUMP) ? 1 : 0)) << ISA_MAJ_SHIFT) | (IKEv2_MINOR_VERSION + (DBGP(IMPAIR_MINOR_VERSION_BUMP) ? 1 : 0)); }
stf_status ikev2parent_inI2outR2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); reset_globals(); return STF_FATAL; } /* now. we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inI2outR2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } }
stf_status ikev2parent_inI2outR2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); reset_globals(); /* XXX suspicious - why was this deemed neccessary? */ return STF_FATAL; } /* now. we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inI2outR2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } }
{'added': [(730, '\t\tchar fromname[ADDRTOT_BUF];'), (731, '\t\taddrtot(&md->sender, 0, fromname, ADDRTOT_BUF);'), (732, ''), (733, '\t\tif (!md->chain[ISAKMP_NEXT_v2KE]) {'), (734, '\t\t\t/* is this a notify? If so, log it */'), (735, '\t\t\tif(md->chain[ISAKMP_NEXT_v2N]) {'), (736, '\t\t\t\tlibreswan_log("Received Notify(%d): %s",'), (737, '\t\t\t\t\tmd->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type,'), (738, '\t\t\t\t\tenum_name(&ikev2_notify_names,'), (739, '\t\t\t\t\t\tmd->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type));'), (740, '\t\t\t}'), (741, '\t\t\tlibreswan_log('), (742, '\t\t\t\t"rejecting I1 from %s:%u, no KE payload present",'), (743, '\t\t\t\tfromname, md->sender_port);'), (744, '\t\t\treturn STF_FAIL + v2N_INVALID_KE_PAYLOAD;'), (745, '\t\t}'), (754, '\t\t\treturn STF_FAIL + v2N_INVALID_KE_PAYLOAD;'), (1724, '\t\treset_globals(); /* XXX suspicious - why was this deemed neccessary? */')], 'deleted': [(309, ''), (310, '\tpassert(GLOBALS_ARE_RESET());'), (736, '\t\t\tchar fromname[ADDRTOT_BUF];'), (737, ''), (738, '\t\t\taddrtot(&md->sender, 0, fromname, ADDRTOT_BUF);'), (743, '\t\t\treturn v2N_INVALID_KE_PAYLOAD;'), (822, ''), (823, '\tpassert(GLOBALS_ARE_RESET());'), (1148, ''), (1149, '\tpassert(GLOBALS_ARE_RESET());'), (1717, '\t\treset_globals();'), (1797, ''), (1798, '\tpassert(GLOBALS_ARE_RESET());')]}
18
13
2,697
14,288
30
156
4
https://github.com/libreswan/libreswan
CVE-2013-7294
CWE-20
1,742
xdelta3-test.h
C
test_compressed_stream_overflow
/* xdelta 3 - delta compression tools and library Copyright (C) 2001, * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012. * Joshua P. MacDonald * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This is public-domain Mersenne Twister code, * attributed to Michael Brundage. Thanks! * http://www.qbrundage.com/michaelb/pubs/essays/random_number_generation.html */ static const uint32_t TEST_SEED1 = 5489UL; #define MT_LEN 624 #define MT_IA 397 static const uint32_t UPPER_MASK = 0x80000000; static const uint32_t LOWER_MASK = 0x7FFFFFFF; static const uint32_t MATRIX_A = 0x9908B0DF; #ifndef SHELL_TESTS #define SHELL_TESTS 1 #endif typedef struct mtrand mtrand; struct mtrand { int mt_index_; uint32_t mt_buffer_[MT_LEN]; }; int test_compare_files (const char* tgt, const char *rec); void mt_init(mtrand *mt, uint32_t seed); uint32_t mt_random (mtrand *mt); int test_setup (void); void mt_init(mtrand *mt, uint32_t seed) { int i; mt->mt_buffer_[0] = seed; mt->mt_index_ = MT_LEN; for (i = 1; i < MT_LEN; i++) { /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous versions, MSBs of the seed affect */ /* only MSBs of the array mt[]. 
*/ /* 2002/01/09 modified by Makoto Matsumoto */ mt->mt_buffer_[i] = (1812433253UL * (mt->mt_buffer_[i-1] ^ (mt->mt_buffer_[i-1] >> 30)) + i); } } uint32_t mt_random (mtrand *mt) { uint32_t y; unsigned long mag01[2]; mag01[0] = 0; mag01[1] = MATRIX_A; if (mt->mt_index_ >= MT_LEN) { int kk; for (kk = 0; kk < MT_LEN - MT_IA; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^ (y >> 1) ^ mag01[y & 0x1UL]; } for (;kk < MT_LEN - 1; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^ (y >> 1) ^ mag01[y & 0x1UL]; } y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) | (mt->mt_buffer_[0] & LOWER_MASK); mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mt->mt_index_ = 0; } y = mt->mt_buffer_[mt->mt_index_++]; y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; } static mtrand static_mtrand; #include <math.h> static uint32_t mt_exp_rand (uint32_t mean, uint32_t max_value) { double mean_d = mean; double erand = log (1.0 / (mt_random (&static_mtrand) / (double)UINT32_MAX)); uint32_t x = (uint32_t) (mean_d * erand + 0.5); return min (x, max_value); } #if SHELL_TESTS #include <sys/wait.h> #endif #define MSG_IS(x) (stream->msg != NULL && strcmp ((x), stream->msg) == 0) static const usize_t TWO_MEGS_AND_DELTA = (3 << 20); static const usize_t ADDR_CACHE_ROUNDS = 10000; static const usize_t TEST_FILE_MEAN = 16384; static const double TEST_ADD_MEAN = 128; static const double TEST_ADD_MAX = 512; static const double TEST_ADD_RATIO = 0.1; static const double TEST_EPSILON = 0.25; #define TESTBUFSIZE (1024 * 16) #define TESTFILESIZE (1024) static char TEST_TARGET_FILE[TESTFILESIZE]; static char TEST_SOURCE_FILE[TESTFILESIZE]; static char TEST_DELTA_FILE[TESTFILESIZE]; static char TEST_RECON_FILE[TESTFILESIZE]; static char 
TEST_RECON2_FILE[TESTFILESIZE]; static char TEST_COPY_FILE[TESTFILESIZE]; static char TEST_NOPERM_FILE[TESTFILESIZE]; #define CHECK(cond) if (!(cond)) { XPR(NT "check failure: " #cond); abort(); } #if SHELL_TESTS /* Use a fixed soft config so that test values are fixed. See also * test_compress_text(). */ static const char* test_softcfg_str = "-C9,3,4,8,2,36,70"; #endif /*********************************************************************** TEST HELPERS ***********************************************************************/ static void DOT (void) { XPR(NTR "."); } static int do_cmd (xd3_stream *stream, const char *buf) { int ret; if ((ret = system (buf)) != 0) { if (WIFEXITED (ret)) { stream->msg = "command exited non-zero"; IF_DEBUG1 (XPR(NT "command was: %s\n", buf)); } else { stream->msg = "abnormal command termination"; } return XD3_INTERNAL; } return 0; } static int do_fail (xd3_stream *stream, const char *buf) { int ret; ret = system (buf); if (! WIFEXITED (ret) || WEXITSTATUS (ret) != 1) { stream->msg = "command should have not succeeded"; XPR(NT "command was %s\n", buf); return XD3_INTERNAL; } return 0; } /* Test that the exponential distribution actually produces its mean. 
*/ static int test_random_numbers (xd3_stream *stream, int ignore) { usize_t i; usize_t sum = 0; usize_t mean = 50; usize_t n_rounds = 1000000; double average, error; double allowed_error = 0.1; mt_init (& static_mtrand, 0x9f73f7fe); for (i = 0; i < n_rounds; i += 1) { sum += mt_exp_rand (mean, USIZE_T_MAX); } average = (double) sum / (double) n_rounds; error = average - (double) mean; if (error < allowed_error && error > -allowed_error) { return 0; } /*XPR(NT "error is %f\n", error);*/ stream->msg = "random distribution looks broken"; return XD3_INTERNAL; } static void test_unlink (char* file) { int ret; if ((ret = unlink (file)) != 0 && errno != ENOENT) { XPR(NT "unlink %s failed: %s\n", file, strerror(ret)); } } static void test_cleanup (void) { #if 1 test_unlink (TEST_TARGET_FILE); test_unlink (TEST_SOURCE_FILE); test_unlink (TEST_DELTA_FILE); test_unlink (TEST_RECON_FILE); test_unlink (TEST_RECON2_FILE); test_unlink (TEST_COPY_FILE); test_unlink (TEST_NOPERM_FILE); #endif } int test_setup (void) { static int x = 0; x++; snprintf_func (TEST_TARGET_FILE, TESTFILESIZE, "/tmp/xdtest.target.%d", x); snprintf_func (TEST_SOURCE_FILE, TESTFILESIZE, "/tmp/xdtest.source.%d", x); snprintf_func (TEST_DELTA_FILE, TESTFILESIZE, "/tmp/xdtest.delta.%d", x); snprintf_func (TEST_RECON_FILE, TESTFILESIZE, "/tmp/xdtest.recon.%d", x); snprintf_func (TEST_RECON2_FILE, TESTFILESIZE, "/tmp/xdtest.recon2.%d", x); snprintf_func (TEST_COPY_FILE, TESTFILESIZE, "/tmp/xdtest.copy.%d", x); snprintf_func (TEST_NOPERM_FILE, TESTFILESIZE, "/tmp/xdtest.noperm.%d", x); test_cleanup(); return 0; } static int test_make_inputs (xd3_stream *stream, xoff_t *ss_out, xoff_t *ts_out) { usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; uint8_t *buf = (uint8_t*) malloc (ts + ss), *sbuf = buf, *tbuf = buf + ss; usize_t sadd = 0, sadd_max = (usize_t)(ss * TEST_ADD_RATIO); FILE *tf = NULL, *sf = 
NULL; usize_t i, j; int ret; if (buf == NULL) { return ENOMEM; } if ((tf = fopen (TEST_TARGET_FILE, "w")) == NULL || (ss_out != NULL && (sf = fopen (TEST_SOURCE_FILE, "w")) == NULL)) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if (ss_out != NULL) { for (i = 0; i < ss; ) { sbuf[i++] = (uint8_t) mt_random (&static_mtrand); } } /* Then modify the data to produce copies, everything not copied is * an add. The following logic produces the TEST_ADD_RATIO. The * variable SADD contains the number of adds so far, which should * not exceed SADD_MAX. */ /* XPR(NT "ss = %u ts = %u\n", ss, ts); */ for (i = 0; i < ts; ) { usize_t left = ts - i; usize_t next = mt_exp_rand ((uint32_t) TEST_ADD_MEAN, (uint32_t) TEST_ADD_MAX); usize_t add_left = sadd_max - sadd; double add_prob = (left == 0) ? 0 : (add_left / (double) left); int do_copy; next = min (left, next); do_copy = (next > add_left || (mt_random (&static_mtrand) / \ (double)USIZE_T_MAX) >= add_prob); if (ss_out == NULL) { do_copy &= (i > 0); } else { do_copy &= (ss - next) > 0; } if (do_copy) { /* Copy */ size_t offset = mt_random (&static_mtrand) % ((ss_out == NULL) ? i : (ss - next)); /* XPR(NT "[%u] copy %u at %u ", i, next, offset); */ for (j = 0; j < next; j += 1) { char c = ((ss_out == NULL) ? 
tbuf : sbuf)[offset + j]; /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ } else { /* Add */ /* XPR(NT "[%u] add %u ", i, next); */ for (j = 0; j < next; j += 1) { char c = (char) mt_random (&static_mtrand); /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ sadd += next; } } /* XPR(NT "sadd = %u max = %u\n", sadd, sadd_max); */ if ((fwrite (tbuf, 1, ts, tf) != ts) || (ss_out != NULL && (fwrite (sbuf, 1, ss, sf) != ss))) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if ((ret = fclose (tf)) || (ss_out != NULL && (ret = fclose (sf)))) { stream->msg = "close failed"; ret = get_errno (); goto failure; } if (ts_out) { (*ts_out) = ts; } if (ss_out) { (*ss_out) = ss; } failure: free (buf); return ret; } int test_compare_files (const char* tgt, const char *rec) { FILE *orig, *recons; static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE]; xoff_t offset = 0; size_t i; size_t oc, rc; xoff_t diffs = 0; if ((orig = fopen (tgt, "r")) == NULL) { XPR(NT "open %s failed\n", tgt); return get_errno (); } if ((recons = fopen (rec, "r")) == NULL) { XPR(NT "open %s failed\n", rec); return get_errno (); } for (;;) { oc = fread (obuf, 1, TESTBUFSIZE, orig); rc = fread (rbuf, 1, TESTBUFSIZE, recons); if (oc != rc) { return XD3_INTERNAL; } if (oc == 0) { break; } for (i = 0; i < oc; i += 1) { if (obuf[i] != rbuf[i]) { XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n", (int)i, (int)oc, offset, obuf[i], rbuf[i]); diffs++; return XD3_INTERNAL; } } offset += oc; } fclose (orig); fclose (recons); if (diffs != 0) { return XD3_INTERNAL; } return 0; } static int test_save_copy (const char *origname) { char buf[TESTBUFSIZE]; int ret; snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", origname, TEST_COPY_FILE); if ((ret = system (buf)) != 0) { return XD3_INTERNAL; } return 0; } static int test_file_size (const char* file, xoff_t *size) { struct stat sbuf; int ret; (*size) = 0; if (stat (file, & sbuf) < 0) { ret = 
get_errno (); XPR(NT "stat failed: %s: %s\n", file, strerror (ret)); return ret; } if (! S_ISREG (sbuf.st_mode)) { ret = XD3_INTERNAL; XPR(NT "not a regular file: %s: %s\n", file, strerror (ret)); return ret; } (*size) = sbuf.st_size; return 0; } /*********************************************************************** READ OFFSET ***********************************************************************/ /* Common test for read_integer errors: encodes a 64-bit value and * then attempts to read as a 32-bit value. If TRUNC is non-zero, * attempts to get errors by shortening the input, otherwise it should * overflow. Expects XD3_INTERNAL and MSG. */ static int test_read_integer_error (xd3_stream *stream, usize_t trunto, const char *msg) { uint64_t eval = 1ULL << 34; uint32_t rval; xd3_output *buf = NULL; const uint8_t *max; const uint8_t *inp; int ret; buf = xd3_alloc_output (stream, buf); if ((ret = xd3_emit_uint64_t (stream, & buf, eval))) { goto fail; } again: inp = buf->base; max = buf->base + buf->next - trunto; if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) != XD3_INVALID_INPUT || !MSG_IS (msg)) { ret = XD3_INTERNAL; } else if (trunto && trunto < buf->next) { trunto += 1; goto again; } else { ret = 0; } fail: xd3_free_output (stream, buf); return ret; } /* Test integer overflow using the above routine. */ static int test_decode_integer_overflow (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 0, "overflow in read_intger"); } /* Test integer EOI using the above routine. */ static int test_decode_integer_end_of_input (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 1, "end-of-input in read_integer"); } /* Test that emit_integer/decode_integer/sizeof_integer/read_integer * work on correct inputs. Tests powers of (2^7), plus or minus, up * to the maximum value. 
 */

/* Body of the encode/decode round-trip test, instantiated for both
 * uint32_t and uint64_t below.  Exercises values straddling each 7-bit
 * varint-group boundary plus the extremes, checks the emitted size
 * against sizeof_*, then re-reads via both the pointer-based reader
 * and the stream decoder. */
#define TEST_ENCODE_DECODE_INTEGER(TYPE,ONE,MAX) \
  xd3_output *rbuf = NULL; \
  xd3_output *dbuf = NULL; \
  TYPE values[64]; \
  usize_t nvalues = 0; \
  usize_t i; \
  int ret = 0; \
 \
  for (i = 0; i < (sizeof (TYPE) * 8); i += 7) \
    { \
      values[nvalues++] = (ONE << i) - ONE; \
      values[nvalues++] = (ONE << i); \
      values[nvalues++] = (ONE << i) + ONE; \
    } \
 \
  values[nvalues++] = MAX-ONE; \
  values[nvalues++] = MAX; \
 \
  rbuf = xd3_alloc_output (stream, rbuf); \
  dbuf = xd3_alloc_output (stream, dbuf); \
 \
  for (i = 0; i < nvalues; i += 1) \
    { \
      const uint8_t *max; \
      const uint8_t *inp; \
      TYPE val; \
 \
      DOT (); \
      rbuf->next = 0; \
 \
      if ((ret = xd3_emit_ ## TYPE (stream, & rbuf, values[i])) || \
	  (ret = xd3_emit_ ## TYPE (stream, & dbuf, values[i]))) \
	{ \
	  goto fail; \
	} \
 \
      inp = rbuf->base; \
      max = rbuf->base + rbuf->next; \
 \
      if (rbuf->next != xd3_sizeof_ ## TYPE (values[i])) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
 \
      if ((ret = xd3_read_ ## TYPE (stream, & inp, max, & val))) \
	{ \
	  goto fail; \
	} \
 \
      if (val != values[i]) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
 \
      DOT (); \
    } \
 \
  stream->next_in = dbuf->base; \
  stream->avail_in = dbuf->next; \
 \
  for (i = 0; i < nvalues; i += 1) \
    { \
      TYPE val; \
 \
      if ((ret = xd3_decode_ ## TYPE (stream, & val))) \
	{ \
	  goto fail; \
	} \
 \
      if (val != values[i]) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
    } \
 \
  if (stream->avail_in != 0) \
    { \
      ret = XD3_INTERNAL; \
      goto fail; \
    } \
 \
 fail: \
  xd3_free_output (stream, rbuf); \
  xd3_free_output (stream, dbuf); \
 \
  return ret

static int
test_encode_decode_uint32_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint32_t,1U,UINT32_MAX);
}

static int
test_encode_decode_uint64_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint64_t,1ULL,UINT64_MAX);
}

/* Exhaustively checks USIZE_T_OVERFLOW at the boundary cases: the sum
 * either exactly reaches USIZE_T_MAX (no overflow) or exceeds it by
 * one (overflow). */
static int
test_usize_t_overflow (xd3_stream *stream, int unused)
{
  if (USIZE_T_OVERFLOW (USIZE_T_MAX, 0)) { goto fail; }
  if (USIZE_T_OVERFLOW (0, USIZE_T_MAX)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2 + 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX, 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (1, USIZE_T_MAX)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX / 2 + 1, USIZE_T_MAX / 2 + 1)) { goto fail; }
  return 0;

 fail:
  stream->msg = "incorrect overflow computation";
  return XD3_INTERNAL;
}

/* Checks xd3_forward_match: two all-zero buffers match for any prefix
 * length; then flipping byte i of one buffer must stop the match at
 * exactly i. */
static int
test_forward_match (xd3_stream *stream, int unused)
{
  usize_t i;
  uint8_t buf1[256], buf2[256];

  memset(buf1, 0, 256);
  memset(buf2, 0, 256);

  for (i = 0; i < 256; i++)
    {
      CHECK(xd3_forward_match(buf1, buf2, i) == (int)i);
    }

  for (i = 0; i < 255; i++)
    {
      buf2[i] = 1;
      CHECK(xd3_forward_match(buf1, buf2, 256) == (int)i);
      buf2[i] = 0;
    }

  return 0;
}

/***********************************************************************
 Address cache
 ***********************************************************************/

/* Round-trips ADDR_CACHE_ROUNDS randomly generated copy addresses
 * through xd3_encode_address/xd3_decode_address, then verifies that
 * every encoded byte was consumed and that every address mode
 * (self, here, near, same) was exercised at least once.  Uses a fixed
 * RNG seed so the address distribution is deterministic. */
static int
test_address_cache (xd3_stream *stream, int unused)
{
  int ret;
  usize_t i;
  usize_t offset;
  usize_t *addrs;
  uint8_t *big_buf, *buf_max;
  const uint8_t *buf;
  xd3_output *outp;
  uint8_t *modes;
  int mode_counts[16];

  stream->acache.s_near = stream->code_table_desc->near_modes;
  stream->acache.s_same = stream->code_table_desc->same_modes;

  if ((ret = xd3_encode_init_partial (stream))) { return ret; }

  addrs = (usize_t*) xd3_alloc (stream, sizeof (usize_t), ADDR_CACHE_ROUNDS);
  modes = (uint8_t*) xd3_alloc (stream, sizeof (uint8_t), ADDR_CACHE_ROUNDS);

  memset (mode_counts, 0, sizeof (mode_counts));
  memset (modes, 0, ADDR_CACHE_ROUNDS);

  addrs[0] = 0;

  mt_init (& static_mtrand, 0x9f73f7fc);

  /* First pass: encode addresses */
  xd3_init_cache (& stream->acache);

  for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1)
    {
      double p;
      usize_t addr;
      usize_t prev_i;
      usize_t nearby;

      /* Mix of nearby, relative-to-previous, and repeated addresses so
       * that all cache modes get hit. */
      p = (mt_random (&static_mtrand) / (double)USIZE_T_MAX);
      prev_i = mt_random (&static_mtrand) % offset;
      nearby = (mt_random (&static_mtrand) % 256) % offset;
      nearby = max (1U, nearby);

      if (p < 0.1) { addr =
addrs[offset-nearby]; } else if (p < 0.4) { addr = min (addrs[prev_i] + nearby, offset-1); } else { addr = prev_i; } if ((ret = xd3_encode_address (stream, addr, offset, & modes[offset]))) { return ret; } addrs[offset] = addr; mode_counts[modes[offset]] += 1; } /* Copy addresses into a contiguous buffer. */ big_buf = (uint8_t*) xd3_alloc (stream, xd3_sizeof_output (ADDR_HEAD (stream)), 1); for (offset = 0, outp = ADDR_HEAD (stream); outp != NULL; offset += outp->next, outp = outp->next_page) { memcpy (big_buf + offset, outp->base, outp->next); } buf_max = big_buf + offset; buf = big_buf; /* Second pass: decode addresses */ xd3_init_cache (& stream->acache); for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1) { uint32_t addr; if ((ret = xd3_decode_address (stream, offset, modes[offset], & buf, buf_max, & addr))) { return ret; } if (addr != addrs[offset]) { stream->msg = "incorrect decoded address"; return XD3_INTERNAL; } } /* Check that every byte, mode was used. */ if (buf != buf_max) { stream->msg = "address bytes not used"; return XD3_INTERNAL; } for (i = 0; i < (2 + stream->acache.s_same + stream->acache.s_near); i += 1) { if (mode_counts[i] == 0) { stream->msg = "address mode not used"; return XD3_INTERNAL; } } xd3_free (stream, modes); xd3_free (stream, addrs); xd3_free (stream, big_buf); return 0; } /*********************************************************************** Encode and decode with single bit error ***********************************************************************/ /* It compresses from 256 to around 185 bytes. * Avoids matching addresses that are a single-bit difference. * Avoids matching address 0. */ static const uint8_t test_text[] = "this is a story\n" "abouttttttttttt\n" "- his is a stor\n" "- about nothing " " all. 
boutique -" "his story is a -" "about " "what happens all" " the time what -" "am I ttttttt the" " person said, so" " what, per son -" " gory story is -" " about nothing -" "tttttt to test -" "his sto nothing"; static const uint8_t test_apphead[] = "header test"; static int test_compress_text (xd3_stream *stream, uint8_t *encoded, usize_t *encoded_size) { int ret; xd3_config cfg; int oflags = stream->flags; int flags = stream->flags | XD3_FLUSH; xd3_free_stream (stream); xd3_init_config (& cfg, flags); /* This configuration is fixed so that the "expected non-error" the counts in * decompress_single_bit_errors are too. See test_coftcfg_str. */ cfg.smatch_cfg = XD3_SMATCH_SOFT; cfg.smatcher_soft.name = "test"; cfg.smatcher_soft.large_look = 64; /* no source, not used */ cfg.smatcher_soft.large_step = 64; /* no source, not used */ cfg.smatcher_soft.small_look = 4; cfg.smatcher_soft.small_chain = 128; cfg.smatcher_soft.small_lchain = 16; cfg.smatcher_soft.max_lazy = 8; cfg.smatcher_soft.long_enough = 128; xd3_config_stream (stream, & cfg); (*encoded_size) = 0; xd3_set_appheader (stream, test_apphead, (usize_t) strlen ((char*) test_apphead)); if ((ret = xd3_encode_stream (stream, test_text, sizeof (test_text), encoded, encoded_size, 4*sizeof (test_text)))) { goto fail; } if ((ret = xd3_close_stream (stream))) { goto fail; } fail: xd3_free_stream (stream); xd3_init_config (& cfg, oflags); xd3_config_stream (stream, & cfg); return ret; } static int test_decompress_text (xd3_stream *stream, uint8_t *enc, usize_t enc_size, usize_t test_desize) { xd3_config cfg; char decoded[sizeof (test_text)]; uint8_t *apphead; usize_t apphead_size; usize_t decoded_size; const char *msg; int ret; usize_t pos = 0; int flags = stream->flags; usize_t take; input: /* Test decoding test_desize input bytes at a time */ take = min (enc_size - pos, test_desize); CHECK(take > 0); xd3_avail_input (stream, enc + pos, take); again: ret = xd3_decode_input (stream); pos += take; take = 0; switch (ret) { 
    case XD3_OUTPUT: break;
    case XD3_WINSTART:
    case XD3_GOTHEADER: goto again;
    case XD3_INPUT:
      if (pos < enc_size) { goto input; }
      /* else fallthrough */
    case XD3_WINFINISH:
    default:
      goto fail;
    }

  CHECK(ret == XD3_OUTPUT);
  CHECK(pos == enc_size);

  if (stream->avail_out != sizeof (test_text))
    {
      stream->msg = "incorrect output size";
      ret = XD3_INTERNAL;
      goto fail;
    }

  decoded_size = stream->avail_out;
  memcpy (decoded, stream->next_out, stream->avail_out);

  xd3_consume_output (stream);

  if ((ret = xd3_get_appheader (stream, & apphead, & apphead_size))) { goto fail; }

  if (apphead_size != strlen ((char*) test_apphead) ||
      memcmp (apphead, test_apphead, strlen ((char*) test_apphead)) != 0)
    {
      stream->msg = "incorrect appheader";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if ((ret = xd3_decode_input (stream)) != XD3_WINFINISH ||
      (ret = xd3_close_stream (stream)) != 0)
    {
      goto fail;
    }

  if (decoded_size != sizeof (test_text) ||
      memcmp (decoded, test_text, sizeof (test_text)) != 0)
    {
      /* EIO (not XD3_INTERNAL) marks a content mismatch; the caller in
       * test_decompress_single_bit_error distinguishes this case. */
      stream->msg = "incorrect output text";
      ret = EIO;
    }

 fail:
  /* Reset the stream for the caller, preserving the failure message
   * across xd3_free_stream/xd3_config_stream. */
  msg = stream->msg;
  xd3_free_stream (stream);
  xd3_init_config (& cfg, flags);
  xd3_config_stream (stream, & cfg);
  stream->msg = msg;

  return ret;
}

/* Compresses test_text, then flips every bit of the delta one at a
 * time and decodes.  Most flips must fail; the caller supplies the
 * exact number of expected non-failures (flips that happen to produce
 * a valid delta, e.g. benign address-mode changes). */
static int
test_decompress_single_bit_error (xd3_stream *stream, int expected_non_failures)
{
  int ret;
  usize_t i;
  uint8_t encoded[4*sizeof (test_text)];  /* make room for alt code table */
  usize_t encoded_size;
  int non_failures = 0;
  int cksum = (stream->flags & XD3_ADLER32) != 0;

//#define DEBUG_TEST_FAILURES

#ifndef DEBUG_TEST_FAILURES
#define TEST_FAILURES()
#else
  /* For checking non-failure cases by hand, enable this macro and run
   * xdelta printdelta with print_cpymode disabled.  Every non-failure
   * should change a copy address mode, which doesn't cause a failure
   * because the address cache starts out with all zeros.

  ./xdelta3 test
  for i in test_text.xz.*; do ./xdelta3 printdelta $i > $i.out; diff $i.out test_text.xz.0.out; done

  */
  system ("rm -rf test_text.*");
  {
    char buf[TESTBUFSIZE];
    FILE *f;
    snprintf_func (buf, TESTBUFSIZE, "test_text");
    f = fopen (buf, "w");
    fwrite (test_text,1,sizeof (test_text),f);
    fclose (f);
  }
#define TEST_FAILURES()                                                 \
  do {                                                                  \
    char buf[TESTBUFSIZE];                                              \
    FILE *f;                                                            \
    snprintf_func (buf, TESTBUFSIZE, "test_text.xz.%d", non_failures);  \
    f = fopen (buf, "w");                                               \
    fwrite (encoded,1,encoded_size,f);                                  \
    fclose (f);                                                         \
  } while (0)
#endif

  /* Force the inefficient (uncompressed) secondary paths so single-bit
   * damage maps more directly onto the primary format. */
  stream->sec_data.inefficient = 1;
  stream->sec_inst.inefficient = 1;
  stream->sec_addr.inefficient = 1;

  /* Encode text, test correct input */
  if ((ret = test_compress_text (stream, encoded, & encoded_size)))
    {
      /*stream->msg = "without error: encode failure";*/
      return ret;
    }

  if ((ret = test_decompress_text (stream, encoded, encoded_size,
				   sizeof (test_text) / 4)))
    {
      /*stream->msg = "without error: decode failure";*/
      return ret;
    }

  TEST_FAILURES();

  for (i = 0; i < encoded_size*8; i += 1)
    {
      /* Single bit error. */
      encoded[i/8] ^= 1 << (i%8);

      if ((ret = test_decompress_text (stream, encoded, encoded_size,
				       sizeof (test_text))) == 0)
	{
	  non_failures += 1;
#ifdef DEBUG_TEST_FAILURES
	  XPR(NT "%u[%u] non-failure %u\n", i/8, i%8, non_failures);
#endif
	  TEST_FAILURES();
	}
      else
	{
	  /*XPR(NT "%u[%u] failure: %s\n", i/8, i%8, stream->msg);*/
	}

      /* decompress_text returns EIO when the final memcmp() fails, but that
       * should never happen with checksumming on. */
      if (cksum && ret == EIO)
	{
	  /*XPR(NT "%u[%u] cksum mismatch\n", i/8, i%8);*/
	  stream->msg = "checksum mismatch";
	  return XD3_INTERNAL;
	}

      /* Undo single bit error. */
      encoded[i/8] ^= 1 << (i%8);
    }

  /* Test correct input again */
  if ((ret = test_decompress_text (stream, encoded, encoded_size, 1)))
    {
      /*stream->msg = "without error: decode failure";*/
      return ret;
    }

  /* Check expected non-failures */
  if (non_failures != expected_non_failures)
    {
      XPR(NT "non-failures %u; expected %u",
	  non_failures, expected_non_failures);
      stream->msg = "incorrect";
      return XD3_INTERNAL;
    }

  DOT ();
  return 0;
}

/***********************************************************************
 Secondary compression tests
 ***********************************************************************/

#if SECONDARY_ANY

/* A generator that fills DATA with one synthetic byte distribution;
 * each generator below stresses a different coder behavior. */
typedef int (*sec_dist_func) (xd3_stream *stream, xd3_output *data);

static int sec_dist_func1 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func2 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func3 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func4 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func5 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func6 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func7 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func8 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func9 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func10 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func11 (xd3_stream *stream, xd3_output *data);

static sec_dist_func sec_dists[] =
{
  sec_dist_func1,
  sec_dist_func2,
  sec_dist_func3,
  sec_dist_func4,
  sec_dist_func5,
  sec_dist_func6,
  sec_dist_func7,
  sec_dist_func8,
  sec_dist_func9,
  sec_dist_func10,
  sec_dist_func11,
};

/* Test distribution: 100 bytes of the same character (13). */
static int
sec_dist_func1 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < 100; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, 13))) { return ret; }
    }
  return 0;
}

/* Test distribution: uniform covering half the alphabet.
 */
static int
sec_dist_func2 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, i%(ALPHABET_SIZE/2)))) { return ret; }
    }
  return 0;
}

/* Test distribution: uniform covering the entire alphabet. */
static int
sec_dist_func3 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, i%ALPHABET_SIZE))) { return ret; }
    }
  return 0;
}

/* Test distribution: An exponential distribution covering half the alphabet */
static int
sec_dist_func4 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_exp_rand (10, ALPHABET_SIZE/2);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: An exponential distribution covering the entire alphabet */
static int
sec_dist_func5 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_exp_rand (10, ALPHABET_SIZE-1);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: An uniform random distribution covering half the alphabet */
static int
sec_dist_func6 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_random (&static_mtrand) % (ALPHABET_SIZE/2);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: An uniform random distribution covering the entire alphabet */
static int
sec_dist_func7 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*200; i += 1)
    {
      x = mt_random (&static_mtrand) % ALPHABET_SIZE;
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: A small number of frequent characters, difficult
 * to divide into many groups */
static int
sec_dist_func8 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < ALPHABET_SIZE*5; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, 0))) { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 64))) { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 128))) { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 255))) { return ret; }
    }
  return 0;
}

/* Test distribution: One that causes many FGK block promotions (found a bug) */
static int
sec_dist_func9 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  int ramp = 0;
  int rcount = 0;
  int prom = 0;
  int pcount = 0;

  /* 200 was long enough to trigger it--only when stricter checking
   * that counted all blocks was turned on, but it seems I deleted
   * this code.  (missing fgk_free_block on line 398). */
  for (i = 0; i < ALPHABET_SIZE*200; i += 1)
    {
    repeat:
      if (ramp < ALPHABET_SIZE)
	{
	  /* Initially Nth symbol has (N+1) frequency */
	  if (rcount <= ramp)
	    {
	      rcount += 1;
	      if ((ret = xd3_emit_byte (stream, & data, ramp))) { return ret; }
	      continue;
	    }

	  ramp += 1;
	  rcount = 0;
	  goto repeat;
	}

      /* Thereafter, promote least freq to max freq */
      if (pcount == ALPHABET_SIZE)
	{
	  pcount = 0;
	  prom = (prom + 1) % ALPHABET_SIZE;
	}

      pcount += 1;
      if ((ret = xd3_emit_byte (stream, & data, prom))) { return ret; }
    }

  return 0;
}

/* Test distribution: freq[i] == i*i, creates a 21-bit code length, fixed in 3.0r.
 */
static int
sec_dist_func10 (xd3_stream *stream, xd3_output *data)
{
  int i, j, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      for (j = 0; j <= (i*i); j += 1)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
    }
  return 0;
}

/* Test distribution: fibonacci */
static int
sec_dist_func11 (xd3_stream *stream, xd3_output *data)
{
  int sum0 = 0;
  int sum1 = 1;
  int i, j, ret;
  for (i = 0; i < 33; ++i)
    {
      for (j = 0; j < (sum0 + sum1); ++j)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
      sum0 = sum1;
      sum1 = j;
    }
  return 0;
}

/* Decodes COMPRESS_SIZE bytes of secondary-compressed input and checks
 * that it consumes exactly the input, fills exactly INPUT_SIZE bytes of
 * output, and reproduces DEC_CORRECT. */
static int
test_secondary_decode (xd3_stream *stream,
		       const xd3_sec_type *sec,
		       usize_t input_size,
		       usize_t compress_size,
		       const uint8_t *dec_input,
		       const uint8_t *dec_correct,
		       uint8_t *dec_output)
{
  int ret;
  xd3_sec_stream *dec_stream;
  const uint8_t *dec_input_used, *dec_input_end;
  uint8_t *dec_output_used, *dec_output_end;

  if ((dec_stream = sec->alloc (stream)) == NULL) { return ENOMEM; }

  if ((ret = sec->init (stream, dec_stream, 0)) != 0) { goto fail; }

  dec_input_used = dec_input;
  dec_input_end = dec_input + compress_size;

  dec_output_used = dec_output;
  dec_output_end = dec_output + input_size;

  if ((ret = sec->decode (stream, dec_stream,
			  & dec_input_used, dec_input_end,
			  & dec_output_used, dec_output_end)))
    {
      goto fail;
    }

  if (dec_input_used != dec_input_end)
    {
      stream->msg = "unused input";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (dec_output_used != dec_output_end)
    {
      stream->msg = "unfinished output";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (memcmp (dec_output, dec_correct, input_size) != 0)
    {
      stream->msg = "incorrect output";
      ret = XD3_INTERNAL;
      goto fail;
    }

 fail:
  sec->destroy (stream, dec_stream);
  return ret;
}

/* Round-trips every sec_dists[] distribution through the given
 * secondary coder for 1..GROUPS groups, verifying encode+decode, and
 * then injects single-bit errors in the first 10 compressed bytes
 * (decoder faults, not decode correctness, are the target there). */
static int
test_secondary (xd3_stream *stream, const xd3_sec_type *sec, usize_t groups)
{
  usize_t test_i;
  int ret;
  xd3_output *in_head, *out_head, *p;
  usize_t p_off, input_size, compress_size;
  uint8_t *dec_input = NULL, *dec_output = NULL, *dec_correct = NULL;
  xd3_sec_stream *enc_stream;
  xd3_sec_cfg cfg;

  memset (& cfg, 0, sizeof (cfg));

  cfg.inefficient = 1;

  for (cfg.ngroups = 1; cfg.ngroups <= groups; cfg.ngroups += 1)
    {
      XPR(NTR "\n...");
      for (test_i = 0; test_i < SIZEOF_ARRAY (sec_dists); test_i += 1)
	{
	  /* Fixed seed: each distribution is deterministic per run. */
	  mt_init (& static_mtrand, 0x9f73f7fc);

	  in_head = xd3_alloc_output (stream, NULL);
	  out_head = xd3_alloc_output (stream, NULL);
	  enc_stream = sec->alloc (stream);
	  dec_input = NULL;
	  dec_output = NULL;
	  dec_correct = NULL;

	  if (in_head == NULL || out_head == NULL || enc_stream == NULL)
	    {
	      goto nomem;
	    }

	  if ((ret = sec_dists[test_i] (stream, in_head))) { goto fail; }

	  if ((ret = sec->init (stream, enc_stream, 1)) != 0) { goto fail; }

	  /* Encode data */
	  if ((ret = sec->encode (stream, enc_stream, in_head, out_head, & cfg)))
	    {
	      XPR(NT "test %u: encode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Calculate sizes, allocate contiguous arrays for decoding */
	  input_size = xd3_sizeof_output (in_head);
	  compress_size = xd3_sizeof_output (out_head);

	  XPR(NTR "%.3f", 8.0 * (double) compress_size / (double) input_size);

	  if ((dec_input = (uint8_t*) xd3_alloc (stream, compress_size, 1)) == NULL ||
	      (dec_output = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL ||
	      (dec_correct = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL)
	    {
	      goto nomem;
	    }

	  /* Fill the compressed data array */
	  for (p_off = 0, p = out_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_input + p_off, p->base, p->next);
	    }

	  CHECK(p_off == compress_size);

	  /* Fill the input data array */
	  for (p_off = 0, p = in_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_correct + p_off, p->base, p->next);
	    }

	  CHECK(p_off == input_size);

	  if ((ret = test_secondary_decode (stream, sec, input_size,
					    compress_size, dec_input,
					    dec_correct, dec_output)))
	    {
	      XPR(NT "test %u: decode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Single-bit error test, only cover the first 10 bytes.
	   * Some non-failures are expected in the Huffman case:
	   * Changing the clclen array, for example, may not harm the
	   * decoding.  Really looking for faults here. */
	  {
	    int i;
	    int bytes = min (compress_size, 10U);
	    for (i = 0; i < bytes * 8; i += 1)
	      {
		dec_input[i/8] ^= 1 << (i%8);

		if ((ret = test_secondary_decode (stream, sec, input_size,
						  compress_size, dec_input,
						  dec_correct, dec_output)) == 0)
		  {
		    /*XPR(NT "test %u: decode single-bit [%u/%u] error non-failure", test_i, i/8, i%8);*/
		  }

		dec_input[i/8] ^= 1 << (i%8);

		if ((i % (2*bytes)) == (2*bytes)-1)
		  {
		    DOT ();
		  }
	      }
	    ret = 0;
	  }

	  if (0)
	    {
	    nomem:
	      ret = ENOMEM;
	    }
	fail:
	  sec->destroy (stream, enc_stream);
	  xd3_free_output (stream, in_head);
	  xd3_free_output (stream, out_head);
	  xd3_free (stream, dec_input);
	  xd3_free (stream, dec_output);
	  xd3_free (stream, dec_correct);

	  if (ret != 0)
	    {
	      return ret;
	    }
	}
    }

  return 0;
}

IF_FGK (static int test_secondary_fgk (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & fgk_sec_type, gp); })
IF_DJW (static int test_secondary_huff (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & djw_sec_type, gp); })
IF_LZMA (static int test_secondary_lzma (xd3_stream *stream, usize_t gp)
	 { return test_secondary (stream, & lzma_sec_type, gp); })

#endif

/***********************************************************************
 TEST INSTRUCTION TABLE
 ***********************************************************************/

/* Test that xd3_choose_instruction() does the right thing for its code
 * table.
 */
static int
test_choose_instruction (xd3_stream *stream, int ignore)
{
  int i;

  stream->code_table = (*stream->code_table_func) ();

  /* For every code-table entry, synthesize the instruction(s) it
   * describes and verify the chooser maps them back to that entry. */
  for (i = 0; i < 256; i += 1)
    {
      const xd3_dinst *d = stream->code_table + i;
      xd3_rinst prev, inst;

      CHECK(d->type1 > 0);

      memset (& prev, 0, sizeof (prev));
      memset (& inst, 0, sizeof (inst));

      if (d->type2 == 0)
	{
	  /* Single-instruction entry. */
	  inst.type = d->type1;

	  /* size1 == 0 means "any size"; use a large stand-in. */
	  if ((inst.size = d->size1) == 0)
	    {
	      inst.size = TESTBUFSIZE;
	    }

	  XD3_CHOOSE_INSTRUCTION (stream, NULL, & inst);

	  if (inst.code2 != 0 || inst.code1 != i)
	    {
	      stream->msg = "wrong single instruction";
	      return XD3_INTERNAL;
	    }
	}
      else
	{
	  /* Double-instruction (combined) entry. */
	  prev.type = d->type1;
	  prev.size = d->size1;
	  inst.type = d->type2;
	  inst.size = d->size2;

	  XD3_CHOOSE_INSTRUCTION (stream, & prev, & inst);

	  if (prev.code2 != i)
	    {
	      stream->msg = "wrong double instruction";
	      return XD3_INTERNAL;
	    }
	}
    }

  return 0;
}

/***********************************************************************
 TEST INSTRUCTION TABLE CODING
 ***********************************************************************/

#if GENERIC_ENCODE_TABLES
/* Test that encoding and decoding a code table works */
static int
test_encode_code_table (xd3_stream *stream, int ignore)
{
  int ret;
  const uint8_t *comp_data;
  usize_t comp_size;

  if ((ret = xd3_compute_alternate_table_encoding (stream, & comp_data, & comp_size)))
    {
      return ret;
    }

  stream->acache.s_near = __alternate_code_table_desc.near_modes;
  stream->acache.s_same = __alternate_code_table_desc.same_modes;

  if ((ret = xd3_apply_table_encoding (stream, comp_data, comp_size)))
    {
      return ret;
    }

  if (memcmp (stream->code_table,
	      xd3_alternate_code_table (),
	      sizeof (xd3_dinst) * 256) != 0)
    {
      stream->msg = "wrong code table reconstruction";
      return XD3_INTERNAL;
    }

  return 0;
}
#endif

/***********************************************************************
 64BIT STREAMING
 ***********************************************************************/

/* This test encodes and decodes a series of 1 megabyte windows, each
 * containing a long run of zeros along with a single
   xoff_t size
 * record to indicate the sequence. */
static int
test_streaming (xd3_stream *in_stream, uint8_t *encbuf, uint8_t *decbuf,
		uint8_t *delbuf, usize_t megs)
{
  xd3_stream estream, dstream;
  int ret;
  usize_t i, delsize, decsize;
  xd3_config cfg;

  xd3_init_config (& cfg, in_stream->flags);
  cfg.flags |= XD3_COMPLEVEL_6;

  if ((ret = xd3_config_stream (& estream, & cfg)) ||
      (ret = xd3_config_stream (& dstream, & cfg)))
    {
      goto fail;
    }

  for (i = 0; i < megs; i += 1)
    {
      /* Stamp the window index at the head of the buffer so each 1 MB
       * window is distinct. */
      ((usize_t*) encbuf)[0] = i;

      if ((i % 200) == 199) { DOT (); }

      if ((ret = xd3_process_stream (1, & estream, xd3_encode_input, 0,
				     encbuf, 1 << 20,
				     delbuf, & delsize, 1 << 20)))
	{
	  in_stream->msg = estream.msg;
	  goto fail;
	}

      if ((ret = xd3_process_stream (0, & dstream, xd3_decode_input, 0,
				     delbuf, delsize,
				     decbuf, & decsize, 1 << 20)))
	{
	  in_stream->msg = dstream.msg;
	  goto fail;
	}

      if (decsize != 1 << 20 ||
	  memcmp (encbuf, decbuf, 1 << 20) != 0)
	{
	  in_stream->msg = "wrong result";
	  ret = XD3_INTERNAL;
	  goto fail;
	}
    }

  if ((ret = xd3_close_stream (& estream)) ||
      (ret = xd3_close_stream (& dstream)))
    {
      goto fail;
    }

 fail:
  xd3_free_stream (& estream);
  xd3_free_stream (& dstream);

  return ret;
}

/* Run tests of data streaming of over and around 4GB of data. */
static int
test_compressed_stream_overflow (xd3_stream *stream, int ignore)
{
  int ret;
  int i;
  uint8_t *buf;

  if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; }

  memset (buf, 0, TWO_MEGS_AND_DELTA);

  /* Semi-random 256-byte blocks so the data is not trivially
   * compressible end to end. */
  for (i = 0; i < (2 << 20); i += 256)
    {
      int j;
      int off = mt_random(& static_mtrand) % 10;
      for (j = 0; j < 256; j++)
	{
	  buf[i + j] = j + off;
	}
    }

  /* Test overflow of a 32-bit file offset. */
  if (SIZEOF_XOFF_T == 4)
    {
      /* (1 << 12) + 1 windows of 1 MB each crosses 4 GB. */
      ret = test_streaming (stream,
			    buf,
			    buf + (1 << 20),
			    buf + (2 << 20),
			    (1 << 12) + 1);

      if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow"))
	{
	  ret = 0;
	}
      else
	{
	  XPR(NT XD3_LIB_ERRMSG (stream, ret));
	  stream->msg = "expected overflow condition";
	  ret = XD3_INTERNAL;
	  goto fail;
	}
    }

  /* Test transfer of exactly 32bits worth of data. */
  if ((ret = test_streaming (stream,
			     buf,
			     buf + (1 << 20),
			     buf + (2 << 20),
			     1 << 12)))
    {
      goto fail;
    }
 fail:
  free (buf);
  return ret;
}

/***********************************************************************
 COMMAND LINE
 ***********************************************************************/

#if SHELL_TESTS

/* For each pair of command templates in the array below, test that
 * encoding and decoding commands work.  Also check for the expected
 * size delta, which should be approximately TEST_ADD_RATIO times the
 * file size created by test_make_inputs.  Due to differences in the
 * application header, it is suppressed (-A) so that all delta files
 * are the same.
 */
static int
test_command_line_arguments (xd3_stream *stream, int ignore)
{
  int i, ret;

  /* Encode/decode command-template pairs; each pair must round-trip
   * the target file through a delta and back. */
  static const char* cmdpairs[] =
  {
    /* standard input, output */
    "%s %s -A < %s > %s", "%s -d < %s > %s",
    "%s %s -A -e < %s > %s", "%s -d < %s > %s",
    "%s %s -A= encode < %s > %s", "%s decode < %s > %s",
    "%s %s -A -q encode < %s > %s", "%s -qdq < %s > %s",

    /* file input, standard output */
    "%s %s -A= %s > %s", "%s -d %s > %s",
    "%s %s -A -e %s > %s", "%s -d %s > %s",
    "%s %s encode -A= %s > %s", "%s decode %s > %s",

    /* file input, output */
    "%s %s -A= %s %s", "%s -d %s %s",
    "%s %s -A -e %s %s", "%s -d %s %s",
    "%s %s -A= encode %s %s", "%s decode %s %s",

    /* option placement */
    "%s %s -A -f %s %s", "%s -f -d %s %s",
    "%s %s -e -A= %s %s", "%s -d -f %s %s",
    "%s %s -f encode -A= %s %s", "%s -f decode -f %s %s",
  };

  char ecmd[TESTBUFSIZE], dcmd[TESTBUFSIZE];
  int pairs = SIZEOF_ARRAY (cmdpairs) / 2;
  xoff_t tsize;
  xoff_t dsize;
  double ratio;

  mt_init (& static_mtrand, 0x9f73f7fc);

  for (i = 0; i < pairs; i += 1)
    {
      test_setup ();
      if ((ret = test_make_inputs (stream, NULL, & tsize))) { return ret; }

      snprintf_func (ecmd, TESTBUFSIZE, cmdpairs[2*i], program_name,
		     test_softcfg_str, TEST_TARGET_FILE, TEST_DELTA_FILE);
      snprintf_func (dcmd, TESTBUFSIZE, cmdpairs[2*i+1], program_name,
		     TEST_DELTA_FILE, TEST_RECON_FILE);

      /* Encode and decode. */
      if ((ret = system (ecmd)) != 0)
	{
	  XPR(NT "encode command: %s\n", ecmd);
	  stream->msg = "encode cmd failed";
	  return XD3_INTERNAL;
	}

      if ((ret = system (dcmd)) != 0)
	{
	  XPR(NT "decode command: %s\n", dcmd);
	  stream->msg = "decode cmd failed";
	  return XD3_INTERNAL;
	}

      /* Compare the target file. */
      if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
	{
	  return ret;
	}

      if ((ret = test_file_size (TEST_DELTA_FILE, & dsize)))
	{
	  return ret;
	}

      ratio = (double) dsize / (double) tsize;

      /* Check that it is not too small, not too large. */
      if (ratio >= TEST_ADD_RATIO + TEST_EPSILON)
	{
	  XPR(NT "test encode with size ratio %.4f, "
	      "expected < %.4f (%"Q"u, %"Q"u)\n",
	      ratio, TEST_ADD_RATIO + TEST_EPSILON, dsize, tsize);
	  stream->msg = "strange encoding";
	  return XD3_INTERNAL;
	}

      if (ratio <= TEST_ADD_RATIO * (1.0 - 2 * TEST_EPSILON))
	{
	  XPR(NT "test encode with size ratio %.4f, "
	      "expected > %.4f\n",
	      ratio, TEST_ADD_RATIO - TEST_EPSILON);
	  stream->msg = "strange encoding";
	  return XD3_INTERNAL;
	}

      /* Also check that test_compare_files works.  The delta and original should
       * not be identical. */
      if ((ret = test_compare_files (TEST_DELTA_FILE, TEST_TARGET_FILE)) == 0)
	{
	  stream->msg = "broken test_compare_files";
	  return XD3_INTERNAL;
	}

      test_cleanup ();
      DOT ();
    }

  return 0;
}

/* Runs "printhdr" on INPUT and greps its output for a LINE_START..MATCHES
 * pattern; YES_OR_NO selects whether the match is required or forbidden. */
static int
check_vcdiff_header (xd3_stream *stream,
		     const char *input,
		     const char *line_start,
		     const char *matches,
		     int yes_or_no)
{
  int ret;
  char vcmd[TESTBUFSIZE], gcmd[TESTBUFSIZE];

  snprintf_func (vcmd, TESTBUFSIZE, "%s printhdr -f %s %s",
		 program_name, input, TEST_RECON2_FILE);

  if ((ret = system (vcmd)) != 0)
    {
      XPR(NT "printhdr command: %s\n", vcmd);
      stream->msg = "printhdr cmd failed";
      return XD3_INTERNAL;
    }

  snprintf_func (gcmd, TESTBUFSIZE, "grep \"%s.*%s.*\" %s > /dev/null",
		 line_start, matches, TEST_RECON2_FILE);

  if (yes_or_no)
    {
      if ((ret = do_cmd (stream, gcmd)))
	{
	  XPR(NT "%s\n", gcmd);
	  return ret;
	}
    }
  else
    {
      if ((ret = do_fail (stream, gcmd)))
	{
	  XPR(NT "%s\n", gcmd);
	  return ret;
	}
    }

  return 0;
}

/* Encodes with feature set VARIANT (bit 0: adler32, bit 1: app header,
 * bit 2: secondary), recodes toggling the features selected by CHANGE,
 * verifies the recoded header flags, then decodes and compares. */
static int
test_recode_command2 (xd3_stream *stream, int has_source,
		      int variant, int change)
{
  int has_adler32 = (variant & 0x1) != 0;
  int has_apphead = (variant & 0x2) != 0;
  int has_secondary = (variant & 0x4) != 0;

  int change_adler32 = (change & 0x1) != 0;
  int change_apphead = (change & 0x2) != 0;
  int change_secondary = (change & 0x4) != 0;

  int recoded_adler32 = change_adler32 ? !has_adler32 : has_adler32;
  int recoded_apphead = change_apphead ? !has_apphead : has_apphead;
  int recoded_secondary = change_secondary ? !has_secondary : has_secondary;

  char ecmd[TESTBUFSIZE], recmd[TESTBUFSIZE], dcmd[TESTBUFSIZE];
  xoff_t tsize, ssize;
  int ret;

  test_setup ();

  if ((ret = test_make_inputs (stream, has_source ? & ssize : NULL, & tsize)))
    {
      return ret;
    }

  /* First encode */
  snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",
		 program_name,
		 test_softcfg_str,
		 has_adler32 ? "" : "-n ",
		 has_apphead ? "-A=encode_apphead " : "-A= ",
		 has_secondary ? "-S djw " : "-S none ",
		 has_source ? "-s " : "",
		 has_source ? TEST_SOURCE_FILE : "",
		 TEST_TARGET_FILE,
		 TEST_DELTA_FILE);

  if ((ret = system (ecmd)) != 0)
    {
      XPR(NT "encode command: %s\n", ecmd);
      stream->msg = "encode cmd failed";
      return XD3_INTERNAL;
    }

  /* Now recode */
  snprintf_func (recmd, TESTBUFSIZE, "%s recode %s -f %s %s %s %s %s",
		 program_name,
		 test_softcfg_str,
		 recoded_adler32 ? "" : "-n ",
		 !change_apphead ? "" :
		     (recoded_apphead ? "-A=recode_apphead " : "-A= "),
		 recoded_secondary ? "-S djw " : "-S none ",
		 TEST_DELTA_FILE,
		 TEST_COPY_FILE);

  if ((ret = system (recmd)) != 0)
    {
      XPR(NT "recode command: %s\n", recmd);
      stream->msg = "recode cmd failed";
      return XD3_INTERNAL;
    }

  /* Check recode changes. */
  if ((ret = check_vcdiff_header (stream,
				  TEST_COPY_FILE,
				  "VCDIFF window indicator",
				  "VCD_SOURCE",
				  has_source))) { return ret; }

  if ((ret = check_vcdiff_header (stream,
				  TEST_COPY_FILE,
				  "VCDIFF header indicator",
				  "VCD_SECONDARY",
				  recoded_secondary))) { return ret; }

  if ((ret = check_vcdiff_header (stream,
				  TEST_COPY_FILE,
				  "VCDIFF window indicator",
				  "VCD_ADLER32",
				  /* Recode can't generate an adler32
				   * checksum, it can only preserve it or
				   * remove it. */
				  has_adler32 && recoded_adler32)))
    {
      return ret;
    }

  if (!change_apphead)
    {
      if ((ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF header indicator",
				      "VCD_APPHEADER",
				      has_apphead))) { return ret; }
      if ((ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF application header",
				      "encode_apphead",
				      has_apphead))) { return ret; }
    }
  else
    {
      if ((ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF header indicator",
				      "VCD_APPHEADER",
				      recoded_apphead))) { return ret; }
      if (recoded_apphead &&
	  (ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF application header",
				      "recode_apphead",
				      1))) { return ret; }
    }

  /* Now decode */
  snprintf_func (dcmd, TESTBUFSIZE, "%s -fd %s %s %s %s ", program_name,
		 has_source ? "-s " : "",
		 has_source ? TEST_SOURCE_FILE : "",
		 TEST_COPY_FILE,
		 TEST_RECON_FILE);

  if ((ret = system (dcmd)) != 0)
    {
      XPR(NT "decode command: %s\n", dcmd);
      stream->msg = "decode cmd failed";
      return XD3_INTERNAL;
    }

  /* Now compare. */
  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
    {
      return ret;
    }

  return 0;
}

static int
test_recode_command (xd3_stream *stream, int ignore)
{
  /* Things to test:
   * - with and without a source file (recode does not change)
   *
   * (recode may or may not change -- 8 variations)
   * - with and without adler32
   * - with and without app header
   * - with and without secondary
   */
  int has_source;
  int variant;
  int change;
  int ret;

  for (has_source = 0; has_source < 2; has_source++)
    {
      for (variant = 0; variant < 8; variant++)
	{
	  for (change = 0; change < 8; change++)
	    {
	      if ((ret = test_recode_command2 (stream, has_source,
					       variant, change)))
		{
		  return ret;
		}
	    }
	  DOT ();
	}
    }

  return 0;
}
#endif

/***********************************************************************
 EXTERNAL I/O DECOMPRESSION/RECOMPRESSION
 ***********************************************************************/

#if EXTERNAL_COMPRESSION
/* This performs one step of the test_externally_compressed_io
 * function described below.
   It builds a pipe containing both Xdelta
 * and external compression/decompression that should not modify the
 * data passing through. */
static int
test_compressed_pipe (xd3_stream *stream, main_extcomp *ext, char* buf,
		      const char* comp_options, const char* decomp_options,
		      int do_ext_recomp, const char* msg)
{
  int ret;
  char decomp_buf[TESTBUFSIZE];

  if (do_ext_recomp)
    {
      /* Append the external decompressor as the final pipe stage. */
      snprintf_func (decomp_buf, TESTBUFSIZE,
		     " | %s %s", ext->decomp_cmdname, ext->decomp_options);
    }
  else
    {
      decomp_buf[0] = 0;
    }

  snprintf_func (buf, TESTBUFSIZE, "%s %s < %s | %s %s | %s %s%s > %s",
		 ext->recomp_cmdname, ext->recomp_options,
		 TEST_TARGET_FILE,
		 program_name, comp_options,
		 program_name, decomp_options, decomp_buf,
		 TEST_RECON_FILE);

  if ((ret = system (buf)) != 0)
    {
      stream->msg = msg;
      return XD3_INTERNAL;
    }

  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
    {
      return XD3_INTERNAL;
    }

  DOT ();
  return 0;
}

/* We want to test that a pipe such as:
 *
 * --> | gzip -cf | xdelta3 -cf | xdelta3 -dcf | gzip -dcf | -->
 *
 * is transparent, i.e., does not modify the stream of data.  However,
 * we also want to verify that at the center the data is properly
 * compressed, i.e., that we do not just have a re-compressed gzip
 * format, that we have an VCDIFF format.  We do this in two steps.
 * First test the above pipe, then test with suppressed output
 * recompression (-D).  The result should be the original input:
 *
 * --> | gzip -cf | xdelta3 -cf | xdelta3 -Ddcf | -->
 *
 * Finally we want to test that -D also disables input decompression:
 *
 * --> | gzip -cf | xdelta3 -Dcf | xdelta3 -Ddcf | gzip -dcf | -->
 */
static int
test_externally_compressed_io (xd3_stream *stream, int ignore)
{
  usize_t i;
  int ret;
  char buf[TESTBUFSIZE];

  mt_init (& static_mtrand, 0x9f73f7fc);

  if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; }

  for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1)
    {
      main_extcomp *ext = & extcomp_types[i];

      /* Test for the existence of the external command first, if not skip. */
      snprintf_func (buf, TESTBUFSIZE, "%s %s < /dev/null > /dev/null",
		     ext->recomp_cmdname, ext->recomp_options);

      if ((ret = system (buf)) != 0)
	{
	  XPR(NT "%s=0", ext->recomp_cmdname);
	  continue;
	}

      if ((ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-dcfq", 1,
				       "compression failed: identity pipe")) ||
	  (ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-Rdcfq", 0,
				       "compression failed: without recompression")) ||
	  (ret = test_compressed_pipe (stream, ext, buf, "-Dcfq", "-Rdcfq", 1,
				       "compression failed: without decompression")))
	{
	  return ret;
	}
    }

  return 0;
}

/* This tests the proper functioning of external decompression for
 * source files.  The source and target files are identical and
 * compressed by gzip.  Decoding such a delta with recompression
 * disabled (-R) should produce the original, uncompressed
 * source/target file.  Then it checks with output recompression
 * enabled--in this case the output should be a compressed copy of the
 * original source/target file.  Then it checks that encoding with
 * decompression disabled works--the compressed files are identical
 * and decoding them should always produce a compressed output,
 * regardless of -R since the encoded delta file had decompression
 * disabled. */
static int
test_source_decompression (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];
  const main_extcomp *ext;
  xoff_t dsize;

  mt_init (& static_mtrand, 0x9f73f7fc);

  test_setup ();
  if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; }

  /* Use gzip. */
  if ((ext = main_get_compressor ("G")) == NULL)
    {
      XPR(NT "skipped");
      return 0;
    }

  /* Save an uncompressed copy. */
  if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; }

  /* Compress the source. */
  snprintf_func (buf, TESTBUFSIZE, "%s -1 %s < %s > %s", ext->recomp_cmdname,
		 ext->recomp_options, TEST_COPY_FILE, TEST_SOURCE_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Compress the target.
*/ snprintf_func (buf, TESTBUFSIZE, "%s -9 %s < %s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_COPY_FILE, TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now the two identical files are compressed. Delta-encode the target, * with decompression. */ snprintf_func (buf, TESTBUFSIZE, "%s -e -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Check that the compressed file is small (b/c inputs are * identical). */ if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; } /* Deltas for identical files should be very small. */ if (dsize > 200) { XPR(NT "external compression did not happen\n"); stream->msg = "external compression did not happen"; return XD3_INTERNAL; } /* Decode the delta file with recompression disabled, should get an * uncompressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -v -dq -R -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON_FILE))) { return ret; } /* Decode the delta file with recompression, should get a compressed file * out. But we can't compare compressed files directly. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -v -dqf -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s %s < %s > %s", ext->decomp_cmdname, ext->decomp_options, TEST_RECON_FILE, TEST_RECON2_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON2_FILE))) { return ret; } /* Encode with decompression disabled */ snprintf_func (buf, TESTBUFSIZE, "%s -e -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Decode the delta file with decompression disabled, should get the * identical compressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -d -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } test_cleanup(); return 0; } #endif /*********************************************************************** FORCE, STDOUT ***********************************************************************/ /* This tests that output will not overwrite an existing file unless * -f was specified. The test is for encoding (the same code handles * it for decoding). */ static int test_force_behavior (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; /* Create empty target file */ test_setup (); snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode to delta file */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode again, should fail. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -q -e %s %s ",
		 program_name, TEST_TARGET_FILE, TEST_DELTA_FILE);

  if ((ret = do_fail (stream, buf))) { return ret; }

  /* Force it, should succeed. */
  snprintf_func (buf, TESTBUFSIZE, "%s -f -e %s %s",
		 program_name, TEST_TARGET_FILE, TEST_DELTA_FILE);

  if ((ret = do_cmd (stream, buf))) { return ret; }

  test_cleanup();
  return 0;
}

/* This checks the proper operation of the -c flag.  When specified
 * the default output becomes stdout, otherwise the input must be
 * provided (encode) or it may be defaulted (decode w/ app header). */
static int
test_stdout_behavior (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];

  test_setup();

  /* Create an empty target file to drive the encoder. */
  snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Without -c, encode writes to delta file */
  snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s",
		 program_name, TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* With -c, encode writes to stdout */
  snprintf_func (buf, TESTBUFSIZE, "%s -e -c %s > %s",
		 program_name, TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Without -c, decode writes to target file name, but it fails because the
   * file exists. */
  snprintf_func (buf, TESTBUFSIZE, "%s -q -d %s ",
		 program_name, TEST_DELTA_FILE);
  if ((ret = do_fail (stream, buf))) { return ret; }

  /* With -c, decode writes to stdout */
  snprintf_func (buf, TESTBUFSIZE, "%s -d -c %s > /dev/null",
		 program_name, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  test_cleanup();
  return 0;
}

/* This tests that the no-output flag (-J) works. 
*/ static int test_no_output (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup (); snprintf_func (buf, TESTBUFSIZE, "touch %s && chmod 0000 %s", TEST_NOPERM_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Try no_output encode w/out unwritable output file */ snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now really write the delta to test decode no-output */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup (); return 0; } /*********************************************************************** Source identical optimization ***********************************************************************/ /* Computing a delta should be fastest when the two inputs are * identical, this checks it. The library is called to compute a * delta between a 10000 byte file, 1000 byte winsize, 500 byte source * blocksize. The same buffer is used for both source and target. */ static int test_identical_behavior (xd3_stream *stream, int ignore) { #define IDB_TGTSZ 10000 /* Not a power of two b/c of hard-coded expectations below. 
*/ #define IDB_BLKSZ 512 #define IDB_WINSZ 1000 #define IDB_DELSZ 1000 #define IDB_WINCNT (IDB_TGTSZ / IDB_WINSZ) int ret, i; uint8_t buf[IDB_TGTSZ]; uint8_t del[IDB_DELSZ]; uint8_t rec[IDB_TGTSZ]; xd3_source source; int nextencwin = 0; int winstarts = 0, winfinishes = 0; usize_t delpos = 0, recsize; xd3_config config; memset(&source, 0, sizeof(source)); for (i = 0; i < IDB_TGTSZ; i += 1) { buf[i] = (uint8_t) mt_random (&static_mtrand); } stream->winsize = IDB_WINSZ; source.blksize = IDB_BLKSZ; source.name = ""; source.curblk = NULL; source.curblkno = 0; if ((ret = xd3_set_source (stream, & source))) { goto fail; } /* Compute an delta between identical source and targets. */ for (;;) { ret = xd3_encode_input (stream); if (ret == XD3_INPUT) { xd3_avail_input (stream, buf + (IDB_WINSZ * nextencwin), IDB_WINSZ); nextencwin += 1; continue; } if (ret == XD3_GETSRCBLK) { source.curblkno = source.getblkno; source.onblk = IDB_BLKSZ; source.curblk = buf + source.getblkno * IDB_BLKSZ; continue; } if (ret == XD3_WINSTART) { winstarts++; continue; } if (ret == XD3_WINFINISH) { winfinishes++; if (winfinishes == IDB_WINCNT) { break; } continue; } if (ret != XD3_OUTPUT) { goto fail; } CHECK(delpos + stream->avail_out <= IDB_DELSZ); memcpy (del + delpos, stream->next_out, stream->avail_out); delpos += stream->avail_out; xd3_consume_output (stream); } CHECK(winfinishes == IDB_WINCNT); CHECK(winstarts == IDB_WINCNT); CHECK(nextencwin == IDB_WINCNT); /* Reset. */ memset(&source, 0, sizeof(source)); source.blksize = IDB_TGTSZ; source.onblk = IDB_TGTSZ; source.curblk = buf; source.curblkno = 0; if ((ret = xd3_close_stream (stream))) { goto fail; } xd3_free_stream (stream); xd3_init_config (& config, 0); if ((ret = xd3_config_stream (stream, & config))) { goto fail; } if ((ret = xd3_set_source_and_size (stream, & source, IDB_TGTSZ))) { goto fail; } /* Decode. */ if ((ret = xd3_decode_stream (stream, del, delpos, rec, & recsize, IDB_TGTSZ))) { goto fail; } /* Check result size and data. 
*/ if (recsize != IDB_TGTSZ) { stream->msg = "wrong size reconstruction"; goto fail; } if (memcmp (rec, buf, IDB_TGTSZ) != 0) { stream->msg = "wrong data reconstruction"; goto fail; } /* Check that there was one copy per window. */ IF_DEBUG (if (stream->n_scpy != IDB_WINCNT || stream->n_add != 0 || stream->n_run != 0) { stream->msg = "wrong copy count"; goto fail; }); /* Check that no checksums were computed because the initial match was presumed. */ IF_DEBUG (if (stream->large_ckcnt != 0) { stream->msg = "wrong checksum behavior"; goto fail; }); ret = 0; fail: return ret; } /*********************************************************************** String matching test ***********************************************************************/ /* Check particular matching behaviors by calling * xd3_string_match_soft directly with specific arguments. */ typedef struct _string_match_test string_match_test; typedef enum { SM_NONE = 0, SM_LAZY = (1 << 1), } string_match_flags; struct _string_match_test { const char *input; int flags; const char *result; }; static const string_match_test match_tests[] = { /* nothing */ { "1234567890", SM_NONE, "" }, /* basic run, copy */ { "11111111112323232323", SM_NONE, "R0/10 C12/8@10" }, /* no run smaller than MIN_RUN=8 */ { "1111111", SM_NONE, "C1/6@0" }, { "11111111", SM_NONE, "R0/8" }, /* simple promotion: the third copy address depends on promotion */ { "ABCDEF_ABCDEF^ABCDEF", SM_NONE, "C7/6@0 C14/6@7" }, /* { "ABCDEF_ABCDEF^ABCDEF", SM_PROMOTE, "C7/6@0 C14/6@0" }, forgotten */ /* simple lazy: there is a better copy starting with "23 X" than "123 " */ { "123 23 XYZ 123 XYZ", SM_NONE, "C11/4@0" }, { "123 23 XYZ 123 XYZ", SM_LAZY, "C11/4@0 C12/6@4" }, /* trylazy: no lazy matches unless there are at least two characters beyond * the first match */ { "2123_121212", SM_LAZY, "C7/4@5" }, { "2123_1212123", SM_LAZY, "C7/4@5" }, { "2123_1212123_", SM_LAZY, "C7/4@5 C8/5@0" }, /* trylazy: no lazy matches if the copy is >= MAXLAZY=10 */ { 
"2123_121212123_", SM_LAZY, "C7/6@5 C10/5@0" }, { "2123_12121212123_", SM_LAZY, "C7/8@5 C12/5@0" }, { "2123_1212121212123_", SM_LAZY, "C7/10@5" }, /* lazy run: check a run overlapped by a longer copy */ { "11111112 111111112 1", SM_LAZY, "C1/6@0 R9/8 C10/10@0" }, /* lazy match: match_length,run_l >= min_match tests, shouldn't get any * copies within the run, no run within the copy */ { "^________^________ ", SM_LAZY, "R1/8 C9/9@0" }, /* chain depth: it only goes back 10. this checks that the 10th match hits * and the 11th misses. */ { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/5@0" }, { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234>1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/4@45 C55/4@50" }, /* ssmatch test */ { "ABCDE___ABCDE*** BCDE***", SM_NONE, "C8/5@0 C17/4@1" }, /*{ "ABCDE___ABCDE*** BCDE***", SM_SSMATCH, "C8/5@0 C17/7@9" }, forgotten */ }; static int test_string_matching (xd3_stream *stream, int ignore) { usize_t i; int ret; xd3_config config; char rbuf[TESTBUFSIZE]; for (i = 0; i < SIZEOF_ARRAY (match_tests); i += 1) { const string_match_test *test = & match_tests[i]; char *rptr = rbuf; usize_t len = (usize_t) strlen (test->input); xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 4; config.smatcher_soft.large_step = 4; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 10; config.smatcher_soft.small_lchain = 10; config.smatcher_soft.max_lazy = (test->flags & SM_LAZY) ? 10 : 0; config.smatcher_soft.long_enough = 10; if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_encode_init_full (stream))) { return ret; } xd3_avail_input (stream, (uint8_t*)test->input, len); if ((ret = stream->smatcher.string_match (stream))) { return ret; } *rptr = 0; while (! 
xd3_rlist_empty (& stream->iopt_used)) { xd3_rinst *inst = xd3_rlist_pop_front (& stream->iopt_used); switch (inst->type) { case XD3_RUN: *rptr++ = 'R'; break; case XD3_CPY: *rptr++ = 'C'; break; default: CHECK(0); } snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d", inst->pos, inst->size); rptr += strlen (rptr); if (inst->type == XD3_CPY) { *rptr++ = '@'; snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%"Q"d", inst->addr); rptr += strlen (rptr); } *rptr++ = ' '; xd3_rlist_push_back (& stream->iopt_free, inst); } if (rptr != rbuf) { rptr -= 1; *rptr = 0; } if (strcmp (rbuf, test->result) != 0) { XPR(NT "test %u: expected %s: got %s", i, test->result, rbuf); stream->msg = "wrong result"; return XD3_INTERNAL; } } return 0; } /* * This is a test for many overlapping instructions. It must be a lazy * matcher. */ static int test_iopt_flush_instructions (xd3_stream *stream, int ignore) { int ret, i; usize_t tpos = 0; usize_t delta_size, recon_size; xd3_config config; uint8_t target[TESTBUFSIZE]; uint8_t delta[TESTBUFSIZE]; uint8_t recon[TESTBUFSIZE]; xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 16; config.smatcher_soft.large_step = 16; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 128; config.smatcher_soft.small_lchain = 16; config.smatcher_soft.max_lazy = 8; config.smatcher_soft.long_enough = 128; if ((ret = xd3_config_stream (stream, & config))) { return ret; } for (i = 1; i < 250; i++) { target[tpos++] = i; target[tpos++] = i+1; target[tpos++] = i+2; target[tpos++] = i+3; target[tpos++] = 0; } for (i = 1; i < 253; i++) { target[tpos++] = i; } if ((ret = xd3_encode_stream (stream, target, tpos, delta, & delta_size, sizeof (delta)))) { return ret; } xd3_free_stream(stream); if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_decode_stream (stream, delta, delta_size, recon, & recon_size, sizeof (recon)))) { return ret; } CHECK(tpos == 
recon_size); CHECK(memcmp(target, recon, recon_size) == 0); return 0; } /* * This tests the 32/64bit ambiguity for source-window matching. */ static int test_source_cksum_offset (xd3_stream *stream, int ignore) { xd3_source source; // Inputs are: struct { xoff_t cpos; // stream->srcwin_cksum_pos; xoff_t ipos; // stream->total_in; xoff_t size; // stream->src->size; usize_t input; // input 32-bit offset xoff_t output; // output 64-bit offset } cksum_test[] = { // If cpos is <= 2^32 { 1, 1, 1, 1, 1 }, #if XD3_USE_LARGEFILE64 // cpos ipos size input output // 0x____xxxxxULL, 0x____xxxxxULL, 0x____xxxxxULL, 0x___xxxxxUL, 0x____xxxxxULL { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0x00000000UL, 0x100000000ULL }, { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0xF0000000UL, 0x0F0000000ULL }, { 0x100200000ULL, 0x100100000ULL, 0x100200000ULL, 0x00300000UL, 0x000300000ULL }, { 25771983104ULL, 25770000000ULL, 26414808769ULL, 2139216707UL, 23614053187ULL }, #endif { 0, 0, 0, 0, 0 }, }, *test_ptr; stream->src = &source; for (test_ptr = cksum_test; test_ptr->cpos; test_ptr++) { xoff_t r; stream->srcwin_cksum_pos = test_ptr->cpos; stream->total_in = test_ptr->ipos; r = xd3_source_cksum_offset(stream, test_ptr->input); CHECK(r == test_ptr->output); } return 0; } static int test_in_memory (xd3_stream *stream, int ignore) { // test_text is 256 bytes uint8_t ibuf[sizeof(test_text)]; uint8_t dbuf[sizeof(test_text)]; uint8_t obuf[sizeof(test_text)]; usize_t size = sizeof(test_text); usize_t dsize, osize; int r1, r2; int eflags = SECONDARY_DJW ? 
XD3_SEC_DJW : 0; memcpy(ibuf, test_text, size); memset(ibuf + 128, 0, 16); r1 = xd3_encode_memory(ibuf, size, test_text, size, dbuf, &dsize, size, eflags); r2 = xd3_decode_memory(dbuf, dsize, test_text, size, obuf, &osize, size, 0); if (r1 != 0 || r2 != 0 || dsize >= (size/2) || dsize < 1 || osize != size) { stream->msg = "encode/decode size error"; return XD3_INTERNAL; } if (memcmp(obuf, ibuf, size) != 0) { stream->msg = "encode/decode data error"; return XD3_INTERNAL; } return 0; } /*********************************************************************** TEST MAIN ***********************************************************************/ static int xd3_selftest (void) { #define DO_TEST(fn,flags,arg) \ do { \ xd3_stream stream; \ xd3_config config; \ xd3_init_config (& config, flags); \ XPR(NT "testing " #fn "%s...", \ flags ? (" (" #flags ")") : ""); \ if ((ret = xd3_config_stream (& stream, & config) == 0) && \ (ret = test_ ## fn (& stream, arg)) == 0) { \ XPR(NTR " success\n"); \ } else { \ XPR(NTR " failed: %s: %s\n", xd3_errstring (& stream), \ xd3_mainerror (ret)); } \ xd3_free_stream (& stream); \ if (ret != 0) { goto failure; } \ } while (0) int ret; DO_TEST (random_numbers, 0, 0); DO_TEST (decode_integer_end_of_input, 0, 0); DO_TEST (decode_integer_overflow, 0, 0); DO_TEST (encode_decode_uint32_t, 0, 0); DO_TEST (encode_decode_uint64_t, 0, 0); DO_TEST (usize_t_overflow, 0, 0); DO_TEST (forward_match, 0, 0); DO_TEST (address_cache, 0, 0); IF_GENCODETBL (DO_TEST (address_cache, XD3_ALT_CODE_TABLE, 0)); DO_TEST (string_matching, 0, 0); DO_TEST (choose_instruction, 0, 0); DO_TEST (identical_behavior, 0, 0); DO_TEST (in_memory, 0, 0); IF_GENCODETBL (DO_TEST (choose_instruction, XD3_ALT_CODE_TABLE, 0)); IF_GENCODETBL (DO_TEST (encode_code_table, 0, 0)); DO_TEST (iopt_flush_instructions, 0, 0); DO_TEST (source_cksum_offset, 0, 0); DO_TEST (decompress_single_bit_error, 0, 3); DO_TEST (decompress_single_bit_error, XD3_ADLER32, 3); IF_LZMA (DO_TEST 
(decompress_single_bit_error, XD3_SEC_LZMA, 54)); IF_FGK (DO_TEST (decompress_single_bit_error, XD3_SEC_FGK, 3)); IF_DJW (DO_TEST (decompress_single_bit_error, XD3_SEC_DJW, 8)); /* There are many expected non-failures for ALT_CODE_TABLE because * not all of the instruction codes are used. */ IF_GENCODETBL ( DO_TEST (decompress_single_bit_error, XD3_ALT_CODE_TABLE, 224)); #if SHELL_TESTS DO_TEST (force_behavior, 0, 0); DO_TEST (stdout_behavior, 0, 0); DO_TEST (no_output, 0, 0); DO_TEST (command_line_arguments, 0, 0); #if EXTERNAL_COMPRESSION DO_TEST (source_decompression, 0, 0); DO_TEST (externally_compressed_io, 0, 0); #endif DO_TEST (recode_command, 0, 0); #endif IF_LZMA (DO_TEST (secondary_lzma, 0, 1)); IF_DJW (DO_TEST (secondary_huff, 0, DJW_MAX_GROUPS)); IF_FGK (DO_TEST (secondary_fgk, 0, 1)); DO_TEST (compressed_stream_overflow, 0, 0); IF_LZMA (DO_TEST (compressed_stream_overflow, XD3_SEC_LZMA, 0)); failure: test_cleanup (); return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE; #undef DO_TEST }
/* xdelta 3 - delta compression tools and library Copyright (C) 2001, * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012. * Joshua P. MacDonald * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This is public-domain Mersenne Twister code, * attributed to Michael Brundage. Thanks! * http://www.qbrundage.com/michaelb/pubs/essays/random_number_generation.html */ static const uint32_t TEST_SEED1 = 5489UL; #define MT_LEN 624 #define MT_IA 397 static const uint32_t UPPER_MASK = 0x80000000; static const uint32_t LOWER_MASK = 0x7FFFFFFF; static const uint32_t MATRIX_A = 0x9908B0DF; #ifndef SHELL_TESTS #define SHELL_TESTS 1 #endif typedef struct mtrand mtrand; struct mtrand { int mt_index_; uint32_t mt_buffer_[MT_LEN]; }; int test_compare_files (const char* tgt, const char *rec); void mt_init(mtrand *mt, uint32_t seed); uint32_t mt_random (mtrand *mt); int test_setup (void); void mt_init(mtrand *mt, uint32_t seed) { int i; mt->mt_buffer_[0] = seed; mt->mt_index_ = MT_LEN; for (i = 1; i < MT_LEN; i++) { /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous versions, MSBs of the seed affect */ /* only MSBs of the array mt[]. 
*/
      /* 2002/01/09 modified by Makoto Matsumoto */
      mt->mt_buffer_[i] =
	(1812433253UL * (mt->mt_buffer_[i-1] ^ (mt->mt_buffer_[i-1] >> 30)) + i);
    }
}

/* Returns the next 32-bit pseudo-random value.  When the state buffer
 * is exhausted (index >= MT_LEN) it regenerates all MT_LEN words in
 * three loops (the three wrap-around cases of the recurrence), then
 * applies the MT19937 tempering transform to the extracted word. */
uint32_t mt_random (mtrand *mt)
{
  uint32_t y;
  unsigned long mag01[2];
  mag01[0] = 0;
  mag01[1] = MATRIX_A;

  if (mt->mt_index_ >= MT_LEN)
    {
      int kk;

      for (kk = 0; kk < MT_LEN - MT_IA; kk++)
	{
	  y = (mt->mt_buffer_[kk] & UPPER_MASK) |
	    (mt->mt_buffer_[kk + 1] & LOWER_MASK);
	  mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^
	    (y >> 1) ^ mag01[y & 0x1UL];
	}
      for (;kk < MT_LEN - 1; kk++)
	{
	  y = (mt->mt_buffer_[kk] & UPPER_MASK) |
	    (mt->mt_buffer_[kk + 1] & LOWER_MASK);
	  mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^
	    (y >> 1) ^ mag01[y & 0x1UL];
	}
      y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |
	(mt->mt_buffer_[0] & LOWER_MASK);
      mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^
	(y >> 1) ^ mag01[y & 0x1UL];
      mt->mt_index_ = 0;
    }

  y = mt->mt_buffer_[mt->mt_index_++];

  /* Tempering: improves equidistribution of the raw state word. */
  y ^= (y >> 11);
  y ^= (y << 7) & 0x9d2c5680UL;
  y ^= (y << 15) & 0xefc60000UL;
  y ^= (y >> 18);

  return y;
}

static mtrand static_mtrand;

#include <math.h>

/* Draws from an exponential distribution with the given MEAN,
 * clamped to MAX_VALUE. */
static uint32_t
mt_exp_rand (uint32_t mean, uint32_t max_value)
{
  double mean_d = mean;
  double erand  = log (1.0 / (mt_random (&static_mtrand) /
			      (double)UINT32_MAX));
  uint32_t x = (uint32_t) (mean_d * erand + 0.5);

  return min (x, max_value);
}

#if SHELL_TESTS
#include <sys/wait.h>
#endif

/* True when the stream's error message equals X. */
#define MSG_IS(x) (stream->msg != NULL && strcmp ((x), stream->msg) == 0)

static const usize_t TWO_MEGS_AND_DELTA = (3 << 20);
static const usize_t ADDR_CACHE_ROUNDS = 10000;

/* Parameters controlling the size/shape of generated test inputs. */
static const usize_t TEST_FILE_MEAN = 16384;
static const double TEST_ADD_MEAN = 128;
static const double TEST_ADD_MAX = 512;
static const double TEST_ADD_RATIO = 0.1;
static const double TEST_EPSILON = 0.25;

#define TESTBUFSIZE (1024 * 16)
#define TESTFILESIZE (1024)

/* Scratch file names, filled in by test_setup(). */
static char TEST_TARGET_FILE[TESTFILESIZE];
static char TEST_SOURCE_FILE[TESTFILESIZE];
static char TEST_DELTA_FILE[TESTFILESIZE];
static char TEST_RECON_FILE[TESTFILESIZE];
static char 
TEST_RECON2_FILE[TESTFILESIZE];
static char TEST_COPY_FILE[TESTFILESIZE];
static char TEST_NOPERM_FILE[TESTFILESIZE];

/* Fatal consistency check: prints the failed condition and aborts. */
#define CHECK(cond) if (!(cond)) { XPR(NT "check failure: " #cond); abort(); }

#if SHELL_TESTS
/* Use a fixed soft config so that test values are fixed.  See also
 * test_compress_text(). */
static const char* test_softcfg_str = "-C9,3,4,8,2,36,70";
#endif

/***********************************************************************
 TEST HELPERS
 ***********************************************************************/

/* Emits one progress dot (no newline) so long-running tests show
 * liveness. */
static void DOT (void)
{
  XPR(NTR ".");
}

/* Runs a shell command expected to succeed; on failure sets
 * stream->msg (distinguishing non-zero exit from abnormal
 * termination) and returns the raw system() status. */
static int do_cmd (xd3_stream *stream, const char *buf)
{
  int ret;
  if ((ret = system (buf)) != 0)
    {
      if (WIFEXITED (ret))
	{
	  stream->msg = "command exited non-zero";
	  IF_DEBUG1 (XPR(NT "command was: %s\n", buf));
	}
      else
	{
	  stream->msg = "abnormal command termination";
	}
      return ret;
    }
  return 0;
}

/* Runs a shell command expected to fail with exit status exactly 1
 * (the xdelta3 error status); any other outcome is a test error. */
static int do_fail (xd3_stream *stream, const char *buf)
{
  int ret;
  ret = system (buf);
  if (! WIFEXITED (ret) || WEXITSTATUS (ret) != 1)
    {
      stream->msg = "command should have not succeeded";
      XPR(NT "command was %s\n", buf);
      return XD3_INTERNAL;
    }
  return 0;
}

/* Test that the exponential distribution actually produces its mean. 
*/ static int test_random_numbers (xd3_stream *stream, int ignore) { usize_t i; usize_t sum = 0; usize_t mean = 50; usize_t n_rounds = 1000000; double average, error; double allowed_error = 0.1; mt_init (& static_mtrand, 0x9f73f7fe); for (i = 0; i < n_rounds; i += 1) { sum += mt_exp_rand (mean, USIZE_T_MAX); } average = (double) sum / (double) n_rounds; error = average - (double) mean; if (error < allowed_error && error > -allowed_error) { return 0; } /*XPR(NT "error is %f\n", error);*/ stream->msg = "random distribution looks broken"; return XD3_INTERNAL; } static void test_unlink (char* file) { int ret; if ((ret = unlink (file)) != 0 && errno != ENOENT) { XPR(NT "unlink %s failed: %s\n", file, strerror(ret)); } } static void test_cleanup (void) { #if 1 test_unlink (TEST_TARGET_FILE); test_unlink (TEST_SOURCE_FILE); test_unlink (TEST_DELTA_FILE); test_unlink (TEST_RECON_FILE); test_unlink (TEST_RECON2_FILE); test_unlink (TEST_COPY_FILE); test_unlink (TEST_NOPERM_FILE); #endif } int test_setup (void) { static int x = 0; x++; snprintf_func (TEST_TARGET_FILE, TESTFILESIZE, "/tmp/xdtest.target.%d", x); snprintf_func (TEST_SOURCE_FILE, TESTFILESIZE, "/tmp/xdtest.source.%d", x); snprintf_func (TEST_DELTA_FILE, TESTFILESIZE, "/tmp/xdtest.delta.%d", x); snprintf_func (TEST_RECON_FILE, TESTFILESIZE, "/tmp/xdtest.recon.%d", x); snprintf_func (TEST_RECON2_FILE, TESTFILESIZE, "/tmp/xdtest.recon2.%d", x); snprintf_func (TEST_COPY_FILE, TESTFILESIZE, "/tmp/xdtest.copy.%d", x); snprintf_func (TEST_NOPERM_FILE, TESTFILESIZE, "/tmp/xdtest.noperm.%d", x); test_cleanup(); return 0; } static int test_make_inputs (xd3_stream *stream, xoff_t *ss_out, xoff_t *ts_out) { usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; uint8_t *buf = (uint8_t*) malloc (ts + ss), *sbuf = buf, *tbuf = buf + ss; usize_t sadd = 0, sadd_max = (usize_t)(ss * TEST_ADD_RATIO); FILE *tf = NULL, *sf = 
NULL; usize_t i, j; int ret; if (buf == NULL) { return ENOMEM; } if ((tf = fopen (TEST_TARGET_FILE, "w")) == NULL || (ss_out != NULL && (sf = fopen (TEST_SOURCE_FILE, "w")) == NULL)) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if (ss_out != NULL) { for (i = 0; i < ss; ) { sbuf[i++] = (uint8_t) mt_random (&static_mtrand); } } /* Then modify the data to produce copies, everything not copied is * an add. The following logic produces the TEST_ADD_RATIO. The * variable SADD contains the number of adds so far, which should * not exceed SADD_MAX. */ /* XPR(NT "ss = %u ts = %u\n", ss, ts); */ for (i = 0; i < ts; ) { usize_t left = ts - i; usize_t next = mt_exp_rand ((uint32_t) TEST_ADD_MEAN, (uint32_t) TEST_ADD_MAX); usize_t add_left = sadd_max - sadd; double add_prob = (left == 0) ? 0 : (add_left / (double) left); int do_copy; next = min (left, next); do_copy = (next > add_left || (mt_random (&static_mtrand) / \ (double)USIZE_T_MAX) >= add_prob); if (ss_out == NULL) { do_copy &= (i > 0); } else { do_copy &= (ss - next) > 0; } if (do_copy) { /* Copy */ size_t offset = mt_random (&static_mtrand) % ((ss_out == NULL) ? i : (ss - next)); /* XPR(NT "[%u] copy %u at %u ", i, next, offset); */ for (j = 0; j < next; j += 1) { char c = ((ss_out == NULL) ? 
tbuf : sbuf)[offset + j]; /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ } else { /* Add */ /* XPR(NT "[%u] add %u ", i, next); */ for (j = 0; j < next; j += 1) { char c = (char) mt_random (&static_mtrand); /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ sadd += next; } } /* XPR(NT "sadd = %u max = %u\n", sadd, sadd_max); */ if ((fwrite (tbuf, 1, ts, tf) != ts) || (ss_out != NULL && (fwrite (sbuf, 1, ss, sf) != ss))) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if ((ret = fclose (tf)) || (ss_out != NULL && (ret = fclose (sf)))) { stream->msg = "close failed"; ret = get_errno (); goto failure; } if (ts_out) { (*ts_out) = ts; } if (ss_out) { (*ss_out) = ss; } failure: free (buf); return ret; } int test_compare_files (const char* tgt, const char *rec) { FILE *orig, *recons; static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE]; xoff_t offset = 0; size_t i; size_t oc, rc; xoff_t diffs = 0; if ((orig = fopen (tgt, "r")) == NULL) { XPR(NT "open %s failed\n", tgt); return get_errno (); } if ((recons = fopen (rec, "r")) == NULL) { XPR(NT "open %s failed\n", rec); return get_errno (); } for (;;) { oc = fread (obuf, 1, TESTBUFSIZE, orig); rc = fread (rbuf, 1, TESTBUFSIZE, recons); if (oc != rc) { return XD3_INTERNAL; } if (oc == 0) { break; } for (i = 0; i < oc; i += 1) { if (obuf[i] != rbuf[i]) { XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n", (int)i, (int)oc, offset, obuf[i], rbuf[i]); diffs++; return XD3_INTERNAL; } } offset += oc; } fclose (orig); fclose (recons); if (diffs != 0) { return XD3_INTERNAL; } return 0; } static int test_copy_to (const char *from, const char *to) { char buf[TESTBUFSIZE]; int ret; snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", from, to); if ((ret = system (buf)) != 0) { return XD3_INTERNAL; } return 0; } static int test_save_copy (const char *origname) { return test_copy_to(origname, TEST_COPY_FILE); } static int test_file_size (const char* file, 
xoff_t *size) { struct stat sbuf; int ret; (*size) = 0; if (stat (file, & sbuf) < 0) { ret = get_errno (); XPR(NT "stat failed: %s: %s\n", file, strerror (ret)); return ret; } if (! S_ISREG (sbuf.st_mode)) { ret = XD3_INTERNAL; XPR(NT "not a regular file: %s: %s\n", file, strerror (ret)); return ret; } (*size) = sbuf.st_size; return 0; } /*********************************************************************** READ OFFSET ***********************************************************************/ /* Common test for read_integer errors: encodes a 64-bit value and * then attempts to read as a 32-bit value. If TRUNC is non-zero, * attempts to get errors by shortening the input, otherwise it should * overflow. Expects XD3_INTERNAL and MSG. */ static int test_read_integer_error (xd3_stream *stream, usize_t trunto, const char *msg) { uint64_t eval = 1ULL << 34; uint32_t rval; xd3_output *buf = NULL; const uint8_t *max; const uint8_t *inp; int ret; buf = xd3_alloc_output (stream, buf); if ((ret = xd3_emit_uint64_t (stream, & buf, eval))) { goto fail; } again: inp = buf->base; max = buf->base + buf->next - trunto; if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) != XD3_INVALID_INPUT || !MSG_IS (msg)) { ret = XD3_INTERNAL; } else if (trunto && trunto < buf->next) { trunto += 1; goto again; } else { ret = 0; } fail: xd3_free_output (stream, buf); return ret; } /* Test integer overflow using the above routine. */ static int test_decode_integer_overflow (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 0, "overflow in read_intger"); } /* Test integer EOI using the above routine. */ static int test_decode_integer_end_of_input (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 1, "end-of-input in read_integer"); } /* Test that emit_integer/decode_integer/sizeof_integer/read_integer * work on correct inputs. Tests powers of (2^7), plus or minus, up * to the maximum value. 
 */
/* Statement macro forming the body of the two tests below: round-trips
 * every boundary value (2^(7k) - 1, 2^(7k), 2^(7k) + 1, plus MAX-1 and
 * MAX) through emit/sizeof/read and then through the streaming decoder.
 * Expands xd3_emit_<TYPE> etc. via token pasting; ends at the "return
 * ret" with a fail: cleanup label, so it must be the entire function
 * body. */
#define TEST_ENCODE_DECODE_INTEGER(TYPE,ONE,MAX) \
  xd3_output *rbuf = NULL; \
  xd3_output *dbuf = NULL; \
  TYPE values[64]; \
  usize_t nvalues = 0; \
  usize_t i; \
  int ret = 0; \
 \
  for (i = 0; i < (sizeof (TYPE) * 8); i += 7) \
    { \
      values[nvalues++] = (ONE << i) - ONE; \
      values[nvalues++] = (ONE << i); \
      values[nvalues++] = (ONE << i) + ONE; \
    } \
 \
  values[nvalues++] = MAX-ONE; \
  values[nvalues++] = MAX; \
 \
  rbuf = xd3_alloc_output (stream, rbuf); \
  dbuf = xd3_alloc_output (stream, dbuf); \
 \
  for (i = 0; i < nvalues; i += 1) \
    { \
      const uint8_t *max; \
      const uint8_t *inp; \
      TYPE val; \
 \
      DOT (); \
      rbuf->next = 0; \
 \
      if ((ret = xd3_emit_ ## TYPE (stream, & rbuf, values[i])) || \
	  (ret = xd3_emit_ ## TYPE (stream, & dbuf, values[i]))) \
	{ \
	  goto fail; \
	} \
 \
      inp = rbuf->base; \
      max = rbuf->base + rbuf->next; \
 \
      if (rbuf->next != xd3_sizeof_ ## TYPE (values[i])) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
 \
      if ((ret = xd3_read_ ## TYPE (stream, & inp, max, & val))) \
	{ \
	  goto fail; \
	} \
 \
      if (val != values[i]) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
 \
      DOT (); \
    } \
 \
  stream->next_in = dbuf->base; \
  stream->avail_in = dbuf->next; \
 \
  for (i = 0; i < nvalues; i += 1) \
    { \
      TYPE val; \
 \
      if ((ret = xd3_decode_ ## TYPE (stream, & val))) \
	{ \
	  goto fail; \
	} \
 \
      if (val != values[i]) \
	{ \
	  ret = XD3_INTERNAL; \
	  goto fail; \
	} \
    } \
 \
  if (stream->avail_in != 0) \
    { \
      ret = XD3_INTERNAL; \
      goto fail; \
    } \
 \
 fail: \
  xd3_free_output (stream, rbuf); \
  xd3_free_output (stream, dbuf); \
 \
  return ret

static int test_encode_decode_uint32_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint32_t,1U,UINT32_MAX);
}

static int test_encode_decode_uint64_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint64_t,1ULL,UINT64_MAX);
}

/* Exercises USIZE_T_OVERFLOW on both sides of the boundary: sums that
 * fit must not report overflow, sums that exceed USIZE_T_MAX must. */
static int test_usize_t_overflow (xd3_stream *stream, int unused)
{
  if (USIZE_T_OVERFLOW (USIZE_T_MAX, 0)) { goto fail; }
  if (USIZE_T_OVERFLOW (0, USIZE_T_MAX)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2,
			USIZE_T_MAX / 2)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2 + 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX, 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (1, USIZE_T_MAX)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX / 2 + 1, USIZE_T_MAX / 2 + 1)) { goto fail; }
  return 0;

 fail:
  stream->msg = "incorrect overflow computation";
  return XD3_INTERNAL;
}

/* Checks xd3_forward_match: with equal buffers the match length equals
 * the requested length; flipping byte i limits the match to i. */
static int test_forward_match (xd3_stream *stream, int unused)
{
  usize_t i;
  uint8_t buf1[256], buf2[256];
  memset(buf1, 0, 256);
  memset(buf2, 0, 256);

  for (i = 0; i < 256; i++)
    {
      CHECK(xd3_forward_match(buf1, buf2, i) == (int)i);
    }

  for (i = 0; i < 255; i++)
    {
      buf2[i] = 1;
      CHECK(xd3_forward_match(buf1, buf2, 256) == (int)i);
      buf2[i] = 0;
    }

  return 0;
}

/***********************************************************************
 Address cache
 ***********************************************************************/

/* Round-trips ADDR_CACHE_ROUNDS pseudo-random copy addresses through
 * xd3_encode_address/xd3_decode_address and verifies every byte and
 * every address mode was exercised. */
static int test_address_cache (xd3_stream *stream, int unused)
{
  int ret;
  usize_t i;
  usize_t offset;
  usize_t *addrs;
  uint8_t *big_buf, *buf_max;
  const uint8_t *buf;
  xd3_output *outp;
  uint8_t *modes;
  int mode_counts[16];

  stream->acache.s_near = stream->code_table_desc->near_modes;
  stream->acache.s_same = stream->code_table_desc->same_modes;

  if ((ret = xd3_encode_init_partial (stream))) { return ret; }

  addrs = (usize_t*) xd3_alloc (stream, sizeof (usize_t), ADDR_CACHE_ROUNDS);
  modes = (uint8_t*) xd3_alloc (stream, sizeof (uint8_t), ADDR_CACHE_ROUNDS);

  memset (mode_counts, 0, sizeof (mode_counts));
  memset (modes, 0, ADDR_CACHE_ROUNDS);

  addrs[0] = 0;

  /* Fixed seed: the decode pass below must see the same sequence. */
  mt_init (& static_mtrand, 0x9f73f7fc);

  /* First pass: encode addresses */
  xd3_init_cache (& stream->acache);

  for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1)
    {
      double p;
      usize_t addr;
      usize_t prev_i;
      usize_t nearby;

      p = (mt_random (&static_mtrand) / (double)USIZE_T_MAX);
      prev_i = mt_random (&static_mtrand) % offset;
      nearby = (mt_random (&static_mtrand) % 256) % offset;
      nearby = max (1U, nearby);

      /* Mix address styles: 10% near-by, 30% near-previous, rest
       * plain previous index. */
      if (p < 0.1) { addr =
addrs[offset-nearby]; } else if (p < 0.4) { addr = min (addrs[prev_i] + nearby, offset-1); } else { addr = prev_i; } if ((ret = xd3_encode_address (stream, addr, offset, & modes[offset]))) { return ret; } addrs[offset] = addr; mode_counts[modes[offset]] += 1; } /* Copy addresses into a contiguous buffer. */ big_buf = (uint8_t*) xd3_alloc (stream, xd3_sizeof_output (ADDR_HEAD (stream)), 1); for (offset = 0, outp = ADDR_HEAD (stream); outp != NULL; offset += outp->next, outp = outp->next_page) { memcpy (big_buf + offset, outp->base, outp->next); } buf_max = big_buf + offset; buf = big_buf; /* Second pass: decode addresses */ xd3_init_cache (& stream->acache); for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1) { uint32_t addr; if ((ret = xd3_decode_address (stream, offset, modes[offset], & buf, buf_max, & addr))) { return ret; } if (addr != addrs[offset]) { stream->msg = "incorrect decoded address"; return XD3_INTERNAL; } } /* Check that every byte, mode was used. */ if (buf != buf_max) { stream->msg = "address bytes not used"; return XD3_INTERNAL; } for (i = 0; i < (2 + stream->acache.s_same + stream->acache.s_near); i += 1) { if (mode_counts[i] == 0) { stream->msg = "address mode not used"; return XD3_INTERNAL; } } xd3_free (stream, modes); xd3_free (stream, addrs); xd3_free (stream, big_buf); return 0; } /*********************************************************************** Encode and decode with single bit error ***********************************************************************/ /* It compresses from 256 to around 185 bytes. * Avoids matching addresses that are a single-bit difference. * Avoids matching address 0. */ static const uint8_t test_text[] = "this is a story\n" "abouttttttttttt\n" "- his is a stor\n" "- about nothing " " all. 
boutique -" "his story is a -" "about " "what happens all" " the time what -" "am I ttttttt the" " person said, so" " what, per son -" " gory story is -" " about nothing -" "tttttt to test -" "his sto nothing"; static const uint8_t test_apphead[] = "header test"; static int test_compress_text (xd3_stream *stream, uint8_t *encoded, usize_t *encoded_size) { int ret; xd3_config cfg; int oflags = stream->flags; int flags = stream->flags | XD3_FLUSH; xd3_free_stream (stream); xd3_init_config (& cfg, flags); /* This configuration is fixed so that the "expected non-error" the counts in * decompress_single_bit_errors are too. See test_coftcfg_str. */ cfg.smatch_cfg = XD3_SMATCH_SOFT; cfg.smatcher_soft.name = "test"; cfg.smatcher_soft.large_look = 64; /* no source, not used */ cfg.smatcher_soft.large_step = 64; /* no source, not used */ cfg.smatcher_soft.small_look = 4; cfg.smatcher_soft.small_chain = 128; cfg.smatcher_soft.small_lchain = 16; cfg.smatcher_soft.max_lazy = 8; cfg.smatcher_soft.long_enough = 128; xd3_config_stream (stream, & cfg); (*encoded_size) = 0; xd3_set_appheader (stream, test_apphead, (usize_t) strlen ((char*) test_apphead)); if ((ret = xd3_encode_stream (stream, test_text, sizeof (test_text), encoded, encoded_size, 4*sizeof (test_text)))) { goto fail; } if ((ret = xd3_close_stream (stream))) { goto fail; } fail: xd3_free_stream (stream); xd3_init_config (& cfg, oflags); xd3_config_stream (stream, & cfg); return ret; } static int test_decompress_text (xd3_stream *stream, uint8_t *enc, usize_t enc_size, usize_t test_desize) { xd3_config cfg; char decoded[sizeof (test_text)]; uint8_t *apphead; usize_t apphead_size; usize_t decoded_size; const char *msg; int ret; usize_t pos = 0; int flags = stream->flags; usize_t take; input: /* Test decoding test_desize input bytes at a time */ take = min (enc_size - pos, test_desize); CHECK(take > 0); xd3_avail_input (stream, enc + pos, take); again: ret = xd3_decode_input (stream); pos += take; take = 0; switch (ret) { 
case XD3_OUTPUT: break; case XD3_WINSTART: case XD3_GOTHEADER: goto again; case XD3_INPUT: if (pos < enc_size) { goto input; } /* else fallthrough */ case XD3_WINFINISH: default: goto fail; } CHECK(ret == XD3_OUTPUT); CHECK(pos == enc_size); if (stream->avail_out != sizeof (test_text)) { stream->msg = "incorrect output size"; ret = XD3_INTERNAL; goto fail; } decoded_size = stream->avail_out; memcpy (decoded, stream->next_out, stream->avail_out); xd3_consume_output (stream); if ((ret = xd3_get_appheader (stream, & apphead, & apphead_size))) { goto fail; } if (apphead_size != strlen ((char*) test_apphead) || memcmp (apphead, test_apphead, strlen ((char*) test_apphead)) != 0) { stream->msg = "incorrect appheader"; ret = XD3_INTERNAL; goto fail; } if ((ret = xd3_decode_input (stream)) != XD3_WINFINISH || (ret = xd3_close_stream (stream)) != 0) { goto fail; } if (decoded_size != sizeof (test_text) || memcmp (decoded, test_text, sizeof (test_text)) != 0) { stream->msg = "incorrect output text"; ret = EIO; } fail: msg = stream->msg; xd3_free_stream (stream); xd3_init_config (& cfg, flags); xd3_config_stream (stream, & cfg); stream->msg = msg; return ret; } static int test_decompress_single_bit_error (xd3_stream *stream, int expected_non_failures) { int ret; usize_t i; uint8_t encoded[4*sizeof (test_text)]; /* make room for alt code table */ usize_t encoded_size; int non_failures = 0; int cksum = (stream->flags & XD3_ADLER32) != 0; //#define DEBUG_TEST_FAILURES #ifndef DEBUG_TEST_FAILURES #define TEST_FAILURES() #else /* For checking non-failure cases by hand, enable this macro and run * xdelta printdelta with print_cpymode disabled. Every non-failure * should change a copy address mode, which doesn't cause a failure * because the address cache starts out with all zeros. 
./xdelta3 test for i in test_text.xz.*; do ./xdelta3 printdelta $i > $i.out; diff $i.out test_text.xz.0.out; done */ system ("rm -rf test_text.*"); { char buf[TESTBUFSIZE]; FILE *f; snprintf_func (buf, TESTBUFSIZE, "test_text"); f = fopen (buf, "w"); fwrite (test_text,1,sizeof (test_text),f); fclose (f); } #define TEST_FAILURES() \ do { \ char buf[TESTBUFSIZE]; \ FILE *f; \ snprintf_func (buf, TESTBUFSIZE, "test_text.xz.%d", non_failures); \ f = fopen (buf, "w"); \ fwrite (encoded,1,encoded_size,f); \ fclose (f); \ } while (0) #endif stream->sec_data.inefficient = 1; stream->sec_inst.inefficient = 1; stream->sec_addr.inefficient = 1; /* Encode text, test correct input */ if ((ret = test_compress_text (stream, encoded, & encoded_size))) { /*stream->msg = "without error: encode failure";*/ return ret; } if ((ret = test_decompress_text (stream, encoded, encoded_size, sizeof (test_text) / 4))) { /*stream->msg = "without error: decode failure";*/ return ret; } TEST_FAILURES(); for (i = 0; i < encoded_size*8; i += 1) { /* Single bit error. */ encoded[i/8] ^= 1 << (i%8); if ((ret = test_decompress_text (stream, encoded, encoded_size, sizeof (test_text))) == 0) { non_failures += 1; #ifdef DEBUG_TEST_FAILURES XPR(NT "%u[%u] non-failure %u\n", i/8, i%8, non_failures); #endif TEST_FAILURES(); } else { /*XPR(NT "%u[%u] failure: %s\n", i/8, i%8, stream->msg);*/ } /* decompress_text returns EIO when the final memcmp() fails, but that * should never happen with checksumming on. */ if (cksum && ret == EIO) { /*XPR(NT "%u[%u] cksum mismatch\n", i/8, i%8);*/ stream->msg = "checksum mismatch"; return XD3_INTERNAL; } /* Undo single bit error. 
*/ encoded[i/8] ^= 1 << (i%8); } /* Test correct input again */ if ((ret = test_decompress_text (stream, encoded, encoded_size, 1))) { /*stream->msg = "without error: decode failure";*/ return ret; } /* Check expected non-failures */ if (non_failures != expected_non_failures) { XPR(NT "non-failures %u; expected %u", non_failures, expected_non_failures); stream->msg = "incorrect"; return XD3_INTERNAL; } DOT (); return 0; } /*********************************************************************** Secondary compression tests ***********************************************************************/ #if SECONDARY_ANY typedef int (*sec_dist_func) (xd3_stream *stream, xd3_output *data); static int sec_dist_func1 (xd3_stream *stream, xd3_output *data); static int sec_dist_func2 (xd3_stream *stream, xd3_output *data); static int sec_dist_func3 (xd3_stream *stream, xd3_output *data); static int sec_dist_func4 (xd3_stream *stream, xd3_output *data); static int sec_dist_func5 (xd3_stream *stream, xd3_output *data); static int sec_dist_func6 (xd3_stream *stream, xd3_output *data); static int sec_dist_func7 (xd3_stream *stream, xd3_output *data); static int sec_dist_func8 (xd3_stream *stream, xd3_output *data); static int sec_dist_func9 (xd3_stream *stream, xd3_output *data); static int sec_dist_func10 (xd3_stream *stream, xd3_output *data); static int sec_dist_func11 (xd3_stream *stream, xd3_output *data); static sec_dist_func sec_dists[] = { sec_dist_func1, sec_dist_func2, sec_dist_func3, sec_dist_func4, sec_dist_func5, sec_dist_func6, sec_dist_func7, sec_dist_func8, sec_dist_func9, sec_dist_func10, sec_dist_func11, }; /* Test ditsribution: 100 bytes of the same character (13). */ static int sec_dist_func1 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < 100; i += 1) { if ((ret = xd3_emit_byte (stream, & data, 13))) { return ret; } } return 0; } /* Test ditsribution: uniform covering half the alphabet. 
*/ static int sec_dist_func2 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { if ((ret = xd3_emit_byte (stream, & data, i%(ALPHABET_SIZE/2)))) { return ret; } } return 0; } /* Test ditsribution: uniform covering the entire alphabet. */ static int sec_dist_func3 (xd3_stream *stream, xd3_output *data) { int i, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { if ((ret = xd3_emit_byte (stream, & data, i%ALPHABET_SIZE))) { return ret; } } return 0; } /* Test distribution: An exponential distribution covering half the alphabet */ static int sec_dist_func4 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_exp_rand (10, ALPHABET_SIZE/2); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An exponential distribution covering the entire alphabet */ static int sec_dist_func5 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_exp_rand (10, ALPHABET_SIZE-1); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An uniform random distribution covering half the alphabet */ static int sec_dist_func6 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*20; i += 1) { x = mt_random (&static_mtrand) % (ALPHABET_SIZE/2); if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: An uniform random distribution covering the entire alphabet */ static int sec_dist_func7 (xd3_stream *stream, xd3_output *data) { int i, ret, x; for (i = 0; i < ALPHABET_SIZE*200; i += 1) { x = mt_random (&static_mtrand) % ALPHABET_SIZE; if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; } } return 0; } /* Test distribution: A small number of frequent characters, difficult * to divide into many groups */ static int sec_dist_func8 (xd3_stream *stream, xd3_output *data) { int i, ret; for 
(i = 0; i < ALPHABET_SIZE*5; i += 1) { if ((ret = xd3_emit_byte (stream, & data, 0))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 64))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 128))) { return ret; } if ((ret = xd3_emit_byte (stream, & data, 255))) { return ret; } } return 0; } /* Test distribution: One that causes many FGK block promotions (found a bug) */ static int sec_dist_func9 (xd3_stream *stream, xd3_output *data) { int i, ret; int ramp = 0; int rcount = 0; int prom = 0; int pcount = 0; /* 200 was long enough to trigger it--only when stricter checking * that counted all blocks was turned on, but it seems I deleted * this code. (missing fgk_free_block on line 398). */ for (i = 0; i < ALPHABET_SIZE*200; i += 1) { repeat: if (ramp < ALPHABET_SIZE) { /* Initially Nth symbol has (N+1) frequency */ if (rcount <= ramp) { rcount += 1; if ((ret = xd3_emit_byte (stream, & data, ramp))) { return ret; } continue; } ramp += 1; rcount = 0; goto repeat; } /* Thereafter, promote least freq to max freq */ if (pcount == ALPHABET_SIZE) { pcount = 0; prom = (prom + 1) % ALPHABET_SIZE; } pcount += 1; if ((ret = xd3_emit_byte (stream, & data, prom))) { return ret; } } return 0; } /* Test distribution: freq[i] == i*i, creates a 21-bit code length, fixed in 3.0r. 
*/ static int sec_dist_func10 (xd3_stream *stream, xd3_output *data) { int i, j, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { for (j = 0; j <= (i*i); j += 1) { if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; } } } return 0; } /* Test distribution: fibonacci */ static int sec_dist_func11 (xd3_stream *stream, xd3_output *data) { int sum0 = 0; int sum1 = 1; int i, j, ret; for (i = 0; i < 33; ++i) { for (j = 0; j < (sum0 + sum1); ++j) { if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; } } sum0 = sum1; sum1 = j; } return 0; } static int test_secondary_decode (xd3_stream *stream, const xd3_sec_type *sec, usize_t input_size, usize_t compress_size, const uint8_t *dec_input, const uint8_t *dec_correct, uint8_t *dec_output) { int ret; xd3_sec_stream *dec_stream; const uint8_t *dec_input_used, *dec_input_end; uint8_t *dec_output_used, *dec_output_end; if ((dec_stream = sec->alloc (stream)) == NULL) { return ENOMEM; } if ((ret = sec->init (stream, dec_stream, 0)) != 0) { goto fail; } dec_input_used = dec_input; dec_input_end = dec_input + compress_size; dec_output_used = dec_output; dec_output_end = dec_output + input_size; if ((ret = sec->decode (stream, dec_stream, & dec_input_used, dec_input_end, & dec_output_used, dec_output_end))) { goto fail; } if (dec_input_used != dec_input_end) { stream->msg = "unused input"; ret = XD3_INTERNAL; goto fail; } if (dec_output_used != dec_output_end) { stream->msg = "unfinished output"; ret = XD3_INTERNAL; goto fail; } if (memcmp (dec_output, dec_correct, input_size) != 0) { stream->msg = "incorrect output"; ret = XD3_INTERNAL; goto fail; } fail: sec->destroy (stream, dec_stream); return ret; } static int test_secondary (xd3_stream *stream, const xd3_sec_type *sec, usize_t groups) { usize_t test_i; int ret; xd3_output *in_head, *out_head, *p; usize_t p_off, input_size, compress_size; uint8_t *dec_input = NULL, *dec_output = NULL, *dec_correct = NULL; xd3_sec_stream *enc_stream; xd3_sec_cfg cfg; memset (& cfg, 0, 
sizeof (cfg)); cfg.inefficient = 1; for (cfg.ngroups = 1; cfg.ngroups <= groups; cfg.ngroups += 1) { XPR(NTR "\n..."); for (test_i = 0; test_i < SIZEOF_ARRAY (sec_dists); test_i += 1) { mt_init (& static_mtrand, 0x9f73f7fc); in_head = xd3_alloc_output (stream, NULL); out_head = xd3_alloc_output (stream, NULL); enc_stream = sec->alloc (stream); dec_input = NULL; dec_output = NULL; dec_correct = NULL; if (in_head == NULL || out_head == NULL || enc_stream == NULL) { goto nomem; } if ((ret = sec_dists[test_i] (stream, in_head))) { goto fail; } if ((ret = sec->init (stream, enc_stream, 1)) != 0) { goto fail; } /* Encode data */ if ((ret = sec->encode (stream, enc_stream, in_head, out_head, & cfg))) { XPR(NT "test %u: encode: %s", test_i, stream->msg); goto fail; } /* Calculate sizes, allocate contiguous arrays for decoding */ input_size = xd3_sizeof_output (in_head); compress_size = xd3_sizeof_output (out_head); XPR(NTR "%.3f", 8.0 * (double) compress_size / (double) input_size); if ((dec_input = (uint8_t*) xd3_alloc (stream, compress_size, 1)) == NULL || (dec_output = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL || (dec_correct = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL) { goto nomem; } /* Fill the compressed data array */ for (p_off = 0, p = out_head; p != NULL; p_off += p->next, p = p->next_page) { memcpy (dec_input + p_off, p->base, p->next); } CHECK(p_off == compress_size); /* Fill the input data array */ for (p_off = 0, p = in_head; p != NULL; p_off += p->next, p = p->next_page) { memcpy (dec_correct + p_off, p->base, p->next); } CHECK(p_off == input_size); if ((ret = test_secondary_decode (stream, sec, input_size, compress_size, dec_input, dec_correct, dec_output))) { XPR(NT "test %u: decode: %s", test_i, stream->msg); goto fail; } /* Single-bit error test, only cover the first 10 bytes. * Some non-failures are expected in the Huffman case: * Changing the clclen array, for example, may not harm the * decoding. Really looking for faults here. 
*/ { int i; int bytes = min (compress_size, 10U); for (i = 0; i < bytes * 8; i += 1) { dec_input[i/8] ^= 1 << (i%8); if ((ret = test_secondary_decode (stream, sec, input_size, compress_size, dec_input, dec_correct, dec_output)) == 0) { /*XPR(NT "test %u: decode single-bit [%u/%u] error non-failure", test_i, i/8, i%8);*/ } dec_input[i/8] ^= 1 << (i%8); if ((i % (2*bytes)) == (2*bytes)-1) { DOT (); } } ret = 0; } if (0) { nomem: ret = ENOMEM; } fail: sec->destroy (stream, enc_stream); xd3_free_output (stream, in_head); xd3_free_output (stream, out_head); xd3_free (stream, dec_input); xd3_free (stream, dec_output); xd3_free (stream, dec_correct); if (ret != 0) { return ret; } } } return 0; } IF_FGK (static int test_secondary_fgk (xd3_stream *stream, usize_t gp) { return test_secondary (stream, & fgk_sec_type, gp); }) IF_DJW (static int test_secondary_huff (xd3_stream *stream, usize_t gp) { return test_secondary (stream, & djw_sec_type, gp); }) IF_LZMA (static int test_secondary_lzma (xd3_stream *stream, usize_t gp) { return test_secondary (stream, & lzma_sec_type, gp); }) #endif /*********************************************************************** TEST INSTRUCTION TABLE ***********************************************************************/ /* Test that xd3_choose_instruction() does the right thing for its code * table. 
*/ static int test_choose_instruction (xd3_stream *stream, int ignore) { int i; stream->code_table = (*stream->code_table_func) (); for (i = 0; i < 256; i += 1) { const xd3_dinst *d = stream->code_table + i; xd3_rinst prev, inst; CHECK(d->type1 > 0); memset (& prev, 0, sizeof (prev)); memset (& inst, 0, sizeof (inst)); if (d->type2 == 0) { inst.type = d->type1; if ((inst.size = d->size1) == 0) { inst.size = TESTBUFSIZE; } XD3_CHOOSE_INSTRUCTION (stream, NULL, & inst); if (inst.code2 != 0 || inst.code1 != i) { stream->msg = "wrong single instruction"; return XD3_INTERNAL; } } else { prev.type = d->type1; prev.size = d->size1; inst.type = d->type2; inst.size = d->size2; XD3_CHOOSE_INSTRUCTION (stream, & prev, & inst); if (prev.code2 != i) { stream->msg = "wrong double instruction"; return XD3_INTERNAL; } } } return 0; } /*********************************************************************** TEST INSTRUCTION TABLE CODING ***********************************************************************/ #if GENERIC_ENCODE_TABLES /* Test that encoding and decoding a code table works */ static int test_encode_code_table (xd3_stream *stream, int ignore) { int ret; const uint8_t *comp_data; usize_t comp_size; if ((ret = xd3_compute_alternate_table_encoding (stream, & comp_data, & comp_size))) { return ret; } stream->acache.s_near = __alternate_code_table_desc.near_modes; stream->acache.s_same = __alternate_code_table_desc.same_modes; if ((ret = xd3_apply_table_encoding (stream, comp_data, comp_size))) { return ret; } if (memcmp (stream->code_table, xd3_alternate_code_table (), sizeof (xd3_dinst) * 256) != 0) { stream->msg = "wrong code table reconstruction"; return XD3_INTERNAL; } return 0; } #endif /*********************************************************************** 64BIT STREAMING ***********************************************************************/ /* This test encodes and decodes a series of 1 megabyte windows, each * containing a long run of zeros along with a single 
xoff_t size * record to indicate the sequence. */ static int test_streaming (xd3_stream *in_stream, uint8_t *encbuf, uint8_t *decbuf, uint8_t *delbuf, usize_t megs) { xd3_stream estream, dstream; int ret; usize_t i, delsize, decsize; xd3_config cfg; xd3_init_config (& cfg, in_stream->flags); cfg.flags |= XD3_COMPLEVEL_6; if ((ret = xd3_config_stream (& estream, & cfg)) || (ret = xd3_config_stream (& dstream, & cfg))) { goto fail; } for (i = 0; i < megs; i += 1) { ((usize_t*) encbuf)[0] = i; if ((i % 200) == 199) { DOT (); } if ((ret = xd3_process_stream (1, & estream, xd3_encode_input, 0, encbuf, 1 << 20, delbuf, & delsize, 1 << 20))) { in_stream->msg = estream.msg; goto fail; } if ((ret = xd3_process_stream (0, & dstream, xd3_decode_input, 0, delbuf, delsize, decbuf, & decsize, 1 << 20))) { in_stream->msg = dstream.msg; goto fail; } if (decsize != 1 << 20 || memcmp (encbuf, decbuf, 1 << 20) != 0) { in_stream->msg = "wrong result"; ret = XD3_INTERNAL; goto fail; } } if ((ret = xd3_close_stream (& estream)) || (ret = xd3_close_stream (& dstream))) { goto fail; } fail: xd3_free_stream (& estream); xd3_free_stream (& dstream); return ret; } /* Run tests of data streaming of over and around 4GB of data. */ static int test_compressed_stream_overflow (xd3_stream *stream, int ignore) { int ret; int i; uint8_t *buf; if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; } memset (buf, 0, TWO_MEGS_AND_DELTA); for (i = 0; i < (2 << 20); i += 256) { int j; int off = mt_random(& static_mtrand) % 10; for (j = 0; j < 256; j++) { buf[i + j] = j + off; } } /* Test overflow of a 32-bit file offset. 
*/ if (SIZEOF_XOFF_T == 4) { ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), (1 << 12) + 1); if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow")) { ret = 0; } else { XPR(NT XD3_LIB_ERRMSG (stream, ret)); stream->msg = "expected overflow condition"; ret = XD3_INTERNAL; goto fail; } } /* Test transfer of exactly 32bits worth of data. */ if ((ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), 1 << 12))) { goto fail; } fail: free (buf); return ret; } /*********************************************************************** COMMAND LINE ***********************************************************************/ #if SHELL_TESTS /* For each pair of command templates in the array below, test that * encoding and decoding commands work. Also check for the expected * size delta, which should be approximately TEST_ADD_RATIO times the * file size created by test_make_inputs. Due to differences in the * application header, it is suppressed (-A) so that all delta files * are the same. 
*/ static int test_command_line_arguments (xd3_stream *stream, int ignore) { int i, ret; static const char* cmdpairs[] = { /* standard input, output */ "%s %s -A < %s > %s", "%s -d < %s > %s", "%s %s -A -e < %s > %s", "%s -d < %s > %s", "%s %s -A= encode < %s > %s", "%s decode < %s > %s", "%s %s -A -q encode < %s > %s", "%s -qdq < %s > %s", /* file input, standard output */ "%s %s -A= %s > %s", "%s -d %s > %s", "%s %s -A -e %s > %s", "%s -d %s > %s", "%s %s encode -A= %s > %s", "%s decode %s > %s", /* file input, output */ "%s %s -A= %s %s", "%s -d %s %s", "%s %s -A -e %s %s", "%s -d %s %s", "%s %s -A= encode %s %s", "%s decode %s %s", /* option placement */ "%s %s -A -f %s %s", "%s -f -d %s %s", "%s %s -e -A= %s %s", "%s -d -f %s %s", "%s %s -f encode -A= %s %s", "%s -f decode -f %s %s", }; char ecmd[TESTBUFSIZE], dcmd[TESTBUFSIZE]; int pairs = SIZEOF_ARRAY (cmdpairs) / 2; xoff_t tsize; xoff_t dsize; double ratio; mt_init (& static_mtrand, 0x9f73f7fc); for (i = 0; i < pairs; i += 1) { test_setup (); if ((ret = test_make_inputs (stream, NULL, & tsize))) { return ret; } snprintf_func (ecmd, TESTBUFSIZE, cmdpairs[2*i], program_name, test_softcfg_str, TEST_TARGET_FILE, TEST_DELTA_FILE); snprintf_func (dcmd, TESTBUFSIZE, cmdpairs[2*i+1], program_name, TEST_DELTA_FILE, TEST_RECON_FILE); /* Encode and decode. */ if ((ret = system (ecmd)) != 0) { XPR(NT "encode command: %s\n", ecmd); stream->msg = "encode cmd failed"; return XD3_INTERNAL; } if ((ret = system (dcmd)) != 0) { XPR(NT "decode command: %s\n", dcmd); stream->msg = "decode cmd failed"; return XD3_INTERNAL; } /* Compare the target file. */ if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; } ratio = (double) dsize / (double) tsize; /* Check that it is not too small, not too large. 
*/ if (ratio >= TEST_ADD_RATIO + TEST_EPSILON) { XPR(NT "test encode with size ratio %.4f, " "expected < %.4f (%"Q"u, %"Q"u)\n", ratio, TEST_ADD_RATIO + TEST_EPSILON, dsize, tsize); stream->msg = "strange encoding"; return XD3_INTERNAL; } if (ratio <= TEST_ADD_RATIO * (1.0 - 2 * TEST_EPSILON)) { XPR(NT "test encode with size ratio %.4f, " "expected > %.4f\n", ratio, TEST_ADD_RATIO - TEST_EPSILON); stream->msg = "strange encoding"; return XD3_INTERNAL; } /* Also check that test_compare_files works. The delta and original should * not be identical. */ if ((ret = test_compare_files (TEST_DELTA_FILE, TEST_TARGET_FILE)) == 0) { stream->msg = "broken test_compare_files"; return XD3_INTERNAL; } test_cleanup (); DOT (); } return 0; } static int check_vcdiff_header (xd3_stream *stream, const char *input, const char *line_start, const char *matches, int yes_or_no) { int ret; char vcmd[TESTBUFSIZE], gcmd[TESTBUFSIZE]; snprintf_func (vcmd, TESTBUFSIZE, "%s printhdr -f %s %s", program_name, input, TEST_RECON2_FILE); if ((ret = system (vcmd)) != 0) { XPR(NT "printhdr command: %s\n", vcmd); stream->msg = "printhdr cmd failed"; return XD3_INTERNAL; } snprintf_func (gcmd, TESTBUFSIZE, "grep \"%s.*%s.*\" %s > /dev/null", line_start, matches, TEST_RECON2_FILE); if (yes_or_no) { if ((ret = do_cmd (stream, gcmd))) { XPR(NT "%s\n", gcmd); return ret; } } else { if ((ret = do_fail (stream, gcmd))) { XPR(NT "%s\n", gcmd); return ret; } } return 0; } static int test_recode_command2 (xd3_stream *stream, int has_source, int variant, int change) { int has_adler32 = (variant & 0x1) != 0; int has_apphead = (variant & 0x2) != 0; int has_secondary = (variant & 0x4) != 0; int change_adler32 = (change & 0x1) != 0; int change_apphead = (change & 0x2) != 0; int change_secondary = (change & 0x4) != 0; int recoded_adler32 = change_adler32 ? !has_adler32 : has_adler32; int recoded_apphead = change_apphead ? !has_apphead : has_apphead; int recoded_secondary = change_secondary ? 
!has_secondary : has_secondary; char ecmd[TESTBUFSIZE], recmd[TESTBUFSIZE], dcmd[TESTBUFSIZE]; xoff_t tsize, ssize; int ret; test_setup (); if ((ret = test_make_inputs (stream, has_source ? & ssize : NULL, & tsize))) { return ret; } /* First encode */ snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s", program_name, test_softcfg_str, has_adler32 ? "" : "-n ", has_apphead ? "-A=encode_apphead " : "-A= ", has_secondary ? "-S djw " : "-S none ", has_source ? "-s " : "", has_source ? TEST_SOURCE_FILE : "", TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = system (ecmd)) != 0) { XPR(NT "encode command: %s\n", ecmd); stream->msg = "encode cmd failed"; return XD3_INTERNAL; } /* Now recode */ snprintf_func (recmd, TESTBUFSIZE, "%s recode %s -f %s %s %s %s %s", program_name, test_softcfg_str, recoded_adler32 ? "" : "-n ", !change_apphead ? "" : (recoded_apphead ? "-A=recode_apphead " : "-A= "), recoded_secondary ? "-S djw " : "-S none ", TEST_DELTA_FILE, TEST_COPY_FILE); if ((ret = system (recmd)) != 0) { XPR(NT "recode command: %s\n", recmd); stream->msg = "recode cmd failed"; return XD3_INTERNAL; } /* Check recode changes. */ if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF window indicator", "VCD_SOURCE", has_source))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_SECONDARY", recoded_secondary))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF window indicator", "VCD_ADLER32", /* Recode can't generate an adler32 * checksum, it can only preserve it or * remove it. 
*/ has_adler32 && recoded_adler32))) { return ret; } if (!change_apphead) { if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_APPHEADER", has_apphead))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF application header", "encode_apphead", has_apphead))) { return ret; } } else { if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_APPHEADER", recoded_apphead))) { return ret; } if (recoded_apphead && (ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF application header", "recode_apphead", 1))) { return ret; } } /* Now decode */ snprintf_func (dcmd, TESTBUFSIZE, "%s -fd %s %s %s %s ", program_name, has_source ? "-s " : "", has_source ? TEST_SOURCE_FILE : "", TEST_COPY_FILE, TEST_RECON_FILE); if ((ret = system (dcmd)) != 0) { XPR(NT "decode command: %s\n", dcmd); stream->msg = "decode cmd failed"; return XD3_INTERNAL; } /* Now compare. */ if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } return 0; } static int test_recode_command (xd3_stream *stream, int ignore) { /* Things to test: * - with and without a source file (recode does not change) * * (recode may or may not change -- 8 variations) * - with and without adler32 * - with and without app header * - with and without secondary */ int has_source; int variant; int change; int ret; for (has_source = 0; has_source < 2; has_source++) { for (variant = 0; variant < 8; variant++) { for (change = 0; change < 8; change++) { if ((ret = test_recode_command2 (stream, has_source, variant, change))) { return ret; } } DOT (); } } return 0; } #endif /*********************************************************************** EXTERNAL I/O DECOMPRESSION/RECOMPRESSION ***********************************************************************/ #if EXTERNAL_COMPRESSION /* This performs one step of the test_externally_compressed_io * function described below. 
It builds a pipe containing both Xdelta * and external compression/decompression that should not modify the * data passing through. */ static int test_compressed_pipe (xd3_stream *stream, main_extcomp *ext, char* buf, const char* comp_options, const char* decomp_options, int do_ext_recomp, const char* msg) { int ret; char decomp_buf[TESTBUFSIZE]; if (do_ext_recomp) { snprintf_func (decomp_buf, TESTBUFSIZE, " | %s %s", ext->decomp_cmdname, ext->decomp_options); } else { decomp_buf[0] = 0; } snprintf_func (buf, TESTBUFSIZE, "%s %s < %s | %s %s | %s %s%s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_TARGET_FILE, program_name, comp_options, program_name, decomp_options, decomp_buf, TEST_RECON_FILE); if ((ret = system (buf)) != 0) { stream->msg = msg; return XD3_INTERNAL; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return XD3_INTERNAL; } DOT (); return 0; } /* We want to test that a pipe such as: * * --> | gzip -cf | xdelta3 -cf | xdelta3 -dcf | gzip -dcf | --> * * is transparent, i.e., does not modify the stream of data. However, * we also want to verify that at the center the data is properly * compressed, i.e., that we do not just have a re-compressed gzip * format, that we have an VCDIFF format. We do this in two steps. * First test the above pipe, then test with suppressed output * recompression (-D). The result should be the original input: * * --> | gzip -cf | xdelta3 -cf | xdelta3 -Ddcf | --> * * Finally we want to test that -D also disables input decompression: * * --> | gzip -cf | xdelta3 -Dcf | xdelta3 -Ddcf | gzip -dcf | --> */ static int test_externally_compressed_io (xd3_stream *stream, int ignore) { usize_t i; int ret; char buf[TESTBUFSIZE]; mt_init (& static_mtrand, 0x9f73f7fc); if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1) { main_extcomp *ext = & extcomp_types[i]; /* Test for the existence of the external command first, if not skip. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s %s < /dev/null > /dev/null", ext->recomp_cmdname, ext->recomp_options); if ((ret = system (buf)) != 0) { XPR(NT "%s=0", ext->recomp_cmdname); continue; } if ((ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-dcfq", 1, "compression failed: identity pipe")) || (ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-Rdcfq", 0, "compression failed: without recompression")) || (ret = test_compressed_pipe (stream, ext, buf, "-Dcfq", "-Rdcfq", 1, "compression failed: without decompression"))) { return ret; } } return 0; } /* This tests the proper functioning of external decompression for * source files. The source and target files are identical and * compressed by gzip. Decoding such a delta with recompression * disbaled (-R) should produce the original, uncompressed * source/target file. Then it checks with output recompression * enabled--in this case the output should be a compressed copy of the * original source/target file. Then it checks that encoding with * decompression disabled works--the compressed files are identical * and decoding them should always produce a compressed output, * regardless of -R since the encoded delta file had decompression * disabled.. */ static int test_source_decompression (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; const main_extcomp *ext; xoff_t dsize; mt_init (& static_mtrand, 0x9f73f7fc); test_setup (); if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Use gzip. */ if ((ext = main_get_compressor ("G")) == NULL) { XPR(NT "skipped"); return 0; } /* Save an uncompressed copy. */ if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; } /* Compress the source. */ snprintf_func (buf, TESTBUFSIZE, "%s -1 %s < %s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_COPY_FILE, TEST_SOURCE_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Compress the target. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -9 %s < %s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_COPY_FILE, TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now the two identical files are compressed. Delta-encode the target, * with decompression. */ snprintf_func (buf, TESTBUFSIZE, "%s -e -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Check that the compressed file is small (b/c inputs are * identical). */ if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; } /* Deltas for identical files should be very small. */ if (dsize > 200) { XPR(NT "external compression did not happen\n"); stream->msg = "external compression did not happen"; return XD3_INTERNAL; } /* Decode the delta file with recompression disabled, should get an * uncompressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -v -dq -R -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON_FILE))) { return ret; } /* Decode the delta file with recompression, should get a compressed file * out. But we can't compare compressed files directly. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -v -dqf -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s %s < %s > %s", ext->decomp_cmdname, ext->decomp_options, TEST_RECON_FILE, TEST_RECON2_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON2_FILE))) { return ret; } /* Encode with decompression disabled */ snprintf_func (buf, TESTBUFSIZE, "%s -e -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Decode the delta file with decompression disabled, should get the * identical compressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -d -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } test_cleanup(); return 0; } #endif /*********************************************************************** FORCE, STDOUT ***********************************************************************/ /* This tests that output will not overwrite an existing file unless * -f was specified. The test is for encoding (the same code handles * it for decoding). */ static int test_force_behavior (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; /* Create empty target file */ test_setup (); snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode to delta file */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode again, should fail. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -q -e %s %s ", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_fail (stream, buf))) { return ret; } /* Force it, should succeed. */ snprintf_func (buf, TESTBUFSIZE, "%s -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup(); return 0; } /* This checks the proper operation of the -c flag. When specified * the default output becomes stdout, otherwise the input must be * provided (encode) or it may be defaulted (decode w/ app header). */ static int test_stdout_behavior (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup(); snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Without -c, encode writes to delta file */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* With -c, encode writes to stdout */ snprintf_func (buf, TESTBUFSIZE, "%s -e -c %s > %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Without -c, decode writes to target file name, but it fails because the * file exists. */ snprintf_func (buf, TESTBUFSIZE, "%s -q -d %s ", program_name, TEST_DELTA_FILE); if ((ret = do_fail (stream, buf))) { return ret; } /* With -c, decode writes to stdout */ snprintf_func (buf, TESTBUFSIZE, "%s -d -c %s > /dev/null", program_name, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup(); return 0; } /* This tests that the no-output flag (-J) works. 
*/ static int test_no_output (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup (); snprintf_func (buf, TESTBUFSIZE, "touch %s && chmod 0000 %s", TEST_NOPERM_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Try no_output encode w/out unwritable output file */ snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now really write the delta to test decode no-output */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup (); return 0; } /* This tests that the default appheader works */ static int test_appheader (xd3_stream *stream, int ignore) { int i; int ret; char buf[TESTBUFSIZE]; char bogus[TESTBUFSIZE]; xoff_t ssize, tsize; test_setup (); if ((ret = test_make_inputs (stream, &ssize, &tsize))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e -s %s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_copy_to (program_name, TEST_RECON2_FILE))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "chmod 0700 %s", TEST_RECON2_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; } if ((ret = test_copy_to (TEST_SOURCE_FILE, 
TEST_TARGET_FILE))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) == 0) { return XD3_INVALID; // I.e., files are different! } // Test that the target file is restored. snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)", TEST_RECON2_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) != 0) { return ret; } // Test a malicious string w/ entries > 4 in the appheader by having // the encoder write it: for (i = 0; i < TESTBUFSIZE / 4; ++i) { bogus[2*i] = 'G'; bogus[2*i+1] = '/'; } bogus[TESTBUFSIZE/2-1] = 0; snprintf_func (buf, TESTBUFSIZE, "%s -q -f -A=%s -e -s %s %s %s", program_name, bogus, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } // Then read it: snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)", TEST_RECON2_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf)) == 0) { return XD3_INVALID; // Impossible } if (!WIFEXITED(ret)) { return XD3_INVALID; // Must have crashed! } return 0; } /*********************************************************************** Source identical optimization ***********************************************************************/ /* Computing a delta should be fastest when the two inputs are * identical, this checks it. The library is called to compute a * delta between a 10000 byte file, 1000 byte winsize, 500 byte source * blocksize. The same buffer is used for both source and target. */ static int test_identical_behavior (xd3_stream *stream, int ignore) { #define IDB_TGTSZ 10000 /* Not a power of two b/c of hard-coded expectations below. 
*/ #define IDB_BLKSZ 512 #define IDB_WINSZ 1000 #define IDB_DELSZ 1000 #define IDB_WINCNT (IDB_TGTSZ / IDB_WINSZ) int ret, i; uint8_t buf[IDB_TGTSZ]; uint8_t del[IDB_DELSZ]; uint8_t rec[IDB_TGTSZ]; xd3_source source; int nextencwin = 0; int winstarts = 0, winfinishes = 0; usize_t delpos = 0, recsize; xd3_config config; memset(&source, 0, sizeof(source)); for (i = 0; i < IDB_TGTSZ; i += 1) { buf[i] = (uint8_t) mt_random (&static_mtrand); } stream->winsize = IDB_WINSZ; source.blksize = IDB_BLKSZ; source.name = ""; source.curblk = NULL; source.curblkno = 0; if ((ret = xd3_set_source (stream, & source))) { goto fail; } /* Compute an delta between identical source and targets. */ for (;;) { ret = xd3_encode_input (stream); if (ret == XD3_INPUT) { xd3_avail_input (stream, buf + (IDB_WINSZ * nextencwin), IDB_WINSZ); nextencwin += 1; continue; } if (ret == XD3_GETSRCBLK) { source.curblkno = source.getblkno; source.onblk = IDB_BLKSZ; source.curblk = buf + source.getblkno * IDB_BLKSZ; continue; } if (ret == XD3_WINSTART) { winstarts++; continue; } if (ret == XD3_WINFINISH) { winfinishes++; if (winfinishes == IDB_WINCNT) { break; } continue; } if (ret != XD3_OUTPUT) { goto fail; } CHECK(delpos + stream->avail_out <= IDB_DELSZ); memcpy (del + delpos, stream->next_out, stream->avail_out); delpos += stream->avail_out; xd3_consume_output (stream); } CHECK(winfinishes == IDB_WINCNT); CHECK(winstarts == IDB_WINCNT); CHECK(nextencwin == IDB_WINCNT); /* Reset. */ memset(&source, 0, sizeof(source)); source.blksize = IDB_TGTSZ; source.onblk = IDB_TGTSZ; source.curblk = buf; source.curblkno = 0; if ((ret = xd3_close_stream (stream))) { goto fail; } xd3_free_stream (stream); xd3_init_config (& config, 0); if ((ret = xd3_config_stream (stream, & config))) { goto fail; } if ((ret = xd3_set_source_and_size (stream, & source, IDB_TGTSZ))) { goto fail; } /* Decode. */ if ((ret = xd3_decode_stream (stream, del, delpos, rec, & recsize, IDB_TGTSZ))) { goto fail; } /* Check result size and data. 
*/ if (recsize != IDB_TGTSZ) { stream->msg = "wrong size reconstruction"; goto fail; } if (memcmp (rec, buf, IDB_TGTSZ) != 0) { stream->msg = "wrong data reconstruction"; goto fail; } /* Check that there was one copy per window. */ IF_DEBUG (if (stream->n_scpy != IDB_WINCNT || stream->n_add != 0 || stream->n_run != 0) { stream->msg = "wrong copy count"; goto fail; }); /* Check that no checksums were computed because the initial match was presumed. */ IF_DEBUG (if (stream->large_ckcnt != 0) { stream->msg = "wrong checksum behavior"; goto fail; }); ret = 0; fail: return ret; } /*********************************************************************** String matching test ***********************************************************************/ /* Check particular matching behaviors by calling * xd3_string_match_soft directly with specific arguments. */ typedef struct _string_match_test string_match_test; typedef enum { SM_NONE = 0, SM_LAZY = (1 << 1), } string_match_flags; struct _string_match_test { const char *input; int flags; const char *result; }; static const string_match_test match_tests[] = { /* nothing */ { "1234567890", SM_NONE, "" }, /* basic run, copy */ { "11111111112323232323", SM_NONE, "R0/10 C12/8@10" }, /* no run smaller than MIN_RUN=8 */ { "1111111", SM_NONE, "C1/6@0" }, { "11111111", SM_NONE, "R0/8" }, /* simple promotion: the third copy address depends on promotion */ { "ABCDEF_ABCDEF^ABCDEF", SM_NONE, "C7/6@0 C14/6@7" }, /* { "ABCDEF_ABCDEF^ABCDEF", SM_PROMOTE, "C7/6@0 C14/6@0" }, forgotten */ /* simple lazy: there is a better copy starting with "23 X" than "123 " */ { "123 23 XYZ 123 XYZ", SM_NONE, "C11/4@0" }, { "123 23 XYZ 123 XYZ", SM_LAZY, "C11/4@0 C12/6@4" }, /* trylazy: no lazy matches unless there are at least two characters beyond * the first match */ { "2123_121212", SM_LAZY, "C7/4@5" }, { "2123_1212123", SM_LAZY, "C7/4@5" }, { "2123_1212123_", SM_LAZY, "C7/4@5 C8/5@0" }, /* trylazy: no lazy matches if the copy is >= MAXLAZY=10 */ { 
"2123_121212123_", SM_LAZY, "C7/6@5 C10/5@0" }, { "2123_12121212123_", SM_LAZY, "C7/8@5 C12/5@0" }, { "2123_1212121212123_", SM_LAZY, "C7/10@5" }, /* lazy run: check a run overlapped by a longer copy */ { "11111112 111111112 1", SM_LAZY, "C1/6@0 R9/8 C10/10@0" }, /* lazy match: match_length,run_l >= min_match tests, shouldn't get any * copies within the run, no run within the copy */ { "^________^________ ", SM_LAZY, "R1/8 C9/9@0" }, /* chain depth: it only goes back 10. this checks that the 10th match hits * and the 11th misses. */ { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/5@0" }, { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234>1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/4@45 C55/4@50" }, /* ssmatch test */ { "ABCDE___ABCDE*** BCDE***", SM_NONE, "C8/5@0 C17/4@1" }, /*{ "ABCDE___ABCDE*** BCDE***", SM_SSMATCH, "C8/5@0 C17/7@9" }, forgotten */ }; static int test_string_matching (xd3_stream *stream, int ignore) { usize_t i; int ret; xd3_config config; char rbuf[TESTBUFSIZE]; for (i = 0; i < SIZEOF_ARRAY (match_tests); i += 1) { const string_match_test *test = & match_tests[i]; char *rptr = rbuf; usize_t len = (usize_t) strlen (test->input); xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 4; config.smatcher_soft.large_step = 4; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 10; config.smatcher_soft.small_lchain = 10; config.smatcher_soft.max_lazy = (test->flags & SM_LAZY) ? 10 : 0; config.smatcher_soft.long_enough = 10; if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_encode_init_full (stream))) { return ret; } xd3_avail_input (stream, (uint8_t*)test->input, len); if ((ret = stream->smatcher.string_match (stream))) { return ret; } *rptr = 0; while (! 
xd3_rlist_empty (& stream->iopt_used)) { xd3_rinst *inst = xd3_rlist_pop_front (& stream->iopt_used); switch (inst->type) { case XD3_RUN: *rptr++ = 'R'; break; case XD3_CPY: *rptr++ = 'C'; break; default: CHECK(0); } snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d", inst->pos, inst->size); rptr += strlen (rptr); if (inst->type == XD3_CPY) { *rptr++ = '@'; snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%"Q"d", inst->addr); rptr += strlen (rptr); } *rptr++ = ' '; xd3_rlist_push_back (& stream->iopt_free, inst); } if (rptr != rbuf) { rptr -= 1; *rptr = 0; } if (strcmp (rbuf, test->result) != 0) { XPR(NT "test %u: expected %s: got %s", i, test->result, rbuf); stream->msg = "wrong result"; return XD3_INTERNAL; } } return 0; } /* * This is a test for many overlapping instructions. It must be a lazy * matcher. */ static int test_iopt_flush_instructions (xd3_stream *stream, int ignore) { int ret, i; usize_t tpos = 0; usize_t delta_size, recon_size; xd3_config config; uint8_t target[TESTBUFSIZE]; uint8_t delta[TESTBUFSIZE]; uint8_t recon[TESTBUFSIZE]; xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 16; config.smatcher_soft.large_step = 16; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 128; config.smatcher_soft.small_lchain = 16; config.smatcher_soft.max_lazy = 8; config.smatcher_soft.long_enough = 128; if ((ret = xd3_config_stream (stream, & config))) { return ret; } for (i = 1; i < 250; i++) { target[tpos++] = i; target[tpos++] = i+1; target[tpos++] = i+2; target[tpos++] = i+3; target[tpos++] = 0; } for (i = 1; i < 253; i++) { target[tpos++] = i; } if ((ret = xd3_encode_stream (stream, target, tpos, delta, & delta_size, sizeof (delta)))) { return ret; } xd3_free_stream(stream); if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_decode_stream (stream, delta, delta_size, recon, & recon_size, sizeof (recon)))) { return ret; } CHECK(tpos == 
recon_size); CHECK(memcmp(target, recon, recon_size) == 0); return 0; } /* * This tests the 32/64bit ambiguity for source-window matching. */ static int test_source_cksum_offset (xd3_stream *stream, int ignore) { xd3_source source; // Inputs are: struct { xoff_t cpos; // stream->srcwin_cksum_pos; xoff_t ipos; // stream->total_in; xoff_t size; // stream->src->size; usize_t input; // input 32-bit offset xoff_t output; // output 64-bit offset } cksum_test[] = { // If cpos is <= 2^32 { 1, 1, 1, 1, 1 }, #if XD3_USE_LARGEFILE64 // cpos ipos size input output // 0x____xxxxxULL, 0x____xxxxxULL, 0x____xxxxxULL, 0x___xxxxxUL, 0x____xxxxxULL { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0x00000000UL, 0x100000000ULL }, { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0xF0000000UL, 0x0F0000000ULL }, { 0x100200000ULL, 0x100100000ULL, 0x100200000ULL, 0x00300000UL, 0x000300000ULL }, { 25771983104ULL, 25770000000ULL, 26414808769ULL, 2139216707UL, 23614053187ULL }, #endif { 0, 0, 0, 0, 0 }, }, *test_ptr; stream->src = &source; for (test_ptr = cksum_test; test_ptr->cpos; test_ptr++) { xoff_t r; stream->srcwin_cksum_pos = test_ptr->cpos; stream->total_in = test_ptr->ipos; r = xd3_source_cksum_offset(stream, test_ptr->input); CHECK(r == test_ptr->output); } return 0; } static int test_in_memory (xd3_stream *stream, int ignore) { // test_text is 256 bytes uint8_t ibuf[sizeof(test_text)]; uint8_t dbuf[sizeof(test_text)]; uint8_t obuf[sizeof(test_text)]; usize_t size = sizeof(test_text); usize_t dsize, osize; int r1, r2; int eflags = SECONDARY_DJW ? 
XD3_SEC_DJW : 0; memcpy(ibuf, test_text, size); memset(ibuf + 128, 0, 16); r1 = xd3_encode_memory(ibuf, size, test_text, size, dbuf, &dsize, size, eflags); r2 = xd3_decode_memory(dbuf, dsize, test_text, size, obuf, &osize, size, 0); if (r1 != 0 || r2 != 0 || dsize >= (size/2) || dsize < 1 || osize != size) { stream->msg = "encode/decode size error"; return XD3_INTERNAL; } if (memcmp(obuf, ibuf, size) != 0) { stream->msg = "encode/decode data error"; return XD3_INTERNAL; } return 0; } /*********************************************************************** TEST MAIN ***********************************************************************/ static int xd3_selftest (void) { #define DO_TEST(fn,flags,arg) \ do { \ xd3_stream stream; \ xd3_config config; \ xd3_init_config (& config, flags); \ XPR(NT "testing " #fn "%s...", \ flags ? (" (" #flags ")") : ""); \ if ((ret = xd3_config_stream (& stream, & config) == 0) && \ (ret = test_ ## fn (& stream, arg)) == 0) { \ XPR(NTR " success\n"); \ } else { \ XPR(NTR " failed: %s: %s\n", xd3_errstring (& stream), \ xd3_mainerror (ret)); } \ xd3_free_stream (& stream); \ if (ret != 0) { goto failure; } \ } while (0) int ret; DO_TEST (random_numbers, 0, 0); DO_TEST (decode_integer_end_of_input, 0, 0); DO_TEST (decode_integer_overflow, 0, 0); DO_TEST (encode_decode_uint32_t, 0, 0); DO_TEST (encode_decode_uint64_t, 0, 0); DO_TEST (usize_t_overflow, 0, 0); DO_TEST (forward_match, 0, 0); DO_TEST (address_cache, 0, 0); IF_GENCODETBL (DO_TEST (address_cache, XD3_ALT_CODE_TABLE, 0)); DO_TEST (string_matching, 0, 0); DO_TEST (choose_instruction, 0, 0); DO_TEST (identical_behavior, 0, 0); DO_TEST (in_memory, 0, 0); IF_GENCODETBL (DO_TEST (choose_instruction, XD3_ALT_CODE_TABLE, 0)); IF_GENCODETBL (DO_TEST (encode_code_table, 0, 0)); DO_TEST (iopt_flush_instructions, 0, 0); DO_TEST (source_cksum_offset, 0, 0); DO_TEST (decompress_single_bit_error, 0, 3); DO_TEST (decompress_single_bit_error, XD3_ADLER32, 3); IF_LZMA (DO_TEST 
(decompress_single_bit_error, XD3_SEC_LZMA, 54)); IF_FGK (DO_TEST (decompress_single_bit_error, XD3_SEC_FGK, 3)); IF_DJW (DO_TEST (decompress_single_bit_error, XD3_SEC_DJW, 8)); /* There are many expected non-failures for ALT_CODE_TABLE because * not all of the instruction codes are used. */ IF_GENCODETBL ( DO_TEST (decompress_single_bit_error, XD3_ALT_CODE_TABLE, 224)); #if SHELL_TESTS DO_TEST (force_behavior, 0, 0); DO_TEST (stdout_behavior, 0, 0); DO_TEST (no_output, 0, 0); DO_TEST (appheader, 0, 0); DO_TEST (command_line_arguments, 0, 0); #if EXTERNAL_COMPRESSION DO_TEST (source_decompression, 0, 0); DO_TEST (externally_compressed_io, 0, 0); #endif DO_TEST (recode_command, 0, 0); #endif IF_LZMA (DO_TEST (secondary_lzma, 0, 1)); IF_DJW (DO_TEST (secondary_huff, 0, DJW_MAX_GROUPS)); IF_FGK (DO_TEST (secondary_fgk, 0, 1)); DO_TEST (compressed_stream_overflow, 0, 0); IF_LZMA (DO_TEST (compressed_stream_overflow, XD3_SEC_LZMA, 0)); failure: test_cleanup (); return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE; #undef DO_TEST }
test_compressed_stream_overflow (xd3_stream *stream, int ignore) { int ret; int i; uint8_t *buf; if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; } memset (buf, 0, TWO_MEGS_AND_DELTA); for (i = 0; i < (2 << 20); i += 256) { int j; int off = mt_random(& static_mtrand) % 10; for (j = 0; j < 256; j++) { buf[i + j] = j + off; } } /* Test overflow of a 32-bit file offset. */ if (SIZEOF_XOFF_T == 4) { ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), (1 << 12) + 1); if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow")) { ret = 0; } else { XPR(NT XD3_LIB_ERRMSG (stream, ret)); stream->msg = "expected overflow condition"; ret = XD3_INTERNAL; goto fail; } } /* Test transfer of exactly 32bits worth of data. */ if ((ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), 1 << 12))) { goto fail; } fail: free (buf); return ret; }
test_compressed_stream_overflow (xd3_stream *stream, int ignore) { int ret; int i; uint8_t *buf; if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; } memset (buf, 0, TWO_MEGS_AND_DELTA); for (i = 0; i < (2 << 20); i += 256) { int j; int off = mt_random(& static_mtrand) % 10; for (j = 0; j < 256; j++) { buf[i + j] = j + off; } } /* Test overflow of a 32-bit file offset. */ if (SIZEOF_XOFF_T == 4) { ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), (1 << 12) + 1); if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow")) { ret = 0; } else { XPR(NT XD3_LIB_ERRMSG (stream, ret)); stream->msg = "expected overflow condition"; ret = XD3_INTERNAL; goto fail; } } /* Test transfer of exactly 32bits worth of data. */ if ((ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), 1 << 12))) { goto fail; } fail: free (buf); return ret; }
{'added': [(2, ' * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.'), (57, '\t(1812433253UL * (mt->mt_buffer_[i-1] ^'), (72, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (74, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^'), (78, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (80, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^'), (83, ' y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |'), (85, ' mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^'), (169, ' return ret;'), (260, ' usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +'), (261, ' TEST_FILE_MEAN / 2;'), (262, ' usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +'), (263, ' TEST_FILE_MEAN / 2;'), (414, '\t XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\\n",'), (426, ' if (diffs != 0)'), (434, 'test_copy_to (const char *from, const char *to)'), (439, ' snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", from, to);'), (449, 'static int'), (450, 'test_save_copy (const char *origname)'), (451, '{'), (452, ' return test_copy_to(origname, TEST_COPY_FILE);'), (453, '}'), (454, ''), (510, ' if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !='), (1665, ' for (i = 0; i < (2 << 20); i += 256)'), (1669, ' for (j = 0; j < 256; j++)'), (1694, ' if ((ret = test_streaming (stream,'), (1695, '\t\t\t buf,'), (1696, '\t\t\t buf + (1 << 20),'), (1697, '\t\t\t buf + (2 << 20),'), (1698, '\t\t\t 1 << 12)))'), (1900, ' snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",'), (1921, '\t !change_apphead ? 
"" :'), (2372, '/* This tests that the default appheader works */'), (2373, 'static int'), (2374, 'test_appheader (xd3_stream *stream, int ignore)'), (2375, '{'), (2376, ' int i;'), (2377, ' int ret;'), (2378, ' char buf[TESTBUFSIZE];'), (2379, ' char bogus[TESTBUFSIZE];'), (2380, ' xoff_t ssize, tsize;'), (2381, ' test_setup ();'), (2382, ''), (2383, ' if ((ret = test_make_inputs (stream, &ssize, &tsize))) { return ret; }'), (2384, ''), (2385, ' snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e -s %s %s %s", program_name,'), (2386, '\t\t TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);'), (2387, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2388, ''), (2389, ' if ((ret = test_copy_to (program_name, TEST_RECON2_FILE))) { return ret; }'), (2390, ''), (2391, ' snprintf_func (buf, TESTBUFSIZE, "chmod 0700 %s", TEST_RECON2_FILE);'), (2392, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2393, ''), (2394, ' if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; }'), (2395, ' if ((ret = test_copy_to (TEST_SOURCE_FILE, TEST_TARGET_FILE))) { return ret; }'), (2396, ''), (2397, ' if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) == 0)'), (2398, ' {'), (2399, ' return XD3_INVALID; // I.e., files are different!'), (2400, ' }'), (2401, ''), (2402, ' // Test that the target file is restored.'), (2403, ' snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)",'), (2404, '\t\t TEST_RECON2_FILE,'), (2405, '\t\t TEST_DELTA_FILE);'), (2406, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2407, ''), (2408, ' if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) != 0)'), (2409, ' {'), (2410, ' return ret;'), (2411, ' }'), (2412, ''), (2413, ' // Test a malicious string w/ entries > 4 in the appheader by having'), (2414, ' // the encoder write it:'), (2415, ' for (i = 0; i < TESTBUFSIZE / 4; ++i)'), (2416, ' {'), (2417, " bogus[2*i] = 'G';"), (2418, " bogus[2*i+1] = '/';"), (2419, ' }'), (2420, ' bogus[TESTBUFSIZE/2-1] 
= 0;'), (2421, ''), (2422, ' snprintf_func (buf, TESTBUFSIZE,'), (2423, '\t\t "%s -q -f -A=%s -e -s %s %s %s", program_name, bogus,'), (2424, '\t\t TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);'), (2425, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2426, ' // Then read it:'), (2427, ' snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)",'), (2428, '\t\t TEST_RECON2_FILE,'), (2429, '\t\t TEST_DELTA_FILE);'), (2430, ' if ((ret = do_cmd (stream, buf)) == 0)'), (2431, ' {'), (2432, ' return XD3_INVALID; // Impossible'), (2433, ' }'), (2434, ' if (!WIFEXITED(ret))'), (2435, ' {'), (2436, ' return XD3_INVALID; // Must have crashed!'), (2437, ' }'), (2438, ''), (2439, ' return 0;'), (2440, '}'), (2441, ''), (2684, '\t snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d",'), (2929, ' DO_TEST (appheader, 0, 0);')], 'deleted': [(2, ' * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.'), (57, '\t(1812433253UL * (mt->mt_buffer_[i-1] ^'), (72, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (74, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^'), (78, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (80, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^'), (83, ' y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |'), (85, ' mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^'), (169, ' return XD3_INTERNAL;'), (260, ' usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2;'), (261, ' usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2;'), (412, '\t XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\\n",'), (424, ' if (diffs != 0)'), (432, 'test_save_copy (const char *origname)'), (437, ' snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", origname, TEST_COPY_FILE);'), (502, ' if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !='), (1657, ' for (i = 0; i < (2 << 20); i += 256)'), (1661, ' for (j = 0; j < 256; j++)'), (1686, ' if ((ret = test_streaming (stream,'), (1687, '\t\t\t 
buf,'), (1688, '\t\t\t buf + (1 << 20),'), (1689, '\t\t\t buf + (2 << 20),'), (1690, '\t\t\t 1 << 12)))'), (1892, ' snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",'), (1913, '\t !change_apphead ? "" :'), (2606, '\t snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d",')]}
105
26
2,023
12,828
43
252
8
https://github.com/jmacd/xdelta-devel
CVE-2014-9765
CWE-119
1,878
socket.c
C
SYSCALL_DEFINE6
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/xattr.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> #include <net/busy_poll.h> #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sysctl_net_busy_read __read_mostly; unsigned int sysctl_net_busy_poll __read_mostly; #endif static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, 
unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .aio_read = sock_aio_read, .aio_write = sock_aio_write, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .open = sock_no_open, /* special open code to disallow open via /proc */ .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. 
*/ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); } /** * move_addr_to_user - copy an address to user space * @kaddr: kernel space address * @klen: length of address in kernel * @uaddr: user space address * @ulen: pointer to user length field * * The value pointed to by ulen on entry is the buffer length available. * This is overwritten with the buffer space used. -EINVAL is returned * if an overlong buffer is specified or a negative buffer size. -EFAULT * is returned if either the buffer or the length field are not * accessible. * After copying the data up to the limit the user specifies, the true * length of the data is written over the length limit the user * specified. Zero is returned for a success. */ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen, void __user *uaddr, int __user *ulen) { int err; int len; err = get_user(len, ulen); if (err) return err; if (len > klen) len = klen; if (len < 0 || len > sizeof(struct sockaddr_storage)) return -EINVAL; if (len) { if (audit_sockaddr(klen, kaddr)) return -ENOMEM; if (copy_to_user(uaddr, kaddr, len)) return -EFAULT; } /* * "fromlen shall refer to the value before truncation.." 
* 1003.1g */ return __put_user(klen, ulen); } static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; wq = kmalloc(sizeof(*wq), GFP_KERNEL); if (!wq) { kmem_cache_free(sock_inode_cachep, ei); return NULL; } init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; RCU_INIT_POINTER(ei->socket.wq, wq); ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei->socket.sk = NULL; ei->socket.file = NULL; return &ei->vfs_inode; } static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); wq = rcu_dereference_protected(ei->socket.wq, 1); kfree_rcu(wq, rcu); kmem_cache_free(sock_inode_cachep, ei); } static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), init_once); if (sock_inode_cachep == NULL) return -ENOMEM; return 0; } static const struct super_operations sockfs_ops = { .alloc_inode = sock_alloc_inode, .destroy_inode = sock_destroy_inode, .statfs = simple_statfs, }; /* * sockfs_dname() is called from d_path(). 
*/ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", dentry->d_inode->i_ino); } static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; static struct dentry *sockfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "socket:", &sockfs_ops, &sockfs_dentry_operations, SOCKFS_MAGIC); } static struct vfsmount *sock_mnt __read_mostly; static struct file_system_type sock_fs_type = { .name = "sockfs", .mount = sockfs_mount, .kill_sb = kill_anon_super, }; /* * Obtains the first available file descriptor and sets it up for use. * * These functions create file structures and maps them to fd space * of the current process. On success it returns file descriptor * and file struct implicitly stored in sock->file. * Note that another thread may close file descriptor before we return * from this function. We use the fact that now we do not refer * to socket after mapping. If one day we will need it, this * function will increment ref. count on file by 1. * * In any case returned fd MAY BE not valid! * This race condition is unavoidable * with shared fd spaces, we cannot solve it inside kernel, * but we take care of internal coherence yet. 
*/ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname) { struct qstr name = { .name = "" }; struct path path; struct file *file; if (dname) { name.name = dname; name.len = strlen(name.name); } else if (sock->sk) { name.name = sock->sk->sk_prot_creator->name; name.len = strlen(name.name); } path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); if (unlikely(!path.dentry)) return ERR_PTR(-ENOMEM); path.mnt = mntget(sock_mnt); d_instantiate(path.dentry, SOCK_INODE(sock)); SOCK_INODE(sock)->i_fop = &socket_file_ops; file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops); if (unlikely(IS_ERR(file))) { /* drop dentry, keep inode */ ihold(path.dentry->d_inode); path_put(&path); return file; } sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->private_data = sock; return file; } EXPORT_SYMBOL(sock_alloc_file); static int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; int fd = get_unused_fd_flags(flags); if (unlikely(fd < 0)) return fd; newfile = sock_alloc_file(sock, flags, NULL); if (likely(!IS_ERR(newfile))) { fd_install(fd, newfile); return fd; } put_unused_fd(fd); return PTR_ERR(newfile); } struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; } EXPORT_SYMBOL(sock_from_file); /** * sockfd_lookup - Go from a file number to its socket slot * @fd: file handle * @err: pointer to an error code return * * The file handle passed in is locked and the socket it is bound * too is returned. If an error occurs the err pointer is overwritten * with a negative errno code and NULL is returned. The function checks * for both invalid handles and passing a handle which is not a socket. * * On a success the socket object pointer is returned. 
*/ struct socket *sockfd_lookup(int fd, int *err) { struct file *file; struct socket *sock; file = fget(fd); if (!file) { *err = -EBADF; return NULL; } sock = sock_from_file(file, err); if (!sock) fput(file); return sock; } EXPORT_SYMBOL(sockfd_lookup); static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct file *file; struct socket *sock; *err = -EBADF; file = fget_light(fd, fput_needed); if (file) { sock = sock_from_file(file, err); if (sock) return sock; fput_light(file, *fput_needed); } return NULL; } #define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname" #define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX) #define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1) static ssize_t sockfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) { const char *proto_name; size_t proto_size; int error; error = -ENODATA; if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) { proto_name = dentry->d_name.name; proto_size = strlen(proto_name); if (value) { error = -ERANGE; if (proto_size + 1 > size) goto out; strncpy(value, proto_name, proto_size + 1); } error = proto_size + 1; } out: return error; } static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { ssize_t len; ssize_t used = 0; len = security_inode_listsecurity(dentry->d_inode, buffer, size); if (len < 0) return len; used += len; if (buffer) { if (size < used) return -ERANGE; buffer += len; } len = (XATTR_NAME_SOCKPROTONAME_LEN + 1); used += len; if (buffer) { if (size < used) return -ERANGE; memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len); buffer += len; } return used; } static const struct inode_operations sockfs_inode_ops = { .getxattr = sockfs_getxattr, .listxattr = sockfs_listxattr, }; /** * sock_alloc - allocate a socket * * Allocate a new inode and socket object. The two are bound together * and initialised. The socket is then returned. 
If we are out of inodes * NULL is returned. */ static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode_pseudo(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_op = &sockfs_inode_ops; this_cpu_add(sockets_in_use, 1); return sock; } /* * In theory you can't get an open on this inode, but /proc provides * a back door. Remember to keep it shut otherwise you'll let the * creepy crawlies in. */ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) { return -ENXIO; } const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, .llseek = noop_llseek, }; /** * sock_release - close a socket * @sock: socket to close * * The socket is released from the protocol stack if it has a release * callback, and the inode is then released if the socket is bound to * an inode not a file. 
*/ void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags)) return; this_cpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; } EXPORT_SYMBOL(sock_release); void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) { *tx_flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; if (sock_flag(sk, SOCK_WIFI_STATUS)) *tx_flags |= SKBTX_WIFI_STATUS; } EXPORT_SYMBOL(sock_tx_timestamp); static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock_iocb *si = kiocb_to_siocb(iocb); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; return sock->ops->sendmsg(iocb, sock, msg, size); } static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { int err = security_socket_sendmsg(sock, msg, size); return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); } int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_sendmsg); static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg_nosec(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } int kernel_sendmsg(struct socket *sock, 
struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec; msg->msg_iovlen = num; result = sock_sendmsg(sock, msg, size); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_sendmsg); /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct timespec ts[3]; int empty = 1; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ if (need_software_tstamp && skb->tstamp.tv64 == 0) __net_timestamp(skb); if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { struct timeval tv; skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); } else { skb_get_timestampns(skb, &ts[0]); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts[0]), &ts[0]); } } memset(ts, 0, sizeof(ts)); if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE) && ktime_to_timespec_cond(skb->tstamp, ts + 0)) empty = 0; if (shhwtstamps) { if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->syststamp, ts + 1)) empty = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2)) empty = 0; } if (!empty) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(ts), &ts); } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int ack; if (!sock_flag(sk, SOCK_WIFI_STATUS)) return; if (!skb->wifi_acked_valid) return; ack = skb->wifi_acked; put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack); } 
EXPORT_SYMBOL_GPL(__sock_recv_wifi_status); static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, sizeof(__u32), &skb->dropcount); } void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { sock_recv_timestamp(msg, sk, skb); sock_recv_drops(msg, sk, skb); } EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock_iocb *si = kiocb_to_siocb(iocb); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; si->flags = flags; return sock->ops->recvmsg(iocb, sock, msg, size, flags); } static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { int err = security_socket_recvmsg(sock, msg, size, flags); return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); } int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_recvmsg); static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } /** * kernel_recvmsg - Receive a message from a socket (kernel space) * @sock: The socket to receive the message from * @msg: Received message * @vec: Input s/g array for message data * @num: Size of input s/g array * @size: Number of bytes to read * @flags: Message flags (MSG_DONTWAIT, 
etc...) * * On return the msg structure contains the scatter/gather array passed in the * vec argument. The array is modified so that it consists of the unfilled * portion of the original array. * * The returned value is the total number of bytes received, or an error. */ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num; result = sock_recvmsg(sock, msg, size, flags); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_recvmsg); static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ flags |= more; return kernel_sendpage(sock, page, offset, size, flags); } static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) return -EINVAL; return sock->ops->splice_read(sock, ppos, pipe, len, flags); } static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, struct sock_iocb *siocb) { if (!is_sync_kiocb(iocb)) BUG(); siocb->kiocb = iocb; iocb->private = siocb; return siocb; } static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; 
msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); } static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; if (iocb->ki_nbytes == 0) /* Match SYS5 behaviour */ return 0; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; if (sock->type == SOCK_SEQPACKET) msg->msg_flags |= MSG_EOR; return __sock_sendmsg(iocb, sock, msg, size); } static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } /* * Atomic setting of ioctl hooks to avoid race * with module unload. 
*/ static DEFINE_MUTEX(br_ioctl_mutex); static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) { mutex_lock(&br_ioctl_mutex); br_ioctl_hook = hook; mutex_unlock(&br_ioctl_mutex); } EXPORT_SYMBOL(brioctl_set); static DEFINE_MUTEX(vlan_ioctl_mutex); static int (*vlan_ioctl_hook) (struct net *, void __user *arg); void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) { mutex_lock(&vlan_ioctl_mutex); vlan_ioctl_hook = hook; mutex_unlock(&vlan_ioctl_mutex); } EXPORT_SYMBOL(vlan_ioctl_set); static DEFINE_MUTEX(dlci_ioctl_mutex); static int (*dlci_ioctl_hook) (unsigned int, void __user *); void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) { mutex_lock(&dlci_ioctl_mutex); dlci_ioctl_hook = hook; mutex_unlock(&dlci_ioctl_mutex); } EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, unsigned int cmd, unsigned long arg) { int err; void __user *argp = (void __user *)arg; err = sock->ops->ioctl(sock, cmd, arg); /* * If this ioctl is unknown try to hand it down * to the NIC driver. */ if (err == -ENOIOCTLCMD) err = dev_ioctl(net, cmd, argp); return err; } /* * With an ioctl, arg may well be a user mode pointer, but we don't know * what to do with it - that's up to the protocol still. 
*/ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock; struct sock *sk; void __user *argp = (void __user *)arg; int pid, err; struct net *net; sock = file->private_data; sk = sock->sk; net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { err = dev_ioctl(net, cmd, argp); } else #ifdef CONFIG_WEXT_CORE if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { err = dev_ioctl(net, cmd, argp); } else #endif switch (cmd) { case FIOSETOWN: case SIOCSPGRP: err = -EFAULT; if (get_user(pid, (int __user *)argp)) break; err = f_setown(sock->file, pid, 1); break; case FIOGETOWN: case SIOCGPGRP: err = put_user(f_getown(sock->file), (int __user *)argp); break; case SIOCGIFBR: case SIOCSIFBR: case SIOCBRADDBR: case SIOCBRDELBR: err = -ENOPKG; if (!br_ioctl_hook) request_module("bridge"); mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) err = br_ioctl_hook(net, cmd, argp); mutex_unlock(&br_ioctl_mutex); break; case SIOCGIFVLAN: case SIOCSIFVLAN: err = -ENOPKG; if (!vlan_ioctl_hook) request_module("8021q"); mutex_lock(&vlan_ioctl_mutex); if (vlan_ioctl_hook) err = vlan_ioctl_hook(net, argp); mutex_unlock(&vlan_ioctl_mutex); break; case SIOCADDDLCI: case SIOCDELDLCI: err = -ENOPKG; if (!dlci_ioctl_hook) request_module("dlci"); mutex_lock(&dlci_ioctl_mutex); if (dlci_ioctl_hook) err = dlci_ioctl_hook(cmd, argp); mutex_unlock(&dlci_ioctl_mutex); break; default: err = sock_do_ioctl(net, sock, cmd, arg); break; } return err; } int sock_create_lite(int family, int type, int protocol, struct socket **res) { int err; struct socket *sock = NULL; err = security_socket_create(family, type, protocol, 1); if (err) goto out; sock = sock_alloc(); if (!sock) { err = -ENOMEM; goto out; } sock->type = type; err = security_socket_post_create(sock, family, type, protocol, 1); if (err) goto out_release; out: *res = sock; return err; out_release: sock_release(sock); sock = NULL; goto out; } EXPORT_SYMBOL(sock_create_lite); /* No kernel lock 
held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { unsigned int busy_flag = 0; struct socket *sock; /* * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; if (sk_can_busy_loop(sock->sk)) { /* this socket can poll_ll so tell the system call */ busy_flag = POLL_BUSY_LOOP; /* once, only if requested by syscall */ if (wait && (wait->_key & POLL_BUSY_LOOP)) sk_busy_loop(sock->sk, 1); } return busy_flag | sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) { struct socket *sock = file->private_data; return sock->ops->mmap(file, sock, vma); } static int sock_close(struct inode *inode, struct file *filp) { sock_release(SOCKET_I(inode)); return 0; } /* * Update the socket async list * * Fasync_list locking strategy. * * 1. fasync_list is modified only under process context socket lock * i.e. under semaphore. * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) * or under socket lock */ static int sock_fasync(int fd, struct file *filp, int on) { struct socket *sock = filp->private_data; struct sock *sk = sock->sk; struct socket_wq *wq; if (sk == NULL) return -EINVAL; lock_sock(sk); wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) sock_reset_flag(sk, SOCK_FASYNC); else sock_set_flag(sk, SOCK_FASYNC); release_sock(sk); return 0; } /* This function may be called only under socket lock or callback_lock or rcu_lock */ int sock_wake_async(struct socket *sock, int how, int band) { struct socket_wq *wq; if (!sock) return -1; rcu_read_lock(); wq = rcu_dereference(sock->wq); if (!wq || !wq->fasync_list) { rcu_read_unlock(); return -1; } switch (how) { case SOCK_WAKE_WAITD: if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) break; goto call_kill; case SOCK_WAKE_SPACE: if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) break; /* fall through */ case 
SOCK_WAKE_IO: call_kill: kill_fasync(&wq->fasync_list, SIGIO, band); break; case SOCK_WAKE_URG: kill_fasync(&wq->fasync_list, SIGURG, band); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(sock_wake_async); int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) { int err; struct socket *sock; const struct net_proto_family *pf; /* * Check protocol is in range */ if (family < 0 || family >= NPROTO) return -EAFNOSUPPORT; if (type < 0 || type >= SOCK_MAX) return -EINVAL; /* Compatibility. This uglymoron is moved from INET layer to here to avoid deadlock in module load. */ if (family == PF_INET && type == SOCK_PACKET) { static int warned; if (!warned) { warned = 1; printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n", current->comm); } family = PF_PACKET; } err = security_socket_create(family, type, protocol, kern); if (err) return err; /* * Allocate the socket and allow the family to set things up. if * the protocol is 0, the family is instructed to select an appropriate * default. */ sock = sock_alloc(); if (!sock) { net_warn_ratelimited("socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } sock->type = type; #ifdef CONFIG_MODULES /* Attempt to load a protocol module if the find failed. * * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user * requested real, full-featured networking support upon configuration. * Otherwise module support will break! */ if (rcu_access_pointer(net_families[family]) == NULL) request_module("net-pf-%d", family); #endif rcu_read_lock(); pf = rcu_dereference(net_families[family]); err = -EAFNOSUPPORT; if (!pf) goto out_release; /* * We will call the ->create function, that possibly is in a loadable * module, so we have to bump that loadable module refcnt first. 
*/ if (!try_module_get(pf->owner)) goto out_release; /* Now protected by module ref count */ rcu_read_unlock(); err = pf->create(net, sock, protocol, kern); if (err < 0) goto out_module_put; /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. */ if (!try_module_get(sock->ops->owner)) goto out_module_busy; /* * Now that we're done with the ->create function, the [loadable] * module can have its refcnt decremented */ module_put(pf->owner); err = security_socket_post_create(sock, family, type, protocol, kern); if (err) goto out_sock_release; *res = sock; return 0; out_module_busy: err = -EAFNOSUPPORT; out_module_put: sock->ops = NULL; module_put(pf->owner); out_sock_release: sock_release(sock); return err; out_release: rcu_read_unlock(); goto out_sock_release; } EXPORT_SYMBOL(__sock_create); int sock_create(int family, int type, int protocol, struct socket **res) { return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); } EXPORT_SYMBOL(sock_create); int sock_create_kern(int family, int type, int protocol, struct socket **res) { return __sock_create(&init_net, family, type, protocol, res, 1); } EXPORT_SYMBOL(sock_create_kern); SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) { int retval; struct socket *sock; int flags; /* Check the SOCK_* constants for consistency. 
*/ BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK); flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; retval = sock_create(family, type, protocol, &sock); if (retval < 0) goto out; retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK)); if (retval < 0) goto out_release; out: /* It may be already another descriptor 8) Not kernel problem. */ return retval; out_release: sock_release(sock); return retval; } /* * Create a pair of connected sockets. */ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec) { struct socket *sock1, *sock2; int fd1, fd2, err; struct file *newfile1, *newfile2; int flags; flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; /* * Obtain the first socket and check if the underlying protocol * supports the socketpair call. 
*/ err = sock_create(family, type, protocol, &sock1); if (err < 0) goto out; err = sock_create(family, type, protocol, &sock2); if (err < 0) goto out_release_1; err = sock1->ops->socketpair(sock1, sock2); if (err < 0) goto out_release_both; fd1 = get_unused_fd_flags(flags); if (unlikely(fd1 < 0)) { err = fd1; goto out_release_both; } fd2 = get_unused_fd_flags(flags); if (unlikely(fd2 < 0)) { err = fd2; put_unused_fd(fd1); goto out_release_both; } newfile1 = sock_alloc_file(sock1, flags, NULL); if (unlikely(IS_ERR(newfile1))) { err = PTR_ERR(newfile1); put_unused_fd(fd1); put_unused_fd(fd2); goto out_release_both; } newfile2 = sock_alloc_file(sock2, flags, NULL); if (IS_ERR(newfile2)) { err = PTR_ERR(newfile2); fput(newfile1); put_unused_fd(fd1); put_unused_fd(fd2); sock_release(sock2); goto out; } audit_fd_pair(fd1, fd2); fd_install(fd1, newfile1); fd_install(fd2, newfile2); /* fd1 and fd2 may be already another descriptors. * Not kernel problem. */ err = put_user(fd1, &usockvec[0]); if (!err) err = put_user(fd2, &usockvec[1]); if (!err) return 0; sys_close(fd2); sys_close(fd1); return err; out_release_both: sock_release(sock2); out_release_1: sock_release(sock1); out: return err; } /* * Bind a name to a socket. Nothing much to do here since it's * the protocol's responsibility to handle the local address. * * We move the socket address to kernel space before we call * the protocol layer (having also checked the address is ok). */ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { err = move_addr_to_kernel(umyaddr, addrlen, &address); if (err >= 0) { err = security_socket_bind(sock, (struct sockaddr *)&address, addrlen); if (!err) err = sock->ops->bind(sock, (struct sockaddr *) &address, addrlen); } fput_light(sock->file, fput_needed); } return err; } /* * Perform a listen. 
Basically, we allow the protocol to do anything * necessary for a listen, and if that works, we mark the socket as * ready for listening. */ SYSCALL_DEFINE2(listen, int, fd, int, backlog) { struct socket *sock; int err, fput_needed; int somaxconn; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; if ((unsigned int)backlog > somaxconn) backlog = somaxconn; err = security_socket_listen(sock, backlog); if (!err) err = sock->ops->listen(sock, backlog); fput_light(sock->file, fput_needed); } return err; } /* * For accept, we attempt to create a new socket, set up the link * with the client, wake up the client, then return the new * connected fd. We collect the address of the connector in kernel * space and move it to user at the very end. This is unclean because * we open the socket then return an error. * * 1003.1g adds the ability to recvmsg() to query connection pending * status to recvmsg. We need to add that support in a way thats * clean when we restucture accept also. */ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags) { struct socket *sock, *newsock; struct file *newfile; int err, len, newfd, fput_needed; struct sockaddr_storage address; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = -ENFILE; newsock = sock_alloc(); if (!newsock) goto out_put; newsock->type = sock->type; newsock->ops = sock->ops; /* * We don't need try_module_get here, as the listening socket (sock) * has the protocol module (sock->ops->owner) held. 
*/ __module_get(newsock->ops->owner); newfd = get_unused_fd_flags(flags); if (unlikely(newfd < 0)) { err = newfd; sock_release(newsock); goto out_put; } newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name); if (unlikely(IS_ERR(newfile))) { err = PTR_ERR(newfile); put_unused_fd(newfd); sock_release(newsock); goto out_put; } err = security_socket_accept(sock, newsock); if (err) goto out_fd; err = sock->ops->accept(sock, newsock, sock->file->f_flags); if (err < 0) goto out_fd; if (upeer_sockaddr) { if (newsock->ops->getname(newsock, (struct sockaddr *)&address, &len, 2) < 0) { err = -ECONNABORTED; goto out_fd; } err = move_addr_to_user(&address, len, upeer_sockaddr, upeer_addrlen); if (err < 0) goto out_fd; } /* File flags are not inherited via accept() unlike another OSes. */ fd_install(newfd, newfile); err = newfd; out_put: fput_light(sock->file, fput_needed); out: return err; out_fd: fput(newfile); put_unused_fd(newfd); goto out_put; } SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen) { return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); } /* * Attempt to connect to a socket with the server address. The address * is in user space so we verify it is OK and move it to kernel space. * * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to * break bindings * * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and * other SEQPACKET protocols that take time to connect() as it doesn't * include the -EINPROGRESS status for such sockets. 
*/ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = move_addr_to_kernel(uservaddr, addrlen, &address); if (err < 0) goto out_put; err = security_socket_connect(sock, (struct sockaddr *)&address, addrlen); if (err) goto out_put; err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, sock->file->f_flags); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the local address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = security_socket_getsockname(sock); if (err) goto out_put; err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); if (err) goto out_put; err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the remote address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getpeername(sock); if (err) { fput_light(sock->file, fput_needed); return err; } err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1); if (!err) err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); fput_light(sock->file, fput_needed); } return err; } /* * Send a datagram to a given address. 
We move the address into kernel * space and check the user space data area is readable before invoking * the protocol. */ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; struct sockaddr_storage address; int err; struct msghdr msg; struct iovec iov; int fput_needed; if (len > INT_MAX) len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; iov.iov_base = buff; iov.iov_len = len; msg.msg_name = NULL; msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; if (addr) { err = move_addr_to_kernel(addr, addr_len, &address); if (err < 0) goto out_put; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; err = sock_sendmsg(sock, &msg, len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Send a datagram down a socket. */ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, unsigned int, flags) { return sys_sendto(fd, buff, len, flags, NULL, 0); } /* * Receive a frame from the socket and optionally record the address of the * sender. We verify the buffers are writable and if needed move the * sender address from kernel to user space. 
*/ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; } /* * Receive a datagram from a socket. */ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, unsigned int flags) { return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } /* * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, char __user *, optval, int, optlen) { int err, fput_needed; struct socket *sock; if (optlen < 0) return -EINVAL; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_setsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, optval, optlen); else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Get a socket option. Because we don't know the option lengths we have * to pass a user mode parameter for the protocols to sort out. 
*/ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, char __user *, optval, int __user *, optlen) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); else err = sock->ops->getsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Shutdown a socket. */ SYSCALL_DEFINE2(shutdown, int, fd, int, how) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_shutdown(sock, how); if (!err) err = sock->ops->shutdown(sock, how); fput_light(sock->file, fput_needed); } return err; } /* A couple of helpful macros for getting the address of the 32/64 bit * fields which are the same type (int / unsigned) on our platforms. */ #define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? 
&msg##_compat->member : &msg->member) #define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) #define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) struct used_address { struct sockaddr_storage name; unsigned int name_len; }; static int copy_msghdr_from_user(struct msghdr *kmsg, struct msghdr __user *umsg) { if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) return -EFAULT; if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) return -EINVAL; return 0; } static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct sockaddr_storage address; struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; unsigned char ctl[sizeof(struct cmsghdr) + 20] __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; int err, ctl_len, total_len; err = -EFAULT; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else { err = copy_msghdr_from_user(msg_sys, msg); if (err) return err; } if (msg_sys->msg_iovlen > UIO_FASTIOV) { err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; err = -ENOMEM; iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), GFP_KERNEL); if (!iov) goto out; } /* This will also move the address data into kernel space */ if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ); } else err = verify_iovec(msg_sys, iov, &address, VERIFY_READ); if (err < 0) goto out_freeiov; total_len = err; err = -ENOBUFS; if (msg_sys->msg_controllen > INT_MAX) goto out_freeiov; ctl_len = msg_sys->msg_controllen; if ((MSG_CMSG_COMPAT & flags) && ctl_len) { err = cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, sizeof(ctl)); if (err) goto out_freeiov; ctl_buf = msg_sys->msg_control; ctl_len = msg_sys->msg_controllen; } else if (ctl_len) { if (ctl_len > 
sizeof(ctl)) { ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); if (ctl_buf == NULL) goto out_freeiov; } err = -EFAULT; /* * Careful! Before this, msg_sys->msg_control contains a user pointer. * Afterwards, it will be a kernel pointer. Thus the compiler-assisted * checking falls down on this. */ if (copy_from_user(ctl_buf, (void __user __force *)msg_sys->msg_control, ctl_len)) goto out_freectl; msg_sys->msg_control = ctl_buf; } msg_sys->msg_flags = flags; if (sock->file->f_flags & O_NONBLOCK) msg_sys->msg_flags |= MSG_DONTWAIT; /* * If this is sendmmsg() and current destination address is same as * previously succeeded address, omit asking LSM's decision. * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ if (used_address && msg_sys->msg_name && used_address->name_len == msg_sys->msg_namelen && !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; } err = sock_sendmsg(sock, msg_sys, total_len); /* * If this is sendmmsg() and sending to current destination address was * successful, remember it. 
*/ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; if (msg_sys->msg_name) memcpy(&used_address->name, msg_sys->msg_name, used_address->name_len); } out_freectl: if (ctl_buf != ctl) sock_kfree_s(sock->sk, ctl_buf, ctl_len); out_freeiov: if (iov != iovstack) kfree(iov); out: return err; } /* * BSD sendmsg interface */ long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL); fput_light(sock->file, fput_needed); out: return err; } SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_sendmsg(fd, msg, flags); } /* * Linux sendmmsg interface */ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct used_address used_address; if (vlen > UIO_MAXIOV) vlen = UIO_MAXIOV; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; used_address.name_len = UINT_MAX; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; err = 0; while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags, &used_address); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = ___sys_sendmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags, &used_address); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; } fput_light(sock->file, fput_needed); /* We only return an error if no datagrams were able to be sent */ if (datagrams != 0) return datagrams; return err; } 
SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_sendmmsg(fd, mmsg, vlen, flags); } static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else { err = copy_msghdr_from_user(msg_sys, msg); if (err) return err; } if (msg_sys->msg_iovlen > UIO_FASTIOV) { err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; err = -ENOMEM; iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), GFP_KERNEL); if (!iov) goto out; } /* * Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); } else err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? 
sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user(&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) kfree(iov); out: return err; } /* * BSD recvmsg interface */ long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_recvmsg(fd, msg, flags); } /* * Linux recvmmsg interface */ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct timespec end_time; if (timeout && poll_select_set_timeout(&end_time, timeout->tv_sec, timeout->tv_nsec)) return -EINVAL; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; err = sock_error(sock->sk); if (err) goto out_put; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; while (datagrams < vlen) { /* * No need to ask LSM for more than the first datagram. 
*/ if (MSG_CMSG_COMPAT & flags) { err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = ___sys_recvmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ if (flags & MSG_WAITFORONE) flags |= MSG_DONTWAIT; if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); if (timeout->tv_sec < 0) { timeout->tv_sec = timeout->tv_nsec = 0; break; } /* Timeout, return less than vlen datagrams */ if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) break; } /* Out of band data, return right away */ if (msg_sys.msg_flags & MSG_OOB) break; } out_put: fput_light(sock->file, fput_needed); if (err == 0) return datagrams; if (datagrams != 0) { /* * We may return less entries than requested (vlen) if the * sock is non block and there aren't enough datagrams... */ if (err != -EAGAIN) { /* * ... or if recvmsg returns an error after we * received some datagrams, where we record the * error to return on the next call or if the * app asks about it using getsockopt(SO_ERROR). 
*/ sock->sk->sk_err = -err; } return datagrams; } return err; } SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct timespec __user *, timeout) { int datagrams; struct timespec timeout_sys; if (flags & MSG_CMSG_COMPAT) return -EINVAL; if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) return -EFAULT; datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); if (datagrams > 0 && copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) datagrams = -EFAULT; return datagrams; } #ifdef __ARCH_WANT_SYS_SOCKETCALL /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL /* * System call vectors. * * Argument checking cleaned up. Saved 20% in size. * This function doesn't need to set the kernel lock because * it is set by the callees. */ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { unsigned long a[AUDITSC_ARGS]; unsigned long a0, a1; int err; unsigned int len; if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; len = nargs[call]; if (len > sizeof(a)) return -EINVAL; /* copy_from_user should be SMP safe. 
*/ if (copy_from_user(a, args, len)) return -EFAULT; err = audit_socketcall(nargs[call] / sizeof(unsigned long), a); if (err) return err; a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: err = sys_socket(a0, a1, a[2]); break; case SYS_BIND: err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_CONNECT: err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_LISTEN: err = sys_listen(a0, a1); break; case SYS_ACCEPT: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = sys_getsockname(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_GETPEERNAME: err = sys_getpeername(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_SOCKETPAIR: err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); break; case SYS_SEND: err = sys_send(a0, (void __user *)a1, a[2], a[3]); break; case SYS_SENDTO: err = sys_sendto(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_RECV: err = sys_recv(a0, (void __user *)a1, a[2], a[3]); break; case SYS_RECVFROM: err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], (int __user *)a[5]); break; case SYS_SHUTDOWN: err = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); break; case SYS_GETSOCKOPT: err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3], (int __user *)a[4]); break; case SYS_SENDMSG: err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], (struct timespec __user *)a[4]); break; case SYS_ACCEPT4: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], a[3]); break; default: err = -EINVAL; break; } 
return err; } #endif /* __ARCH_WANT_SYS_SOCKETCALL */ /** * sock_register - add a socket protocol handler * @ops: description of protocol * * This function is called by a protocol handler that wants to * advertise its address family, and have it linked into the * socket interface. The value ops->family coresponds to the * socket system call protocol family. */ int sock_register(const struct net_proto_family *ops) { int err; if (ops->family >= NPROTO) { printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family, NPROTO); return -ENOBUFS; } spin_lock(&net_family_lock); if (rcu_dereference_protected(net_families[ops->family], lockdep_is_held(&net_family_lock))) err = -EEXIST; else { rcu_assign_pointer(net_families[ops->family], ops); err = 0; } spin_unlock(&net_family_lock); printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); return err; } EXPORT_SYMBOL(sock_register); /** * sock_unregister - remove a protocol handler * @family: protocol family to remove * * This function is called by a protocol handler that wants to * remove its address family, and have it unlinked from the * new socket creation. * * If protocol handler is a module, then it can use module reference * counts to protect against new references. If protocol handler is not * a module then it needs to provide its own protection in * the ops->create routine. */ void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); RCU_INIT_POINTER(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); } EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; /* * Initialize the network sysctl infrastructure. */ err = net_sysctl_init(); if (err) goto out; /* * Initialize skbuff SLAB cache */ skb_init(); /* * Initialize the protocols module. 
*/ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER err = netfilter_init(); if (err) goto out; #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING skb_timestamping_init(); #endif out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; } core_initcall(sock_init); /* early initcall */ #ifdef CONFIG_PROC_FS void socket_seq_show(struct seq_file *seq) { int cpu; int counter = 0; for_each_possible_cpu(cpu) counter += per_cpu(sockets_in_use, cpu); /* It can be negative, by the way. 8) */ if (counter < 0) counter = 0; seq_printf(seq, "sockets: used %d\n", counter); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) err = compat_put_timeval(&ktv, up); return err; } static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) err = compat_put_timespec(&kts, up); return err; } static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(struct ifreq)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; err = dev_ioctl(net, SIOCGIFNAME, uifr); if (err) return err; if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) return -EFAULT; return 0; } static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) { struct compat_ifconf 
ifc32; struct ifconf ifc; struct ifconf __user *uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. 
*/ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_SRXCLSRLINS: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. 
*/ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void __user *)(&rxnfc->fs.m_ext + 1) - (void __user *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void __user *)(&rxnfc->fs.location + 1) - (void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void __user *)(&rxnfc->fs.m_ext + 1) - (const void __user *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void __user *)(&rxnfc->fs.location + 1) - (const void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
 */
			/* (ethtool_ioctl, ETHTOOL_GRXCLSRLALL copy-back)
			 * Re-read the rule count from the kernel-side buffer
			 * and clamp it to the user-supplied count so a
			 * concurrently-modified value cannot grow the copy. */
			if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
				return -EFAULT;
			if (actual_rule_cnt < rule_cnt)
				rule_cnt = actual_rule_cnt;
			if (copy_in_user(&compat_rxnfc->rule_locs[0],
					 &rxnfc->rule_locs[0],
					 rule_cnt * sizeof(u32)))
				return -EFAULT;
		}
	}

	return 0;
}

/*
 * 32-bit SIOCWANDEV: widen the compat ifreq into a native one on the
 * compat user stack, fixing up the embedded ifs_ifsu user pointer
 * (stored as a 32-bit value) before forwarding to dev_ioctl().
 */
static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
{
	void __user *uptr;
	compat_uptr_t uptr32;
	struct ifreq __user *uifr;

	uifr = compat_alloc_user_space(sizeof(*uifr));
	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
		return -EFAULT;

	if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu))
		return -EFAULT;

	uptr = compat_ptr(uptr32);

	/* raw_hdlc is used here as a representative member of the union;
	 * any member would store the same pointer value. */
	if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc))
		return -EFAULT;

	return dev_ioctl(net, SIOCWANDEV, uifr);
}

/*
 * 32-bit bonding ioctls. Simple commands are copied into a kernel
 * ifreq and issued under KERNEL_DS; the two query commands carry a
 * user pointer in ifru_data and so go through a widened user-space
 * copy instead.
 */
static int bond_ioctl(struct net *net, unsigned int cmd,
		      struct compat_ifreq __user *ifr32)
{
	struct ifreq kifr;
	struct ifreq __user *uifr;
	mm_segment_t old_fs;
	int err;
	u32 data;
	void __user *datap;

	switch (cmd) {
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
		if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq)))
			return -EFAULT;

		/* Temporarily lift the user address limit so dev_ioctl()
		 * will accept the kernel-resident &kifr. */
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		err = dev_ioctl(net, cmd,
				(struct ifreq __user __force *) &kifr);
		set_fs(old_fs);

		return err;
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		uifr = compat_alloc_user_space(sizeof(*uifr));
		if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
			return -EFAULT;

		/* ifru_data is a 32-bit user pointer; widen it. */
		if (get_user(data, &ifr32->ifr_ifru.ifru_data))
			return -EFAULT;

		datap = compat_ptr(data);
		if (put_user(datap, &uifr->ifr_ifru.ifru_data))
			return -EFAULT;

		return dev_ioctl(net, cmd, uifr);
	default:
		return -ENOIOCTLCMD;
	}
}

/*
 * SIOCDEVPRIVATE..+15: device-private ioctls. Rebuild a native ifreq
 * from the compat one, translating only the interface name and the
 * ifru_data pointer; the private payload semantics are the driver's.
 */
static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
				struct compat_ifreq __user *u_ifreq32)
{
	struct ifreq __user *u_ifreq64;
	char tmp_buf[IFNAMSIZ];
	void __user *data64;
	u32 data32;

	if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
			   IFNAMSIZ))
		return -EFAULT;
	if (__get_user(data32,
&u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); /* Don't check these user accesses, just let that get trapped * in the ioctl handler instead. */ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= 
	       put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
		/* (compat_sioc_ifmap, SIOCGIFMAP result path) copy the native
		 * ifmap back field-by-field into the packed compat layout;
		 * errors are OR-ed and collapsed to a single -EFAULT. */
		err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
		err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
		err |= put_user(ifr.ifr_map.irq, &uifmap32->irq);
		err |= put_user(ifr.ifr_map.dma, &uifmap32->dma);
		err |= put_user(ifr.ifr_map.port, &uifmap32->port);
		if (err)
			err = -EFAULT;
	}
	return err;
}

/*
 * 32-bit SIOCSHWTSTAMP: widen the compat ifreq and fix up the embedded
 * ifr_data user pointer (a 32-bit value in the compat layout) before
 * handing the request to dev_ioctl().
 */
static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32)
{
	void __user *uptr;
	compat_uptr_t uptr32;
	struct ifreq __user *uifr;

	uifr = compat_alloc_user_space(sizeof(*uifr));
	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
		return -EFAULT;

	if (get_user(uptr32, &uifr32->ifr_data))
		return -EFAULT;

	uptr = compat_ptr(uptr32);

	if (put_user(uptr, &uifr->ifr_data))
		return -EFAULT;

	return dev_ioctl(net, SIOCSHWTSTAMP, uifr);
}

/*
 * 32-bit layout of struct rtentry (SIOCADDRT/SIOCDELRT): pointers are
 * stored as u32, so the structure must be translated, not just copied.
 */
struct rtentry32 {
	u32		rt_pad1;
	struct sockaddr rt_dst;		/* target address		*/
	struct sockaddr rt_gateway;	/* gateway addr (RTF_GATEWAY)	*/
	struct sockaddr rt_genmask;	/* target network mask (IP)	*/
	unsigned short	rt_flags;
	short		rt_pad2;
	u32		rt_pad3;
	unsigned char	rt_tos;
	unsigned char	rt_class;
	short		rt_pad4;
	short		rt_metric;	/* +1 for binary compatibility!
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= get_user(r4.rt_window, &(ur4->rt_window)); ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return ret; } /* Since old style 
bridge ioctl's endup using SIOCDEVPRIVATE * for some operations; this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: 
case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } EXPORT_SYMBOL(kernel_accept); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags) { return sock->ops->connect(sock, addr, addrlen, flags); } EXPORT_SYMBOL(kernel_connect); int kernel_getsockname(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, 
addr, addrlen, 0); } EXPORT_SYMBOL(kernel_getsockname); int kernel_getpeername(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 1); } EXPORT_SYMBOL(kernel_getpeername); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int __user *uoptlen; int err; uoptval = (char __user __force *) optval; uoptlen = (int __user __force *) optlen; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, uoptval, uoptlen); else err = sock->ops->getsockopt(sock, level, optname, uoptval, uoptlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_getsockopt); int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, unsigned int optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int err; uoptval = (char __user __force *) optval; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, uoptval, optlen); else err = sock->ops->setsockopt(sock, level, optname, uoptval, optlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_setsockopt); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { if (sock->ops->sendpage) return sock->ops->sendpage(sock, page, offset, size, flags); return sock_no_sendpage(sock, page, offset, size, flags); } EXPORT_SYMBOL(kernel_sendpage); int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) { mm_segment_t oldfs = get_fs(); int err; set_fs(KERNEL_DS); err = sock->ops->ioctl(sock, cmd, arg); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_sock_ioctl); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) { return sock->ops->shutdown(sock, how); } EXPORT_SYMBOL(kernel_sock_shutdown);
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/xattr.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> #include <net/busy_poll.h> #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sysctl_net_busy_read __read_mostly; unsigned int sysctl_net_busy_poll __read_mostly; #endif static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, 
unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .aio_read = sock_aio_read, .aio_write = sock_aio_write, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .open = sock_no_open, /* special open code to disallow open via /proc */ .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. 
 */
/*
 * Copy a sockaddr from user space into a kernel sockaddr_storage.
 * Lengths outside [0, sizeof(struct sockaddr_storage)] are rejected;
 * a zero length is legal and copies nothing.
 */
int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
{
	if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (ulen == 0)
		return 0;
	if (copy_from_user(kaddr, uaddr, ulen))
		return -EFAULT;
	/* Give the audit subsystem a chance to record/veto the address. */
	return audit_sockaddr(ulen, kaddr);
}

/**
 *	move_addr_to_user	-	copy an address to user space
 *	@kaddr: kernel space address
 *	@klen: length of address in kernel
 *	@uaddr: user space address
 *	@ulen: pointer to user length field
 *
 *	The value pointed to by ulen on entry is the buffer length available.
 *	This is overwritten with the buffer space used. -EINVAL is returned
 *	if an overlong buffer is specified or a negative buffer size. -EFAULT
 *	is returned if either the buffer or the length field are not
 *	accessible.
 *	After copying the data up to the limit the user specifies, the true
 *	length of the data is written over the length limit the user
 *	specified. Zero is returned for a success.
 */
static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
			     void __user *uaddr, int __user *ulen)
{
	int err;
	int len;

	err = get_user(len, ulen);
	if (err)
		return err;
	/* Copy no more than the caller's buffer holds... */
	if (len > klen)
		len = klen;
	/* ...and never more than a sockaddr_storage. */
	if (len < 0 || len > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (len) {
		if (audit_sockaddr(klen, kaddr))
			return -ENOMEM;
		if (copy_to_user(uaddr, kaddr, len))
			return -EFAULT;
	}
	/*
	 *      "fromlen shall refer to the value before truncation.."
	 *                      1003.1g
	 */
	return __put_user(klen, ulen);
}

/* Slab cache backing socket inodes (struct socket_alloc objects). */
static struct kmem_cache *sock_inode_cachep __read_mostly;

/* Allocate a socket inode together with its wait-queue structure. */
static struct inode *sock_alloc_inode(struct super_block *sb)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq) {
		kmem_cache_free(sock_inode_cachep, ei);
		return NULL;
	}
	init_waitqueue_head(&wq->wait);
	wq->fasync_list = NULL;
	RCU_INIT_POINTER(ei->socket.wq, wq);

	/* Start from a clean, unconnected socket. */
	ei->socket.state = SS_UNCONNECTED;
	ei->socket.flags = 0;
	ei->socket.ops = NULL;
	ei->socket.sk = NULL;
	ei->socket.file = NULL;
	return &ei->vfs_inode;
}

/* Tear down a socket inode; the wq is freed after an RCU grace period
 * since readers may still dereference it under rcu_read_lock(). */
static void sock_destroy_inode(struct inode *inode)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = container_of(inode, struct socket_alloc, vfs_inode);
	wq = rcu_dereference_protected(ei->socket.wq, 1);
	kfree_rcu(wq, rcu);
	kmem_cache_free(sock_inode_cachep, ei);
}

/* Slab constructor: initialise the embedded VFS inode once per object. */
static void init_once(void *foo)
{
	struct socket_alloc *ei = (struct socket_alloc *)foo;

	inode_init_once(&ei->vfs_inode);
}

/* Create the sock_inode_cache slab; presumably called once at socket
 * layer init — the caller is outside this chunk. */
static int init_inodecache(void)
{
	sock_inode_cachep = kmem_cache_create("sock_inode_cache",
					      sizeof(struct socket_alloc),
					      0,
					      (SLAB_HWCACHE_ALIGN |
					       SLAB_RECLAIM_ACCOUNT |
					       SLAB_MEM_SPREAD),
					      init_once);
	if (sock_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static const struct super_operations sockfs_ops = {
	.alloc_inode	= sock_alloc_inode,
	.destroy_inode	= sock_destroy_inode,
	.statfs		= simple_statfs,
};

/*
 * sockfs_dname() is called from d_path().
 */
/* Render a sockfs dentry name as "socket:[<inode number>]". */
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations sockfs_dentry_operations = {
	.d_dname  = sockfs_dname,
};

/* Mount the in-kernel pseudo filesystem that backs socket inodes. */
static struct dentry *sockfs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
		&sockfs_dentry_operations, SOCKFS_MAGIC);
}

static struct vfsmount *sock_mnt __read_mostly;

static struct file_system_type sock_fs_type = {
	.name =		"sockfs",
	.mount =	sockfs_mount,
	.kill_sb =	kill_anon_super,
};

/*
 *	Obtains the first available file descriptor and sets it up for use.
 *
 *	These functions create file structures and maps them to fd space
 *	of the current process. On success it returns file descriptor
 *	and file struct implicitly stored in sock->file.
 *	Note that another thread may close file descriptor before we return
 *	from this function. We use the fact that now we do not refer
 *	to socket after mapping. If one day we will need it, this
 *	function will increment ref. count on file by 1.
 *
 *	In any case returned fd MAY BE not valid!
 *	This race condition is unavoidable
 *	with shared fd spaces, we cannot solve it inside kernel,
 *	but we take care of internal coherence yet.
 */
/* Bind @sock to a newly allocated struct file on the sockfs mount.
 * The dentry name (shown by /proc and d_path) is @dname if given,
 * otherwise the protocol creator's name. Returns the file or ERR_PTR. */
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
	struct qstr name = { .name = "" };
	struct path path;
	struct file *file;

	if (dname) {
		name.name = dname;
		name.len = strlen(name.name);
	} else if (sock->sk) {
		name.name = sock->sk->sk_prot_creator->name;
		name.len = strlen(name.name);
	}
	path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
	if (unlikely(!path.dentry))
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(sock_mnt);

	d_instantiate(path.dentry, SOCK_INODE(sock));
	SOCK_INODE(sock)->i_fop = &socket_file_ops;

	file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
		  &socket_file_ops);
	if (unlikely(IS_ERR(file))) {
		/* drop dentry, keep inode */
		ihold(path.dentry->d_inode);
		path_put(&path);
		return file;
	}

	sock->file = file;
	file->f_flags = O_RDWR | (flags & O_NONBLOCK);
	file->private_data = sock;
	return file;
}
EXPORT_SYMBOL(sock_alloc_file);

/* Allocate an fd and install the socket's file in it; on file-creation
 * failure the reserved fd is returned to the pool. */
static int sock_map_fd(struct socket *sock, int flags)
{
	struct file *newfile;
	int fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
		return fd;

	newfile = sock_alloc_file(sock, flags, NULL);
	if (likely(!IS_ERR(newfile))) {
		fd_install(fd, newfile);
		return fd;
	}

	put_unused_fd(fd);
	return PTR_ERR(newfile);
}

/* Map a struct file back to its socket; sets *err to -ENOTSOCK and
 * returns NULL if the file is not a socket file. */
struct socket *sock_from_file(struct file *file, int *err)
{
	if (file->f_op == &socket_file_ops)
		return file->private_data;	/* set in sock_map_fd */

	*err = -ENOTSOCK;
	return NULL;
}
EXPORT_SYMBOL(sock_from_file);

/**
 *	sockfd_lookup - Go from a file number to its socket slot
 *	@fd: file handle
 *	@err: pointer to an error code return
 *
 *	The file handle passed in is locked and the socket it is bound
 *	too is returned. If an error occurs the err pointer is overwritten
 *	with a negative errno code and NULL is returned. The function checks
 *	for both invalid handles and passing a handle which is not a socket.
 *
 *	On a success the socket object pointer is returned.
 */
struct socket *sockfd_lookup(int fd, int *err)
{
	struct file *file;
	struct socket *sock;

	file = fget(fd);
	if (!file) {
		*err = -EBADF;
		return NULL;
	}

	sock = sock_from_file(file, err);
	if (!sock)
		/* Not a socket: drop the reference fget() took. */
		fput(file);
	return sock;
}
EXPORT_SYMBOL(sockfd_lookup);

/* Like sockfd_lookup() but uses the fget_light()/fput_light() fast
 * path; *fput_needed tells the caller whether a matching
 * fput_light() is required. */
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
	struct file *file;
	struct socket *sock;

	*err = -EBADF;
	file = fget_light(fd, fput_needed);
	if (file) {
		sock = sock_from_file(file, err);
		if (sock)
			return sock;
		fput_light(file, *fput_needed);
	}
	return NULL;
}

#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)

/* getxattr for sockfs inodes: only system.sockprotoname is supported;
 * it reports the protocol name stored in the dentry name. With a NULL
 * @value, returns the size needed (including the NUL). */
static ssize_t sockfs_getxattr(struct dentry *dentry,
			       const char *name, void *value, size_t size)
{
	const char *proto_name;
	size_t proto_size;
	int error;

	error = -ENODATA;
	if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
		proto_name = dentry->d_name.name;
		proto_size = strlen(proto_name);

		if (value) {
			error = -ERANGE;
			if (proto_size + 1 > size)
				goto out;

			/* Buffer verified large enough above, so the copy
			 * (including the terminating NUL) fits. */
			strncpy(value, proto_name, proto_size + 1);
		}
		error = proto_size + 1;
	}

out:
	return error;
}

/* listxattr for sockfs inodes: security attrs (via LSM) followed by
 * the fixed sockprotoname entry. NULL @buffer means "size query". */
static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
				size_t size)
{
	ssize_t len;
	ssize_t used = 0;

	len = security_inode_listsecurity(dentry->d_inode, buffer, size);
	if (len < 0)
		return len;
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		buffer += len;
	}
	len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
		buffer += len;
	}

	return used;
}

static const struct inode_operations sockfs_inode_ops = {
	.getxattr = sockfs_getxattr,
	.listxattr = sockfs_listxattr,
};

/**
 *	sock_alloc	-	allocate a socket
 *
 *	Allocate a new inode and socket object. The two are bound together
 *	and initialised. The socket is then returned. If we are out of inodes
 *	NULL is returned.
 */
static struct socket *sock_alloc(void)
{
	struct inode *inode;
	struct socket *sock;

	inode = new_inode_pseudo(sock_mnt->mnt_sb);
	if (!inode)
		return NULL;

	sock = SOCKET_I(inode);

	kmemcheck_annotate_bitfield(sock, type);
	inode->i_ino = get_next_ino();
	/* Sockets are world-accessible nodes owned by the creator. */
	inode->i_mode = S_IFSOCK | S_IRWXUGO;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_op = &sockfs_inode_ops;

	this_cpu_add(sockets_in_use, 1);
	return sock;
}

/*
 *	In theory you can't get an open on this inode, but /proc provides
 *	a back door. Remember to keep it shut otherwise you'll let the
 *	creepy crawlies in.
 */
static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
{
	return -ENXIO;
}

const struct file_operations bad_sock_fops = {
	.owner = THIS_MODULE,
	.open = sock_no_open,
	.llseek = noop_llseek,
};

/**
 *	sock_release	-	close a socket
 *	@sock: socket to close
 *
 *	The socket is released from the protocol stack if it has a release
 *	callback, and the inode is then released if the socket is bound to
 *	an inode not a file.
 */
void sock_release(struct socket *sock)
{
	if (sock->ops) {
		struct module *owner = sock->ops->owner;

		sock->ops->release(sock);
		sock->ops = NULL;
		/* Drop the module ref taken when the socket was created. */
		module_put(owner);
	}

	/* A non-empty fasync list here means someone forgot to detach. */
	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
		printk(KERN_ERR "sock_release: fasync list not empty!\n");

	/* Externally allocated sockets are not accounted or inode-backed. */
	if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
		return;

	this_cpu_sub(sockets_in_use, 1);
	if (!sock->file) {
		iput(SOCK_INODE(sock));
		return;
	}
	/* Otherwise the file's release path owns the inode. */
	sock->file = NULL;
}
EXPORT_SYMBOL(sock_release);

/* Translate the socket's TX-timestamping flags into skb tx_flags bits. */
void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
{
	*tx_flags = 0;
	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
		*tx_flags |= SKBTX_HW_TSTAMP;
	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
		*tx_flags |= SKBTX_SW_TSTAMP;
	if (sock_flag(sk, SOCK_WIFI_STATUS))
		*tx_flags |= SKBTX_WIFI_STATUS;
}
EXPORT_SYMBOL(sock_tx_timestamp);

/* Fill the sock_iocb bookkeeping and invoke the protocol's sendmsg,
 * bypassing the LSM hook (callers must have checked it already). */
static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
				       struct msghdr *msg, size_t size)
{
	struct sock_iocb *si = kiocb_to_siocb(iocb);

	si->sock = sock;
	si->scm = NULL;
	si->msg = msg;
	si->size = size;

	return sock->ops->sendmsg(iocb, sock, msg, size);
}

/* LSM-checked variant: security hook first, then the real send. */
static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
				 struct msghdr *msg, size_t size)
{
	int err = security_socket_sendmsg(sock, msg, size);

	return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
}

/* Synchronous sendmsg: runs the aio path on a stack kiocb and waits
 * if the operation gets queued. */
int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_sendmsg(&iocb, sock, msg, size);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}
EXPORT_SYMBOL(sock_sendmsg);

/* As sock_sendmsg() but skips the LSM sendmsg hook. */
static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg,
			      size_t size)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_sendmsg_nosec(&iocb, sock, msg, size);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}

int kernel_sendmsg(struct socket *sock,
struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec; msg->msg_iovlen = num; result = sock_sendmsg(sock, msg, size); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_sendmsg); /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct timespec ts[3]; int empty = 1; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ if (need_software_tstamp && skb->tstamp.tv64 == 0) __net_timestamp(skb); if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { struct timeval tv; skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); } else { skb_get_timestampns(skb, &ts[0]); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts[0]), &ts[0]); } } memset(ts, 0, sizeof(ts)); if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE) && ktime_to_timespec_cond(skb->tstamp, ts + 0)) empty = 0; if (shhwtstamps) { if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->syststamp, ts + 1)) empty = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2)) empty = 0; } if (!empty) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(ts), &ts); } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int ack; if (!sock_flag(sk, SOCK_WIFI_STATUS)) return; if (!skb->wifi_acked_valid) return; ack = skb->wifi_acked; put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack); } 
EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);

/* Report the dropped-packet count via an SO_RXQ_OVFL cmsg if requested. */
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
{
	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount)
		put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
			 sizeof(__u32), &skb->dropcount);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb)
{
	sock_recv_timestamp(msg, sk, skb);
	sock_recv_drops(msg, sk, skb);
}
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);

/* Fill the sock_iocb and call the protocol recvmsg, bypassing the LSM
 * hook (callers have already passed security checks). */
static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
				       struct msghdr *msg, size_t size,
				       int flags)
{
	struct sock_iocb *si = kiocb_to_siocb(iocb);

	si->sock = sock;
	si->scm = NULL;
	si->msg = msg;
	si->size = size;
	si->flags = flags;

	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
}

/* LSM-checked variant of the above. */
static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				 struct msghdr *msg, size_t size, int flags)
{
	int err = security_socket_recvmsg(sock, msg, size, flags);

	return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags);
}

/* Synchronous recvmsg: build a kiocb on the stack and wait if queued. */
int sock_recvmsg(struct socket *sock, struct msghdr *msg,
		 size_t size, int flags)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}
EXPORT_SYMBOL(sock_recvmsg);

/* As sock_recvmsg(), but skips the LSM hook. */
static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
			      size_t size, int flags)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}

/**
 * kernel_recvmsg - Receive a message from a socket (kernel space)
 * @sock:	The socket to receive the message from
 * @msg:	Received message
 * @vec:	Input s/g array for message data
 * @num:	Size of input s/g array
 * @size:	Number of bytes to read
 * @flags:	Message flags (MSG_DONTWAIT, etc...)
 *
 * On return the msg structure contains the scatter/gather array passed in the
 * vec argument. The array is modified so that it consists of the unfilled
 * portion of the original array.
 *
 * The returned value is the total number of bytes received, or an error.
 */
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size, int flags)
{
	mm_segment_t oldfs = get_fs();
	int result;

	set_fs(KERNEL_DS);
	/*
	 * the following is safe, since for compiler definitions of kvec and
	 * iovec are identical, yielding the same in-core layout and alignment
	 */
	msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num;
	result = sock_recvmsg(sock, msg, size, flags);
	set_fs(oldfs);
	return result;
}
EXPORT_SYMBOL(kernel_recvmsg);

/* sendfile()/splice write-side entry point: forward one page to the
 * protocol via kernel_sendpage(). */
static ssize_t sock_sendpage(struct file *file, struct page *page,
			     int offset, size_t size, loff_t *ppos, int more)
{
	struct socket *sock;
	int flags;

	sock = file->private_data;

	flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
	/* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
	flags |= more;

	return kernel_sendpage(sock, page, offset, size, flags);
}

/* splice() read-side entry point; only if the protocol supports it. */
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct socket *sock = file->private_data;

	if (unlikely(!sock->ops->splice_read))
		return -EINVAL;

	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}

/* Only synchronous iocbs are supported here; reuse the caller's siocb. */
static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
					 struct sock_iocb *siocb)
{
	if (!is_sync_kiocb(iocb))
		BUG();
	siocb->kiocb = iocb;
	iocb->private = siocb;
	return siocb;
}

/* Common read path for file-based aio on sockets: build a msghdr around
 * the caller's iovec and hand it to __sock_recvmsg().
 * (Definition continues in the next chunk of this file.) */
static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
			    struct file *file, const struct iovec *iov,
			    unsigned long nr_segs)
{
	struct socket *sock = file->private_data;
	size_t size = 0;
	int i;

	for (i = 0; i < nr_segs; i++)
		size += iov[i].iov_len;

	msg->msg_name = NULL;
	msg->msg_namelen = 0;
	msg->msg_control = NULL;
	msg->msg_controllen = 0;
	msg->msg_iov = (struct iovec *)iov;
	msg->msg_iovlen = nr_segs;
	msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;

	return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
}

static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct sock_iocb siocb, *x;

	/* Sockets are not seekable. */
	if (pos != 0)
		return -ESPIPE;

	if (iocb->ki_nbytes == 0)	/* Match SYS5 behaviour */
		return 0;

	x = alloc_sock_iocb(iocb, &siocb);
	if (!x)
		return -ENOMEM;
	return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}

/* Common write path for file-based aio on sockets; mirrors do_sock_read()
 * but also sets MSG_EOR for SOCK_SEQPACKET sockets. */
static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
			     struct file *file, const struct iovec *iov,
			     unsigned long nr_segs)
{
	struct socket *sock = file->private_data;
	size_t size = 0;
	int i;

	for (i = 0; i < nr_segs; i++)
		size += iov[i].iov_len;

	msg->msg_name = NULL;
	msg->msg_namelen = 0;
	msg->msg_control = NULL;
	msg->msg_controllen = 0;
	msg->msg_iov = (struct iovec *)iov;
	msg->msg_iovlen = nr_segs;
	msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
	if (sock->type == SOCK_SEQPACKET)
		msg->msg_flags |= MSG_EOR;

	return __sock_sendmsg(iocb, sock, msg, size);
}

static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct sock_iocb siocb, *x;

	/* Sockets are not seekable. */
	if (pos != 0)
		return -ESPIPE;

	x = alloc_sock_iocb(iocb, &siocb);
	if (!x)
		return -ENOMEM;

	return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}

/*
 * Atomic setting of ioctl hooks to avoid race
 * with module unload.
 */

static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);

/* Register/unregister the bridge module's ioctl handler. */
void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
	mutex_lock(&br_ioctl_mutex);
	br_ioctl_hook = hook;
	mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);

static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);

/* Register/unregister the 8021q (VLAN) module's ioctl handler. */
void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
	mutex_lock(&vlan_ioctl_mutex);
	vlan_ioctl_hook = hook;
	mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);

static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook) (unsigned int, void __user *);

/* Register/unregister the DLCI module's ioctl handler. */
void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
{
	mutex_lock(&dlci_ioctl_mutex);
	dlci_ioctl_hook = hook;
	mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);

/* Default ioctl dispatch: try the protocol first, then the NIC driver. */
static long sock_do_ioctl(struct net *net, struct socket *sock,
			  unsigned int cmd, unsigned long arg)
{
	int err;
	void __user *argp = (void __user *)arg;

	err = sock->ops->ioctl(sock, cmd, arg);

	/*
	 * If this ioctl is unknown try to hand it down
	 * to the NIC driver.
	 */
	if (err == -ENOIOCTLCMD)
		err = dev_ioctl(net, cmd, argp);

	return err;
}

/*
 * With an ioctl, arg may well be a user mode pointer, but we don't know
 * what to do with it - that's up to the protocol still.
 */

static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct socket *sock;
	struct sock *sk;
	void __user *argp = (void __user *)arg;
	int pid, err;
	struct net *net;

	sock = file->private_data;
	sk = sock->sk;
	net = sock_net(sk);
	/* Device-private and wireless-extension ranges go straight to the
	 * device layer; everything else is handled (or dispatched) here. */
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
		err = dev_ioctl(net, cmd, argp);
	} else
#ifdef CONFIG_WEXT_CORE
	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
		err = dev_ioctl(net, cmd, argp);
	} else
#endif
		switch (cmd) {
		case FIOSETOWN:
		case SIOCSPGRP:
			err = -EFAULT;
			if (get_user(pid, (int __user *)argp))
				break;
			err = f_setown(sock->file, pid, 1);
			break;
		case FIOGETOWN:
		case SIOCGPGRP:
			err = put_user(f_getown(sock->file),
				       (int __user *)argp);
			break;
		case SIOCGIFBR:
		case SIOCSIFBR:
		case SIOCBRADDBR:
		case SIOCBRDELBR:
			err = -ENOPKG;
			if (!br_ioctl_hook)
				request_module("bridge");

			mutex_lock(&br_ioctl_mutex);
			if (br_ioctl_hook)
				err = br_ioctl_hook(net, cmd, argp);
			mutex_unlock(&br_ioctl_mutex);
			break;
		case SIOCGIFVLAN:
		case SIOCSIFVLAN:
			err = -ENOPKG;
			if (!vlan_ioctl_hook)
				request_module("8021q");

			mutex_lock(&vlan_ioctl_mutex);
			if (vlan_ioctl_hook)
				err = vlan_ioctl_hook(net, argp);
			mutex_unlock(&vlan_ioctl_mutex);
			break;
		case SIOCADDDLCI:
		case SIOCDELDLCI:
			err = -ENOPKG;
			if (!dlci_ioctl_hook)
				request_module("dlci");

			mutex_lock(&dlci_ioctl_mutex);
			if (dlci_ioctl_hook)
				err = dlci_ioctl_hook(cmd, argp);
			mutex_unlock(&dlci_ioctl_mutex);
			break;
		default:
			err = sock_do_ioctl(net, sock, cmd, arg);
			break;
		}
	return err;
}

/* Allocate a bare socket of the given type with LSM checks but without
 * binding it to a protocol family; used by kernel callers. */
int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
	int err;
	struct socket *sock = NULL;

	err = security_socket_create(family, type, protocol, 1);
	if (err)
		goto out;

	sock = sock_alloc();
	if (!sock) {
		err = -ENOMEM;
		goto out;
	}

	sock->type = type;
	err = security_socket_post_create(sock, family, type, protocol, 1);
	if (err)
		goto out_release;

out:
	*res = sock;
	return err;
out_release:
	sock_release(sock);
	sock = NULL;
	goto out;
}
EXPORT_SYMBOL(sock_create_lite);

/* No kernel lock held - perfect */
static unsigned int sock_poll(struct file *file, poll_table *wait)
{
	unsigned int busy_flag = 0;
	struct socket *sock;

	/*
	 *      We can't return errors to poll, so it's either yes or no.
	 */
	sock = file->private_data;

	if (sk_can_busy_loop(sock->sk)) {
		/* this socket can poll_ll so tell the system call */
		busy_flag = POLL_BUSY_LOOP;

		/* once, only if requested by syscall */
		if (wait && (wait->_key & POLL_BUSY_LOOP))
			sk_busy_loop(sock->sk, 1);
	}

	return busy_flag | sock->ops->poll(file, sock, wait);
}

static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct socket *sock = file->private_data;

	return sock->ops->mmap(file, sock, vma);
}

static int sock_close(struct inode *inode, struct file *filp)
{
	sock_release(SOCKET_I(inode));
	return 0;
}

/*
 *	Update the socket async list
 *
 *	Fasync_list locking strategy.
 *
 *	1. fasync_list is modified only under process context socket lock
 *	   i.e. under semaphore.
 *	2. fasync_list is used under read_lock(&sk->sk_callback_lock)
 *	   or under socket lock
 */

static int sock_fasync(int fd, struct file *filp, int on)
{
	struct socket *sock = filp->private_data;
	struct sock *sk = sock->sk;
	struct socket_wq *wq;

	if (sk == NULL)
		return -EINVAL;

	lock_sock(sk);
	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
	fasync_helper(fd, filp, on, &wq->fasync_list);

	if (!wq->fasync_list)
		sock_reset_flag(sk, SOCK_FASYNC);
	else
		sock_set_flag(sk, SOCK_FASYNC);

	release_sock(sk);
	return 0;
}

/* This function may be called only under socket lock or callback_lock or rcu_lock */

int sock_wake_async(struct socket *sock, int how, int band)
{
	struct socket_wq *wq;

	if (!sock)
		return -1;
	rcu_read_lock();
	wq = rcu_dereference(sock->wq);
	if (!wq || !wq->fasync_list) {
		rcu_read_unlock();
		return -1;
	}
	switch (how) {
	case SOCK_WAKE_WAITD:
		if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
			break;
		goto call_kill;
	case SOCK_WAKE_SPACE:
		if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
			break;
		/* fall through */
	case
	     SOCK_WAKE_IO:
call_kill:
		kill_fasync(&wq->fasync_list, SIGIO, band);
		break;
	case SOCK_WAKE_URG:
		kill_fasync(&wq->fasync_list, SIGURG, band);
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(sock_wake_async);

/* Create and initialize a socket for the given family/type/protocol.
 * Loads the protocol-family module on demand and takes the module
 * references that sock_release() later drops. */
int __sock_create(struct net *net, int family, int type, int protocol,
		  struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/*
	 *      Check protocol is in range
	 */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	/* Compatibility.

	   This uglymoron is moved from INET layer to here to avoid
	   deadlock in module load.
	 */
	if (family == PF_INET && type == SOCK_PACKET) {
		static int warned;
		if (!warned) {
			warned = 1;
			printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
			       current->comm);
		}
		family = PF_PACKET;
	}

	err = security_socket_create(family, type, protocol, kern);
	if (err)
		return err;

	/*
	 *	Allocate the socket and allow the family to set things up. if
	 *	the protocol is 0, the family is instructed to select an appropriate
	 *	default.
	 */
	sock = sock_alloc();
	if (!sock) {
		net_warn_ratelimited("socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but its the
				   closest posix thing */
	}

	sock->type = type;

#ifdef CONFIG_MODULES
	/* Attempt to load a protocol module if the find failed.
	 *
	 * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
	 * requested real, full-featured networking support upon configuration.
	 * Otherwise module support will break!
	 */
	if (rcu_access_pointer(net_families[family]) == NULL)
		request_module("net-pf-%d", family);
#endif

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	err = -EAFNOSUPPORT;
	if (!pf)
		goto out_release;

	/*
	 * We will call the ->create function, that possibly is in a loadable
	 * module, so we have to bump that loadable module refcnt first.
	 */
	if (!try_module_get(pf->owner))
		goto out_release;

	/* Now protected by module ref count */
	rcu_read_unlock();

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_module_put;

	/*
	 * Now to bump the refcnt of the [loadable] module that owns this
	 * socket at sock_release time we decrement its refcnt.
	 */
	if (!try_module_get(sock->ops->owner))
		goto out_module_busy;

	/*
	 * Now that we're done with the ->create function, the [loadable]
	 * module can have its refcnt decremented
	 */
	module_put(pf->owner);
	err = security_socket_post_create(sock, family, type, protocol, kern);
	if (err)
		goto out_sock_release;
	*res = sock;

	return 0;

out_module_busy:
	err = -EAFNOSUPPORT;
out_module_put:
	sock->ops = NULL;
	module_put(pf->owner);
out_sock_release:
	sock_release(sock);
	return err;

out_release:
	rcu_read_unlock();
	goto out_sock_release;
}
EXPORT_SYMBOL(__sock_create);

/* Create a socket in the caller's network namespace (userspace path). */
int sock_create(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
EXPORT_SYMBOL(sock_create);

/* Create a kernel-internal socket in the initial network namespace. */
int sock_create_kern(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(&init_net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);

SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
	int retval;
	struct socket *sock;
	int flags;

	/* Check the SOCK_* constants for consistency.
	 */
	BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);

	/* Split the SOCK_CLOEXEC/SOCK_NONBLOCK flag bits out of 'type'. */
	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		goto out;

	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
	if (retval < 0)
		goto out_release;

out:
	/* It may be already another descriptor 8) Not kernel problem. */
	return retval;

out_release:
	sock_release(sock);
	return retval;
}

/*
 *	Create a pair of connected sockets.
 */

SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
		int __user *, usockvec)
{
	struct socket *sock1, *sock2;
	int fd1, fd2, err;
	struct file *newfile1, *newfile2;
	int flags;

	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	/*
	 * Obtain the first socket and check if the underlying protocol
	 * supports the socketpair call.
	 */

	err = sock_create(family, type, protocol, &sock1);
	if (err < 0)
		goto out;

	err = sock_create(family, type, protocol, &sock2);
	if (err < 0)
		goto out_release_1;

	err = sock1->ops->socketpair(sock1, sock2);
	if (err < 0)
		goto out_release_both;

	fd1 = get_unused_fd_flags(flags);
	if (unlikely(fd1 < 0)) {
		err = fd1;
		goto out_release_both;
	}

	fd2 = get_unused_fd_flags(flags);
	if (unlikely(fd2 < 0)) {
		err = fd2;
		put_unused_fd(fd1);
		goto out_release_both;
	}

	newfile1 = sock_alloc_file(sock1, flags, NULL);
	if (unlikely(IS_ERR(newfile1))) {
		err = PTR_ERR(newfile1);
		put_unused_fd(fd1);
		put_unused_fd(fd2);
		goto out_release_both;
	}

	newfile2 = sock_alloc_file(sock2, flags, NULL);
	if (IS_ERR(newfile2)) {
		err = PTR_ERR(newfile2);
		/* newfile1 now owns sock1; fput() releases it. */
		fput(newfile1);
		put_unused_fd(fd1);
		put_unused_fd(fd2);
		sock_release(sock2);
		goto out;
	}

	audit_fd_pair(fd1, fd2);
	fd_install(fd1, newfile1);
	fd_install(fd2, newfile2);
	/* fd1 and fd2 may be already another descriptors.
	 * Not kernel problem.
	 */

	err = put_user(fd1, &usockvec[0]);
	if (!err)
		err = put_user(fd2, &usockvec[1]);
	if (!err)
		return 0;

	sys_close(fd2);
	sys_close(fd1);
	return err;

out_release_both:
	sock_release(sock2);
out_release_1:
	sock_release(sock1);
out:
	return err;
}

/*
 *	Bind a name to a socket. Nothing much to do here since it's
 *	the protocol's responsibility to handle the local address.
 *
 *	We move the socket address to kernel space before we call
 *	the protocol layer (having also checked the address is ok).
 */

SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen, &address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Perform a listen.
 *	Basically, we allow the protocol to do anything
 *	necessary for a listen, and if that works, we mark the socket as
 *	ready for listening.
 */

SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		/* Clamp the requested backlog to the per-netns sysctl. */
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned int)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	For accept, we attempt to create a new socket, set up the link
 *	with the client, wake up the client, then return the new
 *	connected fd. We collect the address of the connector in kernel
 *	space and move it to user at the very end. This is unclean because
 *	we open the socket then return an error.
 *
 *	1003.1g adds the ability to recvmsg() to query connection pending
 *	status to recvmsg. We need to add that support in a way thats
 *	clean when we restucture accept also.
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out_put;

	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = get_unused_fd_flags(flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}
	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
	if (unlikely(IS_ERR(newfile))) {
		err = PTR_ERR(newfile);
		put_unused_fd(newfd);
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user(&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept() unlike another OSes. */

	fd_install(newfd, newfile);
	err = newfd;

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
out_fd:
	fput(newfile);
	put_unused_fd(newfd);
	goto out_put;
}

SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen)
{
	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}

/*
 *	Attempt to connect to a socket with the server address.  The address
 *	is in user space so we verify it is OK and move it to kernel space.
 *
 *	For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
 *	break bindings
 *
 *	NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
 *	other SEQPACKET protocols that take time to connect() as it doesn't
 *	include the -EINPROGRESS status for such sockets.
 */

SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
		int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;
	err = move_addr_to_kernel(uservaddr, addrlen, &address);
	if (err < 0)
		goto out_put;

	err =
	    security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
	if (err)
		goto out_put;

	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
				 sock->file->f_flags);
out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the local address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = security_socket_getsockname(sock);
	if (err)
		goto out_put;

	err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
	if (err)
		goto out_put;
	err = move_addr_to_user(&address, len, usockaddr, usockaddr_len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the remote address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getpeername(sock);
		if (err) {
			fput_light(sock->file, fput_needed);
			return err;
		}

		err =
		    sock->ops->getname(sock, (struct sockaddr *)&address, &len,
				       1);
		if (!err)
			err = move_addr_to_user(&address, len, usockaddr,
						usockaddr_len);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Send a datagram to a given address. We move the address into kernel
 *	space and check the user space data area is readable before invoking
 *	the protocol.
 */

SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
		unsigned int, flags, struct sockaddr __user *, addr,
		int, addr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err;
	struct msghdr msg;
	struct iovec iov;
	int fput_needed;

	if (len > INT_MAX)
		len = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	iov.iov_base = buff;
	iov.iov_len = len;
	msg.msg_name = NULL;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (addr) {
		err = move_addr_to_kernel(addr, addr_len, &address);
		if (err < 0)
			goto out_put;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = addr_len;
	}
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	msg.msg_flags = flags;
	err = sock_sendmsg(sock, &msg, len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Send a datagram down a socket.
 */

SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
		unsigned int, flags)
{
	return sys_sendto(fd, buff, len, flags, NULL, 0);
}

/*
 *	Receive a frame from the socket and optionally record the address of the
 *	sender. We verify the buffers are writable and if needed move the
 *	sender address from kernel to user space.
 */

SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned int, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	if (size > INT_MAX)
		size = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_iovlen = 1;
	msg.msg_iov = &iov;
	iov.iov_len = size;
	iov.iov_base = ubuf;
	/* Save some cycles and don't copy the address if not needed */
	msg.msg_name = addr ?
		       (struct sockaddr *)&address : NULL;
	/* We assume all kernel code knows the size of sockaddr_storage */
	msg.msg_namelen = 0;
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, size, flags);

	if (err >= 0 && addr != NULL) {
		err2 = move_addr_to_user(&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Receive a datagram from a socket.
 */

asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
			 unsigned int flags)
{
	return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}

/*
 *	Set a socket option. Because we don't know the option lengths we have
 *	to pass the user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	if (optlen < 0)
		return -EINVAL;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_setsockopt(sock, level, optname);
		if (err)
			goto out_put;

		/* SOL_SOCKET options are handled generically here; all
		 * other levels go to the protocol. */
		if (level == SOL_SOCKET)
			err =
			    sock_setsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->setsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Get a socket option. Because we don't know the option lengths we have
 *	to pass a user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int __user *, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getsockopt(sock, level, optname);
		if (err)
			goto out_put;

		if (level == SOL_SOCKET)
			err =
			    sock_getsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->getsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Shutdown a socket.
 */

SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_shutdown(sock, how);
		if (!err)
			err = sock->ops->shutdown(sock, how);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/* A couple of helpful macros for getting the address of the 32/64 bit
 * fields which are the same type (int / unsigned) on our platforms.
 */
#define COMPAT_MSG(msg, member)	((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
#define COMPAT_NAMELEN(msg)	COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg)	COMPAT_MSG(msg, msg_flags)

/* Last successfully-used destination, cached across a sendmmsg() batch
 * so repeated LSM checks for the same address can be skipped. */
struct used_address {
	struct sockaddr_storage name;
	unsigned int name_len;
};

/* Copy a user msghdr into the kernel, rejecting an msg_namelen larger
 * than sockaddr_storage before it can be used for an overflowing copy. */
static int copy_msghdr_from_user(struct msghdr *kmsg,
				 struct msghdr __user *umsg)
{
	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
		return -EFAULT;
	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
		return -EINVAL;
	return 0;
}

/* Core sendmsg/sendmmsg worker: copy in the msghdr, iovec and control
 * data (native or compat layout), then hand the message to sock_sendmsg().
 * (Definition continues in the next chunk of this file.) */
static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
			  struct msghdr *msg_sys, unsigned int flags,
			  struct used_address *used_address)
{
	struct compat_msghdr __user *msg_compat =
	    (struct compat_msghdr __user *)msg;
	struct sockaddr_storage address;
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	unsigned char ctl[sizeof(struct cmsghdr) + 20]
	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
	/* 20 is size of ipv6_pktinfo */
	unsigned char *ctl_buf = ctl;
	int err, ctl_len, total_len;

	err = -EFAULT;
	if (MSG_CMSG_COMPAT & flags) {
		if (get_compat_msghdr(msg_sys, msg_compat))
			return -EFAULT;
	} else {
		err = copy_msghdr_from_user(msg_sys, msg);
		if (err)
			return err;
	}

	/* Fall back to a heap iovec when the on-stack one is too small. */
	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
		err = -EMSGSIZE;
		if (msg_sys->msg_iovlen > UIO_MAXIOV)
			goto out;
		err = -ENOMEM;
		iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
			      GFP_KERNEL);
		if (!iov)
			goto out;
	}

	/* This will also move the address data into kernel space */
	if (MSG_CMSG_COMPAT & flags) {
		err =
		      verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ);
	} else
		err = verify_iovec(msg_sys, iov, &address, VERIFY_READ);
	if (err < 0)
		goto out_freeiov;
	total_len = err;

	err = -ENOBUFS;

	if (msg_sys->msg_controllen > INT_MAX)
		goto out_freeiov;
	ctl_len = msg_sys->msg_controllen;
	if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
		err =
		    cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
						     sizeof(ctl));
		if (err)
			goto out_freeiov;
		ctl_buf = msg_sys->msg_control;
		ctl_len = msg_sys->msg_controllen;
	} else if (ctl_len) {
		/* Control data larger than the on-stack buffer needs a
		 * socket-accounted heap allocation. */
		if (ctl_len > sizeof(ctl)) {
			ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
			if (ctl_buf == NULL)
				goto out_freeiov;
		}
		err = -EFAULT;
		/*
		 * Careful! Before this, msg_sys->msg_control contains a user
		 * pointer.  Afterwards, it will be a kernel pointer. Thus the
		 * compiler-assisted checking falls down on this.
		 */
		if (copy_from_user(ctl_buf,
				   (void __user __force *)msg_sys->msg_control,
				   ctl_len))
			goto out_freectl;
		msg_sys->msg_control = ctl_buf;
	}
	msg_sys->msg_flags = flags;

	if (sock->file->f_flags & O_NONBLOCK)
		msg_sys->msg_flags |= MSG_DONTWAIT;
	/*
	 * If this is sendmmsg() and current destination address is same as
	 * previously succeeded address, omit asking LSM's decision.
	 * used_address->name_len is initialized to UINT_MAX so that the first
	 * destination address never matches.
	 */
	if (used_address && msg_sys->msg_name &&
	    used_address->name_len == msg_sys->msg_namelen &&
	    !memcmp(&used_address->name, msg_sys->msg_name,
		    used_address->name_len)) {
		err = sock_sendmsg_nosec(sock, msg_sys, total_len);
		goto out_freectl;
	}
	err = sock_sendmsg(sock, msg_sys, total_len);
	/*
	 * If this is sendmmsg() and sending to current destination address was
	 * successful, remember it.
	 */
	if (used_address && err >= 0) {
		used_address->name_len = msg_sys->msg_namelen;
		if (msg_sys->msg_name)
			memcpy(&used_address->name, msg_sys->msg_name,
			       used_address->name_len);
	}

out_freectl:
	if (ctl_buf != ctl)
		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
	if (iov != iovstack)
		kfree(iov);
out:
	return err;
}

/*
 *	BSD sendmsg interface
 */

long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
	int fput_needed, err;
	struct msghdr msg_sys;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);

	fput_light(sock->file, fput_needed);
out:
	return err;
}

SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
{
	if (flags & MSG_CMSG_COMPAT)
		return -EINVAL;
	return __sys_sendmsg(fd, msg, flags);
}

/*
 *	Linux sendmmsg interface
 */

int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
		   unsigned int flags)
{
	int fput_needed, err, datagrams;
	struct socket *sock;
	struct mmsghdr __user *entry;
	struct compat_mmsghdr __user *compat_entry;
	struct msghdr msg_sys;
	struct used_address used_address;

	if (vlen > UIO_MAXIOV)
		vlen = UIO_MAXIOV;

	datagrams = 0;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		return err;

	/* UINT_MAX sentinel: first datagram never matches the cache. */
	used_address.name_len = UINT_MAX;
	entry = mmsg;
	compat_entry = (struct compat_mmsghdr __user *)mmsg;
	err = 0;

	while (datagrams < vlen) {
		if (MSG_CMSG_COMPAT & flags) {
			err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
					     &msg_sys, flags, &used_address);
			if (err < 0)
				break;
			err = __put_user(err, &compat_entry->msg_len);
			++compat_entry;
		} else {
			err = ___sys_sendmsg(sock,
					     (struct msghdr __user *)entry,
					     &msg_sys, flags, &used_address);
			if (err < 0)
				break;
			err = put_user(err, &entry->msg_len);
			++entry;
		}

		if (err)
			break;
		++datagrams;
	}

	fput_light(sock->file, fput_needed);

	/* We only return an error if no datagrams were able to be sent */
	if (datagrams != 0)
		return datagrams;

	return err;
}
SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_sendmmsg(fd, mmsg, vlen, flags); } static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else { err = copy_msghdr_from_user(msg_sys, msg); if (err) return err; } if (msg_sys->msg_iovlen > UIO_FASTIOV) { err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; err = -ENOMEM; iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), GFP_KERNEL); if (!iov) goto out; } /* Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); else err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); /* We assume all kernel code knows the size of sockaddr_storage */ msg_sys->msg_namelen = 0; if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? 
sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user(&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) kfree(iov); out: return err; } /* * BSD recvmsg interface */ long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_recvmsg(fd, msg, flags); } /* * Linux recvmmsg interface */ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct timespec end_time; if (timeout && poll_select_set_timeout(&end_time, timeout->tv_sec, timeout->tv_nsec)) return -EINVAL; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; err = sock_error(sock->sk); if (err) goto out_put; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; while (datagrams < vlen) { /* * No need to ask LSM for more than the first datagram. 
*/ if (MSG_CMSG_COMPAT & flags) { err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = ___sys_recvmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ if (flags & MSG_WAITFORONE) flags |= MSG_DONTWAIT; if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); if (timeout->tv_sec < 0) { timeout->tv_sec = timeout->tv_nsec = 0; break; } /* Timeout, return less than vlen datagrams */ if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) break; } /* Out of band data, return right away */ if (msg_sys.msg_flags & MSG_OOB) break; } out_put: fput_light(sock->file, fput_needed); if (err == 0) return datagrams; if (datagrams != 0) { /* * We may return less entries than requested (vlen) if the * sock is non block and there aren't enough datagrams... */ if (err != -EAGAIN) { /* * ... or if recvmsg returns an error after we * received some datagrams, where we record the * error to return on the next call or if the * app asks about it using getsockopt(SO_ERROR). 
*/ sock->sk->sk_err = -err; } return datagrams; } return err; } SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct timespec __user *, timeout) { int datagrams; struct timespec timeout_sys; if (flags & MSG_CMSG_COMPAT) return -EINVAL; if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) return -EFAULT; datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); if (datagrams > 0 && copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) datagrams = -EFAULT; return datagrams; } #ifdef __ARCH_WANT_SYS_SOCKETCALL /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL /* * System call vectors. * * Argument checking cleaned up. Saved 20% in size. * This function doesn't need to set the kernel lock because * it is set by the callees. */ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { unsigned long a[AUDITSC_ARGS]; unsigned long a0, a1; int err; unsigned int len; if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; len = nargs[call]; if (len > sizeof(a)) return -EINVAL; /* copy_from_user should be SMP safe. 
*/ if (copy_from_user(a, args, len)) return -EFAULT; err = audit_socketcall(nargs[call] / sizeof(unsigned long), a); if (err) return err; a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: err = sys_socket(a0, a1, a[2]); break; case SYS_BIND: err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_CONNECT: err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_LISTEN: err = sys_listen(a0, a1); break; case SYS_ACCEPT: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = sys_getsockname(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_GETPEERNAME: err = sys_getpeername(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_SOCKETPAIR: err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); break; case SYS_SEND: err = sys_send(a0, (void __user *)a1, a[2], a[3]); break; case SYS_SENDTO: err = sys_sendto(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_RECV: err = sys_recv(a0, (void __user *)a1, a[2], a[3]); break; case SYS_RECVFROM: err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], (int __user *)a[5]); break; case SYS_SHUTDOWN: err = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); break; case SYS_GETSOCKOPT: err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3], (int __user *)a[4]); break; case SYS_SENDMSG: err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], (struct timespec __user *)a[4]); break; case SYS_ACCEPT4: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], a[3]); break; default: err = -EINVAL; break; } 
return err; } #endif /* __ARCH_WANT_SYS_SOCKETCALL */ /** * sock_register - add a socket protocol handler * @ops: description of protocol * * This function is called by a protocol handler that wants to * advertise its address family, and have it linked into the * socket interface. The value ops->family coresponds to the * socket system call protocol family. */ int sock_register(const struct net_proto_family *ops) { int err; if (ops->family >= NPROTO) { printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family, NPROTO); return -ENOBUFS; } spin_lock(&net_family_lock); if (rcu_dereference_protected(net_families[ops->family], lockdep_is_held(&net_family_lock))) err = -EEXIST; else { rcu_assign_pointer(net_families[ops->family], ops); err = 0; } spin_unlock(&net_family_lock); printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); return err; } EXPORT_SYMBOL(sock_register); /** * sock_unregister - remove a protocol handler * @family: protocol family to remove * * This function is called by a protocol handler that wants to * remove its address family, and have it unlinked from the * new socket creation. * * If protocol handler is a module, then it can use module reference * counts to protect against new references. If protocol handler is not * a module then it needs to provide its own protection in * the ops->create routine. */ void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); RCU_INIT_POINTER(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); } EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; /* * Initialize the network sysctl infrastructure. */ err = net_sysctl_init(); if (err) goto out; /* * Initialize skbuff SLAB cache */ skb_init(); /* * Initialize the protocols module. 
*/ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER err = netfilter_init(); if (err) goto out; #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING skb_timestamping_init(); #endif out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; } core_initcall(sock_init); /* early initcall */ #ifdef CONFIG_PROC_FS void socket_seq_show(struct seq_file *seq) { int cpu; int counter = 0; for_each_possible_cpu(cpu) counter += per_cpu(sockets_in_use, cpu); /* It can be negative, by the way. 8) */ if (counter < 0) counter = 0; seq_printf(seq, "sockets: used %d\n", counter); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) err = compat_put_timeval(&ktv, up); return err; } static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) err = compat_put_timespec(&kts, up); return err; } static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(struct ifreq)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; err = dev_ioctl(net, SIOCGIFNAME, uifr); if (err) return err; if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) return -EFAULT; return 0; } static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) { struct compat_ifconf 
ifc32; struct ifconf ifc; struct ifconf __user *uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. 
*/ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_SRXCLSRLINS: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. 
*/ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void __user *)(&rxnfc->fs.m_ext + 1) - (void __user *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void __user *)(&rxnfc->fs.location + 1) - (void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void __user *)(&rxnfc->fs.m_ext + 1) - (const void __user *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void __user *)(&rxnfc->fs.location + 1) - (const void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
*/ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, uifr); default: return -ENOIOCTLCMD; } } static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (__get_user(data32, 
&u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); /* Don't check these user accesses, just let that get trapped * in the ioctl handler instead. */ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= 
put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_data)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_data)) return -EFAULT; return dev_ioctl(net, SIOCSHWTSTAMP, uifr); } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! 
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= get_user(r4.rt_window, &(ur4->rt_window)); ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return ret; } /* Since old style 
bridge ioctl's endup using SIOCDEVPRIVATE * for some operations; this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: 
case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } EXPORT_SYMBOL(kernel_accept); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags) { return sock->ops->connect(sock, addr, addrlen, flags); } EXPORT_SYMBOL(kernel_connect); int kernel_getsockname(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, 
addr, addrlen, 0); } EXPORT_SYMBOL(kernel_getsockname); int kernel_getpeername(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 1); } EXPORT_SYMBOL(kernel_getpeername); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int __user *uoptlen; int err; uoptval = (char __user __force *) optval; uoptlen = (int __user __force *) optlen; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, uoptval, uoptlen); else err = sock->ops->getsockopt(sock, level, optname, uoptval, uoptlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_getsockopt); int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, unsigned int optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int err; uoptval = (char __user __force *) optval; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, uoptval, optlen); else err = sock->ops->setsockopt(sock, level, optname, uoptval, optlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_setsockopt); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { if (sock->ops->sendpage) return sock->ops->sendpage(sock, page, offset, size, flags); return sock_no_sendpage(sock, page, offset, size, flags); } EXPORT_SYMBOL(kernel_sendpage); int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) { mm_segment_t oldfs = get_fs(); int err; set_fs(KERNEL_DS); err = sock->ops->ioctl(sock, cmd, arg); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_sock_ioctl); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) { return sock->ops->shutdown(sock, how); } EXPORT_SYMBOL(kernel_sock_shutdown);
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; /* Save some cycles and don't copy the address if not needed */ msg.msg_name = addr ? (struct sockaddr *)&address : NULL; /* We assume all kernel code knows the size of sockaddr_storage */ msg.msg_namelen = 0; if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
{'added': [(1843, "\t/* Save some cycles and don't copy the address if not needed */"), (1844, '\tmsg.msg_name = addr ? (struct sockaddr *)&address : NULL;'), (1845, '\t/* We assume all kernel code knows the size of sockaddr_storage */'), (1846, '\tmsg.msg_namelen = 0;'), (2226, '\t/* Save the user-mode address (verify_iovec will change the'), (2227, '\t * kernel msghdr to use the kernel address space)'), (2231, '\tif (MSG_CMSG_COMPAT & flags)'), (2233, '\telse'), (2242, '\t/* We assume all kernel code knows the size of sockaddr_storage */'), (2243, '\tmsg_sys->msg_namelen = 0;'), (2244, '')], 'deleted': [(1843, '\tmsg.msg_name = (struct sockaddr *)&address;'), (1844, '\tmsg.msg_namelen = sizeof(address);'), (2224, '\t/*'), (2225, '\t * Save the user-mode address (verify_iovec will change the'), (2226, '\t * kernel msghdr to use the kernel address space)'), (2228, ''), (2231, '\tif (MSG_CMSG_COMPAT & flags) {'), (2233, '\t} else')]}
11
8
2,499
15,842
36
232
7
https://github.com/torvalds/linux
CVE-2013-7266
CWE-20
2,899
string-data-inl.h
C++
HPHP::StringData::setSize
/* +----------------------------------------------------------------------+ | HipHop for PHP | +----------------------------------------------------------------------+ | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ */ #pragma once namespace HPHP { ////////////////////////////////////////////////////////////////////// // CopyString inline StringData* StringData::Make(folly::StringPiece s) { return Make(s.begin(), s.size(), CopyString); } inline StringData* StringData::Make(const char* data, CopyStringMode) { return Make(data, strlen(data), CopyString); } ////////////////////////////////////////////////////////////////////// // AttachString inline StringData* StringData::Make(char* data, AttachStringMode) { SCOPE_EXIT { free(data); }; return Make(data, CopyString); } ////////////////////////////////////////////////////////////////////// // Concat creation inline StringData* StringData::Make(const StringData* s1, folly::StringPiece s2) { return Make(s1->slice(), s2); } inline StringData* StringData::Make(const StringData* s1, const char* lit2) { return Make(s1->slice(), lit2); } ////////////////////////////////////////////////////////////////////// inline folly::StringPiece StringData::slice() const { return folly::StringPiece{data(), m_len}; } inline folly::MutableStringPiece StringData::bufferSlice() { assertx(!isImmutable()); return folly::MutableStringPiece{mutableData(), 
capacity()}; } inline void StringData::invalidateHash() { assertx(!isImmutable()); assertx(!hasMultipleRefs()); m_hash = 0; assertx(checkSane()); } inline void StringData::setSize(int len) { assertx(!isImmutable() && !hasMultipleRefs()); assertx(len >= 0 && len <= capacity()); mutableData()[len] = 0; m_lenAndHash = len; assertx(m_hash == 0); assertx(checkSane()); } inline void StringData::checkStack() const { assertx(uintptr_t(this) - s_stackLimit >= s_stackSize); } inline const char* StringData::data() const { // TODO: t1800106: re-enable this assert // assertx(data()[size()] == 0); // all strings must be null-terminated #ifdef NO_M_DATA return reinterpret_cast<const char*>(this + 1); #else return m_data; #endif } inline char* StringData::mutableData() const { assertx(!isImmutable()); return const_cast<char*>(data()); } inline int StringData::size() const { return m_len; } inline bool StringData::empty() const { return size() == 0; } inline uint32_t StringData::capacity() const { assertx(isRefCounted()); return kSizeIndex2StringCapacity[m_aux16 & 0xff]; } inline size_t StringData::heapSize() const { return isFlat() ? isRefCounted() ? MemoryManager::sizeIndex2Size(m_aux16) : size() + kStringOverhead : sizeof(StringData) + sizeof(Proxy); } inline size_t StringData::estimateCap(size_t size) { assertx(size <= MaxSize); return MemoryManager::sizeClass(size + kStringOverhead); } inline bool StringData::isStrictlyInteger(int64_t& res) const { // Exploit the NUL terminator and unsigned comparison. This single comparison // checks whether the string is empty or if the first byte is greater than '9' // or less than '-'. Note that '-' == 45 and '0' == 48, which makes this // valid. (46 == '.' and 47 == '/', so if one of those is the first byte, this // check will be a false positive, but it will still be caught later.) 
if ((unsigned char)(data()[0] - '-') > ('9' - '-')) { return false; } if (m_hash < 0) return false; auto const s = slice(); return is_strictly_integer(s.data(), s.size(), res); } inline bool StringData::isZero() const { return size() == 1 && data()[0] == '0'; } inline StringData* StringData::modifyChar(int offset, char c) { assertx(offset >= 0 && offset < size()); assertx(!hasMultipleRefs()); auto const sd = isProxy() ? escalate(size()) : this; sd->mutableData()[offset] = c; sd->m_hash = 0; return sd; } inline strhash_t StringData::hash_unsafe(const char* s, size_t len) { return hash_string_i_unsafe(s, len); } inline strhash_t StringData::hash(const char* s, size_t len) { return hash_string_i(s, len); } inline strhash_t StringData::hash() const { strhash_t h = m_hash & STRHASH_MASK; return h ? h : hashHelper(); } inline strhash_t StringData::hashStatic() const { assertx(isStatic()); const strhash_t h = m_hash & STRHASH_MASK; assertx(h); return h; } inline bool StringData::same(const StringData* s) const { assertx(s); if (m_len != s->m_len) return false; // The underlying buffer and its length are 8-byte aligned, ensured by // StringData layout, req::malloc, or malloc. So compare words. 
assertx(uintptr_t(data()) % 8 == 0); assertx(uintptr_t(s->data()) % 8 == 0); return wordsame(data(), s->data(), m_len); } inline bool StringData::isame(const StringData* s) const { assertx(s); if (this == s) return true; if (m_len != s->m_len) return false; return bstrcaseeq(data(), s->data(), m_len); } ////////////////////////////////////////////////////////////////////// inline const void* StringData::payload() const { return this + 1; } inline void* StringData::payload() { return this + 1; } inline const StringData::Proxy* StringData::proxy() const { return static_cast<const Proxy*>(payload()); } inline StringData::Proxy* StringData::proxy() { return static_cast<Proxy*>(payload()); } #ifndef NO_M_DATA inline bool StringData::isFlat() const { return m_data == payload(); } inline bool StringData::isProxy() const { return m_data != payload(); } #endif inline bool StringData::isImmutable() const { return !isRefCounted() || isProxy(); } ////////////////////////////////////////////////////////////////////// ALWAYS_INLINE void decRefStr(StringData* s) { s->decRefAndRelease(); } struct string_data_hash { size_t operator()(const StringData *s) const { return s->hash(); } }; struct string_data_same { bool operator()(const StringData *s1, const StringData *s2) const { assertx(s1 && s2); return s1->same(s2); } }; struct string_data_eq_same { bool operator()(const StringData* a, const StringData* b) const { return a == b || a->same(b); } }; struct string_data_isame { bool operator()(const StringData *s1, const StringData *s2) const { assertx(s1 && s2); return s1->isame(s2); } }; struct string_data_lt { bool operator()(const StringData *s1, const StringData *s2) const { int len1 = s1->size(); int len2 = s2->size(); if (len1 < len2) { return (len1 == 0) || (memcmp(s1->data(), s2->data(), len1) <= 0); } else if (len1 == len2) { return (len1 != 0) && (memcmp(s1->data(), s2->data(), len1) < 0); } else /* len1 > len2 */ { return ((len2 != 0) && (memcmp(s1->data(), s2->data(), 
len2) < 0)); } } }; struct string_data_lti { bool operator()(const StringData *s1, const StringData *s2) const { return bstrcasecmp(s1->data(), s1->size(), s2->data(), s2->size()) < 0; } }; ////////////////////////////////////////////////////////////////////// }
/* +----------------------------------------------------------------------+ | HipHop for PHP | +----------------------------------------------------------------------+ | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ */ #pragma once namespace HPHP { ////////////////////////////////////////////////////////////////////// // CopyString inline StringData* StringData::Make(folly::StringPiece s) { return Make(s.begin(), s.size(), CopyString); } inline StringData* StringData::Make(const char* data, CopyStringMode) { return Make(data, strlen(data), CopyString); } ////////////////////////////////////////////////////////////////////// // AttachString inline StringData* StringData::Make(char* data, AttachStringMode) { SCOPE_EXIT { free(data); }; return Make(data, CopyString); } ////////////////////////////////////////////////////////////////////// // Concat creation inline StringData* StringData::Make(const StringData* s1, folly::StringPiece s2) { return Make(s1->slice(), s2); } inline StringData* StringData::Make(const StringData* s1, const char* lit2) { return Make(s1->slice(), lit2); } ////////////////////////////////////////////////////////////////////// inline folly::StringPiece StringData::slice() const { return folly::StringPiece{data(), m_len}; } inline folly::MutableStringPiece StringData::bufferSlice() { assertx(!isImmutable()); return folly::MutableStringPiece{mutableData(), 
capacity()}; } inline void StringData::invalidateHash() { assertx(!isImmutable()); assertx(!hasMultipleRefs()); m_hash = 0; assertx(checkSane()); } inline void StringData::setSize(int64_t len) { assertx(!isImmutable() && !hasMultipleRefs()); assertx(len >= 0 && len <= capacity()); mutableData()[len] = 0; m_lenAndHash = len; assertx(m_hash == 0); assertx(checkSane()); } inline void StringData::checkStack() const { assertx(uintptr_t(this) - s_stackLimit >= s_stackSize); } inline const char* StringData::data() const { // TODO: t1800106: re-enable this assert // assertx(data()[size()] == 0); // all strings must be null-terminated #ifdef NO_M_DATA return reinterpret_cast<const char*>(this + 1); #else return m_data; #endif } inline char* StringData::mutableData() const { assertx(!isImmutable()); return const_cast<char*>(data()); } inline int64_t StringData::size() const { return m_len; } inline bool StringData::empty() const { return size() == 0; } inline uint32_t StringData::capacity() const { assertx(isRefCounted()); return kSizeIndex2StringCapacity[m_aux16 & 0xff]; } inline size_t StringData::heapSize() const { return isFlat() ? isRefCounted() ? MemoryManager::sizeIndex2Size(m_aux16) : size() + kStringOverhead : sizeof(StringData) + sizeof(Proxy); } inline size_t StringData::estimateCap(size_t size) { assertx(size <= MaxSize); return MemoryManager::sizeClass(size + kStringOverhead); } inline bool StringData::isStrictlyInteger(int64_t& res) const { // Exploit the NUL terminator and unsigned comparison. This single comparison // checks whether the string is empty or if the first byte is greater than '9' // or less than '-'. Note that '-' == 45 and '0' == 48, which makes this // valid. (46 == '.' and 47 == '/', so if one of those is the first byte, this // check will be a false positive, but it will still be caught later.) 
if ((unsigned char)(data()[0] - '-') > ('9' - '-')) { return false; } if (m_hash < 0) return false; auto const s = slice(); return is_strictly_integer(s.data(), s.size(), res); } inline bool StringData::isZero() const { return size() == 1 && data()[0] == '0'; } inline StringData* StringData::modifyChar(int offset, char c) { assertx(offset >= 0 && offset < size()); assertx(!hasMultipleRefs()); auto const sd = isProxy() ? escalate(size()) : this; sd->mutableData()[offset] = c; sd->m_hash = 0; return sd; } inline strhash_t StringData::hash_unsafe(const char* s, size_t len) { return hash_string_i_unsafe(s, len); } inline strhash_t StringData::hash(const char* s, size_t len) { return hash_string_i(s, len); } inline strhash_t StringData::hash() const { strhash_t h = m_hash & STRHASH_MASK; return h ? h : hashHelper(); } inline strhash_t StringData::hashStatic() const { assertx(isStatic()); const strhash_t h = m_hash & STRHASH_MASK; assertx(h); return h; } inline bool StringData::same(const StringData* s) const { assertx(s); if (m_len != s->m_len) return false; // The underlying buffer and its length are 8-byte aligned, ensured by // StringData layout, req::malloc, or malloc. So compare words. 
assertx(uintptr_t(data()) % 8 == 0); assertx(uintptr_t(s->data()) % 8 == 0); return wordsame(data(), s->data(), m_len); } inline bool StringData::isame(const StringData* s) const { assertx(s); if (this == s) return true; if (m_len != s->m_len) return false; return bstrcaseeq(data(), s->data(), m_len); } ////////////////////////////////////////////////////////////////////// inline const void* StringData::payload() const { return this + 1; } inline void* StringData::payload() { return this + 1; } inline const StringData::Proxy* StringData::proxy() const { return static_cast<const Proxy*>(payload()); } inline StringData::Proxy* StringData::proxy() { return static_cast<Proxy*>(payload()); } #ifndef NO_M_DATA inline bool StringData::isFlat() const { return m_data == payload(); } inline bool StringData::isProxy() const { return m_data != payload(); } #endif inline bool StringData::isImmutable() const { return !isRefCounted() || isProxy(); } ////////////////////////////////////////////////////////////////////// ALWAYS_INLINE void decRefStr(StringData* s) { s->decRefAndRelease(); } struct string_data_hash { size_t operator()(const StringData *s) const { return s->hash(); } }; struct string_data_same { bool operator()(const StringData *s1, const StringData *s2) const { assertx(s1 && s2); return s1->same(s2); } }; struct string_data_eq_same { bool operator()(const StringData* a, const StringData* b) const { return a == b || a->same(b); } }; struct string_data_isame { bool operator()(const StringData *s1, const StringData *s2) const { assertx(s1 && s2); return s1->isame(s2); } }; struct string_data_lt { bool operator()(const StringData *s1, const StringData *s2) const { int len1 = s1->size(); int len2 = s2->size(); if (len1 < len2) { return (len1 == 0) || (memcmp(s1->data(), s2->data(), len1) <= 0); } else if (len1 == len2) { return (len1 != 0) && (memcmp(s1->data(), s2->data(), len1) < 0); } else /* len1 > len2 */ { return ((len2 != 0) && (memcmp(s1->data(), s2->data(), 
len2) < 0)); } } }; struct string_data_lti { bool operator()(const StringData *s1, const StringData *s2) const { return bstrcasecmp(s1->data(), s1->size(), s2->data(), s2->size()) < 0; } }; ////////////////////////////////////////////////////////////////////// }
inline void StringData::setSize(int len) { assertx(!isImmutable() && !hasMultipleRefs()); assertx(len >= 0 && len <= capacity()); mutableData()[len] = 0; m_lenAndHash = len; assertx(m_hash == 0); assertx(checkSane()); }
inline void StringData::setSize(int64_t len) { assertx(!isImmutable() && !hasMultipleRefs()); assertx(len >= 0 && len <= capacity()); mutableData()[len] = 0; m_lenAndHash = len; assertx(m_hash == 0); assertx(checkSane()); }
{'added': [(69, 'inline void StringData::setSize(int64_t len) {'), (97, 'inline int64_t StringData::size() const { return m_len; }')], 'deleted': [(69, 'inline void StringData::setSize(int len) {'), (97, 'inline int StringData::size() const { return m_len; }'), (258, '')]}
2
3
172
1,424
8
62
3
https://github.com/facebook/hhvm
CVE-2020-1917
CWE-787
3,169
bin_xnu_kernelcache.c
C
load_buffer
/* radare2 - LGPL - Copyright 2019-2022 - mrmacete */ #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_bin.h> #include <r_core.h> #include <r_syscall.h> #define R_BIN_MACH064 1 #include "../format/mach0/mach0.h" #include "../format/xnu/r_cf_dict.h" #include "../format/xnu/mig_index.h" #include "../format/mach0/mach064_is_kernelcache.c" typedef bool (*ROnRebaseFunc) (ut64 offset, ut64 decorated_addr, void *user_data); typedef struct _RKernelCacheObj { RBuffer *cache_buf; RCFValueDict *prelink_info; ut64 pa2va_exec; ut64 pa2va_data; struct _RKextIndex *kexts; struct MACH0_(obj_t) *mach0; struct _RRebaseInfo *rebase_info; int (*original_io_read)(RIO *io, RIODesc *fd, ut8 *buf, int count); bool rebase_info_populated; bool rebasing_buffer; bool kexts_initialized; } RKernelCacheObj; typedef struct _RFileRange { ut64 offset; ut64 size; } RFileRange; typedef struct _RPrelinkRange { RFileRange range; ut64 pa2va_exec; ut64 pa2va_data; } RPrelinkRange; typedef struct _RStubsInfo { RFileRange got; RFileRange stubs; ut64 got_addr; } RStubsInfo; typedef struct _RKext { RFileRange range; RFileRange text_range; char *name; ut64 mod_info; ut64 vaddr; struct MACH0_(obj_t) *mach0; bool own_name; ut64 pa2va_exec; ut64 pa2va_data; } RKext; typedef struct _RKextIndex { ut64 length; RKext **entries; } RKextIndex; typedef struct _RRebaseInfo { RFileRange *ranges; ut64 n_ranges; ut64 multiplier; ut64 kernel_base; } RRebaseInfo; typedef struct _RRebaseCtx { ut64 off, eob; ut8 *buf; int count; RKernelCacheObj *obj; } RRebaseCtx; typedef struct _RParsedPointer { ut64 address; } RParsedPointer; typedef struct _RKmodInfo { char name[0x41]; ut64 start; } RKmodInfo; #define KEXT_SHORT_NAME_FROM_SECTION(io_section) ({\ char *result = NULL;\ char *clone = strdup (io_section->name);\ char *cursor = strstr (clone, "__");\ if (cursor) {\ cursor--;\ *cursor = 0;\ cursor--;\ cursor = strrchr (cursor, '.');\ if (cursor) {\ *cursor = 0;\ cursor = strrchr (cursor, '.');\ if (cursor) 
{\ result = strdup (cursor + 1);\ R_FREE (clone);\ }\ }\ }\ result ? result : clone;\ }) #define KEXT_INFER_VSIZE(index, i)\ ((i+1 < index->length) ? index->entries[i+1]->vaddr - index->entries[i]->vaddr : UT64_MAX) #define KEXT_INFER_PSIZE(index, i)\ ((i+1 < index->length) ? index->entries[i+1]->range.offset - index->entries[i]->range.offset : UT64_MAX) #define R_K_CONSTRUCTOR_TO_ENTRY 0 #define R_K_CONSTRUCTOR_TO_SYMBOL 1 #define K_PPTR(p) p_ptr (p, obj) #define K_RPTR(buf) r_ptr (buf, obj) #define IS_PTR_AUTH(x) ((x & (1ULL << 63)) != 0) #define IS_PTR_BIND(x) ((x & (1ULL << 62)) != 0) static ut64 p_ptr(ut64 decorated_addr, RKernelCacheObj *obj); static ut64 r_ptr(ut8 *buf, RKernelCacheObj *obj); static RRebaseInfo *r_rebase_info_new_from_mach0(RBuffer *cache_buf, struct MACH0_(obj_t) *mach0); static void r_rebase_info_free(RRebaseInfo *info); static void r_rebase_info_populate(RRebaseInfo *info, RKernelCacheObj *obj); static ut64 iterate_rebase_list(RBuffer *cache_buf, ut64 multiplier, ut64 start_offset, ROnRebaseFunc func, void *user_data); static ut64 r_rebase_offset_to_paddr(RKernelCacheObj *obj, struct section_t *sections, ut64 offset); static void swizzle_io_read(RKernelCacheObj *obj, RIO *io); static int kernelcache_io_read(RIO *io, RIODesc *fd, ut8 *buf, int count); static bool r_parse_pointer(RParsedPointer *ptr, ut64 decorated_addr, RKernelCacheObj *obj); static bool on_rebase_pointer(ut64 offset, ut64 decorated_addr, RRebaseCtx *ctx); static void rebase_buffer(RKernelCacheObj *obj, ut64 off, RIODesc *fd, ut8 *buf, int count); static void rebase_buffer_fixup(RKernelCacheObj *kobj, ut64 off, RIODesc *fd, ut8 *buf, int count); static RPrelinkRange *get_prelink_info_range_from_mach0(struct MACH0_(obj_t) *mach0); static RList *filter_kexts(RKernelCacheObj *obj, RBinFile *bf); static RList *carve_kexts(RKernelCacheObj *obj, RBinFile *bf); static RList *kexts_from_load_commands(RKernelCacheObj *obj, RBinFile *bf); static void sections_from_mach0(RList *ret, 
struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, char *prefix, RKernelCacheObj *obj); static void handle_data_sections(RBinSection *sect); static void symbols_from_mach0(RList *ret, struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, int ordinal); static RList *resolve_syscalls(RKernelCacheObj *obj, ut64 enosys_addr); static RList *resolve_mig_subsystem(RKernelCacheObj *obj); static void symbols_from_stubs(RList *ret, HtPP *kernel_syms_by_addr, RKernelCacheObj *obj, RBinFile *bf, RKext *kext, int ordinal); static RStubsInfo *get_stubs_info(struct MACH0_(obj_t) *mach0, ut64 paddr, RKernelCacheObj *obj); static int prot2perm(int x); static void r_kext_free(RKext *kext); static void r_kext_fill_text_range(RKext *kext); static int kexts_sort_vaddr_func(const void *a, const void *b); static struct MACH0_(obj_t) *create_kext_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf); static struct MACH0_(obj_t) *create_kext_shared_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf); #define r_kext_index_foreach(index, i, item)\ if (index)\ for (i = 0; i < index->length && (item = index->entries[i], 1); i++) static RKextIndex *r_kext_index_new(RList *kexts); static void r_kext_index_free(RKextIndex *index); static RKext *r_kext_index_vget(RKextIndex *index, ut64 vaddr); static void process_kmod_init_term(RKernelCacheObj *obj, RKext *kext, RList *ret, ut64 **inits, ut64 **terms); static void create_initterm_syms(RKext *kext, RList *ret, int type, ut64 *pointers); static void process_constructors(RKernelCacheObj *obj, struct MACH0_(obj_t) *mach0, RList *ret, ut64 paddr, bool is_first, int mode, const char *prefix); static RBinAddr *newEntry(ut64 haddr, ut64 vaddr, int type); static void ensure_kexts_initialized(RKernelCacheObj *obj, RBinFile *bf); static void r_kernel_cache_free(RKernelCacheObj *obj); static R_TH_LOCAL RList *pending_bin_files = NULL; static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) { RBuffer *fbuf = 
r_buf_ref (buf); struct MACH0_(opts_t) opts; MACH0_(opts_set_default) (&opts, bf); struct MACH0_(obj_t) *main_mach0 = MACH0_(new_buf) (fbuf, &opts); if (!main_mach0) { return false; } RRebaseInfo *rebase_info = r_rebase_info_new_from_mach0 (fbuf, main_mach0); RKernelCacheObj *obj = NULL; RPrelinkRange *prelink_range = get_prelink_info_range_from_mach0 (main_mach0); if (!prelink_range) { goto beach; } obj = R_NEW0 (RKernelCacheObj); if (!obj) { R_FREE (prelink_range); goto beach; } RCFValueDict *prelink_info = NULL; if (main_mach0->hdr.filetype != MH_FILESET && prelink_range->range.size) { prelink_info = r_cf_value_dict_parse (fbuf, prelink_range->range.offset, prelink_range->range.size, R_CF_OPTION_SKIP_NSDATA); if (!prelink_info) { R_FREE (prelink_range); R_FREE (obj); goto beach; } } if (!pending_bin_files) { pending_bin_files = r_list_new (); if (!pending_bin_files) { R_FREE (prelink_range); R_FREE (obj); R_FREE (prelink_info); goto beach; } } obj->mach0 = main_mach0; obj->rebase_info = rebase_info; obj->prelink_info = prelink_info; obj->cache_buf = fbuf; obj->pa2va_exec = prelink_range->pa2va_exec; obj->pa2va_data = prelink_range->pa2va_data; R_FREE (prelink_range); *bin_obj = obj; r_list_push (pending_bin_files, bf); if (rebase_info || main_mach0->chained_starts) { RIO *io = bf->rbin->iob.io; swizzle_io_read (obj, io); } return true; beach: r_buf_free (fbuf); obj->cache_buf = NULL; MACH0_(mach0_free) (main_mach0); return false; } static void ensure_kexts_initialized(RKernelCacheObj *obj, RBinFile *bf) { if (obj->kexts_initialized) { return; } obj->kexts_initialized = true; RList *kexts = NULL; if (obj->prelink_info) { kexts = filter_kexts (obj, bf); } if (kexts && !r_list_length (kexts)) { r_list_free (kexts); kexts = NULL; } if (!kexts) { kexts = kexts_from_load_commands (obj, bf); } if (kexts && !r_list_length (kexts)) { r_list_free (kexts); kexts = NULL; } if (!kexts) { kexts = carve_kexts (obj, bf); } obj->kexts = r_kext_index_new (kexts); if (kexts) { 
kexts->free = NULL; r_list_free (kexts); } } static RPrelinkRange *get_prelink_info_range_from_mach0(struct MACH0_(obj_t) *mach0) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return NULL; } RPrelinkRange *prelink_range = R_NEW0 (RPrelinkRange); if (!prelink_range) { R_FREE (sections); return NULL; } int incomplete = 3; int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__PRELINK_INFO.__info")) { prelink_range->range.offset = sections[i].offset; prelink_range->range.size = sections[i].size; if (!--incomplete) { break; } } if (strstr (sections[i].name, "__PRELINK_TEXT.__text")) { prelink_range->pa2va_exec = sections[i].addr - sections[i].offset; if (!--incomplete) { break; } } if (strstr (sections[i].name, "__PRELINK_DATA.__data")) { prelink_range->pa2va_data = sections[i].addr - sections[i].offset; if (!--incomplete) { break; } } } R_FREE (sections); if (incomplete == 1 && !prelink_range->pa2va_data) { struct MACH0_(segment_command) *seg; int nsegs = R_MIN (mach0->nsegs, 128); size_t i; for (i = 0; i < nsegs; i++) { seg = &mach0->segs[i]; if (!strcmp (seg->segname, "__DATA")) { prelink_range->pa2va_data = seg->vmaddr - seg->fileoff; incomplete--; break; } } } if (incomplete) { R_FREE (prelink_range); } return prelink_range; } static RList *filter_kexts(RKernelCacheObj *obj, RBinFile *bf) { RCFValueArray *kext_array = NULL; RListIter *iter; RCFKeyValue *item; r_list_foreach (obj->prelink_info->pairs, iter, item) { if (!strcmp (item->key, "_PrelinkInfoDictionary")) { kext_array = (RCFValueArray*) item->value; break; } } if (!kext_array) { return NULL; } RList *kexts = r_list_newf ((RListFree) &r_kext_free); if (!kexts) { return NULL; } bool is_sorted = true; RKext *prev_kext = NULL; RCFValueDict *kext_item; r_list_foreach (kext_array->values, iter, kext_item) { RKext *kext = R_NEW0 (RKext); if (!kext) { R_FREE (kexts); return NULL; } int kext_incomplete = 5; RListIter *internal_iter; r_list_foreach 
(kext_item->pairs, internal_iter, item) { if (!strcmp (item->key, "CFBundlePackageType")) { if (item->value->type != R_CF_STRING) { break; } RCFValueString *type = (RCFValueString*) item->value; if (strcmp (type->value, "KEXT")) { break; } kext_incomplete--; } if (!strcmp (item->key, "_PrelinkExecutableLoadAddr")) { if (item->value->type == R_CF_INTEGER) { kext_incomplete--; kext->vaddr = ((RCFValueInteger*) item->value)->value; kext->range.offset = kext->vaddr - obj->pa2va_exec; } } if (!strcmp (item->key, "_PrelinkExecutableSize")) { kext_incomplete--; if (item->value->type == R_CF_INTEGER) { kext->range.size = ((RCFValueInteger*) item->value)->value; } else { kext->range.size = 0; } } if (!strcmp (item->key, "_PrelinkKmodInfo")) { if (item->value->type == R_CF_INTEGER) { kext_incomplete--; kext->mod_info = ((RCFValueInteger*) item->value)->value; kext->mod_info -= obj->pa2va_data; } } if (!strcmp (item->key, "CFBundleIdentifier")) { if (item->value->type == R_CF_STRING) { kext_incomplete--; kext->name = ((RCFValueString*) item->value)->value; } } } if (kext_incomplete) { r_kext_free (kext); continue; } if (prev_kext && kext->vaddr < prev_kext->vaddr) { is_sorted = false; } prev_kext = kext; kext->mach0 = create_kext_mach0 (obj, kext, bf); if (!kext->mach0) { r_kext_free (kext); continue; } r_kext_fill_text_range (kext); r_list_push (kexts, kext); } if (!is_sorted) { eprintf ("SORTING KEXTs...\n"); r_list_sort (kexts, kexts_sort_vaddr_func); } return kexts; } static ut64 p_ptr(ut64 decorated_addr, RKernelCacheObj *obj) { RParsedPointer ptr; r_parse_pointer (&ptr, decorated_addr, obj); return ptr.address; } static ut64 r_ptr(ut8 *buf, RKernelCacheObj *obj) { ut64 decorated_addr = r_read_le64 (buf); return K_PPTR (decorated_addr); } static RList *carve_kexts(RKernelCacheObj *obj, RBinFile *bf) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return NULL; } ut64 pa2va_exec = 0; ut64 pa2va_data = 0; ut64 kmod_start = 0, 
kmod_end = 0; ut64 kmod_info = 0, kmod_info_end = 0; int incomplete = 4; RKmodInfo *all_infos = NULL; int i = 0; for (; !sections[i].last && incomplete > 0; i++) { if (strstr (sections[i].name, "__TEXT_EXEC.__text")) { pa2va_exec = sections[i].addr - sections[i].offset; incomplete--; } if (strstr (sections[i].name, "__DATA.__data")) { pa2va_data = sections[i].addr - sections[i].offset; incomplete--; } if (strstr (sections[i].name, "__PRELINK_INFO.__kmod_start")) { kmod_start = sections[i].offset; kmod_end = kmod_start + sections[i].size; incomplete--; } if (strstr (sections[i].name, "__PRELINK_INFO.__kmod_info")) { kmod_info = sections[i].offset; kmod_info_end = kmod_info + sections[i].size; incomplete--; } } R_FREE (sections); if (incomplete) { return NULL; } RList *kexts = r_list_newf ((RListFree) &r_kext_free); if (!kexts) { return NULL; } int n_kmod_info = (kmod_info_end - kmod_info) / 8; if (n_kmod_info == 0) { goto beach; } all_infos = R_NEWS0 (RKmodInfo, n_kmod_info); if (!all_infos) { goto beach; } ut8 bytes[8]; int j = 0; for (; j < n_kmod_info; j++) { ut64 entry_offset = j * 8 + kmod_info; if (r_buf_read_at (obj->cache_buf, entry_offset, bytes, 8) < 8) { goto beach; } ut64 kmod_info_paddr = K_RPTR (bytes) - pa2va_data; ut64 field_name = kmod_info_paddr + 0x10; ut64 field_start = kmod_info_paddr + 0xb4; if (r_buf_read_at (obj->cache_buf, field_start, bytes, 8) < 8) { goto beach; } all_infos[j].start = K_RPTR (bytes); if (r_buf_read_at (obj->cache_buf, field_name, (ut8 *) all_infos[j].name, 0x40) < 0x40) { goto beach; } all_infos[j].name[0x40] = 0; } ut64 cursor = kmod_start; for(; cursor < kmod_end; cursor += 8) { ut8 bytes[8]; if (r_buf_read_at (obj->cache_buf, cursor, bytes, 8) < 8) { goto beach; } RKext *kext = R_NEW0 (RKext); if (!kext) { goto beach; } kext->vaddr = K_RPTR (bytes); kext->range.offset = kext->vaddr - pa2va_exec; kext->mach0 = create_kext_mach0 (obj, kext, bf); if (!kext->mach0) { r_kext_free (kext); continue; } r_kext_fill_text_range 
(kext); kext->vaddr = K_PPTR (kext->vaddr); kext->pa2va_exec = pa2va_exec; kext->pa2va_data = pa2va_data; ut64 text_start = kext->vaddr; ut64 text_end = text_start + kext->text_range.size; if (text_start == text_end) { r_kext_free (kext); continue; } for (j = 0; j < n_kmod_info; j++) { if (text_start > all_infos[j].start || all_infos[j].start >= text_end) { continue; } kext->name = strdup (all_infos[j].name); kext->own_name = true; break; } if (!kext->name) { r_kext_free (kext); continue; } r_list_push (kexts, kext); } R_FREE (all_infos); return kexts; beach: r_list_free (kexts); R_FREE (all_infos); return NULL; } static RList *kexts_from_load_commands(RKernelCacheObj *obj, RBinFile *bf) { RList *kexts = r_list_newf ((RListFree) &r_kext_free); if (!kexts) { return NULL; } ut32 i, ncmds = r_buf_read_le32_at (obj->cache_buf, 16); ut64 length = r_buf_size (obj->cache_buf); ut32 cursor = sizeof (struct MACH0_(mach_header)); for (i = 0; i < ncmds && cursor < length; i++) { ut32 cmdtype = r_buf_read_le32_at (obj->cache_buf, cursor); ut32 cmdsize = r_buf_read_le32_at (obj->cache_buf, cursor + 4); if (!cmdsize || cmdsize + cursor < cursor) { break; } if (cmdtype != LC_KEXT) { cursor += cmdsize; continue; } ut64 vaddr = r_buf_read_le64_at (obj->cache_buf, cursor + 8); ut64 paddr = r_buf_read_le64_at (obj->cache_buf, cursor + 16); st32 padded_name_length = (st32)cmdsize - 32; if (padded_name_length <= 0 || cmdsize - 32 + cursor >= length) { cursor += cmdsize; continue; } char *padded_name = calloc (1, padded_name_length); if (!padded_name) { goto beach; } if (r_buf_read_at (obj->cache_buf, cursor + 32, (ut8 *)padded_name, padded_name_length) != padded_name_length) { free (padded_name); goto early; } RKext *kext = R_NEW0 (RKext); if (!kext) { free (padded_name); goto beach; } kext->vaddr = vaddr; kext->range.offset = paddr; kext->mach0 = create_kext_shared_mach0 (obj, kext, bf); if (!kext->mach0) { free (padded_name); r_kext_free (kext); cursor += cmdsize; continue; } 
r_kext_fill_text_range (kext); kext->vaddr = K_PPTR (kext->vaddr); kext->pa2va_exec = obj->pa2va_exec; kext->pa2va_data = obj->pa2va_data; kext->name = strdup (padded_name); kext->own_name = true; free (padded_name); r_list_push (kexts, kext); cursor += cmdsize; } early: return kexts; beach: r_list_free (kexts); return NULL; } static void r_kext_free(RKext *kext) { if (!kext) { return; } if (kext->mach0) { MACH0_(mach0_free) (kext->mach0); kext->mach0 = NULL; } if (kext->own_name && kext->name) { R_FREE (kext->name); kext->name = NULL; } R_FREE (kext); } static void r_kext_fill_text_range(RKext *kext) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (kext->mach0))) { return; } int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__TEXT_EXEC.__text")) { kext->text_range.offset = sections[i].offset; kext->text_range.size = sections[i].size; kext->vaddr = sections[i].addr; break; } } R_FREE (sections); } static int kexts_sort_vaddr_func(const void *a, const void *b) { RKext *A = (RKext *) a; RKext *B = (RKext *) b; int vaddr_compare = A->vaddr - B->vaddr; if (vaddr_compare == 0) { return A->text_range.size - B->text_range.size; } return vaddr_compare; } static RKextIndex *r_kext_index_new(RList *kexts) { if (!kexts) { return NULL; } int length = r_list_length (kexts); if (!length) { return NULL; } RKextIndex *index = R_NEW0 (RKextIndex); if (!index) { return NULL; } index->entries = malloc (length *sizeof(RKext*)); if (!index->entries) { R_FREE (index); return NULL; } RListIter *iter; RKext *kext; int i = 0; r_list_foreach (kexts, iter, kext) { index->entries[i++] = kext; } index->length = i; return index; } static void r_kext_index_free(RKextIndex *index) { if (!index) { return; } int i = 0; RKext *kext; r_kext_index_foreach (index, i, kext) { r_kext_free (kext); index->entries[i] = NULL; } index->length = 0; R_FREE (index); } static RKext *r_kext_index_vget(RKextIndex *index, ut64 vaddr) { int imid; int imin = 0; int 
	/* r_kext_index_vget() continued: 'imax' completes the declaration begun
	 * on the previous line. Lower-bound binary search over the index, which
	 * is expected to be sorted by vaddr. */
	imax = index->length - 1;
	while (imin < imax) {
		imid = (imin + imax) / 2;
		RKext *entry = index->entries[imid];
		/* Move right past entries that end at or before vaddr, and past
		 * zero-sized entries starting exactly at vaddr. */
		if ((entry->vaddr + entry->text_range.size) <= vaddr ||
				(entry->vaddr == vaddr && entry->text_range.size == 0)) {
			imin = imid + 1;
		} else {
			imax = imid;
		}
	}
	RKext *minEntry = index->entries[imin];
	/* Only a hit when vaddr falls inside [entry start, entry start + size). */
	if ((imax == imin) && (minEntry->vaddr <= vaddr) &&
			((minEntry->vaddr + minEntry->text_range.size) > vaddr)) {
		return minEntry;
	}
	return NULL;
}

/* Parse a kext's own Mach-O header by slicing the cache buffer at the kext's
 * file offset (header_at = 0 relative to the slice). */
static struct MACH0_(obj_t) *create_kext_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf) {
	RBuffer *buf = r_buf_new_slice (obj->cache_buf, kext->range.offset, r_buf_size (obj->cache_buf) - kext->range.offset);
	struct MACH0_(opts_t) opts;
	MACH0_(opts_set_default) (&opts, bf);
	opts.verbose = true;
	opts.header_at = 0;
	struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (buf, &opts);
	/* NOTE(review): assumes MACH0_(new_buf) keeps its own reference to buf,
	 * so freeing ours here is safe — confirm against the mach0 API. */
	r_buf_free (buf);
	return mach0;
}

/* Parse a kext that shares the cache's Mach-O layout: reference the whole
 * cache buffer and point header_at at the kext's offset. */
static struct MACH0_(obj_t) *create_kext_shared_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf) {
	RBuffer *buf = r_buf_ref (obj->cache_buf);
	struct MACH0_(opts_t) opts;
	MACH0_(opts_set_default) (&opts, bf);
	opts.verbose = false;
	opts.header_at = kext->range.offset;
	struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (buf, &opts);
	r_buf_free (buf); /* drops only the reference taken above */
	return mach0;
}

/* RBin entries callback: kernel entry point plus constructor-derived entries
 * (body continues on the following line). */
static RList *entries(RBinFile *bf) {
	RList *ret;
	RBinObject *obj = bf ?
bf->o : NULL; if (!obj || !obj->bin_obj || !(ret = r_list_newf (free))) { return NULL; } RKernelCacheObj *kobj = (RKernelCacheObj*) obj->bin_obj; ut64 entry_vaddr = kobj->mach0->entry; if (kobj->pa2va_exec <= entry_vaddr) { ut64 entry_paddr = entry_vaddr - kobj->pa2va_exec; RBinAddr *ba = newEntry (entry_paddr, entry_vaddr, 0); if (ba) { r_list_append (ret, ba); } } process_constructors (kobj, kobj->mach0, ret, 0, true, R_K_CONSTRUCTOR_TO_ENTRY, NULL); return ret; } static void process_kmod_init_term(RKernelCacheObj *obj, RKext *kext, RList *ret, ut64 **inits, ut64 **terms) { if (!*inits || !*terms) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return; } int i = 0; for (; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } ut64 start_paddr = 0; ut64 *target = NULL; int n_ptrs = 0; if (!*inits && strstr (sections[i].name, "__kmod_init")) { int n_inits = sections[i].size / 8; if (n_inits <= 0) { continue; } *inits = R_NEWS0 (ut64, n_inits + 1); target = *inits; n_ptrs = n_inits; } if (!*terms && strstr (sections[i].name, "__kmod_term")) { int n_terms = sections[i].size / 8; if (n_terms <= 0) { continue; } *terms = R_NEWS0 (ut64, n_terms + 1); target = *terms; n_ptrs = n_terms; } if (!target || !n_ptrs) { continue; } start_paddr = sections[i].offset; int j = 0; ut8 bytes[8]; for (; j < n_ptrs; j++) { if (r_buf_read_at (obj->cache_buf, start_paddr + j * 8, bytes, 8) < 8) { break; } target[j] = K_RPTR (bytes); } target[j] = 0; } R_FREE (sections); } if (*inits) { create_initterm_syms (kext, ret, R_BIN_ENTRY_TYPE_INIT, *inits); } if (*terms) { create_initterm_syms (kext, ret, R_BIN_ENTRY_TYPE_FINI, *terms); } } /* * com.apple.driver.AppleMesaSEPDriver.3.__TEXT_EXEC.__text * | * | * AppleMesaSEPDriver <--+ */ static const char *kext_short_name(RKext *kext) { const char *sn = strrchr (kext->name, '.'); return sn ? 
sn + 1 : kext->name; } static void create_initterm_syms(RKext *kext, RList *ret, int type, ut64 *pointers) { int i = 0; int count = 0; for (; pointers[i]; i++) { ut64 func_vaddr = pointers[i]; ut64 text_start = kext->vaddr; ut64 text_end = text_start + kext->text_range.size; if (text_start == text_end) { continue; } if (text_start > func_vaddr || func_vaddr >= text_end) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("%s.%s.%d", kext_short_name (kext), (type == R_BIN_ENTRY_TYPE_INIT) ? "init" : "fini", count++); sym->vaddr = func_vaddr; sym->paddr = func_vaddr - kext->pa2va_exec; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (ret, sym); } } static void process_constructors(RKernelCacheObj *obj, struct MACH0_(obj_t) *mach0, RList *ret, ut64 paddr, bool is_first, int mode, const char *prefix) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return; } int i, type; for (i = 0; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } if (strstr (sections[i].name, "_mod_fini_func") || strstr (sections[i].name, "_mod_term_func")) { type = R_BIN_ENTRY_TYPE_FINI; } else if (strstr (sections[i].name, "_mod_init_func")) { type = is_first ? 0 : R_BIN_ENTRY_TYPE_INIT; is_first = false; } else { continue; } ut8 *buf = calloc (sections[i].size, 1); if (!buf) { break; } if (r_buf_read_at (obj->cache_buf, sections[i].offset + paddr, buf, sections[i].size) < sections[i].size) { free (buf); break; } int j; int count = 0; for (j = 0; j < sections[i].size; j += 8) { ut64 addr64 = K_RPTR (buf + j); ut64 paddr64 = sections[i].offset + paddr + j; if (mode == R_K_CONSTRUCTOR_TO_ENTRY) { RBinAddr *ba = newEntry (paddr64, addr64, type); r_list_append (ret, ba); } else if (mode == R_K_CONSTRUCTOR_TO_SYMBOL) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("%s.%s.%d", prefix, (type == R_BIN_ENTRY_TYPE_INIT) ? 
"init" : "fini", count++); sym->vaddr = addr64; sym->paddr = paddr64; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (ret, sym); } } free (buf); } free (sections); } static RBinAddr *newEntry(ut64 haddr, ut64 vaddr, int type) { RBinAddr *ptr = R_NEW0 (RBinAddr); if (!ptr) { return NULL; } ptr->paddr = haddr; ptr->vaddr = vaddr; ptr->hpaddr = haddr; ptr->bits = 64; ptr->type = type; return ptr; } static bool check_buffer(RBinFile *bf, RBuffer *b) { if (r_buf_size (b) > 4) { ut8 buf[4]; r_buf_read_at (b, 0, buf, sizeof (buf)); if (!memcmp (buf, "\xcf\xfa\xed\xfe", 4)) { return is_kernelcache_buffer (b); } } return false; } static RList *sections(RBinFile *bf) { RList *ret = NULL; RBinObject *obj = bf ? bf->o : NULL; if (!obj || !obj->bin_obj || !(ret = r_list_newf ((RListFree)free))) { return NULL; } RKernelCacheObj *kobj = (RKernelCacheObj*) obj->bin_obj; ensure_kexts_initialized (kobj, bf); int iter; RKext *kext; r_kext_index_foreach (kobj->kexts, iter, kext) { ut8 magicbytes[4]; r_buf_read_at (kobj->cache_buf, kext->range.offset, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: sections_from_mach0 (ret, kext->mach0, bf, kext->range.offset, kext->name, kobj); break; default: eprintf ("Unknown sub-bin\n"); break; } } sections_from_mach0 (ret, kobj->mach0, bf, 0, NULL, kobj); struct MACH0_(segment_command) *seg; int nsegs = R_MIN (kobj->mach0->nsegs, 128); int i; for (i = 0; i < nsegs; i++) { RBinSection *ptr; char segname[17]; if (!(ptr = R_NEW0 (RBinSection))) { break; } seg = &kobj->mach0->segs[i]; r_str_ncpy (segname, seg->segname, 17); r_str_filter (segname, -1); ptr->name = r_str_newf ("%d.%s", i, segname); ptr->size = seg->vmsize; ptr->vsize = seg->vmsize; ptr->paddr = seg->fileoff + bf->o->boffset; ptr->vaddr = seg->vmaddr; ptr->add = true; ptr->is_segment = true; if (!ptr->vaddr) { ptr->vaddr = ptr->paddr; } ptr->perm = prot2perm (seg->initprot); r_list_append (ret, 
	ptr); /* completes the r_list_append (ret, ...) call split from the previous line */
	}
	return ret;
}

/* Translate Mach-O VM protection bits (r=1, w=2, x=4) into RBin permission
 * bits (x=1, w=2, r=4). */
static int prot2perm(int x) {
	int r = 0;
	if (x&1) r |= 4;
	if (x&2) r |= 2;
	if (x&4) r |= 1;
	return r;
}

/* Append all sections of `mach0` to `ret`, optionally namespacing the names
 * with `prefix` (the kext name). paddr is the kext's base file offset within
 * the cache; vaddrs are run through K_PPTR to strip pointer decoration. */
static void sections_from_mach0(RList *ret, struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, char *prefix, RKernelCacheObj *obj) {
	struct section_t *sections = NULL;
	if (!(sections = MACH0_(get_sections) (mach0))) {
		return;
	}
	int i;
	for (i = 0; !sections[i].last; i++) {
		RBinSection *ptr;
		if (!(ptr = R_NEW0 (RBinSection))) {
			break;
		}
		if (prefix) {
			ptr->name = r_str_newf ("%s.%s", prefix, (char*)sections[i].name);
		} else {
			ptr->name = r_str_newf ("%s", (char*)sections[i].name);
		}
		if (strstr (ptr->name, "la_symbol_ptr")) {
			/* lazy-pointer tables render as arrays of 8-byte entries */
			int len = sections[i].size / 8;
			ptr->format = r_str_newf ("Cd %d[%d]", 8, len);
		}
		handle_data_sections (ptr);
		ptr->size = sections[i].size;
		ptr->vsize = sections[i].vsize;
		ptr->paddr = sections[i].offset + bf->o->boffset + paddr;
		ptr->vaddr = K_PPTR (sections[i].addr);
		if (!ptr->vaddr) {
			ptr->vaddr = ptr->paddr;
		}
		ptr->perm = sections[i].perm;
		/* executable text without recorded perms still gets r-x */
		if (!ptr->perm && strstr (sections[i].name, "__TEXT_EXEC.__text")) {
			ptr->perm = 1 | 4;
		}
		r_list_append (ret, ptr);
	}
	free (sections);
}

/* Mark string/metadata sections as data so disassembly skips them. */
static void handle_data_sections(RBinSection *sect) {
	if (strstr (sect->name, "_cstring")) {
		sect->is_data = true;
	} else if (strstr (sect->name, "_os_log")) {
		sect->is_data = true;
	} else if (strstr (sect->name, "_objc_methname")) {
		sect->is_data = true;
	} else if (strstr (sect->name, "_objc_classname")) {
		sect->is_data = true;
	} else if (strstr (sect->name, "_objc_methtype")) {
		sect->is_data = true;
	}
}

/* RBin symbols callback: collect kernel symbols, then syscall and MIG
 * symbols, then per-kext symbols and stubs (continues on following lines). */
static RList *symbols(RBinFile *bf) {
	RList *ret = r_list_newf (free);
	if (!ret) {
		return NULL;
	}
	RKernelCacheObj *obj = (RKernelCacheObj*) bf->o->bin_obj;
	symbols_from_mach0 (ret, obj->mach0, bf, 0, 0);
	/* addr -> name map used later to resolve stub targets */
	HtPP *kernel_syms_by_addr = sdb_ht_new ();
	if (!kernel_syms_by_addr) {
		r_list_free (ret);
		return NULL;
	}
	RListIter *iter;
	RBinSymbol *sym;
	ut64 enosys_addr = 0;
	r_list_foreach (ret, iter, sym) {
		r_strf_var (key, 64, "%"PFMT64x, sym->vaddr);
sdb_ht_insert (kernel_syms_by_addr, key, sym->dname ? sym->dname : sym->name); if (!enosys_addr && strstr (sym->name, "enosys")) { enosys_addr = sym->vaddr; } } RList *syscalls = resolve_syscalls (obj, enosys_addr); if (syscalls) { r_list_foreach (syscalls, iter, sym) { r_strf_var (key, 32, "%"PFMT64x, sym->vaddr); sdb_ht_insert (kernel_syms_by_addr, key, sym->name); r_list_append (ret, sym); } syscalls->free = NULL; r_list_free (syscalls); } RList *subsystem = resolve_mig_subsystem (obj); if (subsystem) { r_list_foreach (subsystem, iter, sym) { r_strf_var (key, 64, "%"PFMT64x, sym->vaddr); sdb_ht_insert (kernel_syms_by_addr, key, sym->name); r_list_append (ret, sym); } subsystem->free = NULL; r_list_free (subsystem); } ensure_kexts_initialized (obj, bf); RKext *kext; int kiter; ut64 *inits = NULL; ut64 *terms = NULL; r_kext_index_foreach (obj->kexts, kiter, kext) { ut8 magicbytes[4]; r_buf_read_at (obj->cache_buf, kext->range.offset, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: symbols_from_mach0 (ret, kext->mach0, bf, kext->range.offset, r_list_length (ret)); symbols_from_stubs (ret, kernel_syms_by_addr, obj, bf, kext, r_list_length (ret)); process_constructors (obj, kext->mach0, ret, kext->range.offset, false, R_K_CONSTRUCTOR_TO_SYMBOL, kext_short_name (kext)); process_kmod_init_term (obj, kext, ret, &inits, &terms); break; default: eprintf ("Unknown sub-bin\n"); break; } } R_FREE (inits); R_FREE (terms); sdb_ht_free (kernel_syms_by_addr); return ret; } static void symbols_from_mach0(RList *ret, struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, int ordinal) { const struct symbol_t *symbols = MACH0_(get_symbols) (mach0); if (!symbols) { return; } int i; for (i = 0; !symbols[i].last; i++) { if (!symbols[i].name[0] || symbols[i].addr < 100) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = strdup (symbols[i].name); sym->vaddr = symbols[i].addr; if (sym->name[0] == '_') { char *dn = 
r_bin_demangle (bf, sym->name, sym->name, sym->vaddr, false); if (dn) { sym->dname = dn; char *p = strchr (dn, '.'); if (p) { if (IS_UPPER (sym->name[0])) { sym->classname = strdup (sym->name); sym->classname[p - sym->name] = 0; } else if (IS_UPPER (p[1])) { sym->classname = strdup (p + 1); p = strchr (sym->classname, '.'); if (p) { *p = 0; } } } } } sym->forwarder = "NONE"; sym->bind = (symbols[i].type == R_BIN_MACH0_SYMBOL_TYPE_LOCAL)? "LOCAL": "GLOBAL"; sym->type = "FUNC"; sym->paddr = symbols[i].offset + bf->o->boffset + paddr; sym->size = symbols[i].size; sym->ordinal = ordinal + i; r_list_append (ret, sym); } } #define IS_KERNEL_ADDR(x) ((x & 0xfffffff000000000L) == 0xfffffff000000000L) typedef struct _r_sysent { ut64 sy_call; ut64 sy_arg_munge32; st32 sy_return_type; st16 sy_narg; ut16 sy_arg_bytes; } RSysEnt; static RList *resolve_syscalls(RKernelCacheObj *obj, ut64 enosys_addr) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return NULL; } RList *syscalls = NULL; RSyscall *syscall = NULL; ut8 *data_const = NULL; ut64 data_const_offset = 0, data_const_size = 0, data_const_vaddr = 0; int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__DATA_CONST.__const")) { data_const_offset = sections[i].offset; data_const_size = sections[i].size; data_const_vaddr = K_PPTR (sections[i].addr); break; } } if (!data_const_offset || !data_const_size || !data_const_vaddr) { goto beach; } data_const = malloc (data_const_size); if (!data_const) { goto beach; } if (r_buf_read_at (obj->cache_buf, data_const_offset, data_const, data_const_size) < data_const_size) { goto beach; } ut8 *cursor = data_const; ut8 *end = data_const + data_const_size; while (cursor < end) { ut64 test = r_read_le64 (cursor); if (test == enosys_addr) { break; } cursor += 8; } if (cursor >= end) { goto beach; } cursor -= 24; while (cursor >= data_const) { ut64 addr = r_read_le64 (cursor); ut64 x = r_read_le64 (cursor + 8); ut64 y = 
r_read_le64 (cursor + 16); if (IS_KERNEL_ADDR (addr) && (x == 0 || IS_KERNEL_ADDR (x)) && (y != 0 && !IS_KERNEL_ADDR (y))) { cursor -= 24; continue; } cursor += 24; break; } if (cursor < data_const) { goto beach; } syscalls = r_list_newf (r_bin_symbol_free); if (!syscalls) { goto beach; } syscall = r_syscall_new (); if (!syscall) { goto beach; } r_syscall_setup (syscall, "arm", 64, NULL, "ios"); if (!syscall->db) { r_syscall_free (syscall); goto beach; } ut64 sysent_vaddr = cursor - data_const + data_const_vaddr; RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { goto beach; } sym->name = r_str_newf ("sysent"); sym->vaddr = sysent_vaddr; sym->paddr = cursor - data_const + data_const_offset; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "OBJECT"; r_list_append (syscalls, sym); i = 1; cursor += 24; int num_syscalls = sdb_count (syscall->db); while (cursor < end && i < num_syscalls) { ut64 addr = r_read_le64 (cursor); RSyscallItem *item = r_syscall_get (syscall, i, 0x80); if (item && item->name) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { r_syscall_item_free (item); goto beach; } sym->name = r_str_newf ("syscall.%d.%s", i, item->name); sym->vaddr = addr; sym->paddr = addr; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (syscalls, sym); } r_syscall_item_free (item); cursor += 24; i++; } r_syscall_free (syscall); R_FREE (data_const); R_FREE (sections); return syscalls; beach: r_syscall_free (syscall); if (syscalls) { r_list_free (syscalls); } R_FREE (data_const); R_FREE (sections); return NULL; } #define K_MIG_SUBSYSTEM_SIZE (4 * 8) #define K_MIG_ROUTINE_SIZE (5 * 8) #define K_MIG_MAX_ROUTINES 100 static HtPP *mig_hash_new(void) { HtPP *hash = sdb_ht_new (); if (!hash) { return NULL; } int i; for (i = 0; i < R_MIG_INDEX_LEN; i += 2) { const char *num = mig_index[i]; const char *name = mig_index[i+1]; sdb_ht_insert (hash, num, name); } return hash; } static RList 
*resolve_mig_subsystem(RKernelCacheObj *obj) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return NULL; } HtPP *mig_hash = NULL; RList *subsystem = NULL; ut8 *data_const = NULL; ut64 data_const_offset = 0, data_const_size = 0, data_const_vaddr = 0; ut64 text_exec_offset = 0, text_exec_size = 0, text_exec_vaddr = 0; int incomplete = 2; int i = 0; for (; !sections[i].last && incomplete > 0; i++) { if (strstr (sections[i].name, "__DATA_CONST.__const")) { data_const_offset = sections[i].offset; data_const_size = sections[i].size; data_const_vaddr = K_PPTR (sections[i].addr); incomplete--; } if (strstr (sections[i].name, "__TEXT_EXEC.__text")) { text_exec_offset = sections[i].offset; text_exec_size = sections[i].size; text_exec_vaddr = K_PPTR (sections[i].addr); incomplete--; } } if (!data_const_offset || !data_const_size || !data_const_vaddr || !text_exec_offset || !text_exec_size || !text_exec_vaddr) { goto beach; } data_const = malloc (data_const_size); if (!data_const) { goto beach; } if (r_buf_read_at (obj->cache_buf, data_const_offset, data_const, data_const_size) < data_const_size) { goto beach; } subsystem = r_list_newf (r_bin_symbol_free); if (!subsystem) { goto beach; } mig_hash = mig_hash_new (); if (!mig_hash) { goto beach; } ut8 *cursor = data_const; ut8 *end = data_const + data_const_size; while (cursor < end) { ut64 subs_p = K_PPTR (r_read_le64 (cursor)); if (subs_p < text_exec_vaddr || subs_p >= text_exec_vaddr + text_exec_size) { cursor += 8; continue; } ut32 subs_min_idx = r_read_le32 (cursor + 8); ut32 subs_max_idx = r_read_le32 (cursor + 12); if (subs_min_idx >= subs_max_idx || (subs_max_idx - subs_min_idx) > K_MIG_MAX_ROUTINES) { cursor += 16; continue; } ut32 n_routines = (subs_max_idx - subs_min_idx); ut64 *routines = (ut64 *) calloc (n_routines, sizeof (ut64)); if (!routines) { goto beach; } ut8 *array_cursor = cursor + K_MIG_SUBSYSTEM_SIZE; ut8 *end_array = array_cursor + n_routines * 
K_MIG_ROUTINE_SIZE; bool is_consistent = true; int idx = 0; while (array_cursor < end_array) { ut64 should_be_null = r_read_le64 (array_cursor); if (should_be_null != 0) { is_consistent = false; break; } ut64 routine_p = K_PPTR (r_read_le64 (array_cursor + 8)); if (routine_p != 0 && (routine_p < text_exec_vaddr || routine_p >= text_exec_vaddr + text_exec_size)) { is_consistent = false; break; } routines[idx++] = routine_p; array_cursor += K_MIG_ROUTINE_SIZE; } if (is_consistent) { for (idx = 0; idx < n_routines; idx++) { ut64 routine_p = routines[idx]; if (!routine_p) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { R_FREE (routines); goto beach; } int num = idx + subs_min_idx; bool found = false; r_strf_var (key, 32, "%d", num); const char *name = sdb_ht_find (mig_hash, key, &found); if (found && name && *name) { sym->name = r_str_newf ("mig.%d.%s", num, name); } else { sym->name = r_str_newf ("mig.%d", num); } sym->vaddr = routine_p; sym->paddr = sym->vaddr - text_exec_vaddr + text_exec_offset; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "OBJECT"; r_list_append (subsystem, sym); } cursor += K_MIG_SUBSYSTEM_SIZE + n_routines * K_MIG_ROUTINE_SIZE; } else { cursor += 8; } R_FREE (routines); } sdb_ht_free (mig_hash); R_FREE (data_const); R_FREE (sections); return subsystem; beach: if (subsystem) { r_list_free (subsystem); } if (mig_hash) { sdb_ht_free (mig_hash); } R_FREE (data_const); R_FREE (sections); return NULL; } static ut64 extract_addr_from_code(ut8 *arm64_code, ut64 vaddr) { ut64 addr = vaddr & ~0xfff; ut64 adrp = r_read_le32 (arm64_code); ut64 adrp_offset = ((adrp & 0x60000000) >> 29) | ((adrp & 0xffffe0) >> 3); addr += adrp_offset << 12; ut64 ldr = r_read_le32 (arm64_code + 4); addr += ((ldr & 0x3ffc00) >> 10) << ((ldr & 0xc0000000) >> 30); return addr; } static void symbols_from_stubs(RList *ret, HtPP *kernel_syms_by_addr, RKernelCacheObj *obj, RBinFile *bf, RKext *kext, int ordinal) { RStubsInfo *stubs_info = 
get_stubs_info(kext->mach0, kext->range.offset, obj); if (!stubs_info) { return; } ut64 stubs_cursor = stubs_info->stubs.offset; ut64 stubs_end = stubs_cursor + stubs_info->stubs.size; for (; stubs_cursor < stubs_end; stubs_cursor += 12) { ut8 arm64_code[8]; if (r_buf_read_at (obj->cache_buf, stubs_cursor, arm64_code, 8) < 8) { break; } ut64 vaddr = stubs_cursor + obj->pa2va_exec; ut64 addr_in_got = extract_addr_from_code (arm64_code, vaddr); bool found = false; int level = 3; ut64 target_addr = UT64_MAX; while (!found && level-- > 0) { ut64 offset_in_got = addr_in_got - obj->pa2va_exec; ut64 addr; if (r_buf_read_at (obj->cache_buf, offset_in_got, (ut8*) &addr, 8) < 8) { break; } if (level == 2) { target_addr = addr; } r_strf_var (key, 32, "%"PFMT64x, addr); const char *name = sdb_ht_find (kernel_syms_by_addr, key, &found); if (found) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("stub.%s", name); sym->vaddr = vaddr; sym->paddr = stubs_cursor; sym->size = 12; sym->forwarder = "NONE"; sym->bind = "LOCAL"; sym->type = "FUNC"; sym->ordinal = ordinal ++; r_list_append (ret, sym); break; } addr_in_got = addr; } if (found || target_addr == UT64_MAX) { continue; } ensure_kexts_initialized (obj, bf); RKext *remote_kext = r_kext_index_vget (obj->kexts, target_addr); if (!remote_kext) { continue; } RBinSymbol *remote_sym = R_NEW0 (RBinSymbol); if (!remote_sym) { break; } remote_sym->name = r_str_newf ("exp.%s.0x%"PFMT64x, kext_short_name (remote_kext), target_addr); remote_sym->vaddr = target_addr; remote_sym->paddr = target_addr - obj->pa2va_exec; remote_sym->size = 0; remote_sym->forwarder = "NONE"; remote_sym->bind = "GLOBAL"; remote_sym->type = "FUNC"; remote_sym->ordinal = ordinal ++; r_list_append (ret, remote_sym); RBinSymbol *local_sym = R_NEW0 (RBinSymbol); if (!local_sym) { break; } local_sym->name = r_str_newf ("stub.%s.0x%"PFMT64x, kext_short_name (remote_kext), target_addr); local_sym->vaddr = vaddr; local_sym->paddr = 
	stubs_cursor; /* completes the local_sym->paddr assignment split from the previous line */
	local_sym->size = 12;
	local_sym->forwarder = "NONE";
	local_sym->bind = "GLOBAL";
	local_sym->type = "FUNC";
	local_sym->ordinal = ordinal ++;
	r_list_append (ret, local_sym);
	}
	R_FREE (stubs_info);
}

/* Find this mach0's __DATA_CONST.__got and __TEXT_EXEC.__stubs sections and
 * record their file ranges (offsets shifted by the kext's paddr). Returns
 * NULL unless BOTH sections are present. */
static RStubsInfo *get_stubs_info(struct MACH0_(obj_t) *mach0, ut64 paddr, RKernelCacheObj *obj) {
	struct section_t *sections = NULL;
	if (!(sections = MACH0_(get_sections) (mach0))) {
		return NULL;
	}
	RStubsInfo *stubs_info = R_NEW0 (RStubsInfo);
	if (!stubs_info) {
		free (sections);
		return NULL;
	}
	int incomplete = 2; /* counts the two sections still to be found */
	int i = 0;
	for (; !sections[i].last; i++) {
		if (strstr (sections[i].name, "__DATA_CONST.__got")) {
			stubs_info->got.offset = sections[i].offset + paddr;
			stubs_info->got.size = sections[i].size;
			stubs_info->got_addr = K_PPTR (sections[i].addr);
			if (!--incomplete) {
				break;
			}
		}
		if (strstr (sections[i].name, "__TEXT_EXEC.__stubs")) {
			stubs_info->stubs.offset = sections[i].offset + paddr;
			stubs_info->stubs.size = sections[i].size;
			if (!--incomplete) {
				break;
			}
		}
	}
	R_FREE (sections);
	if (incomplete) {
		R_FREE (stubs_info); /* also nulls stubs_info, so NULL is returned */
	}
	return stubs_info;
}

/* RBin info callback: static metadata describing an arm64 iOS kernelcache. */
static RBinInfo *info(RBinFile *bf) {
	RBinInfo *ret = NULL;
	bool big_endian = 0;
	if (!(ret = R_NEW0 (RBinInfo))) {
		return NULL;
	}
	ret->file = strdup (bf->file);
	ret->bclass = strdup ("kernelcache");
	ret->rclass = strdup ("ios");
	ret->os = strdup ("iOS");
	ret->arch = strdup ("arm"); // XXX
	ret->machine = strdup (ret->arch);
	ret->subsystem = strdup ("xnu");
	ret->type = strdup ("kernel-cache");
	ret->bits = 64;
	ret->has_va = true;
	ret->big_endian = big_endian;
	ret->dbg_info = 0;
	return ret;
}

/* Base address of the cache's main mach0 image.
 * NOTE(review): returns 8LL (not 0) on a missing bin object — looks
 * deliberate but undocumented; confirm before changing. */
static ut64 baddr(RBinFile *bf) {
	if (!bf || !bf->o || !bf->o->bin_obj) {
		return 8LL;
	}
	RKernelCacheObj *obj = (RKernelCacheObj*) bf->o->bin_obj;
	return MACH0_(get_baddr)(obj->mach0);
}

/* RBin destroy callback. */
static void destroy(RBinFile *bf) {
	r_kernel_cache_free ((RKernelCacheObj*) bf->o->bin_obj);
}

/* Tear down a kernelcache object and everything it owns
 * (continues on the following line). */
static void r_kernel_cache_free(RKernelCacheObj *obj) {
	if (!obj) {
		return;
	}
	if (obj->mach0) {
		MACH0_(mach0_free) (obj->mach0);
		obj->mach0 = NULL;
obj->cache_buf = NULL; } if (obj->cache_buf) { r_buf_free (obj->cache_buf); obj->cache_buf = NULL; } if (obj->prelink_info) { r_cf_value_dict_free (obj->prelink_info); obj->prelink_info = NULL; } if (obj->kexts) { r_kext_index_free (obj->kexts); obj->kexts = NULL; } if (obj->rebase_info) { r_rebase_info_free (obj->rebase_info); obj->rebase_info = NULL; } R_FREE (obj); } static RRebaseInfo *r_rebase_info_new_from_mach0(RBuffer *cache_buf, struct MACH0_(obj_t) *mach0) { RFileRange *rebase_ranges = NULL; struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return NULL; } ut64 starts_offset = 0, starts_size = 0; int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__TEXT.__thread_starts")) { starts_offset = sections[i].offset; starts_size = sections[i].size; break; } } R_FREE (sections); ut64 kernel_base = 0; struct MACH0_(segment_command) *seg; int nsegs = R_MIN (mach0->nsegs, 128); for (i = 0; i < nsegs; i++) { char segname[17]; seg = &mach0->segs[i]; r_str_ncpy (segname, seg->segname, 17); if (!strncmp (segname, "__TEXT", 6) && segname[6] == '\0') { kernel_base = seg->vmaddr; break; } } if (starts_offset == 0 || starts_size == 0 || kernel_base == 0) { return NULL; } int n_starts = starts_size / 4; if (n_starts <= 1) { return NULL; } rebase_ranges = R_NEWS0 (RFileRange, n_starts - 1); if (rebase_ranges == NULL) { return NULL; } ut64 multiplier = 4; for (i = 0; i != n_starts; i++) { ut8 bytes[4]; if (r_buf_read_at (cache_buf, starts_offset + i * 4, bytes, 4) < 4) { goto beach; } if (i == 0) { multiplier += 4 * (r_read_le32 (bytes) & 1); continue; } rebase_ranges[i - 1].offset = r_read_le32 (bytes); rebase_ranges[i - 1].size = UT64_MAX; } RRebaseInfo *rebase_info = R_NEW0 (RRebaseInfo); if (rebase_info == NULL) { goto beach; } rebase_info->ranges = rebase_ranges; rebase_info->n_ranges = n_starts - 1; rebase_info->multiplier = multiplier; rebase_info->kernel_base = kernel_base; return rebase_info; beach: R_FREE 
	(rebase_ranges); /* completes the beach: R_FREE call split from the previous line */
	return NULL;
}

/* Free a rebase-info object and its range array. Safe on NULL. */
static void r_rebase_info_free(RRebaseInfo *info) {
	if (!info) {
		return;
	}
	if (info->ranges) {
		R_FREE (info->ranges);
		info->ranges = NULL;
	}
	R_FREE (info);
}

/* Lazily resolve each rebase range: convert its __thread_starts offset to a
 * file offset and measure the chain length by walking it once. Runs only on
 * the first call (guarded by rebase_info_populated). */
static void r_rebase_info_populate(RRebaseInfo *info, RKernelCacheObj *obj) {
	struct section_t *sections = NULL;
	int i = 0;
	if (obj->rebase_info_populated) {
		return;
	}
	obj->rebase_info_populated = true;
	for (; i < info->n_ranges; i++) {
		/* size != UT64_MAX means this (and by construction every later)
		 * range was already resolved */
		if (info->ranges[i].size != UT64_MAX) {
			goto cleanup;
		} else if (sections == NULL) {
			if (!(sections = MACH0_(get_sections) (obj->mach0))) {
				return;
			}
		}
		info->ranges[i].offset = r_rebase_offset_to_paddr (obj, sections, info->ranges[i].offset);
		ut64 end = iterate_rebase_list (obj->cache_buf, info->multiplier, info->ranges[i].offset, NULL, NULL);
		if (end != UT64_MAX) {
			info->ranges[i].size = end - info->ranges[i].offset + 8;
		} else {
			info->ranges[i].size = 0;
		}
	}
cleanup:
	R_FREE (sections);
}

/* Map a kernel-base-relative offset to a file offset via the section table;
 * falls back to the raw offset when no section covers the address. */
static ut64 r_rebase_offset_to_paddr(RKernelCacheObj *obj, struct section_t *sections, ut64 offset) {
	ut64 vaddr = obj->rebase_info->kernel_base + offset;
	int i = 0;
	for (; !sections[i].last; i++) {
		if (sections[i].addr <= vaddr && vaddr < (sections[i].addr + sections[i].vsize)) {
			return sections[i].offset + (vaddr - sections[i].addr);
		}
	}
	return offset;
}

/* Walk a rebase pointer chain starting at start_offset. Each 8-byte entry
 * encodes the delta to the next entry in bits [61:51], scaled by multiplier;
 * delta 0 terminates the chain. Invokes func (if given) per entry until it
 * returns false. Returns the offset of the last entry visited, or UT64_MAX
 * on a short read. */
static ut64 iterate_rebase_list(RBuffer *cache_buf, ut64 multiplier, ut64 start_offset, ROnRebaseFunc func, void *user_data) {
	ut8 bytes[8];
	ut64 cursor = start_offset;
	while (true) {
		if (r_buf_read_at (cache_buf, cursor, bytes, 8) < 8) {
			return UT64_MAX;
		}
		ut64 decorated_addr = r_read_le64 (bytes);
		if (func) {
			bool carry_on = func (cursor, decorated_addr, user_data);
			if (!carry_on) {
				break;
			}
		}
		ut64 delta = ((decorated_addr >> 51) & 0x7ff) * multiplier;
		if (delta == 0) {
			break;
		}
		cursor += delta;
	}
	return cursor;
}

/* Hook the IO plugin's read so pointers can be un-decorated on the fly;
 * the original read is stashed for chaining (continues on the next line). */
static void swizzle_io_read(RKernelCacheObj *obj, RIO *io) {
	r_return_if_fail (io && io->desc && io->desc->plugin);
	RIOPlugin *plugin = io->desc->plugin;
	obj->original_io_read = plugin->read;
plugin->read = &kernelcache_io_read; } static int kernelcache_io_read(RIO *io, RIODesc *fd, ut8 *buf, int count) { r_return_val_if_fail (io, -1); RCore *core = (RCore*) io->corebind.core; if (!fd || !core || !core->bin || !core->bin->binfiles) { return -1; } RKernelCacheObj *cache = NULL; RListIter *iter; RBinFile *bf; r_list_foreach (core->bin->binfiles, iter, bf) { if (bf->fd == fd->fd && bf->o && bf->o->bin_obj) { cache = bf->o->bin_obj; if (pending_bin_files) { RListIter *to_remove = r_list_contains (pending_bin_files, bf); if (to_remove) { r_list_delete (pending_bin_files, to_remove); if (r_list_empty (pending_bin_files)) { r_list_free (pending_bin_files); pending_bin_files = NULL; } } } break; } } if (!cache) { r_list_foreach (pending_bin_files, iter, bf) { if (bf->fd == fd->fd && bf->o) { cache = bf->o->bin_obj; break; } } } if (!cache || !cache->original_io_read || cache->rebasing_buffer) { if (cache) { if ((!cache->rebasing_buffer && fd->plugin->read == &kernelcache_io_read) || (cache->rebasing_buffer && !cache->original_io_read)) { return -1; } if (cache->rebasing_buffer) { return cache->original_io_read (io, fd, buf, count); } } if (fd->plugin->read == kernelcache_io_read) { if (core->bin->verbose) { eprintf ("Avoid recursive reads\n"); } return -1; } return fd->plugin->read (io, fd, buf, count); } if (cache->rebase_info) { r_rebase_info_populate (cache->rebase_info, cache); } static ut8 *internal_buffer = NULL; static int internal_buf_size = 0; if (count > internal_buf_size) { if (internal_buffer) { R_FREE (internal_buffer); internal_buffer = NULL; } internal_buffer = (ut8 *) malloc (count); internal_buf_size = count; } if (!cache->original_io_read) { return -1; } ut64 io_off = io->off; int result = cache->original_io_read (io, fd, internal_buffer, count); if (result == count) { if (cache->mach0->chained_starts) { rebase_buffer_fixup (cache, io_off, fd, internal_buffer, count); } else if (cache->rebase_info) { rebase_buffer (cache, io_off, fd, 
internal_buffer, count); } memcpy (buf, internal_buffer, result); } return result; } static void rebase_buffer(RKernelCacheObj *obj, ut64 off, RIODesc *fd, ut8 *buf, int count) { if (obj->rebasing_buffer || !buf) { return; } obj->rebasing_buffer = true; ut64 eob = off + count; int i = 0; RRebaseCtx ctx; ctx.off = off; ctx.eob = eob; ctx.buf = buf; ctx.count = count; ctx.obj = obj; for (; i < obj->rebase_info->n_ranges; i++) { ut64 start = obj->rebase_info->ranges[i].offset; ut64 end = start + obj->rebase_info->ranges[i].size; if (end >= off && start <= eob) { iterate_rebase_list (obj->cache_buf, obj->rebase_info->multiplier, start, (ROnRebaseFunc) on_rebase_pointer, &ctx); } } obj->rebasing_buffer = false; } static void rebase_buffer_fixup(RKernelCacheObj *kobj, ut64 off, RIODesc *fd, ut8 *buf, int count) { if (kobj->rebasing_buffer) { return; } kobj->rebasing_buffer = true; struct MACH0_(obj_t) *obj = kobj->mach0; ut64 eob = off + count; size_t i = 0; for (; i < obj->nsegs; i++) { if (!obj->chained_starts[i]) { continue; } ut64 page_size = obj->chained_starts[i]->page_size; ut64 start = obj->segs[i].fileoff; ut64 end = start + obj->segs[i].filesize; if (end >= off && start <= eob) { ut64 page_idx = (R_MAX (start, off) - start) / page_size; ut64 page_end_idx = (R_MIN (eob, end) - start) / page_size; for (; page_idx <= page_end_idx; page_idx++) { if (page_idx >= obj->chained_starts[i]->page_count) { break; } ut16 page_start = obj->chained_starts[i]->page_start[page_idx]; if (page_start == DYLD_CHAINED_PTR_START_NONE) { continue; } ut64 cursor = start + page_idx * page_size + page_start; while (cursor < eob && cursor < end) { ut8 tmp[8]; if (r_buf_read_at (obj->b, cursor, tmp, 8) != 8) { break; } ut64 raw_ptr = r_read_le64 (tmp); ut64 ptr_value = raw_ptr; ut64 delta = 0; ut64 stride = 8; if (obj->chained_starts[i]->pointer_format == DYLD_CHAINED_PTR_ARM64E) { bool is_auth = IS_PTR_AUTH (raw_ptr); bool is_bind = IS_PTR_BIND (raw_ptr); if (is_auth && is_bind) { struct 
dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; delta = p->next; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; delta = p->next; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; delta = p->next; ptr_value = p->target + obj->baddr; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; ptr_value += obj->baddr; } } else if (obj->chained_starts[i]->pointer_format == DYLD_CHAINED_PTR_64_KERNEL_CACHE || obj->chained_starts[i]->pointer_format == DYLD_CHAINED_PTR_ARM64E_KERNEL) { bool is_auth = IS_PTR_AUTH (raw_ptr); stride = 4; if (is_auth) { struct dyld_chained_ptr_arm64e_cache_auth_rebase *p = (struct dyld_chained_ptr_arm64e_cache_auth_rebase *) &raw_ptr; delta = p->next; ptr_value = p->target + obj->baddr; } else { struct dyld_chained_ptr_arm64e_cache_rebase *p = (struct dyld_chained_ptr_arm64e_cache_rebase *) &raw_ptr; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; ptr_value += obj->baddr; } } else { eprintf ("Unsupported pointer format: %u\n", obj->chained_starts[i]->pointer_format); } ut64 in_buf = cursor - off; if (cursor >= off && cursor <= eob - 8) { r_write_le64 (&buf[in_buf], ptr_value); } cursor += delta * stride; if (!delta) { break; } } } } } kobj->rebasing_buffer = false; } static bool on_rebase_pointer(ut64 offset, ut64 decorated_addr, RRebaseCtx *ctx) { if (offset < ctx->off) { return true; } if (offset >= ctx->eob) { return false; } ut64 in_buf = offset - ctx->off; if (in_buf >= ctx->count || (in_buf + 8) > ctx->count) { return false; } RParsedPointer ptr; r_parse_pointer (&ptr, decorated_addr, ctx->obj); r_write_le64 (&ctx->buf[in_buf], ptr.address); return true; } static bool r_parse_pointer(RParsedPointer *ptr, 
ut64 decorated_addr, RKernelCacheObj *obj) { /* * Logic taken from: * https://github.com/Synacktiv/kernelcache-laundering/blob/master/ios12_kernel_cache_helper.py */ if ((decorated_addr & 0x4000000000000000LL) == 0 && obj->rebase_info) { if (decorated_addr & 0x8000000000000000LL) { ptr->address = obj->rebase_info->kernel_base + (decorated_addr & 0xFFFFFFFFLL); } else { ptr->address = ((decorated_addr << 13) & 0xFF00000000000000LL) | (decorated_addr & 0x7ffffffffffLL); if (decorated_addr & 0x40000000000LL) { ptr->address |= 0xfffc0000000000LL; } } } else { ptr->address = decorated_addr; } return true; } RBinPlugin r_bin_plugin_xnu_kernelcache = { .name = "kernelcache", .desc = "kernelcache bin plugin", .license = "LGPL3", .destroy = &destroy, .load_buffer = &load_buffer, .entries = &entries, .baddr = &baddr, .symbols = &symbols, .sections = &sections, .check_buffer = &check_buffer, .info = &info }; #ifndef R2_PLUGIN_INCORE RLibStruct radare_plugin = { .type = R_LIB_TYPE_BIN, .data = &r_bin_plugin_kernelcache, .version = R2_VERSION }; #endif
/* radare2 - LGPL - Copyright 2019-2022 - mrmacete */ #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_bin.h> #include <r_core.h> #include <r_syscall.h> #define R_BIN_MACH064 1 #include "../format/mach0/mach0.h" #include "../format/xnu/r_cf_dict.h" #include "../format/xnu/mig_index.h" #include "../format/mach0/mach064_is_kernelcache.c" typedef bool (*ROnRebaseFunc) (ut64 offset, ut64 decorated_addr, void *user_data); typedef struct _RKernelCacheObj { RBuffer *cache_buf; RCFValueDict *prelink_info; ut64 pa2va_exec; ut64 pa2va_data; struct _RKextIndex *kexts; struct MACH0_(obj_t) *mach0; struct _RRebaseInfo *rebase_info; int (*original_io_read)(RIO *io, RIODesc *fd, ut8 *buf, int count); bool rebase_info_populated; bool rebasing_buffer; bool kexts_initialized; } RKernelCacheObj; typedef struct _RFileRange { ut64 offset; ut64 size; } RFileRange; typedef struct _RPrelinkRange { RFileRange range; ut64 pa2va_exec; ut64 pa2va_data; } RPrelinkRange; typedef struct _RStubsInfo { RFileRange got; RFileRange stubs; ut64 got_addr; } RStubsInfo; typedef struct _RKext { RFileRange range; RFileRange text_range; char *name; ut64 mod_info; ut64 vaddr; struct MACH0_(obj_t) *mach0; bool own_name; ut64 pa2va_exec; ut64 pa2va_data; } RKext; typedef struct _RKextIndex { ut64 length; RKext **entries; } RKextIndex; typedef struct _RRebaseInfo { RFileRange *ranges; ut64 n_ranges; ut64 multiplier; ut64 kernel_base; } RRebaseInfo; typedef struct _RRebaseCtx { ut64 off, eob; ut8 *buf; int count; RKernelCacheObj *obj; } RRebaseCtx; typedef struct _RParsedPointer { ut64 address; } RParsedPointer; typedef struct _RKmodInfo { char name[0x41]; ut64 start; } RKmodInfo; #define KEXT_SHORT_NAME_FROM_SECTION(io_section) ({\ char *result = NULL;\ char *clone = strdup (io_section->name);\ char *cursor = strstr (clone, "__");\ if (cursor) {\ cursor--;\ *cursor = 0;\ cursor--;\ cursor = strrchr (cursor, '.');\ if (cursor) {\ *cursor = 0;\ cursor = strrchr (cursor, '.');\ if (cursor) 
{\ result = strdup (cursor + 1);\ R_FREE (clone);\ }\ }\ }\ result ? result : clone;\ }) #define KEXT_INFER_VSIZE(index, i)\ ((i+1 < index->length) ? index->entries[i+1]->vaddr - index->entries[i]->vaddr : UT64_MAX) #define KEXT_INFER_PSIZE(index, i)\ ((i+1 < index->length) ? index->entries[i+1]->range.offset - index->entries[i]->range.offset : UT64_MAX) #define R_K_CONSTRUCTOR_TO_ENTRY 0 #define R_K_CONSTRUCTOR_TO_SYMBOL 1 #define K_PPTR(p) p_ptr (p, obj) #define K_RPTR(buf) r_ptr (buf, obj) #define IS_PTR_AUTH(x) ((x & (1ULL << 63)) != 0) #define IS_PTR_BIND(x) ((x & (1ULL << 62)) != 0) static ut64 p_ptr(ut64 decorated_addr, RKernelCacheObj *obj); static ut64 r_ptr(ut8 *buf, RKernelCacheObj *obj); static RRebaseInfo *r_rebase_info_new_from_mach0(RBuffer *cache_buf, struct MACH0_(obj_t) *mach0); static void r_rebase_info_free(RRebaseInfo *info); static void r_rebase_info_populate(RRebaseInfo *info, RKernelCacheObj *obj); static ut64 iterate_rebase_list(RBuffer *cache_buf, ut64 multiplier, ut64 start_offset, ROnRebaseFunc func, void *user_data); static ut64 r_rebase_offset_to_paddr(RKernelCacheObj *obj, struct section_t *sections, ut64 offset); static void swizzle_io_read(RKernelCacheObj *obj, RIO *io); static int kernelcache_io_read(RIO *io, RIODesc *fd, ut8 *buf, int count); static bool r_parse_pointer(RParsedPointer *ptr, ut64 decorated_addr, RKernelCacheObj *obj); static bool on_rebase_pointer(ut64 offset, ut64 decorated_addr, RRebaseCtx *ctx); static void rebase_buffer(RKernelCacheObj *obj, ut64 off, RIODesc *fd, ut8 *buf, int count); static void rebase_buffer_fixup(RKernelCacheObj *kobj, ut64 off, RIODesc *fd, ut8 *buf, int count); static RPrelinkRange *get_prelink_info_range_from_mach0(struct MACH0_(obj_t) *mach0); static RList *filter_kexts(RKernelCacheObj *obj, RBinFile *bf); static RList *carve_kexts(RKernelCacheObj *obj, RBinFile *bf); static RList *kexts_from_load_commands(RKernelCacheObj *obj, RBinFile *bf); static void sections_from_mach0(RList *ret, 
struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, char *prefix, RKernelCacheObj *obj); static void handle_data_sections(RBinSection *sect); static void symbols_from_mach0(RList *ret, struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, int ordinal); static RList *resolve_syscalls(RKernelCacheObj *obj, ut64 enosys_addr); static RList *resolve_mig_subsystem(RKernelCacheObj *obj); static void symbols_from_stubs(RList *ret, HtPP *kernel_syms_by_addr, RKernelCacheObj *obj, RBinFile *bf, RKext *kext, int ordinal); static RStubsInfo *get_stubs_info(struct MACH0_(obj_t) *mach0, ut64 paddr, RKernelCacheObj *obj); static int prot2perm(int x); static void r_kext_free(RKext *kext); static void r_kext_fill_text_range(RKext *kext); static int kexts_sort_vaddr_func(const void *a, const void *b); static struct MACH0_(obj_t) *create_kext_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf); static struct MACH0_(obj_t) *create_kext_shared_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf); #define r_kext_index_foreach(index, i, item)\ if (index)\ for (i = 0; i < index->length && (item = index->entries[i], 1); i++) static RKextIndex *r_kext_index_new(RList *kexts); static void r_kext_index_free(RKextIndex *index); static RKext *r_kext_index_vget(RKextIndex *index, ut64 vaddr); static void process_kmod_init_term(RKernelCacheObj *obj, RKext *kext, RList *ret, ut64 **inits, ut64 **terms); static void create_initterm_syms(RKext *kext, RList *ret, int type, ut64 *pointers); static void process_constructors(RKernelCacheObj *obj, struct MACH0_(obj_t) *mach0, RList *ret, ut64 paddr, bool is_first, int mode, const char *prefix); static RBinAddr *newEntry(ut64 haddr, ut64 vaddr, int type); static void ensure_kexts_initialized(RKernelCacheObj *obj, RBinFile *bf); static void r_kernel_cache_free(RKernelCacheObj *obj); static R_TH_LOCAL RList *pending_bin_files = NULL; static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) { RBuffer *fbuf = 
r_buf_ref (buf); struct MACH0_(opts_t) opts; MACH0_(opts_set_default) (&opts, bf); struct MACH0_(obj_t) *main_mach0 = MACH0_(new_buf) (fbuf, &opts); if (!main_mach0) { return false; } RRebaseInfo *rebase_info = r_rebase_info_new_from_mach0 (fbuf, main_mach0); RKernelCacheObj *obj = NULL; RPrelinkRange *prelink_range = get_prelink_info_range_from_mach0 (main_mach0); if (!prelink_range) { goto beach; } obj = R_NEW0 (RKernelCacheObj); if (!obj) { R_FREE (prelink_range); goto beach; } RCFValueDict *prelink_info = NULL; if (main_mach0->hdr.filetype != MH_FILESET && prelink_range->range.size) { prelink_info = r_cf_value_dict_parse (fbuf, prelink_range->range.offset, prelink_range->range.size, R_CF_OPTION_SKIP_NSDATA); if (!prelink_info) { R_FREE (prelink_range); R_FREE (obj); goto beach; } } if (!pending_bin_files) { pending_bin_files = r_list_new (); if (!pending_bin_files) { R_FREE (prelink_range); R_FREE (obj); R_FREE (prelink_info); goto beach; } } obj->mach0 = main_mach0; obj->rebase_info = rebase_info; obj->prelink_info = prelink_info; obj->cache_buf = fbuf; obj->pa2va_exec = prelink_range->pa2va_exec; obj->pa2va_data = prelink_range->pa2va_data; R_FREE (prelink_range); *bin_obj = obj; r_list_push (pending_bin_files, bf); if (rebase_info || main_mach0->chained_starts) { RIO *io = bf->rbin->iob.io; swizzle_io_read (obj, io); } return true; beach: r_buf_free (fbuf); if (obj) { obj->cache_buf = NULL; } MACH0_(mach0_free) (main_mach0); return false; } static void ensure_kexts_initialized(RKernelCacheObj *obj, RBinFile *bf) { if (obj->kexts_initialized) { return; } obj->kexts_initialized = true; RList *kexts = NULL; if (obj->prelink_info) { kexts = filter_kexts (obj, bf); } if (kexts && !r_list_length (kexts)) { r_list_free (kexts); kexts = NULL; } if (!kexts) { kexts = kexts_from_load_commands (obj, bf); } if (kexts && !r_list_length (kexts)) { r_list_free (kexts); kexts = NULL; } if (!kexts) { kexts = carve_kexts (obj, bf); } obj->kexts = r_kext_index_new (kexts); if 
(kexts) { kexts->free = NULL; r_list_free (kexts); } } static RPrelinkRange *get_prelink_info_range_from_mach0(struct MACH0_(obj_t) *mach0) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return NULL; } RPrelinkRange *prelink_range = R_NEW0 (RPrelinkRange); if (!prelink_range) { R_FREE (sections); return NULL; } int incomplete = 3; int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__PRELINK_INFO.__info")) { prelink_range->range.offset = sections[i].offset; prelink_range->range.size = sections[i].size; if (!--incomplete) { break; } } if (strstr (sections[i].name, "__PRELINK_TEXT.__text")) { prelink_range->pa2va_exec = sections[i].addr - sections[i].offset; if (!--incomplete) { break; } } if (strstr (sections[i].name, "__PRELINK_DATA.__data")) { prelink_range->pa2va_data = sections[i].addr - sections[i].offset; if (!--incomplete) { break; } } } R_FREE (sections); if (incomplete == 1 && !prelink_range->pa2va_data) { struct MACH0_(segment_command) *seg; int nsegs = R_MIN (mach0->nsegs, 128); size_t i; for (i = 0; i < nsegs; i++) { seg = &mach0->segs[i]; if (!strcmp (seg->segname, "__DATA")) { prelink_range->pa2va_data = seg->vmaddr - seg->fileoff; incomplete--; break; } } } if (incomplete) { R_FREE (prelink_range); } return prelink_range; } static RList *filter_kexts(RKernelCacheObj *obj, RBinFile *bf) { RCFValueArray *kext_array = NULL; RListIter *iter; RCFKeyValue *item; r_list_foreach (obj->prelink_info->pairs, iter, item) { if (!strcmp (item->key, "_PrelinkInfoDictionary")) { kext_array = (RCFValueArray*) item->value; break; } } if (!kext_array) { return NULL; } RList *kexts = r_list_newf ((RListFree) &r_kext_free); if (!kexts) { return NULL; } bool is_sorted = true; RKext *prev_kext = NULL; RCFValueDict *kext_item; r_list_foreach (kext_array->values, iter, kext_item) { RKext *kext = R_NEW0 (RKext); if (!kext) { R_FREE (kexts); return NULL; } int kext_incomplete = 5; RListIter *internal_iter; 
r_list_foreach (kext_item->pairs, internal_iter, item) { if (!strcmp (item->key, "CFBundlePackageType")) { if (item->value->type != R_CF_STRING) { break; } RCFValueString *type = (RCFValueString*) item->value; if (strcmp (type->value, "KEXT")) { break; } kext_incomplete--; } if (!strcmp (item->key, "_PrelinkExecutableLoadAddr")) { if (item->value->type == R_CF_INTEGER) { kext_incomplete--; kext->vaddr = ((RCFValueInteger*) item->value)->value; kext->range.offset = kext->vaddr - obj->pa2va_exec; } } if (!strcmp (item->key, "_PrelinkExecutableSize")) { kext_incomplete--; if (item->value->type == R_CF_INTEGER) { kext->range.size = ((RCFValueInteger*) item->value)->value; } else { kext->range.size = 0; } } if (!strcmp (item->key, "_PrelinkKmodInfo")) { if (item->value->type == R_CF_INTEGER) { kext_incomplete--; kext->mod_info = ((RCFValueInteger*) item->value)->value; kext->mod_info -= obj->pa2va_data; } } if (!strcmp (item->key, "CFBundleIdentifier")) { if (item->value->type == R_CF_STRING) { kext_incomplete--; kext->name = ((RCFValueString*) item->value)->value; } } } if (kext_incomplete) { r_kext_free (kext); continue; } if (prev_kext && kext->vaddr < prev_kext->vaddr) { is_sorted = false; } prev_kext = kext; kext->mach0 = create_kext_mach0 (obj, kext, bf); if (!kext->mach0) { r_kext_free (kext); continue; } r_kext_fill_text_range (kext); r_list_push (kexts, kext); } if (!is_sorted) { eprintf ("SORTING KEXTs...\n"); r_list_sort (kexts, kexts_sort_vaddr_func); } return kexts; } static ut64 p_ptr(ut64 decorated_addr, RKernelCacheObj *obj) { RParsedPointer ptr; r_parse_pointer (&ptr, decorated_addr, obj); return ptr.address; } static ut64 r_ptr(ut8 *buf, RKernelCacheObj *obj) { ut64 decorated_addr = r_read_le64 (buf); return K_PPTR (decorated_addr); } static RList *carve_kexts(RKernelCacheObj *obj, RBinFile *bf) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return NULL; } ut64 pa2va_exec = 0; ut64 pa2va_data = 0; ut64 
kmod_start = 0, kmod_end = 0; ut64 kmod_info = 0, kmod_info_end = 0; int incomplete = 4; RKmodInfo *all_infos = NULL; int i = 0; for (; !sections[i].last && incomplete > 0; i++) { if (strstr (sections[i].name, "__TEXT_EXEC.__text")) { pa2va_exec = sections[i].addr - sections[i].offset; incomplete--; } if (strstr (sections[i].name, "__DATA.__data")) { pa2va_data = sections[i].addr - sections[i].offset; incomplete--; } if (strstr (sections[i].name, "__PRELINK_INFO.__kmod_start")) { kmod_start = sections[i].offset; kmod_end = kmod_start + sections[i].size; incomplete--; } if (strstr (sections[i].name, "__PRELINK_INFO.__kmod_info")) { kmod_info = sections[i].offset; kmod_info_end = kmod_info + sections[i].size; incomplete--; } } R_FREE (sections); if (incomplete) { return NULL; } RList *kexts = r_list_newf ((RListFree) &r_kext_free); if (!kexts) { return NULL; } int n_kmod_info = (kmod_info_end - kmod_info) / 8; if (n_kmod_info == 0) { goto beach; } all_infos = R_NEWS0 (RKmodInfo, n_kmod_info); if (!all_infos) { goto beach; } ut8 bytes[8]; int j = 0; for (; j < n_kmod_info; j++) { ut64 entry_offset = j * 8 + kmod_info; if (r_buf_read_at (obj->cache_buf, entry_offset, bytes, 8) < 8) { goto beach; } ut64 kmod_info_paddr = K_RPTR (bytes) - pa2va_data; ut64 field_name = kmod_info_paddr + 0x10; ut64 field_start = kmod_info_paddr + 0xb4; if (r_buf_read_at (obj->cache_buf, field_start, bytes, 8) < 8) { goto beach; } all_infos[j].start = K_RPTR (bytes); if (r_buf_read_at (obj->cache_buf, field_name, (ut8 *) all_infos[j].name, 0x40) < 0x40) { goto beach; } all_infos[j].name[0x40] = 0; } ut64 cursor = kmod_start; for(; cursor < kmod_end; cursor += 8) { ut8 bytes[8]; if (r_buf_read_at (obj->cache_buf, cursor, bytes, 8) < 8) { goto beach; } RKext *kext = R_NEW0 (RKext); if (!kext) { goto beach; } kext->vaddr = K_RPTR (bytes); kext->range.offset = kext->vaddr - pa2va_exec; kext->mach0 = create_kext_mach0 (obj, kext, bf); if (!kext->mach0) { r_kext_free (kext); continue; } 
r_kext_fill_text_range (kext); kext->vaddr = K_PPTR (kext->vaddr); kext->pa2va_exec = pa2va_exec; kext->pa2va_data = pa2va_data; ut64 text_start = kext->vaddr; ut64 text_end = text_start + kext->text_range.size; if (text_start == text_end) { r_kext_free (kext); continue; } for (j = 0; j < n_kmod_info; j++) { if (text_start > all_infos[j].start || all_infos[j].start >= text_end) { continue; } kext->name = strdup (all_infos[j].name); kext->own_name = true; break; } if (!kext->name) { r_kext_free (kext); continue; } r_list_push (kexts, kext); } R_FREE (all_infos); return kexts; beach: r_list_free (kexts); R_FREE (all_infos); return NULL; } static RList *kexts_from_load_commands(RKernelCacheObj *obj, RBinFile *bf) { RList *kexts = r_list_newf ((RListFree) &r_kext_free); if (!kexts) { return NULL; } ut32 i, ncmds = r_buf_read_le32_at (obj->cache_buf, 16); ut64 length = r_buf_size (obj->cache_buf); ut32 cursor = sizeof (struct MACH0_(mach_header)); for (i = 0; i < ncmds && cursor < length; i++) { ut32 cmdtype = r_buf_read_le32_at (obj->cache_buf, cursor); ut32 cmdsize = r_buf_read_le32_at (obj->cache_buf, cursor + 4); if (!cmdsize || cmdsize + cursor < cursor) { break; } if (cmdtype != LC_KEXT) { cursor += cmdsize; continue; } ut64 vaddr = r_buf_read_le64_at (obj->cache_buf, cursor + 8); ut64 paddr = r_buf_read_le64_at (obj->cache_buf, cursor + 16); st32 padded_name_length = (st32)cmdsize - 32; if (padded_name_length <= 0 || cmdsize - 32 + cursor >= length) { cursor += cmdsize; continue; } char *padded_name = calloc (1, padded_name_length); if (!padded_name) { goto beach; } if (r_buf_read_at (obj->cache_buf, cursor + 32, (ut8 *)padded_name, padded_name_length) != padded_name_length) { free (padded_name); goto early; } RKext *kext = R_NEW0 (RKext); if (!kext) { free (padded_name); goto beach; } kext->vaddr = vaddr; kext->range.offset = paddr; kext->mach0 = create_kext_shared_mach0 (obj, kext, bf); if (!kext->mach0) { free (padded_name); r_kext_free (kext); cursor += 
cmdsize; continue; } r_kext_fill_text_range (kext); kext->vaddr = K_PPTR (kext->vaddr); kext->pa2va_exec = obj->pa2va_exec; kext->pa2va_data = obj->pa2va_data; kext->name = strdup (padded_name); kext->own_name = true; free (padded_name); r_list_push (kexts, kext); cursor += cmdsize; } early: return kexts; beach: r_list_free (kexts); return NULL; } static void r_kext_free(RKext *kext) { if (!kext) { return; } if (kext->mach0) { MACH0_(mach0_free) (kext->mach0); kext->mach0 = NULL; } if (kext->own_name && kext->name) { R_FREE (kext->name); kext->name = NULL; } R_FREE (kext); } static void r_kext_fill_text_range(RKext *kext) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (kext->mach0))) { return; } int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__TEXT_EXEC.__text")) { kext->text_range.offset = sections[i].offset; kext->text_range.size = sections[i].size; kext->vaddr = sections[i].addr; break; } } R_FREE (sections); } static int kexts_sort_vaddr_func(const void *a, const void *b) { RKext *A = (RKext *) a; RKext *B = (RKext *) b; int vaddr_compare = A->vaddr - B->vaddr; if (vaddr_compare == 0) { return A->text_range.size - B->text_range.size; } return vaddr_compare; } static RKextIndex *r_kext_index_new(RList *kexts) { if (!kexts) { return NULL; } int length = r_list_length (kexts); if (!length) { return NULL; } RKextIndex *index = R_NEW0 (RKextIndex); if (!index) { return NULL; } index->entries = malloc (length *sizeof(RKext*)); if (!index->entries) { R_FREE (index); return NULL; } RListIter *iter; RKext *kext; int i = 0; r_list_foreach (kexts, iter, kext) { index->entries[i++] = kext; } index->length = i; return index; } static void r_kext_index_free(RKextIndex *index) { if (!index) { return; } int i = 0; RKext *kext; r_kext_index_foreach (index, i, kext) { r_kext_free (kext); index->entries[i] = NULL; } index->length = 0; R_FREE (index); } static RKext *r_kext_index_vget(RKextIndex *index, ut64 vaddr) { int 
imid; int imin = 0; int imax = index->length - 1; while (imin < imax) { imid = (imin + imax) / 2; RKext *entry = index->entries[imid]; if ((entry->vaddr + entry->text_range.size) <= vaddr || (entry->vaddr == vaddr && entry->text_range.size == 0)) { imin = imid + 1; } else { imax = imid; } } RKext *minEntry = index->entries[imin]; if ((imax == imin) && (minEntry->vaddr <= vaddr) && ((minEntry->vaddr + minEntry->text_range.size) > vaddr)) { return minEntry; } return NULL; } static struct MACH0_(obj_t) *create_kext_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf) { RBuffer *buf = r_buf_new_slice (obj->cache_buf, kext->range.offset, r_buf_size (obj->cache_buf) - kext->range.offset); struct MACH0_(opts_t) opts; MACH0_(opts_set_default) (&opts, bf); opts.verbose = true; opts.header_at = 0; struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (buf, &opts); r_buf_free (buf); return mach0; } static struct MACH0_(obj_t) *create_kext_shared_mach0(RKernelCacheObj *obj, RKext *kext, RBinFile *bf) { RBuffer *buf = r_buf_ref (obj->cache_buf); struct MACH0_(opts_t) opts; MACH0_(opts_set_default) (&opts, bf); opts.verbose = false; opts.header_at = kext->range.offset; struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (buf, &opts); r_buf_free (buf); return mach0; } static RList *entries(RBinFile *bf) { RList *ret; RBinObject *obj = bf ? 
bf->o : NULL; if (!obj || !obj->bin_obj || !(ret = r_list_newf (free))) { return NULL; } RKernelCacheObj *kobj = (RKernelCacheObj*) obj->bin_obj; ut64 entry_vaddr = kobj->mach0->entry; if (kobj->pa2va_exec <= entry_vaddr) { ut64 entry_paddr = entry_vaddr - kobj->pa2va_exec; RBinAddr *ba = newEntry (entry_paddr, entry_vaddr, 0); if (ba) { r_list_append (ret, ba); } } process_constructors (kobj, kobj->mach0, ret, 0, true, R_K_CONSTRUCTOR_TO_ENTRY, NULL); return ret; } static void process_kmod_init_term(RKernelCacheObj *obj, RKext *kext, RList *ret, ut64 **inits, ut64 **terms) { if (!*inits || !*terms) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return; } int i = 0; for (; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } ut64 start_paddr = 0; ut64 *target = NULL; int n_ptrs = 0; if (!*inits && strstr (sections[i].name, "__kmod_init")) { int n_inits = sections[i].size / 8; if (n_inits <= 0) { continue; } *inits = R_NEWS0 (ut64, n_inits + 1); target = *inits; n_ptrs = n_inits; } if (!*terms && strstr (sections[i].name, "__kmod_term")) { int n_terms = sections[i].size / 8; if (n_terms <= 0) { continue; } *terms = R_NEWS0 (ut64, n_terms + 1); target = *terms; n_ptrs = n_terms; } if (!target || !n_ptrs) { continue; } start_paddr = sections[i].offset; int j = 0; ut8 bytes[8]; for (; j < n_ptrs; j++) { if (r_buf_read_at (obj->cache_buf, start_paddr + j * 8, bytes, 8) < 8) { break; } target[j] = K_RPTR (bytes); } target[j] = 0; } R_FREE (sections); } if (*inits) { create_initterm_syms (kext, ret, R_BIN_ENTRY_TYPE_INIT, *inits); } if (*terms) { create_initterm_syms (kext, ret, R_BIN_ENTRY_TYPE_FINI, *terms); } } /* * com.apple.driver.AppleMesaSEPDriver.3.__TEXT_EXEC.__text * | * | * AppleMesaSEPDriver <--+ */ static const char *kext_short_name(RKext *kext) { const char *sn = strrchr (kext->name, '.'); return sn ? 
sn + 1 : kext->name; } static void create_initterm_syms(RKext *kext, RList *ret, int type, ut64 *pointers) { int i = 0; int count = 0; for (; pointers[i]; i++) { ut64 func_vaddr = pointers[i]; ut64 text_start = kext->vaddr; ut64 text_end = text_start + kext->text_range.size; if (text_start == text_end) { continue; } if (text_start > func_vaddr || func_vaddr >= text_end) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("%s.%s.%d", kext_short_name (kext), (type == R_BIN_ENTRY_TYPE_INIT) ? "init" : "fini", count++); sym->vaddr = func_vaddr; sym->paddr = func_vaddr - kext->pa2va_exec; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (ret, sym); } } static void process_constructors(RKernelCacheObj *obj, struct MACH0_(obj_t) *mach0, RList *ret, ut64 paddr, bool is_first, int mode, const char *prefix) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return; } int i, type; for (i = 0; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } if (strstr (sections[i].name, "_mod_fini_func") || strstr (sections[i].name, "_mod_term_func")) { type = R_BIN_ENTRY_TYPE_FINI; } else if (strstr (sections[i].name, "_mod_init_func")) { type = is_first ? 0 : R_BIN_ENTRY_TYPE_INIT; is_first = false; } else { continue; } ut8 *buf = calloc (sections[i].size, 1); if (!buf) { break; } if (r_buf_read_at (obj->cache_buf, sections[i].offset + paddr, buf, sections[i].size) < sections[i].size) { free (buf); break; } int j; int count = 0; for (j = 0; j < sections[i].size; j += 8) { ut64 addr64 = K_RPTR (buf + j); ut64 paddr64 = sections[i].offset + paddr + j; if (mode == R_K_CONSTRUCTOR_TO_ENTRY) { RBinAddr *ba = newEntry (paddr64, addr64, type); r_list_append (ret, ba); } else if (mode == R_K_CONSTRUCTOR_TO_SYMBOL) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("%s.%s.%d", prefix, (type == R_BIN_ENTRY_TYPE_INIT) ? 
"init" : "fini", count++); sym->vaddr = addr64; sym->paddr = paddr64; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (ret, sym); } } free (buf); } free (sections); } static RBinAddr *newEntry(ut64 haddr, ut64 vaddr, int type) { RBinAddr *ptr = R_NEW0 (RBinAddr); if (!ptr) { return NULL; } ptr->paddr = haddr; ptr->vaddr = vaddr; ptr->hpaddr = haddr; ptr->bits = 64; ptr->type = type; return ptr; } static bool check_buffer(RBinFile *bf, RBuffer *b) { if (r_buf_size (b) > 4) { ut8 buf[4]; r_buf_read_at (b, 0, buf, sizeof (buf)); if (!memcmp (buf, "\xcf\xfa\xed\xfe", 4)) { return is_kernelcache_buffer (b); } } return false; } static RList *sections(RBinFile *bf) { RList *ret = NULL; RBinObject *obj = bf ? bf->o : NULL; if (!obj || !obj->bin_obj || !(ret = r_list_newf ((RListFree)free))) { return NULL; } RKernelCacheObj *kobj = (RKernelCacheObj*) obj->bin_obj; ensure_kexts_initialized (kobj, bf); int iter; RKext *kext; r_kext_index_foreach (kobj->kexts, iter, kext) { ut8 magicbytes[4]; r_buf_read_at (kobj->cache_buf, kext->range.offset, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: sections_from_mach0 (ret, kext->mach0, bf, kext->range.offset, kext->name, kobj); break; default: eprintf ("Unknown sub-bin\n"); break; } } sections_from_mach0 (ret, kobj->mach0, bf, 0, NULL, kobj); struct MACH0_(segment_command) *seg; int nsegs = R_MIN (kobj->mach0->nsegs, 128); int i; for (i = 0; i < nsegs; i++) { RBinSection *ptr; char segname[17]; if (!(ptr = R_NEW0 (RBinSection))) { break; } seg = &kobj->mach0->segs[i]; r_str_ncpy (segname, seg->segname, 17); r_str_filter (segname, -1); ptr->name = r_str_newf ("%d.%s", i, segname); ptr->size = seg->vmsize; ptr->vsize = seg->vmsize; ptr->paddr = seg->fileoff + bf->o->boffset; ptr->vaddr = seg->vmaddr; ptr->add = true; ptr->is_segment = true; if (!ptr->vaddr) { ptr->vaddr = ptr->paddr; } ptr->perm = prot2perm (seg->initprot); r_list_append (ret, 
ptr); } return ret; } static int prot2perm(int x) { int r = 0; if (x&1) r |= 4; if (x&2) r |= 2; if (x&4) r |= 1; return r; } static void sections_from_mach0(RList *ret, struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, char *prefix, RKernelCacheObj *obj) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return; } int i; for (i = 0; !sections[i].last; i++) { RBinSection *ptr; if (!(ptr = R_NEW0 (RBinSection))) { break; } if (prefix) { ptr->name = r_str_newf ("%s.%s", prefix, (char*)sections[i].name); } else { ptr->name = r_str_newf ("%s", (char*)sections[i].name); } if (strstr (ptr->name, "la_symbol_ptr")) { int len = sections[i].size / 8; ptr->format = r_str_newf ("Cd %d[%d]", 8, len); } handle_data_sections (ptr); ptr->size = sections[i].size; ptr->vsize = sections[i].vsize; ptr->paddr = sections[i].offset + bf->o->boffset + paddr; ptr->vaddr = K_PPTR (sections[i].addr); if (!ptr->vaddr) { ptr->vaddr = ptr->paddr; } ptr->perm = sections[i].perm; if (!ptr->perm && strstr (sections[i].name, "__TEXT_EXEC.__text")) { ptr->perm = 1 | 4; } r_list_append (ret, ptr); } free (sections); } static void handle_data_sections(RBinSection *sect) { if (strstr (sect->name, "_cstring")) { sect->is_data = true; } else if (strstr (sect->name, "_os_log")) { sect->is_data = true; } else if (strstr (sect->name, "_objc_methname")) { sect->is_data = true; } else if (strstr (sect->name, "_objc_classname")) { sect->is_data = true; } else if (strstr (sect->name, "_objc_methtype")) { sect->is_data = true; } } static RList *symbols(RBinFile *bf) { RList *ret = r_list_newf (free); if (!ret) { return NULL; } RKernelCacheObj *obj = (RKernelCacheObj*) bf->o->bin_obj; symbols_from_mach0 (ret, obj->mach0, bf, 0, 0); HtPP *kernel_syms_by_addr = sdb_ht_new (); if (!kernel_syms_by_addr) { r_list_free (ret); return NULL; } RListIter *iter; RBinSymbol *sym; ut64 enosys_addr = 0; r_list_foreach (ret, iter, sym) { r_strf_var (key, 64, "%"PFMT64x, sym->vaddr); 
sdb_ht_insert (kernel_syms_by_addr, key, sym->dname ? sym->dname : sym->name); if (!enosys_addr && strstr (sym->name, "enosys")) { enosys_addr = sym->vaddr; } } RList *syscalls = resolve_syscalls (obj, enosys_addr); if (syscalls) { r_list_foreach (syscalls, iter, sym) { r_strf_var (key, 32, "%"PFMT64x, sym->vaddr); sdb_ht_insert (kernel_syms_by_addr, key, sym->name); r_list_append (ret, sym); } syscalls->free = NULL; r_list_free (syscalls); } RList *subsystem = resolve_mig_subsystem (obj); if (subsystem) { r_list_foreach (subsystem, iter, sym) { r_strf_var (key, 64, "%"PFMT64x, sym->vaddr); sdb_ht_insert (kernel_syms_by_addr, key, sym->name); r_list_append (ret, sym); } subsystem->free = NULL; r_list_free (subsystem); } ensure_kexts_initialized (obj, bf); RKext *kext; int kiter; ut64 *inits = NULL; ut64 *terms = NULL; r_kext_index_foreach (obj->kexts, kiter, kext) { ut8 magicbytes[4]; r_buf_read_at (obj->cache_buf, kext->range.offset, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: symbols_from_mach0 (ret, kext->mach0, bf, kext->range.offset, r_list_length (ret)); symbols_from_stubs (ret, kernel_syms_by_addr, obj, bf, kext, r_list_length (ret)); process_constructors (obj, kext->mach0, ret, kext->range.offset, false, R_K_CONSTRUCTOR_TO_SYMBOL, kext_short_name (kext)); process_kmod_init_term (obj, kext, ret, &inits, &terms); break; default: eprintf ("Unknown sub-bin\n"); break; } } R_FREE (inits); R_FREE (terms); sdb_ht_free (kernel_syms_by_addr); return ret; } static void symbols_from_mach0(RList *ret, struct MACH0_(obj_t) *mach0, RBinFile *bf, ut64 paddr, int ordinal) { const struct symbol_t *symbols = MACH0_(get_symbols) (mach0); if (!symbols) { return; } int i; for (i = 0; !symbols[i].last; i++) { if (!symbols[i].name[0] || symbols[i].addr < 100) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = strdup (symbols[i].name); sym->vaddr = symbols[i].addr; if (sym->name[0] == '_') { char *dn = 
r_bin_demangle (bf, sym->name, sym->name, sym->vaddr, false); if (dn) { sym->dname = dn; char *p = strchr (dn, '.'); if (p) { if (IS_UPPER (sym->name[0])) { sym->classname = strdup (sym->name); sym->classname[p - sym->name] = 0; } else if (IS_UPPER (p[1])) { sym->classname = strdup (p + 1); p = strchr (sym->classname, '.'); if (p) { *p = 0; } } } } } sym->forwarder = "NONE"; sym->bind = (symbols[i].type == R_BIN_MACH0_SYMBOL_TYPE_LOCAL)? "LOCAL": "GLOBAL"; sym->type = "FUNC"; sym->paddr = symbols[i].offset + bf->o->boffset + paddr; sym->size = symbols[i].size; sym->ordinal = ordinal + i; r_list_append (ret, sym); } } #define IS_KERNEL_ADDR(x) ((x & 0xfffffff000000000L) == 0xfffffff000000000L) typedef struct _r_sysent { ut64 sy_call; ut64 sy_arg_munge32; st32 sy_return_type; st16 sy_narg; ut16 sy_arg_bytes; } RSysEnt; static RList *resolve_syscalls(RKernelCacheObj *obj, ut64 enosys_addr) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return NULL; } RList *syscalls = NULL; RSyscall *syscall = NULL; ut8 *data_const = NULL; ut64 data_const_offset = 0, data_const_size = 0, data_const_vaddr = 0; int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__DATA_CONST.__const")) { data_const_offset = sections[i].offset; data_const_size = sections[i].size; data_const_vaddr = K_PPTR (sections[i].addr); break; } } if (!data_const_offset || !data_const_size || !data_const_vaddr) { goto beach; } data_const = malloc (data_const_size); if (!data_const) { goto beach; } if (r_buf_read_at (obj->cache_buf, data_const_offset, data_const, data_const_size) < data_const_size) { goto beach; } ut8 *cursor = data_const; ut8 *end = data_const + data_const_size; while (cursor < end) { ut64 test = r_read_le64 (cursor); if (test == enosys_addr) { break; } cursor += 8; } if (cursor >= end) { goto beach; } cursor -= 24; while (cursor >= data_const) { ut64 addr = r_read_le64 (cursor); ut64 x = r_read_le64 (cursor + 8); ut64 y = 
r_read_le64 (cursor + 16); if (IS_KERNEL_ADDR (addr) && (x == 0 || IS_KERNEL_ADDR (x)) && (y != 0 && !IS_KERNEL_ADDR (y))) { cursor -= 24; continue; } cursor += 24; break; } if (cursor < data_const) { goto beach; } syscalls = r_list_newf (r_bin_symbol_free); if (!syscalls) { goto beach; } syscall = r_syscall_new (); if (!syscall) { goto beach; } r_syscall_setup (syscall, "arm", 64, NULL, "ios"); if (!syscall->db) { r_syscall_free (syscall); goto beach; } ut64 sysent_vaddr = cursor - data_const + data_const_vaddr; RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { goto beach; } sym->name = r_str_newf ("sysent"); sym->vaddr = sysent_vaddr; sym->paddr = cursor - data_const + data_const_offset; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "OBJECT"; r_list_append (syscalls, sym); i = 1; cursor += 24; int num_syscalls = sdb_count (syscall->db); while (cursor < end && i < num_syscalls) { ut64 addr = r_read_le64 (cursor); RSyscallItem *item = r_syscall_get (syscall, i, 0x80); if (item && item->name) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { r_syscall_item_free (item); goto beach; } sym->name = r_str_newf ("syscall.%d.%s", i, item->name); sym->vaddr = addr; sym->paddr = addr; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "FUNC"; r_list_append (syscalls, sym); } r_syscall_item_free (item); cursor += 24; i++; } r_syscall_free (syscall); R_FREE (data_const); R_FREE (sections); return syscalls; beach: r_syscall_free (syscall); if (syscalls) { r_list_free (syscalls); } R_FREE (data_const); R_FREE (sections); return NULL; } #define K_MIG_SUBSYSTEM_SIZE (4 * 8) #define K_MIG_ROUTINE_SIZE (5 * 8) #define K_MIG_MAX_ROUTINES 100 static HtPP *mig_hash_new(void) { HtPP *hash = sdb_ht_new (); if (!hash) { return NULL; } int i; for (i = 0; i < R_MIG_INDEX_LEN; i += 2) { const char *num = mig_index[i]; const char *name = mig_index[i+1]; sdb_ht_insert (hash, num, name); } return hash; } static RList 
*resolve_mig_subsystem(RKernelCacheObj *obj) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (obj->mach0))) { return NULL; } HtPP *mig_hash = NULL; RList *subsystem = NULL; ut8 *data_const = NULL; ut64 data_const_offset = 0, data_const_size = 0, data_const_vaddr = 0; ut64 text_exec_offset = 0, text_exec_size = 0, text_exec_vaddr = 0; int incomplete = 2; int i = 0; for (; !sections[i].last && incomplete > 0; i++) { if (strstr (sections[i].name, "__DATA_CONST.__const")) { data_const_offset = sections[i].offset; data_const_size = sections[i].size; data_const_vaddr = K_PPTR (sections[i].addr); incomplete--; } if (strstr (sections[i].name, "__TEXT_EXEC.__text")) { text_exec_offset = sections[i].offset; text_exec_size = sections[i].size; text_exec_vaddr = K_PPTR (sections[i].addr); incomplete--; } } if (!data_const_offset || !data_const_size || !data_const_vaddr || !text_exec_offset || !text_exec_size || !text_exec_vaddr) { goto beach; } data_const = malloc (data_const_size); if (!data_const) { goto beach; } if (r_buf_read_at (obj->cache_buf, data_const_offset, data_const, data_const_size) < data_const_size) { goto beach; } subsystem = r_list_newf (r_bin_symbol_free); if (!subsystem) { goto beach; } mig_hash = mig_hash_new (); if (!mig_hash) { goto beach; } ut8 *cursor = data_const; ut8 *end = data_const + data_const_size; while (cursor < end) { ut64 subs_p = K_PPTR (r_read_le64 (cursor)); if (subs_p < text_exec_vaddr || subs_p >= text_exec_vaddr + text_exec_size) { cursor += 8; continue; } ut32 subs_min_idx = r_read_le32 (cursor + 8); ut32 subs_max_idx = r_read_le32 (cursor + 12); if (subs_min_idx >= subs_max_idx || (subs_max_idx - subs_min_idx) > K_MIG_MAX_ROUTINES) { cursor += 16; continue; } ut32 n_routines = (subs_max_idx - subs_min_idx); ut64 *routines = (ut64 *) calloc (n_routines, sizeof (ut64)); if (!routines) { goto beach; } ut8 *array_cursor = cursor + K_MIG_SUBSYSTEM_SIZE; ut8 *end_array = array_cursor + n_routines * 
K_MIG_ROUTINE_SIZE; bool is_consistent = true; int idx = 0; while (array_cursor < end_array) { ut64 should_be_null = r_read_le64 (array_cursor); if (should_be_null != 0) { is_consistent = false; break; } ut64 routine_p = K_PPTR (r_read_le64 (array_cursor + 8)); if (routine_p != 0 && (routine_p < text_exec_vaddr || routine_p >= text_exec_vaddr + text_exec_size)) { is_consistent = false; break; } routines[idx++] = routine_p; array_cursor += K_MIG_ROUTINE_SIZE; } if (is_consistent) { for (idx = 0; idx < n_routines; idx++) { ut64 routine_p = routines[idx]; if (!routine_p) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { R_FREE (routines); goto beach; } int num = idx + subs_min_idx; bool found = false; r_strf_var (key, 32, "%d", num); const char *name = sdb_ht_find (mig_hash, key, &found); if (found && name && *name) { sym->name = r_str_newf ("mig.%d.%s", num, name); } else { sym->name = r_str_newf ("mig.%d", num); } sym->vaddr = routine_p; sym->paddr = sym->vaddr - text_exec_vaddr + text_exec_offset; sym->size = 0; sym->forwarder = "NONE"; sym->bind = "GLOBAL"; sym->type = "OBJECT"; r_list_append (subsystem, sym); } cursor += K_MIG_SUBSYSTEM_SIZE + n_routines * K_MIG_ROUTINE_SIZE; } else { cursor += 8; } R_FREE (routines); } sdb_ht_free (mig_hash); R_FREE (data_const); R_FREE (sections); return subsystem; beach: if (subsystem) { r_list_free (subsystem); } if (mig_hash) { sdb_ht_free (mig_hash); } R_FREE (data_const); R_FREE (sections); return NULL; } static ut64 extract_addr_from_code(ut8 *arm64_code, ut64 vaddr) { ut64 addr = vaddr & ~0xfff; ut64 adrp = r_read_le32 (arm64_code); ut64 adrp_offset = ((adrp & 0x60000000) >> 29) | ((adrp & 0xffffe0) >> 3); addr += adrp_offset << 12; ut64 ldr = r_read_le32 (arm64_code + 4); addr += ((ldr & 0x3ffc00) >> 10) << ((ldr & 0xc0000000) >> 30); return addr; } static void symbols_from_stubs(RList *ret, HtPP *kernel_syms_by_addr, RKernelCacheObj *obj, RBinFile *bf, RKext *kext, int ordinal) { RStubsInfo *stubs_info = 
get_stubs_info(kext->mach0, kext->range.offset, obj); if (!stubs_info) { return; } ut64 stubs_cursor = stubs_info->stubs.offset; ut64 stubs_end = stubs_cursor + stubs_info->stubs.size; for (; stubs_cursor < stubs_end; stubs_cursor += 12) { ut8 arm64_code[8]; if (r_buf_read_at (obj->cache_buf, stubs_cursor, arm64_code, 8) < 8) { break; } ut64 vaddr = stubs_cursor + obj->pa2va_exec; ut64 addr_in_got = extract_addr_from_code (arm64_code, vaddr); bool found = false; int level = 3; ut64 target_addr = UT64_MAX; while (!found && level-- > 0) { ut64 offset_in_got = addr_in_got - obj->pa2va_exec; ut64 addr; if (r_buf_read_at (obj->cache_buf, offset_in_got, (ut8*) &addr, 8) < 8) { break; } if (level == 2) { target_addr = addr; } r_strf_var (key, 32, "%"PFMT64x, addr); const char *name = sdb_ht_find (kernel_syms_by_addr, key, &found); if (found) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("stub.%s", name); sym->vaddr = vaddr; sym->paddr = stubs_cursor; sym->size = 12; sym->forwarder = "NONE"; sym->bind = "LOCAL"; sym->type = "FUNC"; sym->ordinal = ordinal ++; r_list_append (ret, sym); break; } addr_in_got = addr; } if (found || target_addr == UT64_MAX) { continue; } ensure_kexts_initialized (obj, bf); RKext *remote_kext = r_kext_index_vget (obj->kexts, target_addr); if (!remote_kext) { continue; } RBinSymbol *remote_sym = R_NEW0 (RBinSymbol); if (!remote_sym) { break; } remote_sym->name = r_str_newf ("exp.%s.0x%"PFMT64x, kext_short_name (remote_kext), target_addr); remote_sym->vaddr = target_addr; remote_sym->paddr = target_addr - obj->pa2va_exec; remote_sym->size = 0; remote_sym->forwarder = "NONE"; remote_sym->bind = "GLOBAL"; remote_sym->type = "FUNC"; remote_sym->ordinal = ordinal ++; r_list_append (ret, remote_sym); RBinSymbol *local_sym = R_NEW0 (RBinSymbol); if (!local_sym) { break; } local_sym->name = r_str_newf ("stub.%s.0x%"PFMT64x, kext_short_name (remote_kext), target_addr); local_sym->vaddr = vaddr; local_sym->paddr = 
stubs_cursor; local_sym->size = 12; local_sym->forwarder = "NONE"; local_sym->bind = "GLOBAL"; local_sym->type = "FUNC"; local_sym->ordinal = ordinal ++; r_list_append (ret, local_sym); } R_FREE (stubs_info); } static RStubsInfo *get_stubs_info(struct MACH0_(obj_t) *mach0, ut64 paddr, RKernelCacheObj *obj) { struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return NULL; } RStubsInfo *stubs_info = R_NEW0 (RStubsInfo); if (!stubs_info) { free (sections); return NULL; } int incomplete = 2; int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__DATA_CONST.__got")) { stubs_info->got.offset = sections[i].offset + paddr; stubs_info->got.size = sections[i].size; stubs_info->got_addr = K_PPTR (sections[i].addr); if (!--incomplete) { break; } } if (strstr (sections[i].name, "__TEXT_EXEC.__stubs")) { stubs_info->stubs.offset = sections[i].offset + paddr; stubs_info->stubs.size = sections[i].size; if (!--incomplete) { break; } } } R_FREE (sections); if (incomplete) { R_FREE (stubs_info); } return stubs_info; } static RBinInfo *info(RBinFile *bf) { RBinInfo *ret = NULL; bool big_endian = 0; if (!(ret = R_NEW0 (RBinInfo))) { return NULL; } ret->file = strdup (bf->file); ret->bclass = strdup ("kernelcache"); ret->rclass = strdup ("ios"); ret->os = strdup ("iOS"); ret->arch = strdup ("arm"); // XXX ret->machine = strdup (ret->arch); ret->subsystem = strdup ("xnu"); ret->type = strdup ("kernel-cache"); ret->bits = 64; ret->has_va = true; ret->big_endian = big_endian; ret->dbg_info = 0; return ret; } static ut64 baddr(RBinFile *bf) { if (!bf || !bf->o || !bf->o->bin_obj) { return 8LL; } RKernelCacheObj *obj = (RKernelCacheObj*) bf->o->bin_obj; return MACH0_(get_baddr)(obj->mach0); } static void destroy(RBinFile *bf) { r_kernel_cache_free ((RKernelCacheObj*) bf->o->bin_obj); } static void r_kernel_cache_free(RKernelCacheObj *obj) { if (!obj) { return; } if (obj->mach0) { MACH0_(mach0_free) (obj->mach0); obj->mach0 = NULL; 
obj->cache_buf = NULL; } if (obj->cache_buf) { r_buf_free (obj->cache_buf); obj->cache_buf = NULL; } if (obj->prelink_info) { r_cf_value_dict_free (obj->prelink_info); obj->prelink_info = NULL; } if (obj->kexts) { r_kext_index_free (obj->kexts); obj->kexts = NULL; } if (obj->rebase_info) { r_rebase_info_free (obj->rebase_info); obj->rebase_info = NULL; } R_FREE (obj); } static RRebaseInfo *r_rebase_info_new_from_mach0(RBuffer *cache_buf, struct MACH0_(obj_t) *mach0) { RFileRange *rebase_ranges = NULL; struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return NULL; } ut64 starts_offset = 0, starts_size = 0; int i = 0; for (; !sections[i].last; i++) { if (strstr (sections[i].name, "__TEXT.__thread_starts")) { starts_offset = sections[i].offset; starts_size = sections[i].size; break; } } R_FREE (sections); ut64 kernel_base = 0; struct MACH0_(segment_command) *seg; int nsegs = R_MIN (mach0->nsegs, 128); for (i = 0; i < nsegs; i++) { char segname[17]; seg = &mach0->segs[i]; r_str_ncpy (segname, seg->segname, 17); if (!strncmp (segname, "__TEXT", 6) && segname[6] == '\0') { kernel_base = seg->vmaddr; break; } } if (starts_offset == 0 || starts_size == 0 || kernel_base == 0) { return NULL; } int n_starts = starts_size / 4; if (n_starts <= 1) { return NULL; } rebase_ranges = R_NEWS0 (RFileRange, n_starts - 1); if (rebase_ranges == NULL) { return NULL; } ut64 multiplier = 4; for (i = 0; i != n_starts; i++) { ut8 bytes[4]; if (r_buf_read_at (cache_buf, starts_offset + i * 4, bytes, 4) < 4) { goto beach; } if (i == 0) { multiplier += 4 * (r_read_le32 (bytes) & 1); continue; } rebase_ranges[i - 1].offset = r_read_le32 (bytes); rebase_ranges[i - 1].size = UT64_MAX; } RRebaseInfo *rebase_info = R_NEW0 (RRebaseInfo); if (rebase_info == NULL) { goto beach; } rebase_info->ranges = rebase_ranges; rebase_info->n_ranges = n_starts - 1; rebase_info->multiplier = multiplier; rebase_info->kernel_base = kernel_base; return rebase_info; beach: R_FREE 
(rebase_ranges); return NULL; } static void r_rebase_info_free(RRebaseInfo *info) { if (!info) { return; } if (info->ranges) { R_FREE (info->ranges); info->ranges = NULL; } R_FREE (info); } static void r_rebase_info_populate(RRebaseInfo *info, RKernelCacheObj *obj) { struct section_t *sections = NULL; int i = 0; if (obj->rebase_info_populated) { return; } obj->rebase_info_populated = true; for (; i < info->n_ranges; i++) { if (info->ranges[i].size != UT64_MAX) { goto cleanup; } else if (sections == NULL) { if (!(sections = MACH0_(get_sections) (obj->mach0))) { return; } } info->ranges[i].offset = r_rebase_offset_to_paddr (obj, sections, info->ranges[i].offset); ut64 end = iterate_rebase_list (obj->cache_buf, info->multiplier, info->ranges[i].offset, NULL, NULL); if (end != UT64_MAX) { info->ranges[i].size = end - info->ranges[i].offset + 8; } else { info->ranges[i].size = 0; } } cleanup: R_FREE (sections); } static ut64 r_rebase_offset_to_paddr(RKernelCacheObj *obj, struct section_t *sections, ut64 offset) { ut64 vaddr = obj->rebase_info->kernel_base + offset; int i = 0; for (; !sections[i].last; i++) { if (sections[i].addr <= vaddr && vaddr < (sections[i].addr + sections[i].vsize)) { return sections[i].offset + (vaddr - sections[i].addr); } } return offset; } static ut64 iterate_rebase_list(RBuffer *cache_buf, ut64 multiplier, ut64 start_offset, ROnRebaseFunc func, void *user_data) { ut8 bytes[8]; ut64 cursor = start_offset; while (true) { if (r_buf_read_at (cache_buf, cursor, bytes, 8) < 8) { return UT64_MAX; } ut64 decorated_addr = r_read_le64 (bytes); if (func) { bool carry_on = func (cursor, decorated_addr, user_data); if (!carry_on) { break; } } ut64 delta = ((decorated_addr >> 51) & 0x7ff) * multiplier; if (delta == 0) { break; } cursor += delta; } return cursor; } static void swizzle_io_read(RKernelCacheObj *obj, RIO *io) { r_return_if_fail (io && io->desc && io->desc->plugin); RIOPlugin *plugin = io->desc->plugin; obj->original_io_read = plugin->read; 
plugin->read = &kernelcache_io_read; } static int kernelcache_io_read(RIO *io, RIODesc *fd, ut8 *buf, int count) { r_return_val_if_fail (io, -1); RCore *core = (RCore*) io->corebind.core; if (!fd || !core || !core->bin || !core->bin->binfiles) { return -1; } RKernelCacheObj *cache = NULL; RListIter *iter; RBinFile *bf; r_list_foreach (core->bin->binfiles, iter, bf) { if (bf->fd == fd->fd && bf->o && bf->o->bin_obj) { cache = bf->o->bin_obj; if (pending_bin_files) { RListIter *to_remove = r_list_contains (pending_bin_files, bf); if (to_remove) { r_list_delete (pending_bin_files, to_remove); if (r_list_empty (pending_bin_files)) { r_list_free (pending_bin_files); pending_bin_files = NULL; } } } break; } } if (!cache) { r_list_foreach (pending_bin_files, iter, bf) { if (bf->fd == fd->fd && bf->o) { cache = bf->o->bin_obj; break; } } } if (!cache || !cache->original_io_read || cache->rebasing_buffer) { if (cache) { if ((!cache->rebasing_buffer && fd->plugin->read == &kernelcache_io_read) || (cache->rebasing_buffer && !cache->original_io_read)) { return -1; } if (cache->rebasing_buffer) { return cache->original_io_read (io, fd, buf, count); } } if (fd->plugin->read == kernelcache_io_read) { if (core->bin->verbose) { eprintf ("Avoid recursive reads\n"); } return -1; } return fd->plugin->read (io, fd, buf, count); } if (cache->rebase_info) { r_rebase_info_populate (cache->rebase_info, cache); } static ut8 *internal_buffer = NULL; static int internal_buf_size = 0; if (count > internal_buf_size) { if (internal_buffer) { R_FREE (internal_buffer); internal_buffer = NULL; } internal_buffer = (ut8 *) malloc (count); internal_buf_size = count; } if (!cache->original_io_read) { return -1; } ut64 io_off = io->off; int result = cache->original_io_read (io, fd, internal_buffer, count); if (result == count) { if (cache->mach0->chained_starts) { rebase_buffer_fixup (cache, io_off, fd, internal_buffer, count); } else if (cache->rebase_info) { rebase_buffer (cache, io_off, fd, 
internal_buffer, count); } memcpy (buf, internal_buffer, result); } return result; } static void rebase_buffer(RKernelCacheObj *obj, ut64 off, RIODesc *fd, ut8 *buf, int count) { if (obj->rebasing_buffer || !buf) { return; } obj->rebasing_buffer = true; ut64 eob = off + count; int i = 0; RRebaseCtx ctx; ctx.off = off; ctx.eob = eob; ctx.buf = buf; ctx.count = count; ctx.obj = obj; for (; i < obj->rebase_info->n_ranges; i++) { ut64 start = obj->rebase_info->ranges[i].offset; ut64 end = start + obj->rebase_info->ranges[i].size; if (end >= off && start <= eob) { iterate_rebase_list (obj->cache_buf, obj->rebase_info->multiplier, start, (ROnRebaseFunc) on_rebase_pointer, &ctx); } } obj->rebasing_buffer = false; } static void rebase_buffer_fixup(RKernelCacheObj *kobj, ut64 off, RIODesc *fd, ut8 *buf, int count) { if (kobj->rebasing_buffer) { return; } kobj->rebasing_buffer = true; struct MACH0_(obj_t) *obj = kobj->mach0; ut64 eob = off + count; size_t i = 0; for (; i < obj->nsegs; i++) { if (!obj->chained_starts[i]) { continue; } ut64 page_size = obj->chained_starts[i]->page_size; ut64 start = obj->segs[i].fileoff; ut64 end = start + obj->segs[i].filesize; if (end >= off && start <= eob) { ut64 page_idx = (R_MAX (start, off) - start) / page_size; ut64 page_end_idx = (R_MIN (eob, end) - start) / page_size; for (; page_idx <= page_end_idx; page_idx++) { if (page_idx >= obj->chained_starts[i]->page_count) { break; } ut16 page_start = obj->chained_starts[i]->page_start[page_idx]; if (page_start == DYLD_CHAINED_PTR_START_NONE) { continue; } ut64 cursor = start + page_idx * page_size + page_start; while (cursor < eob && cursor < end) { ut8 tmp[8]; if (r_buf_read_at (obj->b, cursor, tmp, 8) != 8) { break; } ut64 raw_ptr = r_read_le64 (tmp); ut64 ptr_value = raw_ptr; ut64 delta = 0; ut64 stride = 8; if (obj->chained_starts[i]->pointer_format == DYLD_CHAINED_PTR_ARM64E) { bool is_auth = IS_PTR_AUTH (raw_ptr); bool is_bind = IS_PTR_BIND (raw_ptr); if (is_auth && is_bind) { struct 
dyld_chained_ptr_arm64e_auth_bind *p = (struct dyld_chained_ptr_arm64e_auth_bind *) &raw_ptr; delta = p->next; } else if (!is_auth && is_bind) { struct dyld_chained_ptr_arm64e_bind *p = (struct dyld_chained_ptr_arm64e_bind *) &raw_ptr; delta = p->next; } else if (is_auth && !is_bind) { struct dyld_chained_ptr_arm64e_auth_rebase *p = (struct dyld_chained_ptr_arm64e_auth_rebase *) &raw_ptr; delta = p->next; ptr_value = p->target + obj->baddr; } else { struct dyld_chained_ptr_arm64e_rebase *p = (struct dyld_chained_ptr_arm64e_rebase *) &raw_ptr; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; ptr_value += obj->baddr; } } else if (obj->chained_starts[i]->pointer_format == DYLD_CHAINED_PTR_64_KERNEL_CACHE || obj->chained_starts[i]->pointer_format == DYLD_CHAINED_PTR_ARM64E_KERNEL) { bool is_auth = IS_PTR_AUTH (raw_ptr); stride = 4; if (is_auth) { struct dyld_chained_ptr_arm64e_cache_auth_rebase *p = (struct dyld_chained_ptr_arm64e_cache_auth_rebase *) &raw_ptr; delta = p->next; ptr_value = p->target + obj->baddr; } else { struct dyld_chained_ptr_arm64e_cache_rebase *p = (struct dyld_chained_ptr_arm64e_cache_rebase *) &raw_ptr; delta = p->next; ptr_value = ((ut64)p->high8 << 56) | p->target; ptr_value += obj->baddr; } } else { eprintf ("Unsupported pointer format: %u\n", obj->chained_starts[i]->pointer_format); } ut64 in_buf = cursor - off; if (cursor >= off && cursor <= eob - 8) { r_write_le64 (&buf[in_buf], ptr_value); } cursor += delta * stride; if (!delta) { break; } } } } } kobj->rebasing_buffer = false; } static bool on_rebase_pointer(ut64 offset, ut64 decorated_addr, RRebaseCtx *ctx) { if (offset < ctx->off) { return true; } if (offset >= ctx->eob) { return false; } ut64 in_buf = offset - ctx->off; if (in_buf >= ctx->count || (in_buf + 8) > ctx->count) { return false; } RParsedPointer ptr; r_parse_pointer (&ptr, decorated_addr, ctx->obj); r_write_le64 (&ctx->buf[in_buf], ptr.address); return true; } static bool r_parse_pointer(RParsedPointer *ptr, 
ut64 decorated_addr, RKernelCacheObj *obj) { /* * Logic taken from: * https://github.com/Synacktiv/kernelcache-laundering/blob/master/ios12_kernel_cache_helper.py */ if ((decorated_addr & 0x4000000000000000LL) == 0 && obj->rebase_info) { if (decorated_addr & 0x8000000000000000LL) { ptr->address = obj->rebase_info->kernel_base + (decorated_addr & 0xFFFFFFFFLL); } else { ptr->address = ((decorated_addr << 13) & 0xFF00000000000000LL) | (decorated_addr & 0x7ffffffffffLL); if (decorated_addr & 0x40000000000LL) { ptr->address |= 0xfffc0000000000LL; } } } else { ptr->address = decorated_addr; } return true; } RBinPlugin r_bin_plugin_xnu_kernelcache = { .name = "kernelcache", .desc = "kernelcache bin plugin", .license = "LGPL3", .destroy = &destroy, .load_buffer = &load_buffer, .entries = &entries, .baddr = &baddr, .symbols = &symbols, .sections = &sections, .check_buffer = &check_buffer, .info = &info }; #ifndef R2_PLUGIN_INCORE RLibStruct radare_plugin = { .type = R_LIB_TYPE_BIN, .data = &r_bin_plugin_kernelcache, .version = R2_VERSION }; #endif
static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) { RBuffer *fbuf = r_buf_ref (buf); struct MACH0_(opts_t) opts; MACH0_(opts_set_default) (&opts, bf); struct MACH0_(obj_t) *main_mach0 = MACH0_(new_buf) (fbuf, &opts); if (!main_mach0) { return false; } RRebaseInfo *rebase_info = r_rebase_info_new_from_mach0 (fbuf, main_mach0); RKernelCacheObj *obj = NULL; RPrelinkRange *prelink_range = get_prelink_info_range_from_mach0 (main_mach0); if (!prelink_range) { goto beach; } obj = R_NEW0 (RKernelCacheObj); if (!obj) { R_FREE (prelink_range); goto beach; } RCFValueDict *prelink_info = NULL; if (main_mach0->hdr.filetype != MH_FILESET && prelink_range->range.size) { prelink_info = r_cf_value_dict_parse (fbuf, prelink_range->range.offset, prelink_range->range.size, R_CF_OPTION_SKIP_NSDATA); if (!prelink_info) { R_FREE (prelink_range); R_FREE (obj); goto beach; } } if (!pending_bin_files) { pending_bin_files = r_list_new (); if (!pending_bin_files) { R_FREE (prelink_range); R_FREE (obj); R_FREE (prelink_info); goto beach; } } obj->mach0 = main_mach0; obj->rebase_info = rebase_info; obj->prelink_info = prelink_info; obj->cache_buf = fbuf; obj->pa2va_exec = prelink_range->pa2va_exec; obj->pa2va_data = prelink_range->pa2va_data; R_FREE (prelink_range); *bin_obj = obj; r_list_push (pending_bin_files, bf); if (rebase_info || main_mach0->chained_starts) { RIO *io = bf->rbin->iob.io; swizzle_io_read (obj, io); } return true; beach: r_buf_free (fbuf); obj->cache_buf = NULL; MACH0_(mach0_free) (main_mach0); return false; }
static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) { RBuffer *fbuf = r_buf_ref (buf); struct MACH0_(opts_t) opts; MACH0_(opts_set_default) (&opts, bf); struct MACH0_(obj_t) *main_mach0 = MACH0_(new_buf) (fbuf, &opts); if (!main_mach0) { return false; } RRebaseInfo *rebase_info = r_rebase_info_new_from_mach0 (fbuf, main_mach0); RKernelCacheObj *obj = NULL; RPrelinkRange *prelink_range = get_prelink_info_range_from_mach0 (main_mach0); if (!prelink_range) { goto beach; } obj = R_NEW0 (RKernelCacheObj); if (!obj) { R_FREE (prelink_range); goto beach; } RCFValueDict *prelink_info = NULL; if (main_mach0->hdr.filetype != MH_FILESET && prelink_range->range.size) { prelink_info = r_cf_value_dict_parse (fbuf, prelink_range->range.offset, prelink_range->range.size, R_CF_OPTION_SKIP_NSDATA); if (!prelink_info) { R_FREE (prelink_range); R_FREE (obj); goto beach; } } if (!pending_bin_files) { pending_bin_files = r_list_new (); if (!pending_bin_files) { R_FREE (prelink_range); R_FREE (obj); R_FREE (prelink_info); goto beach; } } obj->mach0 = main_mach0; obj->rebase_info = rebase_info; obj->prelink_info = prelink_info; obj->cache_buf = fbuf; obj->pa2va_exec = prelink_range->pa2va_exec; obj->pa2va_data = prelink_range->pa2va_data; R_FREE (prelink_range); *bin_obj = obj; r_list_push (pending_bin_files, bf); if (rebase_info || main_mach0->chained_starts) { RIO *io = bf->rbin->iob.io; swizzle_io_read (obj, io); } return true; beach: r_buf_free (fbuf); if (obj) { obj->cache_buf = NULL; } MACH0_(mach0_free) (main_mach0); return false; }
{'added': [(245, '\tif (obj) {'), (246, '\t\tobj->cache_buf = NULL;'), (247, '\t}')], 'deleted': [(245, '\tobj->cache_buf = NULL;')]}
3
1
1,884
12,892
58
353
11
https://github.com/radareorg/radare2
CVE-2022-0419
CWE-476
275
dm9000_driver.c
C
dm9000WritePhyReg
/** * @file dm9000_driver.c * @brief DM9000A/B Ethernet controller * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "core/net.h" #include "core/ethernet.h" #include "drivers/eth/dm9000_driver.h" #include "debug.h" /** * @brief DM9000 driver **/ const NicDriver dm9000Driver = { NIC_TYPE_ETHERNET, ETH_MTU, dm9000Init, dm9000Tick, dm9000EnableIrq, dm9000DisableIrq, dm9000EventHandler, dm9000SendPacket, dm9000UpdateMacAddrFilter, NULL, NULL, NULL, TRUE, TRUE, TRUE, FALSE }; /** * @brief DM9000 controller initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t dm9000Init(NetInterface *interface) { uint_t i; uint16_t vendorId; uint16_t productId; uint8_t chipRevision; Dm9000Context *context; //Debug message TRACE_INFO("Initializing DM9000 Ethernet controller...\r\n"); //Initialize external interrupt line interface->extIntDriver->init(); //Point to the driver context context = (Dm9000Context *) interface->nicContext; //Initialize driver 
specific variables context->queuedPackets = 0; //Allocate TX and RX buffers context->txBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); //Failed to allocate memory? if(context->txBuffer == NULL || context->rxBuffer == NULL) { //Clean up side effects memPoolFree(context->txBuffer); memPoolFree(context->rxBuffer); //Report an error return ERROR_OUT_OF_MEMORY; } //Retrieve vendorID, product ID and chip revision vendorId = (dm9000ReadReg(DM9000_REG_VIDH) << 8) | dm9000ReadReg(DM9000_REG_VIDL); productId = (dm9000ReadReg(DM9000_REG_PIDH) << 8) | dm9000ReadReg(DM9000_REG_PIDL); chipRevision = dm9000ReadReg(DM9000_REG_CHIPR); //Check vendor ID and product ID if(vendorId != DM9000_VID || productId != DM9000_PID) { return ERROR_WRONG_IDENTIFIER; } //Check chip revision if(chipRevision != DM9000A_CHIP_REV && chipRevision != DM9000B_CHIP_REV) { return ERROR_WRONG_IDENTIFIER; } //Power up the internal PHY by clearing PHYPD dm9000WriteReg(DM9000_REG_GPR, 0x00); //Wait for the PHY to be ready sleep(10); //Software reset dm9000WriteReg(DM9000_REG_NCR, NCR_RST); //Wait for the reset to complete while((dm9000ReadReg(DM9000_REG_NCR) & NCR_RST) != 0) { } //PHY software reset dm9000WritePhyReg(DM9000_PHY_REG_BMCR, BMCR_RST); //Wait for the PHY reset to complete while((dm9000ReadPhyReg(DM9000_PHY_REG_BMCR) & BMCR_RST) != 0) { } //Debug message TRACE_INFO(" VID = 0x%04" PRIX16 "\r\n", vendorId); TRACE_INFO(" PID = 0x%04" PRIX16 "\r\n", productId); TRACE_INFO(" CHIPR = 0x%02" PRIX8 "\r\n", chipRevision); TRACE_INFO(" PHYIDR1 = 0x%04" PRIX16 "\r\n", dm9000ReadPhyReg(DM9000_PHY_REG_PHYIDR1)); TRACE_INFO(" PHYIDR2 = 0x%04" PRIX16 "\r\n", dm9000ReadPhyReg(DM9000_PHY_REG_PHYIDR2)); //Enable loopback mode? 
#if (DM9000_LOOPBACK_MODE == ENABLED) dm9000WriteReg(DM9000_REG_NCR, DM9000_LBK_PHY); dm9000WritePhyReg(DM9000_PHY_REG_BMCR, BMCR_LOOPBACK | BMCR_SPEED_SEL | BMCR_AN_EN | BMCR_DUPLEX_MODE); #endif //Set host MAC address for(i = 0; i < 6; i++) { dm9000WriteReg(DM9000_REG_PAR0 + i, interface->macAddr.b[i]); } //Initialize hash table for(i = 0; i < 8; i++) { dm9000WriteReg(DM9000_REG_MAR0 + i, 0x00); } //Always accept broadcast packets dm9000WriteReg(DM9000_REG_MAR7, 0x80); //Enable the Pointer Auto Return function dm9000WriteReg(DM9000_REG_IMR, IMR_PAR); //Clear NSR status bits dm9000WriteReg(DM9000_REG_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END); //Clear interrupt flags dm9000WriteReg(DM9000_REG_ISR, ISR_LNKCHG | ISR_UDRUN | ISR_ROO | ISR_ROS | ISR_PT | ISR_PR); //Enable interrupts dm9000WriteReg(DM9000_REG_IMR, IMR_PAR | IMR_LNKCHGI | IMR_PTI | IMR_PRI); //Enable the receiver by setting RXEN dm9000WriteReg(DM9000_REG_RCR, RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN); //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Force the TCP/IP stack to poll the link state at startup interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief DM9000 timer handler * @param[in] interface Underlying network interface **/ void dm9000Tick(NetInterface *interface) { } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void dm9000EnableIrq(NetInterface *interface) { //Enable interrupts interface->extIntDriver->enableIrq(); } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void dm9000DisableIrq(NetInterface *interface) { //Disable interrupts interface->extIntDriver->disableIrq(); } /** * @brief DM9000 interrupt service routine * @param[in] interface Underlying network interface * @return TRUE if a higher priority task must be woken. 
Else FALSE is returned **/ bool_t dm9000IrqHandler(NetInterface *interface) { bool_t flag; uint8_t status; uint8_t mask; Dm9000Context *context; //This flag will be set if a higher priority task must be woken flag = FALSE; //Point to the driver context context = (Dm9000Context *) interface->nicContext; //Read interrupt status register status = dm9000ReadReg(DM9000_REG_ISR); //Link status change? if((status & ISR_LNKCHG) != 0) { //Read interrupt mask register mask = dm9000ReadReg(DM9000_REG_IMR); //Disable LNKCHGI interrupt dm9000WriteReg(DM9000_REG_IMR, mask & ~IMR_LNKCHGI); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((status & ISR_PT) != 0) { //Check TX complete status bits if(dm9000ReadReg(DM9000_REG_NSR) & (NSR_TX2END | NSR_TX1END)) { //The transmission of the current packet is complete if(context->queuedPackets > 0) { context->queuedPackets--; } //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } //Clear interrupt flag dm9000WriteReg(DM9000_REG_ISR, ISR_PT); } //Packet received? if((status & ISR_PR) != 0) { //Read interrupt mask register mask = dm9000ReadReg(DM9000_REG_IMR); //Disable PRI interrupt dm9000WriteReg(DM9000_REG_IMR, mask & ~IMR_PRI); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //A higher priority task must be woken? return flag; } /** * @brief DM9000 event handler * @param[in] interface Underlying network interface **/ void dm9000EventHandler(NetInterface *interface) { error_t error; uint8_t status; //Read interrupt status register status = dm9000ReadReg(DM9000_REG_ISR); //Check whether the link status has changed? 
if((status & ISR_LNKCHG) != 0) { //Clear interrupt flag dm9000WriteReg(DM9000_REG_ISR, ISR_LNKCHG); //Read network status register status = dm9000ReadReg(DM9000_REG_NSR); //Check link state if((status & NSR_LINKST) != 0) { //Get current speed if((status & NSR_SPEED) != 0) { interface->linkSpeed = NIC_LINK_SPEED_10MBPS; } else { interface->linkSpeed = NIC_LINK_SPEED_100MBPS; } //Read network control register status = dm9000ReadReg(DM9000_REG_NCR); //Determine the new duplex mode if((status & NCR_FDX) != 0) { interface->duplexMode = NIC_FULL_DUPLEX_MODE; } else { interface->duplexMode = NIC_HALF_DUPLEX_MODE; } //Link is up interface->linkState = TRUE; } else { //Link is down interface->linkState = FALSE; } //Process link state change event nicNotifyLinkChange(interface); } //Check whether a packet has been received? if((status & ISR_PR) != 0) { //Clear interrupt flag dm9000WriteReg(DM9000_REG_ISR, ISR_PR); //Process all pending packets do { //Read incoming packet error = dm9000ReceivePacket(interface); //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); } //Re-enable LNKCHGI and PRI interrupts dm9000WriteReg(DM9000_REG_IMR, IMR_PAR | IMR_LNKCHGI | IMR_PTI | IMR_PRI); } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t dm9000SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t i; size_t length; uint16_t *p; Dm9000Context *context; //Point to the driver context context = (Dm9000Context *) interface->nicContext; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > ETH_MAX_FRAME_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Copy user data netBufferRead(context->txBuffer, buffer, offset, length); //A dummy write is required before accessing FIFO dm9000WriteReg(DM9000_REG_MWCMDX, 0); //Select MWCMD register DM9000_INDEX_REG = DM9000_REG_MWCMD; //Point to the beginning of the buffer p = (uint16_t *) context->txBuffer; //Write data to the FIFO using 16-bit mode for(i = length; i > 1; i -= 2) { DM9000_DATA_REG = *(p++); } //Odd number of bytes? 
if(i > 0) { DM9000_DATA_REG = *((uint8_t *) p); } //Write the number of bytes to send dm9000WriteReg(DM9000_REG_TXPLL, LSB(length)); dm9000WriteReg(DM9000_REG_TXPLH, MSB(length)); //Clear interrupt flag dm9000WriteReg(DM9000_REG_ISR, ISR_PT); //Start data transfer dm9000WriteReg(DM9000_REG_TCR, TCR_TXREQ); //The packet was successfully written to FIFO context->queuedPackets++; //Successful processing return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t dm9000ReceivePacket(NetInterface *interface) { error_t error; size_t i; size_t n; size_t length; volatile uint8_t status; volatile uint16_t data; Dm9000Context *context; //Point to the driver context context = (Dm9000Context *) interface->nicContext; //A dummy read is required before accessing the 4-byte header data = dm9000ReadReg(DM9000_REG_MRCMDX); //Select MRCMDX1 register DM9000_INDEX_REG = DM9000_REG_MRCMDX1; //Read the first byte of the header status = LSB(DM9000_DATA_REG); //The first byte indicates if a packet has been received if(status == 0x01) { //Select MRCMD register DM9000_INDEX_REG = DM9000_REG_MRCMD; //The second byte is the RX status byte status = MSB(DM9000_DATA_REG); //Retrieve packet length length = DM9000_DATA_REG; //Limit the number of data to read n = MIN(length, ETH_MAX_FRAME_SIZE); //Point to the beginning of the buffer i = 0; //Make sure no error occurred if((status & (RSR_LCS | RSR_RWTO | RSR_PLE | RSR_AE | RSR_CE | RSR_FOE)) == 0) { //Read data from FIFO using 16-bit mode while((i + 1) < n) { data = DM9000_DATA_REG; context->rxBuffer[i++] = LSB(data); context->rxBuffer[i++] = MSB(data); } //Odd number of bytes to read? 
if((i + 1) == n) { data = DM9000_DATA_REG; context->rxBuffer[i] = LSB(data); i += 2; } //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } //Flush remaining bytes while(i < length) { data = DM9000_DATA_REG; i += 2; } } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, context->rxBuffer, n, &ancillary); } //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t dm9000UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint8_t hashTable[8]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //Always accept broadcast packets regardless of the MAC filter table hashTable[7] = 0x80; //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? 
if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = dm9000CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = crc & 0x3F; //Update hash table contents hashTable[k / 8] |= (1 << (k % 8)); } } //Write the hash table to the DM9000 controller for(i = 0; i < 8; i++) { dm9000WriteReg(DM9000_REG_MAR0 + i, hashTable[i]); } //Debug message TRACE_DEBUG(" MAR = %02" PRIX8 " %02" PRIX8 " %02" PRIX8 " %02" PRIX8 " " "%02" PRIX8 " %02" PRIX8 " %02" PRIX8 " %02" PRIX8 "\r\n", dm9000ReadReg(DM9000_REG_MAR0), dm9000ReadReg(DM9000_REG_MAR1), dm9000ReadReg(DM9000_REG_MAR2), dm9000ReadReg(DM9000_REG_MAR3), dm9000ReadReg(DM9000_REG_MAR4), dm9000ReadReg(DM9000_REG_MAR5), dm9000ReadReg(DM9000_REG_MAR6), dm9000ReadReg(DM9000_REG_MAR7)); //Successful processing return NO_ERROR; } /** * @brief Write DM9000 register * @param[in] address Register address * @param[in] data Register value **/ void dm9000WriteReg(uint8_t address, uint8_t data) { //Write register address to INDEX register DM9000_INDEX_REG = address; //Write register value to DATA register DM9000_DATA_REG = data; } /** * @brief Read DM9000 register * @param[in] address Register address * @return Register value **/ uint8_t dm9000ReadReg(uint8_t address) { //Write register address to INDEX register DM9000_INDEX_REG = address; //Read register value from DATA register return DM9000_DATA_REG; } /** * @brief Write DM9000 PHY register * @param[in] address PHY register address * @param[in] data Register value **/ void dm9000WritePhyReg(uint8_t address, uint16_t data) { //Write PHY register address dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address); //Write register value dm9000WriteReg(DM9000_REG_EPDRL, LSB(data)); dm9000WriteReg(DM9000_REG_EPDRH, MSB(data)); //Start the write operation dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRW); //PHY access is still in progress? 
while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0) { } //Wait 5us minimum usleep(5); //Clear command register dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS); } /** * @brief Read DM9000 PHY register * @param[in] address PHY register address * @return Register value **/ uint16_t dm9000ReadPhyReg(uint8_t address) { //Write PHY register address dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address); //Start the read operation dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRR); //PHY access is still in progress? while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0) { } //Clear command register dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS); //Wait 5us minimum usleep(5); //Return register value return (dm9000ReadReg(DM9000_REG_EPDRH) << 8) | dm9000ReadReg(DM9000_REG_EPDRL); } /** * @brief CRC calculation * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t dm9000CalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //Update CRC value crc ^= p[i]; //The message is processed bit by bit for(j = 0; j < 8; j++) { if((crc & 0x01) != 0) { crc = (crc >> 1) ^ 0xEDB88320; } else { crc = crc >> 1; } } } //Return CRC value return crc; }
/** * @file dm9000_driver.c * @brief DM9000A/B Ethernet controller * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include "core/net.h" #include "core/ethernet.h" #include "drivers/eth/dm9000_driver.h" #include "debug.h" /** * @brief DM9000 driver **/ const NicDriver dm9000Driver = { NIC_TYPE_ETHERNET, ETH_MTU, dm9000Init, dm9000Tick, dm9000EnableIrq, dm9000DisableIrq, dm9000EventHandler, dm9000SendPacket, dm9000UpdateMacAddrFilter, NULL, NULL, NULL, TRUE, TRUE, TRUE, FALSE }; /** * @brief DM9000 controller initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t dm9000Init(NetInterface *interface) { uint_t i; uint16_t vendorId; uint16_t productId; uint8_t chipRev; Dm9000Context *context; //Debug message TRACE_INFO("Initializing DM9000 Ethernet controller...\r\n"); //Initialize external interrupt line interface->extIntDriver->init(); //Point to the driver context context = (Dm9000Context *) interface->nicContext; //Initialize driver 
specific variables context->queuedPackets = 0; //Allocate TX and RX buffers context->txBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); //Failed to allocate memory? if(context->txBuffer == NULL || context->rxBuffer == NULL) { //Clean up side effects memPoolFree(context->txBuffer); memPoolFree(context->rxBuffer); //Report an error return ERROR_OUT_OF_MEMORY; } //Retrieve vendorID, product ID and chip revision vendorId = (dm9000ReadReg(DM9000_VIDH) << 8) | dm9000ReadReg(DM9000_VIDL); productId = (dm9000ReadReg(DM9000_PIDH) << 8) | dm9000ReadReg(DM9000_PIDL); chipRev = dm9000ReadReg(DM9000_CHIPR); //Check vendor ID and product ID if(vendorId != DM9000_VID || productId != DM9000_PID) { return ERROR_WRONG_IDENTIFIER; } //Check chip revision if(chipRev != DM9000_CHIPR_REV_A && chipRev != DM9000_CHIPR_REV_B) { return ERROR_WRONG_IDENTIFIER; } //Power up the internal PHY by clearing PHYPD dm9000WriteReg(DM9000_GPR, 0x00); //Wait for the PHY to be ready sleep(10); //Software reset dm9000WriteReg(DM9000_NCR, DM9000_NCR_RST); //Wait for the reset to complete while((dm9000ReadReg(DM9000_NCR) & DM9000_NCR_RST) != 0) { } //PHY software reset dm9000WritePhyReg(DM9000_BMCR, DM9000_BMCR_RST); //Wait for the PHY reset to complete while((dm9000ReadPhyReg(DM9000_BMCR) & DM9000_BMCR_RST) != 0) { } //Debug message TRACE_INFO(" VID = 0x%04" PRIX16 "\r\n", vendorId); TRACE_INFO(" PID = 0x%04" PRIX16 "\r\n", productId); TRACE_INFO(" CHIPR = 0x%02" PRIX8 "\r\n", chipRev); TRACE_INFO(" PHYIDR1 = 0x%04" PRIX16 "\r\n", dm9000ReadPhyReg(DM9000_PHYIDR1)); TRACE_INFO(" PHYIDR2 = 0x%04" PRIX16 "\r\n", dm9000ReadPhyReg(DM9000_PHYIDR2)); //Enable loopback mode? 
#if (DM9000_LOOPBACK_MODE == ENABLED) //Enable loopback mode dm9000WriteReg(DM9000_NCR, DM9000_NCR_LBK_PHY); //Set operation mode dm9000WritePhyReg(DM9000_BMCR, DM9000_BMCR_LOOPBACK | DM9000_BMCR_SPEED_SEL | DM9000_BMCR_AN_EN | DM9000_BMCR_DUPLEX_MODE); #endif //Set host MAC address for(i = 0; i < 6; i++) { dm9000WriteReg(DM9000_PAR0 + i, interface->macAddr.b[i]); } //Initialize hash table for(i = 0; i < 8; i++) { dm9000WriteReg(DM9000_MAR0 + i, 0x00); } //Always accept broadcast packets dm9000WriteReg(DM9000_MAR7, 0x80); //Enable the Pointer Auto Return function dm9000WriteReg(DM9000_IMR, DM9000_IMR_PAR); //Clear NSR status bits dm9000WriteReg(DM9000_NSR, DM9000_NSR_WAKEST | DM9000_NSR_TX2END | DM9000_NSR_TX1END); //Clear interrupt flags dm9000WriteReg(DM9000_ISR, DM9000_ISR_LNKCHG | DM9000_ISR_UDRUN | DM9000_ISR_ROO | DM9000_ISR_ROS | DM9000_ISR_PT | DM9000_ISR_PR); //Enable interrupts dm9000WriteReg(DM9000_IMR, DM9000_IMR_PAR | DM9000_IMR_LNKCHGI | DM9000_IMR_PTI | DM9000_IMR_PRI); //Enable the receiver by setting RXEN dm9000WriteReg(DM9000_RCR, DM9000_RCR_DIS_LONG | DM9000_RCR_DIS_CRC | DM9000_RCR_RXEN); //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Force the TCP/IP stack to poll the link state at startup interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief DM9000 timer handler * @param[in] interface Underlying network interface **/ void dm9000Tick(NetInterface *interface) { } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void dm9000EnableIrq(NetInterface *interface) { //Enable interrupts interface->extIntDriver->enableIrq(); } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void dm9000DisableIrq(NetInterface *interface) { //Disable interrupts interface->extIntDriver->disableIrq(); } /** * @brief DM9000 interrupt service routine * @param[in] interface 
Underlying network interface * @return TRUE if a higher priority task must be woken. Else FALSE is returned **/ bool_t dm9000IrqHandler(NetInterface *interface) { bool_t flag; uint8_t status; uint8_t mask; Dm9000Context *context; //This flag will be set if a higher priority task must be woken flag = FALSE; //Point to the driver context context = (Dm9000Context *) interface->nicContext; //Read interrupt status register status = dm9000ReadReg(DM9000_ISR); //Link status change? if((status & DM9000_ISR_LNKCHG) != 0) { //Read interrupt mask register mask = dm9000ReadReg(DM9000_IMR); //Disable LNKCHGI interrupt dm9000WriteReg(DM9000_IMR, mask & ~DM9000_IMR_LNKCHGI); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((status & DM9000_ISR_PT) != 0) { //Check TX complete status bits if((dm9000ReadReg(DM9000_NSR) & (DM9000_NSR_TX2END | DM9000_NSR_TX1END)) != 0) { //The transmission of the current packet is complete if(context->queuedPackets > 0) { context->queuedPackets--; } //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } //Clear interrupt flag dm9000WriteReg(DM9000_ISR, DM9000_ISR_PT); } //Packet received? if((status & DM9000_ISR_PR) != 0) { //Read interrupt mask register mask = dm9000ReadReg(DM9000_IMR); //Disable PRI interrupt dm9000WriteReg(DM9000_IMR, mask & ~DM9000_IMR_PRI); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //A higher priority task must be woken? return flag; } /** * @brief DM9000 event handler * @param[in] interface Underlying network interface **/ void dm9000EventHandler(NetInterface *interface) { error_t error; uint8_t status; //Read interrupt status register status = dm9000ReadReg(DM9000_ISR); //Check whether the link status has changed? 
if((status & DM9000_ISR_LNKCHG) != 0) { //Clear interrupt flag dm9000WriteReg(DM9000_ISR, DM9000_ISR_LNKCHG); //Read network status register status = dm9000ReadReg(DM9000_NSR); //Check link state if((status & DM9000_NSR_LINKST) != 0) { //Get current speed if((status & DM9000_NSR_SPEED) != 0) { interface->linkSpeed = NIC_LINK_SPEED_10MBPS; } else { interface->linkSpeed = NIC_LINK_SPEED_100MBPS; } //Read network control register status = dm9000ReadReg(DM9000_NCR); //Determine the new duplex mode if((status & DM9000_NCR_FDX) != 0) { interface->duplexMode = NIC_FULL_DUPLEX_MODE; } else { interface->duplexMode = NIC_HALF_DUPLEX_MODE; } //Link is up interface->linkState = TRUE; } else { //Link is down interface->linkState = FALSE; } //Process link state change event nicNotifyLinkChange(interface); } //Check whether a packet has been received? if((status & DM9000_ISR_PR) != 0) { //Clear interrupt flag dm9000WriteReg(DM9000_ISR, DM9000_ISR_PR); //Process all pending packets do { //Read incoming packet error = dm9000ReceivePacket(interface); //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); } //Re-enable LNKCHGI and PRI interrupts dm9000WriteReg(DM9000_IMR, DM9000_IMR_PAR | DM9000_IMR_LNKCHGI | DM9000_IMR_PTI | DM9000_IMR_PRI); } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t dm9000SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t i; size_t length; uint16_t *p; Dm9000Context *context; //Point to the driver context context = (Dm9000Context *) interface->nicContext; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > ETH_MAX_FRAME_SIZE) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Copy user data netBufferRead(context->txBuffer, buffer, offset, length); //A dummy write is required before accessing FIFO dm9000WriteReg(DM9000_MWCMDX, 0); //Select MWCMD register DM9000_INDEX_REG = DM9000_MWCMD; //Point to the beginning of the buffer p = (uint16_t *) context->txBuffer; //Write data to the FIFO using 16-bit mode for(i = length; i > 1; i -= 2) { DM9000_DATA_REG = *(p++); } //Odd number of bytes? 
if(i > 0) { DM9000_DATA_REG = *((uint8_t *) p); } //Write the number of bytes to send dm9000WriteReg(DM9000_TXPLL, LSB(length)); dm9000WriteReg(DM9000_TXPLH, MSB(length)); //Clear interrupt flag dm9000WriteReg(DM9000_ISR, DM9000_ISR_PT); //Start data transfer dm9000WriteReg(DM9000_TCR, DM9000_TCR_TXREQ); //The packet was successfully written to FIFO context->queuedPackets++; //Successful processing return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t dm9000ReceivePacket(NetInterface *interface) { error_t error; size_t i; size_t n; size_t length; volatile uint8_t status; volatile uint16_t data; Dm9000Context *context; //Point to the driver context context = (Dm9000Context *) interface->nicContext; //A dummy read is required before accessing the 4-byte header data = dm9000ReadReg(DM9000_MRCMDX); //Select MRCMDX1 register DM9000_INDEX_REG = DM9000_MRCMDX1; //Read the first byte of the header status = LSB(DM9000_DATA_REG); //The first byte indicates if a packet has been received if(status == 0x01) { //Select MRCMD register DM9000_INDEX_REG = DM9000_MRCMD; //The second byte is the RX status byte status = MSB(DM9000_DATA_REG); //Retrieve packet length length = DM9000_DATA_REG; //Limit the number of data to read n = MIN(length, ETH_MAX_FRAME_SIZE); //Point to the beginning of the buffer i = 0; //Make sure no error occurred if((status & (DM9000_RSR_LCS | DM9000_RSR_RWTO | DM9000_RSR_PLE | DM9000_RSR_AE | DM9000_RSR_CE | DM9000_RSR_FOE)) == 0) { //Read data from FIFO using 16-bit mode while((i + 1) < n) { data = DM9000_DATA_REG; context->rxBuffer[i++] = LSB(data); context->rxBuffer[i++] = MSB(data); } //Odd number of bytes to read? 
if((i + 1) == n) { data = DM9000_DATA_REG; context->rxBuffer[i] = LSB(data); i += 2; } //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } //Flush remaining bytes while(i < length) { data = DM9000_DATA_REG; i += 2; } } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, context->rxBuffer, n, &ancillary); } //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t dm9000UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint8_t hashTable[8]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //Always accept broadcast packets regardless of the MAC filter table hashTable[7] = 0x80; //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? 
if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = dm9000CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = crc & 0x3F; //Update hash table contents hashTable[k / 8] |= (1 << (k % 8)); } } //Write the hash table to the DM9000 controller for(i = 0; i < 8; i++) { dm9000WriteReg(DM9000_MAR0 + i, hashTable[i]); } //Debug message TRACE_DEBUG(" MAR = %02" PRIX8 " %02" PRIX8 " %02" PRIX8 " %02" PRIX8 " " "%02" PRIX8 " %02" PRIX8 " %02" PRIX8 " %02" PRIX8 "\r\n", dm9000ReadReg(DM9000_MAR0), dm9000ReadReg(DM9000_MAR1), dm9000ReadReg(DM9000_MAR2), dm9000ReadReg(DM9000_MAR3), dm9000ReadReg(DM9000_MAR4), dm9000ReadReg(DM9000_MAR5), dm9000ReadReg(DM9000_MAR6), dm9000ReadReg(DM9000_MAR7)); //Successful processing return NO_ERROR; } /** * @brief Write DM9000 register * @param[in] address Register address * @param[in] data Register value **/ void dm9000WriteReg(uint8_t address, uint8_t data) { //Write register address to INDEX register DM9000_INDEX_REG = address; //Write register value to DATA register DM9000_DATA_REG = data; } /** * @brief Read DM9000 register * @param[in] address Register address * @return Register value **/ uint8_t dm9000ReadReg(uint8_t address) { //Write register address to INDEX register DM9000_INDEX_REG = address; //Read register value from DATA register return DM9000_DATA_REG; } /** * @brief Write DM9000 PHY register * @param[in] address PHY register address * @param[in] data Register value **/ void dm9000WritePhyReg(uint8_t address, uint16_t data) { //Write PHY register address dm9000WriteReg(DM9000_EPAR, 0x40 | address); //Write register value dm9000WriteReg(DM9000_EPDRL, LSB(data)); dm9000WriteReg(DM9000_EPDRH, MSB(data)); //Start the write operation dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS | DM9000_EPCR_ERPRW); //PHY access is still in progress? 
while((dm9000ReadReg(DM9000_EPCR) & DM9000_EPCR_ERRE) != 0) { } //Wait 5us minimum usleep(5); //Clear command register dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS); } /** * @brief Read DM9000 PHY register * @param[in] address PHY register address * @return Register value **/ uint16_t dm9000ReadPhyReg(uint8_t address) { //Write PHY register address dm9000WriteReg(DM9000_EPAR, 0x40 | address); //Start the read operation dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS | DM9000_EPCR_ERPRR); //PHY access is still in progress? while((dm9000ReadReg(DM9000_EPCR) & DM9000_EPCR_ERRE) != 0) { } //Clear command register dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS); //Wait 5us minimum usleep(5); //Return register value return (dm9000ReadReg(DM9000_EPDRH) << 8) | dm9000ReadReg(DM9000_EPDRL); } /** * @brief CRC calculation * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t dm9000CalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //Update CRC value crc ^= p[i]; //The message is processed bit by bit for(j = 0; j < 8; j++) { if((crc & 0x01) != 0) { crc = (crc >> 1) ^ 0xEDB88320; } else { crc = crc >> 1; } } } //Return CRC value return crc; }
void dm9000WritePhyReg(uint8_t address, uint16_t data) { //Write PHY register address dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address); //Write register value dm9000WriteReg(DM9000_REG_EPDRL, LSB(data)); dm9000WriteReg(DM9000_REG_EPDRH, MSB(data)); //Start the write operation dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRW); //PHY access is still in progress? while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0) { } //Wait 5us minimum usleep(5); //Clear command register dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS); }
void dm9000WritePhyReg(uint8_t address, uint16_t data) { //Write PHY register address dm9000WriteReg(DM9000_EPAR, 0x40 | address); //Write register value dm9000WriteReg(DM9000_EPDRL, LSB(data)); dm9000WriteReg(DM9000_EPDRH, MSB(data)); //Start the write operation dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS | DM9000_EPCR_ERPRW); //PHY access is still in progress? while((dm9000ReadReg(DM9000_EPCR) & DM9000_EPCR_ERRE) != 0) { } //Wait 5us minimum usleep(5); //Clear command register dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS); }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (77, ' uint8_t chipRev;'), (108, ' vendorId = (dm9000ReadReg(DM9000_VIDH) << 8) | dm9000ReadReg(DM9000_VIDL);'), (109, ' productId = (dm9000ReadReg(DM9000_PIDH) << 8) | dm9000ReadReg(DM9000_PIDL);'), (110, ' chipRev = dm9000ReadReg(DM9000_CHIPR);'), (119, ' if(chipRev != DM9000_CHIPR_REV_A && chipRev != DM9000_CHIPR_REV_B)'), (125, ' dm9000WriteReg(DM9000_GPR, 0x00);'), (130, ' dm9000WriteReg(DM9000_NCR, DM9000_NCR_RST);'), (132, ' while((dm9000ReadReg(DM9000_NCR) & DM9000_NCR_RST) != 0)'), (137, ' dm9000WritePhyReg(DM9000_BMCR, DM9000_BMCR_RST);'), (139, ' while((dm9000ReadPhyReg(DM9000_BMCR) & DM9000_BMCR_RST) != 0)'), (146, ' TRACE_INFO(" CHIPR = 0x%02" PRIX8 "\\r\\n", chipRev);'), (147, ' TRACE_INFO(" PHYIDR1 = 0x%04" PRIX16 "\\r\\n", dm9000ReadPhyReg(DM9000_PHYIDR1));'), (148, ' TRACE_INFO(" PHYIDR2 = 0x%04" PRIX16 "\\r\\n", dm9000ReadPhyReg(DM9000_PHYIDR2));'), (152, ' //Enable loopback mode'), (153, ' dm9000WriteReg(DM9000_NCR, DM9000_NCR_LBK_PHY);'), (154, ''), (155, ' //Set operation mode'), (156, ' dm9000WritePhyReg(DM9000_BMCR, DM9000_BMCR_LOOPBACK | DM9000_BMCR_SPEED_SEL |'), (157, ' DM9000_BMCR_AN_EN | DM9000_BMCR_DUPLEX_MODE);'), (163, ' dm9000WriteReg(DM9000_PAR0 + i, interface->macAddr.b[i]);'), (169, ' dm9000WriteReg(DM9000_MAR0 + i, 0x00);'), (173, ' dm9000WriteReg(DM9000_MAR7, 0x80);'), (176, ' dm9000WriteReg(DM9000_IMR, DM9000_IMR_PAR);'), (177, ''), (179, ' dm9000WriteReg(DM9000_NSR, DM9000_NSR_WAKEST | DM9000_NSR_TX2END |'), (180, ' DM9000_NSR_TX1END);'), (181, ''), (183, ' dm9000WriteReg(DM9000_ISR, DM9000_ISR_LNKCHG | DM9000_ISR_UDRUN |'), (184, ' DM9000_ISR_ROO | DM9000_ISR_ROS | DM9000_ISR_PT | DM9000_ISR_PR);'), (185, ''), (187, ' dm9000WriteReg(DM9000_IMR, DM9000_IMR_PAR | DM9000_IMR_LNKCHGI |'), (188, ' DM9000_IMR_PTI | DM9000_IMR_PRI);'), (189, ''), (191, ' dm9000WriteReg(DM9000_RCR, DM9000_RCR_DIS_LONG | DM9000_RCR_DIS_CRC 
|'), (192, ' DM9000_RCR_RXEN);'), (261, ' status = dm9000ReadReg(DM9000_ISR);'), (264, ' if((status & DM9000_ISR_LNKCHG) != 0)'), (267, ' mask = dm9000ReadReg(DM9000_IMR);'), (269, ' dm9000WriteReg(DM9000_IMR, mask & ~DM9000_IMR_LNKCHGI);'), (278, ' if((status & DM9000_ISR_PT) != 0)'), (281, ' if((dm9000ReadReg(DM9000_NSR) & (DM9000_NSR_TX2END | DM9000_NSR_TX1END)) != 0)'), (294, ' dm9000WriteReg(DM9000_ISR, DM9000_ISR_PT);'), (298, ' if((status & DM9000_ISR_PR) != 0)'), (301, ' mask = dm9000ReadReg(DM9000_IMR);'), (303, ' dm9000WriteReg(DM9000_IMR, mask & ~DM9000_IMR_PRI);'), (327, ' status = dm9000ReadReg(DM9000_ISR);'), (330, ' if((status & DM9000_ISR_LNKCHG) != 0)'), (333, ' dm9000WriteReg(DM9000_ISR, DM9000_ISR_LNKCHG);'), (335, ' status = dm9000ReadReg(DM9000_NSR);'), (338, ' if((status & DM9000_NSR_LINKST) != 0)'), (341, ' if((status & DM9000_NSR_SPEED) != 0)'), (351, ' status = dm9000ReadReg(DM9000_NCR);'), (354, ' if((status & DM9000_NCR_FDX) != 0)'), (377, ' if((status & DM9000_ISR_PR) != 0)'), (380, ' dm9000WriteReg(DM9000_ISR, DM9000_ISR_PR);'), (393, ' dm9000WriteReg(DM9000_IMR, DM9000_IMR_PAR | DM9000_IMR_LNKCHGI |'), (394, ' DM9000_IMR_PTI | DM9000_IMR_PRI);'), (435, ' dm9000WriteReg(DM9000_MWCMDX, 0);'), (437, ' DM9000_INDEX_REG = DM9000_MWCMD;'), (455, ' dm9000WriteReg(DM9000_TXPLL, LSB(length));'), (456, ' dm9000WriteReg(DM9000_TXPLH, MSB(length));'), (459, ' dm9000WriteReg(DM9000_ISR, DM9000_ISR_PT);'), (461, ' dm9000WriteReg(DM9000_TCR, DM9000_TCR_TXREQ);'), (491, ' data = dm9000ReadReg(DM9000_MRCMDX);'), (494, ' DM9000_INDEX_REG = DM9000_MRCMDX1;'), (502, ' DM9000_INDEX_REG = DM9000_MRCMD;'), (515, ' if((status & (DM9000_RSR_LCS | DM9000_RSR_RWTO | DM9000_RSR_PLE |'), (516, ' DM9000_RSR_AE | DM9000_RSR_CE | DM9000_RSR_FOE)) == 0)'), (617, ' dm9000WriteReg(DM9000_MAR0 + i, hashTable[i]);'), (623, ' dm9000ReadReg(DM9000_MAR0), dm9000ReadReg(DM9000_MAR1),'), (624, ' dm9000ReadReg(DM9000_MAR2), dm9000ReadReg(DM9000_MAR3),'), (625, ' 
dm9000ReadReg(DM9000_MAR4), dm9000ReadReg(DM9000_MAR5),'), (626, ' dm9000ReadReg(DM9000_MAR6), dm9000ReadReg(DM9000_MAR7));'), (672, ' dm9000WriteReg(DM9000_EPAR, 0x40 | address);'), (674, ' dm9000WriteReg(DM9000_EPDRL, LSB(data));'), (675, ' dm9000WriteReg(DM9000_EPDRH, MSB(data));'), (678, ' dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS | DM9000_EPCR_ERPRW);'), (679, ''), (681, ' while((dm9000ReadReg(DM9000_EPCR) & DM9000_EPCR_ERRE) != 0)'), (688, ' dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS);'), (701, ' dm9000WriteReg(DM9000_EPAR, 0x40 | address);'), (704, ' dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS | DM9000_EPCR_ERPRR);'), (705, ''), (707, ' while((dm9000ReadReg(DM9000_EPCR) & DM9000_EPCR_ERRE) != 0)'), (712, ' dm9000WriteReg(DM9000_EPCR, DM9000_EPCR_EPOS);'), (717, ' return (dm9000ReadReg(DM9000_EPDRH) << 8) | dm9000ReadReg(DM9000_EPDRL);')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (77, ' uint8_t chipRevision;'), (108, ' vendorId = (dm9000ReadReg(DM9000_REG_VIDH) << 8) | dm9000ReadReg(DM9000_REG_VIDL);'), (109, ' productId = (dm9000ReadReg(DM9000_REG_PIDH) << 8) | dm9000ReadReg(DM9000_REG_PIDL);'), (110, ' chipRevision = dm9000ReadReg(DM9000_REG_CHIPR);'), (119, ' if(chipRevision != DM9000A_CHIP_REV && chipRevision != DM9000B_CHIP_REV)'), (125, ' dm9000WriteReg(DM9000_REG_GPR, 0x00);'), (130, ' dm9000WriteReg(DM9000_REG_NCR, NCR_RST);'), (132, ' while((dm9000ReadReg(DM9000_REG_NCR) & NCR_RST) != 0)'), (137, ' dm9000WritePhyReg(DM9000_PHY_REG_BMCR, BMCR_RST);'), (139, ' while((dm9000ReadPhyReg(DM9000_PHY_REG_BMCR) & BMCR_RST) != 0)'), (146, ' TRACE_INFO(" CHIPR = 0x%02" PRIX8 "\\r\\n", chipRevision);'), (147, ' TRACE_INFO(" PHYIDR1 = 0x%04" PRIX16 "\\r\\n", dm9000ReadPhyReg(DM9000_PHY_REG_PHYIDR1));'), (148, ' TRACE_INFO(" PHYIDR2 = 0x%04" PRIX16 "\\r\\n", dm9000ReadPhyReg(DM9000_PHY_REG_PHYIDR2));'), (152, ' dm9000WriteReg(DM9000_REG_NCR, DM9000_LBK_PHY);'), (153, ' 
dm9000WritePhyReg(DM9000_PHY_REG_BMCR, BMCR_LOOPBACK | BMCR_SPEED_SEL | BMCR_AN_EN | BMCR_DUPLEX_MODE);'), (159, ' dm9000WriteReg(DM9000_REG_PAR0 + i, interface->macAddr.b[i]);'), (165, ' dm9000WriteReg(DM9000_REG_MAR0 + i, 0x00);'), (169, ' dm9000WriteReg(DM9000_REG_MAR7, 0x80);'), (172, ' dm9000WriteReg(DM9000_REG_IMR, IMR_PAR);'), (174, ' dm9000WriteReg(DM9000_REG_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);'), (176, ' dm9000WriteReg(DM9000_REG_ISR, ISR_LNKCHG | ISR_UDRUN | ISR_ROO | ISR_ROS | ISR_PT | ISR_PR);'), (178, ' dm9000WriteReg(DM9000_REG_IMR, IMR_PAR | IMR_LNKCHGI | IMR_PTI | IMR_PRI);'), (180, ' dm9000WriteReg(DM9000_REG_RCR, RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN);'), (249, ' status = dm9000ReadReg(DM9000_REG_ISR);'), (252, ' if((status & ISR_LNKCHG) != 0)'), (255, ' mask = dm9000ReadReg(DM9000_REG_IMR);'), (257, ' dm9000WriteReg(DM9000_REG_IMR, mask & ~IMR_LNKCHGI);'), (266, ' if((status & ISR_PT) != 0)'), (269, ' if(dm9000ReadReg(DM9000_REG_NSR) & (NSR_TX2END | NSR_TX1END))'), (282, ' dm9000WriteReg(DM9000_REG_ISR, ISR_PT);'), (286, ' if((status & ISR_PR) != 0)'), (289, ' mask = dm9000ReadReg(DM9000_REG_IMR);'), (291, ' dm9000WriteReg(DM9000_REG_IMR, mask & ~IMR_PRI);'), (315, ' status = dm9000ReadReg(DM9000_REG_ISR);'), (318, ' if((status & ISR_LNKCHG) != 0)'), (321, ' dm9000WriteReg(DM9000_REG_ISR, ISR_LNKCHG);'), (323, ' status = dm9000ReadReg(DM9000_REG_NSR);'), (326, ' if((status & NSR_LINKST) != 0)'), (329, ' if((status & NSR_SPEED) != 0)'), (339, ' status = dm9000ReadReg(DM9000_REG_NCR);'), (342, ' if((status & NCR_FDX) != 0)'), (365, ' if((status & ISR_PR) != 0)'), (368, ' dm9000WriteReg(DM9000_REG_ISR, ISR_PR);'), (381, ' dm9000WriteReg(DM9000_REG_IMR, IMR_PAR | IMR_LNKCHGI | IMR_PTI | IMR_PRI);'), (422, ' dm9000WriteReg(DM9000_REG_MWCMDX, 0);'), (424, ' DM9000_INDEX_REG = DM9000_REG_MWCMD;'), (442, ' dm9000WriteReg(DM9000_REG_TXPLL, LSB(length));'), (443, ' dm9000WriteReg(DM9000_REG_TXPLH, MSB(length));'), (446, ' 
dm9000WriteReg(DM9000_REG_ISR, ISR_PT);'), (448, ' dm9000WriteReg(DM9000_REG_TCR, TCR_TXREQ);'), (478, ' data = dm9000ReadReg(DM9000_REG_MRCMDX);'), (481, ' DM9000_INDEX_REG = DM9000_REG_MRCMDX1;'), (489, ' DM9000_INDEX_REG = DM9000_REG_MRCMD;'), (502, ' if((status & (RSR_LCS | RSR_RWTO | RSR_PLE | RSR_AE | RSR_CE | RSR_FOE)) == 0)'), (603, ' dm9000WriteReg(DM9000_REG_MAR0 + i, hashTable[i]);'), (609, ' dm9000ReadReg(DM9000_REG_MAR0), dm9000ReadReg(DM9000_REG_MAR1),'), (610, ' dm9000ReadReg(DM9000_REG_MAR2), dm9000ReadReg(DM9000_REG_MAR3),'), (611, ' dm9000ReadReg(DM9000_REG_MAR4), dm9000ReadReg(DM9000_REG_MAR5),'), (612, ' dm9000ReadReg(DM9000_REG_MAR6), dm9000ReadReg(DM9000_REG_MAR7));'), (658, ' dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address);'), (660, ' dm9000WriteReg(DM9000_REG_EPDRL, LSB(data));'), (661, ' dm9000WriteReg(DM9000_REG_EPDRH, MSB(data));'), (664, ' dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRW);'), (666, ' while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0)'), (673, ' dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS);'), (686, ' dm9000WriteReg(DM9000_REG_EPAR, 0x40 | address);'), (689, ' dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS | EPCR_ERPRR);'), (691, ' while((dm9000ReadReg(DM9000_REG_EPCR) & EPCR_ERRE) != 0)'), (696, ' dm9000WriteReg(DM9000_REG_EPCR, EPCR_EPOS);'), (701, ' return (dm9000ReadReg(DM9000_REG_EPDRH) << 8) | dm9000ReadReg(DM9000_REG_EPDRL);')]}
88
72
371
1,880
12
75
2
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
CWE-20
647
auditsc.c
C
audit_log_execve_info
/* auditsc.c -- System-call auditing support * Handles all system-call specific auditing features. * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * Copyright 2005 Hewlett-Packard Development Company, L.P. * Copyright (C) 2005, 2006 IBM Corporation * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Written by Rickard E. (Rik) Faith <faith@redhat.com> * * Many of the ideas implemented here are from Stephen C. Tweedie, * especially the idea of avoiding a copy by using getname. * * The method for actual interception of syscall entry and exit (not in * this file -- see entry.S) is based on a GPL'd patch written by * okir@suse.de and Copyright 2003 SuSE Linux AG. * * POSIX message queue support added by George Wilson <ltcgcw@us.ibm.com>, * 2006. * * The support of additional filter rules compares (>, <, >=, <=) was * added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005. * * Modified by Amy Griffis <amy.griffis@hp.com> to collect additional * filesystem information. * * Subject and object context labeling support added by <danjones@us.ibm.com> * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <asm/types.h> #include <linux/atomic.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/socket.h> #include <linux/mqueue.h> #include <linux/audit.h> #include <linux/personality.h> #include <linux/time.h> #include <linux/netlink.h> #include <linux/compiler.h> #include <asm/unistd.h> #include <linux/security.h> #include <linux/list.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/highmem.h> #include <linux/syscalls.h> #include <asm/syscall.h> #include <linux/capability.h> #include <linux/fs_struct.h> #include <linux/compat.h> #include <linux/ctype.h> #include <linux/string.h> #include <uapi/linux/limits.h> #include "audit.h" /* flags stating the success for a syscall */ #define AUDITSC_INVALID 0 #define AUDITSC_SUCCESS 1 #define AUDITSC_FAILURE 2 /* no execve audit message should be longer than this (userspace limits) */ #define MAX_EXECVE_AUDIT_LEN 7500 /* max length to print of cmdline/proctitle value during audit */ #define MAX_PROCTITLE_AUDIT_LEN 128 /* number of audit rules */ int audit_n_rules; /* determines whether we collect data for signals sent */ int audit_signals; struct audit_aux_data { struct audit_aux_data *next; int type; }; #define AUDIT_AUX_IPCPERM 0 /* Number of target pids per aux struct. 
*/ #define AUDIT_AUX_PIDS 16 struct audit_aux_data_pids { struct audit_aux_data d; pid_t target_pid[AUDIT_AUX_PIDS]; kuid_t target_auid[AUDIT_AUX_PIDS]; kuid_t target_uid[AUDIT_AUX_PIDS]; unsigned int target_sessionid[AUDIT_AUX_PIDS]; u32 target_sid[AUDIT_AUX_PIDS]; char target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN]; int pid_count; }; struct audit_aux_data_bprm_fcaps { struct audit_aux_data d; struct audit_cap_data fcap; unsigned int fcap_ver; struct audit_cap_data old_pcap; struct audit_cap_data new_pcap; }; struct audit_tree_refs { struct audit_tree_refs *next; struct audit_chunk *c[31]; }; static int audit_match_perm(struct audit_context *ctx, int mask) { unsigned n; if (unlikely(!ctx)) return 0; n = ctx->major; switch (audit_classify_syscall(ctx->arch, n)) { case 0: /* native */ if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE, n)) return 1; if ((mask & AUDIT_PERM_READ) && audit_match_class(AUDIT_CLASS_READ, n)) return 1; if ((mask & AUDIT_PERM_ATTR) && audit_match_class(AUDIT_CLASS_CHATTR, n)) return 1; return 0; case 1: /* 32bit on biarch */ if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE_32, n)) return 1; if ((mask & AUDIT_PERM_READ) && audit_match_class(AUDIT_CLASS_READ_32, n)) return 1; if ((mask & AUDIT_PERM_ATTR) && audit_match_class(AUDIT_CLASS_CHATTR_32, n)) return 1; return 0; case 2: /* open */ return mask & ACC_MODE(ctx->argv[1]); case 3: /* openat */ return mask & ACC_MODE(ctx->argv[2]); case 4: /* socketcall */ return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND); case 5: /* execve */ return mask & AUDIT_PERM_EXEC; default: return 0; } } static int audit_match_filetype(struct audit_context *ctx, int val) { struct audit_names *n; umode_t mode = (umode_t)val; if (unlikely(!ctx)) return 0; list_for_each_entry(n, &ctx->names_list, list) { if ((n->ino != AUDIT_INO_UNSET) && ((n->mode & S_IFMT) == mode)) return 1; } return 0; } /* * We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *; 
* ->first_trees points to its beginning, ->trees - to the current end of data. * ->tree_count is the number of free entries in array pointed to by ->trees. * Original condition is (NULL, NULL, 0); as soon as it grows we never revert to NULL, * "empty" becomes (p, p, 31) afterwards. We don't shrink the list (and seriously, * it's going to remain 1-element for almost any setup) until we free context itself. * References in it _are_ dropped - at the same time we free/drop aux stuff. */ #ifdef CONFIG_AUDIT_TREE static void audit_set_auditable(struct audit_context *ctx) { if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } } static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk) { struct audit_tree_refs *p = ctx->trees; int left = ctx->tree_count; if (likely(left)) { p->c[--left] = chunk; ctx->tree_count = left; return 1; } if (!p) return 0; p = p->next; if (p) { p->c[30] = chunk; ctx->trees = p; ctx->tree_count = 30; return 1; } return 0; } static int grow_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p = ctx->trees; ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL); if (!ctx->trees) { ctx->trees = p; return 0; } if (p) p->next = ctx->trees; else ctx->first_trees = ctx->trees; ctx->tree_count = 31; return 1; } #endif static void unroll_tree_refs(struct audit_context *ctx, struct audit_tree_refs *p, int count) { #ifdef CONFIG_AUDIT_TREE struct audit_tree_refs *q; int n; if (!p) { /* we started with empty chain */ p = ctx->first_trees; count = 31; /* if the very first allocation has failed, nothing to do */ if (!p) return; } n = count; for (q = p; q != ctx->trees; q = q->next, n = 31) { while (n--) { audit_put_chunk(q->c[n]); q->c[n] = NULL; } } while (n-- > ctx->tree_count) { audit_put_chunk(q->c[n]); q->c[n] = NULL; } ctx->trees = p; ctx->tree_count = count; #endif } static void free_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p, *q; for (p = ctx->first_trees; p; p = q) 
{ q = p->next; kfree(p); } } static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree) { #ifdef CONFIG_AUDIT_TREE struct audit_tree_refs *p; int n; if (!tree) return 0; /* full ones */ for (p = ctx->first_trees; p != ctx->trees; p = p->next) { for (n = 0; n < 31; n++) if (audit_tree_match(p->c[n], tree)) return 1; } /* partial */ if (p) { for (n = ctx->tree_count; n < 31; n++) if (audit_tree_match(p->c[n], tree)) return 1; } #endif return 0; } static int audit_compare_uid(kuid_t uid, struct audit_names *name, struct audit_field *f, struct audit_context *ctx) { struct audit_names *n; int rc; if (name) { rc = audit_uid_comparator(uid, f->op, name->uid); if (rc) return rc; } if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = audit_uid_comparator(uid, f->op, n->uid); if (rc) return rc; } } return 0; } static int audit_compare_gid(kgid_t gid, struct audit_names *name, struct audit_field *f, struct audit_context *ctx) { struct audit_names *n; int rc; if (name) { rc = audit_gid_comparator(gid, f->op, name->gid); if (rc) return rc; } if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = audit_gid_comparator(gid, f->op, n->gid); if (rc) return rc; } } return 0; } static int audit_field_compare(struct task_struct *tsk, const struct cred *cred, struct audit_field *f, struct audit_context *ctx, struct audit_names *name) { switch (f->val) { /* process to file object comparisons */ case AUDIT_COMPARE_UID_TO_OBJ_UID: return audit_compare_uid(cred->uid, name, f, ctx); case AUDIT_COMPARE_GID_TO_OBJ_GID: return audit_compare_gid(cred->gid, name, f, ctx); case AUDIT_COMPARE_EUID_TO_OBJ_UID: return audit_compare_uid(cred->euid, name, f, ctx); case AUDIT_COMPARE_EGID_TO_OBJ_GID: return audit_compare_gid(cred->egid, name, f, ctx); case AUDIT_COMPARE_AUID_TO_OBJ_UID: return audit_compare_uid(tsk->loginuid, name, f, ctx); case AUDIT_COMPARE_SUID_TO_OBJ_UID: return audit_compare_uid(cred->suid, name, f, ctx); case AUDIT_COMPARE_SGID_TO_OBJ_GID: 
return audit_compare_gid(cred->sgid, name, f, ctx); case AUDIT_COMPARE_FSUID_TO_OBJ_UID: return audit_compare_uid(cred->fsuid, name, f, ctx); case AUDIT_COMPARE_FSGID_TO_OBJ_GID: return audit_compare_gid(cred->fsgid, name, f, ctx); /* uid comparisons */ case AUDIT_COMPARE_UID_TO_AUID: return audit_uid_comparator(cred->uid, f->op, tsk->loginuid); case AUDIT_COMPARE_UID_TO_EUID: return audit_uid_comparator(cred->uid, f->op, cred->euid); case AUDIT_COMPARE_UID_TO_SUID: return audit_uid_comparator(cred->uid, f->op, cred->suid); case AUDIT_COMPARE_UID_TO_FSUID: return audit_uid_comparator(cred->uid, f->op, cred->fsuid); /* auid comparisons */ case AUDIT_COMPARE_AUID_TO_EUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->euid); case AUDIT_COMPARE_AUID_TO_SUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->suid); case AUDIT_COMPARE_AUID_TO_FSUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->fsuid); /* euid comparisons */ case AUDIT_COMPARE_EUID_TO_SUID: return audit_uid_comparator(cred->euid, f->op, cred->suid); case AUDIT_COMPARE_EUID_TO_FSUID: return audit_uid_comparator(cred->euid, f->op, cred->fsuid); /* suid comparisons */ case AUDIT_COMPARE_SUID_TO_FSUID: return audit_uid_comparator(cred->suid, f->op, cred->fsuid); /* gid comparisons */ case AUDIT_COMPARE_GID_TO_EGID: return audit_gid_comparator(cred->gid, f->op, cred->egid); case AUDIT_COMPARE_GID_TO_SGID: return audit_gid_comparator(cred->gid, f->op, cred->sgid); case AUDIT_COMPARE_GID_TO_FSGID: return audit_gid_comparator(cred->gid, f->op, cred->fsgid); /* egid comparisons */ case AUDIT_COMPARE_EGID_TO_SGID: return audit_gid_comparator(cred->egid, f->op, cred->sgid); case AUDIT_COMPARE_EGID_TO_FSGID: return audit_gid_comparator(cred->egid, f->op, cred->fsgid); /* sgid comparison */ case AUDIT_COMPARE_SGID_TO_FSGID: return audit_gid_comparator(cred->sgid, f->op, cred->fsgid); default: WARN(1, "Missing AUDIT_COMPARE define. 
Report as a bug\n"); return 0; } return 0; } /* Determine if any context name data matches a rule's watch data */ /* Compare a task_struct with an audit_rule. Return 1 on match, 0 * otherwise. * * If task_creation is true, this is an explicit indication that we are * filtering a task rule at task creation time. This and tsk == current are * the only situations where tsk->cred may be accessed without an rcu read lock. */ static int audit_filter_rules(struct task_struct *tsk, struct audit_krule *rule, struct audit_context *ctx, struct audit_names *name, enum audit_state *state, bool task_creation) { const struct cred *cred; int i, need_sid = 1; u32 sid; cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation); for (i = 0; i < rule->field_count; i++) { struct audit_field *f = &rule->fields[i]; struct audit_names *n; int result = 0; pid_t pid; switch (f->type) { case AUDIT_PID: pid = task_pid_nr(tsk); result = audit_comparator(pid, f->op, f->val); break; case AUDIT_PPID: if (ctx) { if (!ctx->ppid) ctx->ppid = task_ppid_nr(tsk); result = audit_comparator(ctx->ppid, f->op, f->val); } break; case AUDIT_EXE: result = audit_exe_compare(tsk, rule->exe); break; case AUDIT_UID: result = audit_uid_comparator(cred->uid, f->op, f->uid); break; case AUDIT_EUID: result = audit_uid_comparator(cred->euid, f->op, f->uid); break; case AUDIT_SUID: result = audit_uid_comparator(cred->suid, f->op, f->uid); break; case AUDIT_FSUID: result = audit_uid_comparator(cred->fsuid, f->op, f->uid); break; case AUDIT_GID: result = audit_gid_comparator(cred->gid, f->op, f->gid); if (f->op == Audit_equal) { if (!result) result = in_group_p(f->gid); } else if (f->op == Audit_not_equal) { if (result) result = !in_group_p(f->gid); } break; case AUDIT_EGID: result = audit_gid_comparator(cred->egid, f->op, f->gid); if (f->op == Audit_equal) { if (!result) result = in_egroup_p(f->gid); } else if (f->op == Audit_not_equal) { if (result) result = !in_egroup_p(f->gid); } break; case AUDIT_SGID: 
result = audit_gid_comparator(cred->sgid, f->op, f->gid); break; case AUDIT_FSGID: result = audit_gid_comparator(cred->fsgid, f->op, f->gid); break; case AUDIT_PERS: result = audit_comparator(tsk->personality, f->op, f->val); break; case AUDIT_ARCH: if (ctx) result = audit_comparator(ctx->arch, f->op, f->val); break; case AUDIT_EXIT: if (ctx && ctx->return_valid) result = audit_comparator(ctx->return_code, f->op, f->val); break; case AUDIT_SUCCESS: if (ctx && ctx->return_valid) { if (f->val) result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS); else result = audit_comparator(ctx->return_valid, f->op, AUDITSC_FAILURE); } break; case AUDIT_DEVMAJOR: if (name) { if (audit_comparator(MAJOR(name->dev), f->op, f->val) || audit_comparator(MAJOR(name->rdev), f->op, f->val)) ++result; } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(MAJOR(n->dev), f->op, f->val) || audit_comparator(MAJOR(n->rdev), f->op, f->val)) { ++result; break; } } } break; case AUDIT_DEVMINOR: if (name) { if (audit_comparator(MINOR(name->dev), f->op, f->val) || audit_comparator(MINOR(name->rdev), f->op, f->val)) ++result; } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(MINOR(n->dev), f->op, f->val) || audit_comparator(MINOR(n->rdev), f->op, f->val)) { ++result; break; } } } break; case AUDIT_INODE: if (name) result = audit_comparator(name->ino, f->op, f->val); else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(n->ino, f->op, f->val)) { ++result; break; } } } break; case AUDIT_OBJ_UID: if (name) { result = audit_uid_comparator(name->uid, f->op, f->uid); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_uid_comparator(n->uid, f->op, f->uid)) { ++result; break; } } } break; case AUDIT_OBJ_GID: if (name) { result = audit_gid_comparator(name->gid, f->op, f->gid); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if 
(audit_gid_comparator(n->gid, f->op, f->gid)) { ++result; break; } } } break; case AUDIT_WATCH: if (name) result = audit_watch_compare(rule->watch, name->ino, name->dev); break; case AUDIT_DIR: if (ctx) result = match_tree_refs(ctx, rule->tree); break; case AUDIT_LOGINUID: result = audit_uid_comparator(tsk->loginuid, f->op, f->uid); break; case AUDIT_LOGINUID_SET: result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val); break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: /* NOTE: this may return negative values indicating a temporary error. We simply treat this as a match for now to avoid losing information that may be wanted. An error message will also be logged upon error */ if (f->lsm_rule) { if (need_sid) { security_task_getsecid(tsk, &sid); need_sid = 0; } result = security_audit_rule_match(sid, f->type, f->op, f->lsm_rule, ctx); } break; case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: /* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR also applies here */ if (f->lsm_rule) { /* Find files that match */ if (name) { result = security_audit_rule_match( name->osid, f->type, f->op, f->lsm_rule, ctx); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (security_audit_rule_match(n->osid, f->type, f->op, f->lsm_rule, ctx)) { ++result; break; } } } /* Find ipc objects that match */ if (!ctx || ctx->type != AUDIT_IPC) break; if (security_audit_rule_match(ctx->ipc.osid, f->type, f->op, f->lsm_rule, ctx)) ++result; } break; case AUDIT_ARG0: case AUDIT_ARG1: case AUDIT_ARG2: case AUDIT_ARG3: if (ctx) result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val); break; case AUDIT_FILTERKEY: /* ignore this field for filtering */ result = 1; break; case AUDIT_PERM: result = audit_match_perm(ctx, f->val); break; case AUDIT_FILETYPE: result = audit_match_filetype(ctx, f->val); break; case AUDIT_FIELD_COMPARE: 
result = audit_field_compare(tsk, cred, f, ctx, name); break; } if (!result) return 0; } if (ctx) { if (rule->prio <= ctx->prio) return 0; if (rule->filterkey) { kfree(ctx->filterkey); ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); } ctx->prio = rule->prio; } switch (rule->action) { case AUDIT_NEVER: *state = AUDIT_DISABLED; break; case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; } return 1; } /* At process creation time, we can determine if system-call auditing is * completely disabled for this task. Since we only have the task * structure at this point, we can only check uid and gid. */ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key) { struct audit_entry *e; enum audit_state state; rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) { if (audit_filter_rules(tsk, &e->rule, NULL, NULL, &state, true)) { if (state == AUDIT_RECORD_CONTEXT) *key = kstrdup(e->rule.filterkey, GFP_ATOMIC); rcu_read_unlock(); return state; } } rcu_read_unlock(); return AUDIT_BUILD_CONTEXT; } static int audit_in_mask(const struct audit_krule *rule, unsigned long val) { int word, bit; if (val > 0xffffffff) return false; word = AUDIT_WORD(val); if (word >= AUDIT_BITMASK_SIZE) return false; bit = AUDIT_BIT(val); return rule->mask[word] & bit; } /* At syscall entry and exit time, this filter is called if the * audit_state is not low enough that auditing cannot take place, but is * also not high enough that we already know we have to write an audit * record (i.e., the state is AUDIT_SETUP_CONTEXT or AUDIT_BUILD_CONTEXT). 
*/ static enum audit_state audit_filter_syscall(struct task_struct *tsk, struct audit_context *ctx, struct list_head *list) { struct audit_entry *e; enum audit_state state; if (audit_pid && tsk->tgid == audit_pid) return AUDIT_DISABLED; rcu_read_lock(); if (!list_empty(list)) { list_for_each_entry_rcu(e, list, list) { if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, NULL, &state, false)) { rcu_read_unlock(); ctx->current_state = state; return state; } } } rcu_read_unlock(); return AUDIT_BUILD_CONTEXT; } /* * Given an audit_name check the inode hash table to see if they match. * Called holding the rcu read lock to protect the use of audit_inode_hash */ static int audit_filter_inode_name(struct task_struct *tsk, struct audit_names *n, struct audit_context *ctx) { int h = audit_hash_ino((u32)n->ino); struct list_head *list = &audit_inode_hash[h]; struct audit_entry *e; enum audit_state state; if (list_empty(list)) return 0; list_for_each_entry_rcu(e, list, list) { if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) { ctx->current_state = state; return 1; } } return 0; } /* At syscall exit time, this filter is called if any audit_names have been * collected during syscall processing. We only check rules in sublists at hash * buckets applicable to the inode numbers in audit_names. * Regarding audit_state, same rules apply as for audit_filter_syscall(). 
*/ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx) { struct audit_names *n; if (audit_pid && tsk->tgid == audit_pid) return; rcu_read_lock(); list_for_each_entry(n, &ctx->names_list, list) { if (audit_filter_inode_name(tsk, n, ctx)) break; } rcu_read_unlock(); } /* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */ static inline struct audit_context *audit_take_context(struct task_struct *tsk, int return_valid, long return_code) { struct audit_context *context = tsk->audit_context; if (!context) return NULL; context->return_valid = return_valid; /* * we need to fix up the return code in the audit logs if the actual * return codes are later going to be fixed up by the arch specific * signal handlers * * This is actually a test for: * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) || * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK) * * but is faster than a bunch of || */ if (unlikely(return_code <= -ERESTARTSYS) && (return_code >= -ERESTART_RESTARTBLOCK) && (return_code != -ENOIOCTLCMD)) context->return_code = -EINTR; else context->return_code = return_code; if (context->in_syscall && !context->dummy) { audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); audit_filter_inodes(tsk, context); } tsk->audit_context = NULL; return context; } static inline void audit_proctitle_free(struct audit_context *context) { kfree(context->proctitle.value); context->proctitle.value = NULL; context->proctitle.len = 0; } static inline void audit_free_names(struct audit_context *context) { struct audit_names *n, *next; list_for_each_entry_safe(n, next, &context->names_list, list) { list_del(&n->list); if (n->name) putname(n->name); if (n->should_free) kfree(n); } context->name_count = 0; path_put(&context->pwd); context->pwd.dentry = NULL; context->pwd.mnt = NULL; } static inline void audit_free_aux(struct audit_context *context) { struct audit_aux_data *aux; while ((aux = context->aux)) { 
context->aux = aux->next; kfree(aux); } while ((aux = context->aux_pids)) { context->aux_pids = aux->next; kfree(aux); } } static inline struct audit_context *audit_alloc_context(enum audit_state state) { struct audit_context *context; context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return NULL; context->state = state; context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; INIT_LIST_HEAD(&context->killed_trees); INIT_LIST_HEAD(&context->names_list); return context; } /** * audit_alloc - allocate an audit context block for a task * @tsk: task * * Filter on the task information and allocate a per-task audit context * if necessary. Doing so turns on system call auditing for the * specified task. This is called from copy_process, so no lock is * needed. */ int audit_alloc(struct task_struct *tsk) { struct audit_context *context; enum audit_state state; char *key = NULL; if (likely(!audit_ever_enabled)) return 0; /* Return if not auditing. */ state = audit_filter_task(tsk, &key); if (state == AUDIT_DISABLED) { clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT); return 0; } if (!(context = audit_alloc_context(state))) { kfree(key); audit_log_lost("out of memory in audit_alloc"); return -ENOMEM; } context->filterkey = key; tsk->audit_context = context; set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT); return 0; } static inline void audit_free_context(struct audit_context *context) { audit_free_names(context); unroll_tree_refs(context, NULL, 0); free_tree_refs(context); audit_free_aux(context); kfree(context->filterkey); kfree(context->sockaddr); audit_proctitle_free(context); kfree(context); } static int audit_log_pid_context(struct audit_context *context, pid_t pid, kuid_t auid, kuid_t uid, unsigned int sessionid, u32 sid, char *comm) { struct audit_buffer *ab; char *ctx = NULL; u32 len; int rc = 0; ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); if (!ab) return rc; audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, from_kuid(&init_user_ns, 
auid), from_kuid(&init_user_ns, uid), sessionid); if (sid) { if (security_secid_to_secctx(sid, &ctx, &len)) { audit_log_format(ab, " obj=(none)"); rc = 1; } else { audit_log_format(ab, " obj=%s", ctx); security_release_secctx(ctx, len); } } audit_log_format(ab, " ocomm="); audit_log_untrustedstring(ab, comm); audit_log_end(ab); return rc; } /* * to_send and len_sent accounting are very loose estimates. We aren't * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being * within about 500 bytes (next page boundary) * * why snprintf? an int is up to 12 digits long. if we just assumed when * logging that a[%d]= was going to be 16 characters long we would be wasting * space in every audit message. In one 7500 byte message we can log up to * about 1000 min size arguments. That comes down to about 50% waste of space * if we didn't do the snprintf to find out how long arg_num_len was. */ static int audit_log_single_execve_arg(struct audit_context *context, struct audit_buffer **ab, int arg_num, size_t *len_sent, const char __user *p, char *buf) { char arg_num_len_buf[12]; const char __user *tmp_p = p; /* how many digits are in arg_num? 5 is the length of ' a=""' */ size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5; size_t len, len_left, to_send; size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN; unsigned int i, has_cntl = 0, too_long = 0; int ret; /* strnlen_user includes the null we don't want to send */ len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1; /* * We just created this mm, if we can't find the strings * we just copied into it something is _very_ wrong. Similar * for strings that are too long, we should not have created * any. 
 */
	/*
	 * NOTE(review): len is a size_t, so the "len < 0" comparison can never
	 * be true (a strnlen_user() fault makes len wrap to SIZE_MAX, which is
	 * caught by the second test instead) — the dead check is harmless but
	 * misleading.
	 */
	if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
		send_sig(SIGKILL, current, 0);
		return -1;
	}

	/* walk the whole argument looking for non-ascii chars */
	do {
		if (len_left > MAX_EXECVE_AUDIT_LEN)
			to_send = MAX_EXECVE_AUDIT_LEN;
		else
			to_send = len_left;
		ret = copy_from_user(buf, tmp_p, to_send);
		/*
		 * There is no reason for this copy to be short. We just
		 * copied them here, and the mm hasn't been exposed to user-
		 * space yet.
		 */
		if (ret) {
			WARN_ON(1);
			send_sig(SIGKILL, current, 0);
			return -1;
		}
		buf[to_send] = '\0';
		has_cntl = audit_string_contains_control(buf, to_send);
		if (has_cntl) {
			/*
			 * hex messages get logged as 2 bytes, so we can only
			 * send half as much in each message
			 */
			max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
			break;
		}
		len_left -= to_send;
		tmp_p += to_send;
	} while (len_left > 0);
	len_left = len;

	if (len > max_execve_audit_len)
		too_long = 1;

	/*
	 * rewalk the argument actually logging the message
	 *
	 * NOTE(review): this second walk re-fetches the argument bytes from
	 * userspace (copy_from_user below), so another thread sharing the mm
	 * could change them between the control-character scan above and the
	 * logging here (double-fetch/TOCTOU; upstream later rewrote this path
	 * to fetch each chunk only once — confirm against current mainline).
	 */
	for (i = 0; len_left > 0; i++) {
		int room_left;

		if (len_left > max_execve_audit_len)
			to_send = max_execve_audit_len;
		else
			to_send = len_left;

		/* do we have space left to send this argument in this ab? */
		room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
		if (has_cntl)
			room_left -= (to_send * 2);
		else
			room_left -= to_send;
		if (room_left < 0) {
			/* no room: close this record and start a new one */
			*len_sent = 0;
			audit_log_end(*ab);
			*ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
			if (!*ab)
				return 0;
		}

		/*
		 * first record needs to say how long the original string was
		 * so we can be sure nothing was lost.
		 */
		if ((i == 0) && (too_long))
			audit_log_format(*ab, " a%d_len=%zu", arg_num,
					 has_cntl ?
2*len : len); /* * normally arguments are small enough to fit and we already * filled buf above when we checked for control characters * so don't bother with another copy_from_user */ if (len >= max_execve_audit_len) ret = copy_from_user(buf, p, to_send); else ret = 0; if (ret) { WARN_ON(1); send_sig(SIGKILL, current, 0); return -1; } buf[to_send] = '\0'; /* actually log it */ audit_log_format(*ab, " a%d", arg_num); if (too_long) audit_log_format(*ab, "[%d]", i); audit_log_format(*ab, "="); if (has_cntl) audit_log_n_hex(*ab, buf, to_send); else audit_log_string(*ab, buf); p += to_send; len_left -= to_send; *len_sent += arg_num_len; if (has_cntl) *len_sent += to_send * 2; else *len_sent += to_send; } /* include the null we didn't log */ return len + 1; } static void audit_log_execve_info(struct audit_context *context, struct audit_buffer **ab) { int i, len; size_t len_sent = 0; const char __user *p; char *buf; p = (const char __user *)current->mm->arg_start; audit_log_format(*ab, "argc=%d", context->execve.argc); /* * we need some kernel buffer to hold the userspace args. Just * allocate one big one rather than allocating one of the right size * for every single argument inside audit_log_single_execve_arg() * should be <8k allocation so should be pretty safe. 
*/ buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); if (!buf) { audit_panic("out of memory for argv string"); return; } for (i = 0; i < context->execve.argc; i++) { len = audit_log_single_execve_arg(context, ab, i, &len_sent, p, buf); if (len <= 0) break; p += len; } kfree(buf); } static void show_special(struct audit_context *context, int *call_panic) { struct audit_buffer *ab; int i; ab = audit_log_start(context, GFP_KERNEL, context->type); if (!ab) return; switch (context->type) { case AUDIT_SOCKETCALL: { int nargs = context->socketcall.nargs; audit_log_format(ab, "nargs=%d", nargs); for (i = 0; i < nargs; i++) audit_log_format(ab, " a%d=%lx", i, context->socketcall.args[i]); break; } case AUDIT_IPC: { u32 osid = context->ipc.osid; audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho", from_kuid(&init_user_ns, context->ipc.uid), from_kgid(&init_user_ns, context->ipc.gid), context->ipc.mode); if (osid) { char *ctx = NULL; u32 len; if (security_secid_to_secctx(osid, &ctx, &len)) { audit_log_format(ab, " osid=%u", osid); *call_panic = 1; } else { audit_log_format(ab, " obj=%s", ctx); security_release_secctx(ctx, len); } } if (context->ipc.has_perm) { audit_log_end(ab); ab = audit_log_start(context, GFP_KERNEL, AUDIT_IPC_SET_PERM); if (unlikely(!ab)) return; audit_log_format(ab, "qbytes=%lx ouid=%u ogid=%u mode=%#ho", context->ipc.qbytes, context->ipc.perm_uid, context->ipc.perm_gid, context->ipc.perm_mode); } break; } case AUDIT_MQ_OPEN: { audit_log_format(ab, "oflag=0x%x mode=%#ho mq_flags=0x%lx mq_maxmsg=%ld " "mq_msgsize=%ld mq_curmsgs=%ld", context->mq_open.oflag, context->mq_open.mode, context->mq_open.attr.mq_flags, context->mq_open.attr.mq_maxmsg, context->mq_open.attr.mq_msgsize, context->mq_open.attr.mq_curmsgs); break; } case AUDIT_MQ_SENDRECV: { audit_log_format(ab, "mqdes=%d msg_len=%zd msg_prio=%u " "abs_timeout_sec=%ld abs_timeout_nsec=%ld", context->mq_sendrecv.mqdes, context->mq_sendrecv.msg_len, context->mq_sendrecv.msg_prio, 
context->mq_sendrecv.abs_timeout.tv_sec, context->mq_sendrecv.abs_timeout.tv_nsec); break; } case AUDIT_MQ_NOTIFY: { audit_log_format(ab, "mqdes=%d sigev_signo=%d", context->mq_notify.mqdes, context->mq_notify.sigev_signo); break; } case AUDIT_MQ_GETSETATTR: { struct mq_attr *attr = &context->mq_getsetattr.mqstat; audit_log_format(ab, "mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld " "mq_curmsgs=%ld ", context->mq_getsetattr.mqdes, attr->mq_flags, attr->mq_maxmsg, attr->mq_msgsize, attr->mq_curmsgs); break; } case AUDIT_CAPSET: { audit_log_format(ab, "pid=%d", context->capset.pid); audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable); audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); break; } case AUDIT_MMAP: { audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, context->mmap.flags); break; } case AUDIT_EXECVE: { audit_log_execve_info(context, &ab); break; } } audit_log_end(ab); } static inline int audit_proctitle_rtrim(char *proctitle, int len) { char *end = proctitle + len - 1; while (end > proctitle && !isprint(*end)) end--; /* catch the case where proctitle is only 1 non-print character */ len = end - proctitle + 1; len -= isprint(proctitle[len-1]) == 0; return len; } static void audit_log_proctitle(struct task_struct *tsk, struct audit_context *context) { int res; char *buf; char *msg = "(null)"; int len = strlen(msg); struct audit_buffer *ab; ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); if (!ab) return; /* audit_panic or being filtered */ audit_log_format(ab, "proctitle="); /* Not cached */ if (!context->proctitle.value) { buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL); if (!buf) goto out; /* Historically called this from procfs naming */ res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN); if (res == 0) { kfree(buf); goto out; } res = audit_proctitle_rtrim(buf, res); if (res == 0) { kfree(buf); goto out; } context->proctitle.value = buf; 
context->proctitle.len = res; } msg = context->proctitle.value; len = context->proctitle.len; out: audit_log_n_untrustedstring(ab, msg, len); audit_log_end(ab); } static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { int i, call_panic = 0; struct audit_buffer *ab; struct audit_aux_data *aux; struct audit_names *n; /* tsk == current */ context->personality = tsk->personality; ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); if (!ab) return; /* audit_panic has been called */ audit_log_format(ab, "arch=%x syscall=%d", context->arch, context->major); if (context->personality != PER_LINUX) audit_log_format(ab, " per=%lx", context->personality); if (context->return_valid) audit_log_format(ab, " success=%s exit=%ld", (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", context->return_code); audit_log_format(ab, " a0=%lx a1=%lx a2=%lx a3=%lx items=%d", context->argv[0], context->argv[1], context->argv[2], context->argv[3], context->name_count); audit_log_task_info(ab, tsk); audit_log_key(ab, context->filterkey); audit_log_end(ab); for (aux = context->aux; aux; aux = aux->next) { ab = audit_log_start(context, GFP_KERNEL, aux->type); if (!ab) continue; /* audit_panic has been called */ switch (aux->type) { case AUDIT_BPRM_FCAPS: { struct audit_aux_data_bprm_fcaps *axs = (void *)aux; audit_log_format(ab, "fver=%x", axs->fcap_ver); audit_log_cap(ab, "fp", &axs->fcap.permitted); audit_log_cap(ab, "fi", &axs->fcap.inheritable); audit_log_format(ab, " fe=%d", axs->fcap.fE); audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted); audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable); audit_log_cap(ab, "old_pe", &axs->old_pcap.effective); audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted); audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable); audit_log_cap(ab, "new_pe", &axs->new_pcap.effective); break; } } audit_log_end(ab); } if (context->type) show_special(context, &call_panic); if (context->fds[0] >= 0) { ab = 
audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR); if (ab) { audit_log_format(ab, "fd0=%d fd1=%d", context->fds[0], context->fds[1]); audit_log_end(ab); } } if (context->sockaddr_len) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR); if (ab) { audit_log_format(ab, "saddr="); audit_log_n_hex(ab, (void *)context->sockaddr, context->sockaddr_len); audit_log_end(ab); } } for (aux = context->aux_pids; aux; aux = aux->next) { struct audit_aux_data_pids *axs = (void *)aux; for (i = 0; i < axs->pid_count; i++) if (audit_log_pid_context(context, axs->target_pid[i], axs->target_auid[i], axs->target_uid[i], axs->target_sessionid[i], axs->target_sid[i], axs->target_comm[i])) call_panic = 1; } if (context->target_pid && audit_log_pid_context(context, context->target_pid, context->target_auid, context->target_uid, context->target_sessionid, context->target_sid, context->target_comm)) call_panic = 1; if (context->pwd.dentry && context->pwd.mnt) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); if (ab) { audit_log_d_path(ab, "cwd=", &context->pwd); audit_log_end(ab); } } i = 0; list_for_each_entry(n, &context->names_list, list) { if (n->hidden) continue; audit_log_name(context, n, NULL, i++, &call_panic); } audit_log_proctitle(tsk, context); /* Send end of event record to help user space know we are finished */ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); if (ab) audit_log_end(ab); if (call_panic) audit_panic("error converting sid to string"); } /** * audit_free - free a per-task audit context * @tsk: task whose audit context block to free * * Called from copy_process and do_exit */ void __audit_free(struct task_struct *tsk) { struct audit_context *context; context = audit_take_context(tsk, 0, 0); if (!context) return; /* Check for system calls that do not go through the exit * function (e.g., exit_group), then free context block. 
* We use GFP_ATOMIC here because we might be doing this * in the context of the idle thread */ /* that can happen only if we are called from do_exit() */ if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_context(context); } /** * audit_syscall_entry - fill in an audit record at syscall entry * @major: major syscall type (function) * @a1: additional syscall register 1 * @a2: additional syscall register 2 * @a3: additional syscall register 3 * @a4: additional syscall register 4 * * Fill in audit context at syscall entry. This only happens if the * audit context was created when the task was created and the state or * filters demand the audit context be built. If the state from the * per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT, * then the record will be written at syscall exit time (otherwise, it * will only be written if another part of the kernel requests that it * be written). 
*/ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4) { struct task_struct *tsk = current; struct audit_context *context = tsk->audit_context; enum audit_state state; if (!context) return; BUG_ON(context->in_syscall || context->name_count); if (!audit_enabled) return; context->arch = syscall_get_arch(); context->major = major; context->argv[0] = a1; context->argv[1] = a2; context->argv[2] = a3; context->argv[3] = a4; state = context->state; context->dummy = !audit_n_rules; if (!context->dummy && state == AUDIT_BUILD_CONTEXT) { context->prio = 0; state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); } if (state == AUDIT_DISABLED) return; context->serial = 0; context->ctime = CURRENT_TIME; context->in_syscall = 1; context->current_state = state; context->ppid = 0; } /** * audit_syscall_exit - deallocate audit context after a system call * @success: success value of the syscall * @return_code: return value of the syscall * * Tear down after system call. If the audit context has been marked as * auditable (either because of the AUDIT_RECORD_CONTEXT state from * filtering, or because some other part of the kernel wrote an audit * message), then write out the syscall information. In call cases, * free the names stored from getname(). */ void __audit_syscall_exit(int success, long return_code) { struct task_struct *tsk = current; struct audit_context *context; if (success) success = AUDITSC_SUCCESS; else success = AUDITSC_FAILURE; context = audit_take_context(tsk, success, return_code); if (!context) return; if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); context->in_syscall = 0; context->prio = context->state == AUDIT_RECORD_CONTEXT ? 
~0ULL : 0; if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_names(context); unroll_tree_refs(context, NULL, 0); audit_free_aux(context); context->aux = NULL; context->aux_pids = NULL; context->target_pid = 0; context->target_sid = 0; context->sockaddr_len = 0; context->type = 0; context->fds[0] = -1; if (context->state != AUDIT_RECORD_CONTEXT) { kfree(context->filterkey); context->filterkey = NULL; } tsk->audit_context = context; } static inline void handle_one(const struct inode *inode) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; struct audit_chunk *chunk; int count; if (likely(hlist_empty(&inode->i_fsnotify_marks))) return; context = current->audit_context; p = context->trees; count = context->tree_count; rcu_read_lock(); chunk = audit_tree_lookup(inode); rcu_read_unlock(); if (!chunk) return; if (likely(put_tree_ref(context, chunk))) return; if (unlikely(!grow_tree_refs(context))) { pr_warn("out of memory, audit has lost a tree reference\n"); audit_set_auditable(context); audit_put_chunk(chunk); unroll_tree_refs(context, p, count); return; } put_tree_ref(context, chunk); #endif } static void handle_path(const struct dentry *dentry) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; const struct dentry *d, *parent; struct audit_chunk *drop; unsigned long seq; int count; context = current->audit_context; p = context->trees; count = context->tree_count; retry: drop = NULL; d = dentry; rcu_read_lock(); seq = read_seqbegin(&rename_lock); for(;;) { struct inode *inode = d_backing_inode(d); if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) { struct audit_chunk *chunk; chunk = audit_tree_lookup(inode); if (chunk) { if (unlikely(!put_tree_ref(context, chunk))) { drop = chunk; break; } } } parent = d->d_parent; if (parent == d) break; d = parent; } if (unlikely(read_seqretry(&rename_lock, seq) || drop)) { /* in this order */ 
rcu_read_unlock(); if (!drop) { /* just a race with rename */ unroll_tree_refs(context, p, count); goto retry; } audit_put_chunk(drop); if (grow_tree_refs(context)) { /* OK, got more space */ unroll_tree_refs(context, p, count); goto retry; } /* too bad */ pr_warn("out of memory, audit has lost a tree reference\n"); unroll_tree_refs(context, p, count); audit_set_auditable(context); return; } rcu_read_unlock(); #endif } static struct audit_names *audit_alloc_name(struct audit_context *context, unsigned char type) { struct audit_names *aname; if (context->name_count < AUDIT_NAMES) { aname = &context->preallocated_names[context->name_count]; memset(aname, 0, sizeof(*aname)); } else { aname = kzalloc(sizeof(*aname), GFP_NOFS); if (!aname) return NULL; aname->should_free = true; } aname->ino = AUDIT_INO_UNSET; aname->type = type; list_add_tail(&aname->list, &context->names_list); context->name_count++; return aname; } /** * audit_reusename - fill out filename with info from existing entry * @uptr: userland ptr to pathname * * Search the audit_names list for the current audit context. If there is an * existing entry with a matching "uptr" then return the filename * associated with that audit_name. If not, return NULL. */ struct filename * __audit_reusename(const __user char *uptr) { struct audit_context *context = current->audit_context; struct audit_names *n; list_for_each_entry(n, &context->names_list, list) { if (!n->name) continue; if (n->name->uptr == uptr) { n->name->refcnt++; return n->name; } } return NULL; } /** * audit_getname - add a name to the list * @name: name to add * * Add a name to the list of audit names for this context. * Called from fs/namei.c:getname(). 
*/ void __audit_getname(struct filename *name) { struct audit_context *context = current->audit_context; struct audit_names *n; if (!context->in_syscall) return; n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; n->name = name; n->name_len = AUDIT_NAME_FULL; name->aname = n; name->refcnt++; if (!context->pwd.dentry) get_fs_pwd(current->fs, &context->pwd); } /** * __audit_inode - store the inode and device from a lookup * @name: name being audited * @dentry: dentry being audited * @flags: attributes for this particular entry */ void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); struct audit_names *n; bool parent = flags & AUDIT_INODE_PARENT; if (!context->in_syscall) return; if (!name) goto out_alloc; /* * If we have a pointer to an audit_names entry already, then we can * just use it directly if the type is correct. */ n = name->aname; if (n) { if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } list_for_each_entry_reverse(n, &context->names_list, list) { if (n->ino) { /* valid inode number, use that for the comparison */ if (n->ino != inode->i_ino || n->dev != inode->i_sb->s_dev) continue; } else if (n->name) { /* inode number has not been set, check the name */ if (strcmp(n->name->name, name->name)) continue; } else /* no inode and no name (?!) ... this is odd ... */ continue; /* match the correct record type */ if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } out_alloc: /* unable to find an entry with both a matching name and type */ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; if (name) { n->name = name; name->refcnt++; } out: if (parent) { n->name_len = n->name ? 
parent_len(n->name->name) : AUDIT_NAME_FULL; n->type = AUDIT_TYPE_PARENT; if (flags & AUDIT_INODE_HIDDEN) n->hidden = true; } else { n->name_len = AUDIT_NAME_FULL; n->type = AUDIT_TYPE_NORMAL; } handle_path(dentry); audit_copy_inode(n, dentry, inode); } void __audit_file(const struct file *file) { __audit_inode(NULL, file->f_path.dentry, 0); } /** * __audit_inode_child - collect inode info for created/removed objects * @parent: inode of dentry parent * @dentry: dentry being audited * @type: AUDIT_TYPE_* value that we're looking for * * For syscalls that create or remove filesystem objects, audit_inode * can only collect information for the filesystem object's parent. * This call updates the audit context with the child's information. * Syscalls that create a new filesystem object must be hooked after * the object is created. Syscalls that remove a filesystem object * must be hooked prior, in order to capture the target inode during * unsuccessful attempts. */ void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); const char *dname = dentry->d_name.name; struct audit_names *n, *found_parent = NULL, *found_child = NULL; if (!context->in_syscall) return; if (inode) handle_one(inode); /* look for a parent entry first */ list_for_each_entry(n, &context->names_list, list) { if (!n->name || (n->type != AUDIT_TYPE_PARENT && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (n->ino == parent->i_ino && n->dev == parent->i_sb->s_dev && !audit_compare_dname_path(dname, n->name->name, n->name_len)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = AUDIT_TYPE_PARENT; found_parent = n; break; } } /* is there a matching child entry? 
*/ list_for_each_entry(n, &context->names_list, list) { /* can only match entries that have a name */ if (!n->name || (n->type != type && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (!strcmp(dname, n->name->name) || !audit_compare_dname_path(dname, n->name->name, found_parent ? found_parent->name_len : AUDIT_NAME_FULL)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = type; found_child = n; break; } } if (!found_parent) { /* create a new, "anonymous" parent record */ n = audit_alloc_name(context, AUDIT_TYPE_PARENT); if (!n) return; audit_copy_inode(n, NULL, parent); } if (!found_child) { found_child = audit_alloc_name(context, type); if (!found_child) return; /* Re-use the name belonging to the slot for a matching parent * directory. All names for this context are relinquished in * audit_free_names() */ if (found_parent) { found_child->name = found_parent->name; found_child->name_len = AUDIT_NAME_FULL; found_child->name->refcnt++; } } if (inode) audit_copy_inode(found_child, dentry, inode); else found_child->ino = AUDIT_INO_UNSET; } EXPORT_SYMBOL_GPL(__audit_inode_child); /** * auditsc_get_stamp - get local copies of audit_context values * @ctx: audit_context for the task * @t: timespec to store time recorded in the audit_context * @serial: serial value that is recorded in the audit_context * * Also sets the context as auditable. 
*/ int auditsc_get_stamp(struct audit_context *ctx, struct timespec *t, unsigned int *serial) { if (!ctx->in_syscall) return 0; if (!ctx->serial) ctx->serial = audit_serial(); t->tv_sec = ctx->ctime.tv_sec; t->tv_nsec = ctx->ctime.tv_nsec; *serial = ctx->serial; if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } return 1; } /* global counter which is incremented every time something logs in */ static atomic_t session_id = ATOMIC_INIT(0); static int audit_set_loginuid_perm(kuid_t loginuid) { /* if we are unset, we don't need privs */ if (!audit_loginuid_set(current)) return 0; /* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/ if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE)) return -EPERM; /* it is set, you need permission */ if (!capable(CAP_AUDIT_CONTROL)) return -EPERM; /* reject if this is not an unset and we don't allow that */ if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid)) return -EPERM; return 0; } static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, unsigned int oldsessionid, unsigned int sessionid, int rc) { struct audit_buffer *ab; uid_t uid, oldloginuid, loginuid; if (!audit_enabled) return; uid = from_kuid(&init_user_ns, task_uid(current)); oldloginuid = from_kuid(&init_user_ns, koldloginuid); loginuid = from_kuid(&init_user_ns, kloginuid), ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); audit_log_task_context(ab); audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", oldloginuid, loginuid, oldsessionid, sessionid, !rc); audit_log_end(ab); } /** * audit_set_loginuid - set current task's audit_context loginuid * @loginuid: loginuid value * * Returns 0. * * Called (set) from fs/proc/base.c::proc_loginuid_write(). 
*/ int audit_set_loginuid(kuid_t loginuid) { struct task_struct *task = current; unsigned int oldsessionid, sessionid = (unsigned int)-1; kuid_t oldloginuid; int rc; oldloginuid = audit_get_loginuid(current); oldsessionid = audit_get_sessionid(current); rc = audit_set_loginuid_perm(loginuid); if (rc) goto out; /* are we setting or clearing? */ if (uid_valid(loginuid)) sessionid = (unsigned int)atomic_inc_return(&session_id); task->sessionid = sessionid; task->loginuid = loginuid; out: audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc); return rc; } /** * __audit_mq_open - record audit data for a POSIX MQ open * @oflag: open flag * @mode: mode bits * @attr: queue attributes * */ void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { struct audit_context *context = current->audit_context; if (attr) memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr)); else memset(&context->mq_open.attr, 0, sizeof(struct mq_attr)); context->mq_open.oflag = oflag; context->mq_open.mode = mode; context->type = AUDIT_MQ_OPEN; } /** * __audit_mq_sendrecv - record audit data for a POSIX MQ timed send/receive * @mqdes: MQ descriptor * @msg_len: Message length * @msg_prio: Message priority * @abs_timeout: Message timeout in absolute time * */ void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) { struct audit_context *context = current->audit_context; struct timespec *p = &context->mq_sendrecv.abs_timeout; if (abs_timeout) memcpy(p, abs_timeout, sizeof(struct timespec)); else memset(p, 0, sizeof(struct timespec)); context->mq_sendrecv.mqdes = mqdes; context->mq_sendrecv.msg_len = msg_len; context->mq_sendrecv.msg_prio = msg_prio; context->type = AUDIT_MQ_SENDRECV; } /** * __audit_mq_notify - record audit data for a POSIX MQ notify * @mqdes: MQ descriptor * @notification: Notification event * */ void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { struct 
audit_context *context = current->audit_context; if (notification) context->mq_notify.sigev_signo = notification->sigev_signo; else context->mq_notify.sigev_signo = 0; context->mq_notify.mqdes = mqdes; context->type = AUDIT_MQ_NOTIFY; } /** * __audit_mq_getsetattr - record audit data for a POSIX MQ get/set attribute * @mqdes: MQ descriptor * @mqstat: MQ flags * */ void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { struct audit_context *context = current->audit_context; context->mq_getsetattr.mqdes = mqdes; context->mq_getsetattr.mqstat = *mqstat; context->type = AUDIT_MQ_GETSETATTR; } /** * audit_ipc_obj - record audit data for ipc object * @ipcp: ipc permissions * */ void __audit_ipc_obj(struct kern_ipc_perm *ipcp) { struct audit_context *context = current->audit_context; context->ipc.uid = ipcp->uid; context->ipc.gid = ipcp->gid; context->ipc.mode = ipcp->mode; context->ipc.has_perm = 0; security_ipc_getsecid(ipcp, &context->ipc.osid); context->type = AUDIT_IPC; } /** * audit_ipc_set_perm - record audit data for new ipc permissions * @qbytes: msgq bytes * @uid: msgq user id * @gid: msgq group id * @mode: msgq mode (permissions) * * Called only after audit_ipc_obj(). */ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { struct audit_context *context = current->audit_context; context->ipc.qbytes = qbytes; context->ipc.perm_uid = uid; context->ipc.perm_gid = gid; context->ipc.perm_mode = mode; context->ipc.has_perm = 1; } void __audit_bprm(struct linux_binprm *bprm) { struct audit_context *context = current->audit_context; context->type = AUDIT_EXECVE; context->execve.argc = bprm->argc; } /** * audit_socketcall - record audit data for sys_socketcall * @nargs: number of args, which should not be more than AUDITSC_ARGS. 
* @args: args array * */ int __audit_socketcall(int nargs, unsigned long *args) { struct audit_context *context = current->audit_context; if (nargs <= 0 || nargs > AUDITSC_ARGS || !args) return -EINVAL; context->type = AUDIT_SOCKETCALL; context->socketcall.nargs = nargs; memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); return 0; } /** * __audit_fd_pair - record audit data for pipe and socketpair * @fd1: the first file descriptor * @fd2: the second file descriptor * */ void __audit_fd_pair(int fd1, int fd2) { struct audit_context *context = current->audit_context; context->fds[0] = fd1; context->fds[1] = fd2; } /** * audit_sockaddr - record audit data for sys_bind, sys_connect, sys_sendto * @len: data length in user space * @a: data address in kernel space * * Returns 0 for success or NULL context or < 0 on error. */ int __audit_sockaddr(int len, void *a) { struct audit_context *context = current->audit_context; if (!context->sockaddr) { void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL); if (!p) return -ENOMEM; context->sockaddr = p; } context->sockaddr_len = len; memcpy(context->sockaddr, a, len); return 0; } void __audit_ptrace(struct task_struct *t) { struct audit_context *context = current->audit_context; context->target_pid = task_pid_nr(t); context->target_auid = audit_get_loginuid(t); context->target_uid = task_uid(t); context->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &context->target_sid); memcpy(context->target_comm, t->comm, TASK_COMM_LEN); } /** * audit_signal_info - record signal info for shutting down audit subsystem * @sig: signal value * @t: task being signaled * * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. 
*/
int __audit_signal_info(int sig, struct task_struct *t)
{
	struct audit_aux_data_pids *axp;
	struct task_struct *tsk = current;
	struct audit_context *ctx = tsk->audit_context;
	kuid_t uid = current_uid(), t_uid = task_uid(t);

	/* Is someone signaling the audit daemon itself?  For the
	 * shutdown/rotate signals, remember who sent it. */
	if (audit_pid && t->tgid == audit_pid) {
		if (sig == SIGTERM || sig == SIGHUP ||
		    sig == SIGUSR1 || sig == SIGUSR2) {
			audit_sig_pid = task_pid_nr(tsk);
			if (uid_valid(tsk->loginuid))
				audit_sig_uid = tsk->loginuid;
			else
				audit_sig_uid = uid;
			security_task_getsecid(tsk, &audit_sig_sid);
		}

		if (!audit_signals || audit_dummy_context())
			return 0;
	}

	/* optimize the common case by putting first signal recipient directly
	 * in audit_context */
	if (!ctx->target_pid) {
		ctx->target_pid = task_tgid_nr(t);
		ctx->target_auid = audit_get_loginuid(t);
		ctx->target_uid = t_uid;
		ctx->target_sessionid = audit_get_sessionid(t);
		security_task_getsecid(t, &ctx->target_sid);
		memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
		return 0;
	}

	/* Otherwise append to the aux chain; each aux record holds up to
	 * AUDIT_AUX_PIDS targets, a new one is allocated when full. */
	axp = (void *)ctx->aux_pids;
	if (!axp || axp->pid_count == AUDIT_AUX_PIDS) {
		axp = kzalloc(sizeof(*axp), GFP_ATOMIC);
		if (!axp)
			return -ENOMEM;

		axp->d.type = AUDIT_OBJ_PID;
		axp->d.next = ctx->aux_pids;
		ctx->aux_pids = (void *)axp;
	}
	BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);

	axp->target_pid[axp->pid_count] = task_tgid_nr(t);
	axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
	axp->target_uid[axp->pid_count] = t_uid;
	axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
	security_task_getsecid(t, &axp->target_sid[axp->pid_count]);
	memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
	axp->pid_count++;

	return 0;
}

/**
 * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps
 * @bprm: pointer to the bprm being processed
 * @new: the proposed new credentials
 * @old: the old credentials
 *
 * Simply check if the proc already has the caps given by the file and if not
 * store the priv escalation info for later auditing at the end of the syscall
 *
 * -Eric
 */
int __audit_log_bprm_fcaps(struct
linux_binprm *bprm, const struct cred *new, const struct cred *old) { struct audit_aux_data_bprm_fcaps *ax; struct audit_context *context = current->audit_context; struct cpu_vfs_cap_data vcaps; ax = kmalloc(sizeof(*ax), GFP_KERNEL); if (!ax) return -ENOMEM; ax->d.type = AUDIT_BPRM_FCAPS; ax->d.next = context->aux; context->aux = (void *)ax; get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps); ax->fcap.permitted = vcaps.permitted; ax->fcap.inheritable = vcaps.inheritable; ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; ax->old_pcap.permitted = old->cap_permitted; ax->old_pcap.inheritable = old->cap_inheritable; ax->old_pcap.effective = old->cap_effective; ax->new_pcap.permitted = new->cap_permitted; ax->new_pcap.inheritable = new->cap_inheritable; ax->new_pcap.effective = new->cap_effective; return 0; } /** * __audit_log_capset - store information about the arguments to the capset syscall * @new: the new credentials * @old: the old (current) credentials * * Record the arguments userspace sent to sys_capset for later printing by the * audit system if applicable */ void __audit_log_capset(const struct cred *new, const struct cred *old) { struct audit_context *context = current->audit_context; context->capset.pid = task_pid_nr(current); context->capset.cap.effective = new->cap_effective; context->capset.cap.inheritable = new->cap_effective; context->capset.cap.permitted = new->cap_permitted; context->type = AUDIT_CAPSET; } void __audit_mmap_fd(int fd, int flags) { struct audit_context *context = current->audit_context; context->mmap.fd = fd; context->mmap.flags = flags; context->type = AUDIT_MMAP; } static void audit_log_task(struct audit_buffer *ab) { kuid_t auid, uid; kgid_t gid; unsigned int sessionid; char comm[sizeof(current->comm)]; auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); current_uid_gid(&uid, &gid); audit_log_format(ab, 
"auid=%u uid=%u gid=%u ses=%u",
			 from_kuid(&init_user_ns, auid),
			 from_kuid(&init_user_ns, uid),
			 from_kgid(&init_user_ns, gid),
			 sessionid);
	audit_log_task_context(ab);
	audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
	/* comm is attacker-controllable text; log as untrusted string */
	audit_log_untrustedstring(ab, get_task_comm(comm, current));
	audit_log_d_path_exe(ab, current->mm);
}

/**
 * audit_core_dumps - record information about processes that end abnormally
 * @signr: signal value
 *
 * If a process ends with a core dump, something fishy is going on and we
 * should record the event for investigation.
 */
void audit_core_dumps(long signr)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;

	if (signr == SIGQUIT)	/* don't care for those */
		return;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
	if (unlikely(!ab))
		return;
	audit_log_task(ab);
	audit_log_format(ab, " sig=%ld", signr);
	audit_log_end(ab);
}

/* Log a seccomp event: signal, arch, syscall number, compat flag, ip and
 * the seccomp action code. */
void __audit_seccomp(unsigned long syscall, long signr, int code)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
	if (unlikely(!ab))
		return;
	audit_log_task(ab);
	audit_log_format(ab, " sig=%ld arch=%x syscall=%ld compat=%d ip=0x%lx code=0x%x",
			 signr, syscall_get_arch(), syscall,
			 in_compat_syscall(), KSTK_EIP(current), code);
	audit_log_end(ab);
}

/* Return the list of trees to be released at syscall exit, or NULL when
 * not inside an audited syscall. */
struct list_head *audit_killed_trees(void)
{
	struct audit_context *ctx = current->audit_context;
	if (likely(!ctx || !ctx->in_syscall))
		return NULL;
	return &ctx->killed_trees;
}
/* auditsc.c -- System-call auditing support * Handles all system-call specific auditing features. * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * Copyright 2005 Hewlett-Packard Development Company, L.P. * Copyright (C) 2005, 2006 IBM Corporation * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Written by Rickard E. (Rik) Faith <faith@redhat.com> * * Many of the ideas implemented here are from Stephen C. Tweedie, * especially the idea of avoiding a copy by using getname. * * The method for actual interception of syscall entry and exit (not in * this file -- see entry.S) is based on a GPL'd patch written by * okir@suse.de and Copyright 2003 SuSE Linux AG. * * POSIX message queue support added by George Wilson <ltcgcw@us.ibm.com>, * 2006. * * The support of additional filter rules compares (>, <, >=, <=) was * added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005. * * Modified by Amy Griffis <amy.griffis@hp.com> to collect additional * filesystem information. * * Subject and object context labeling support added by <danjones@us.ibm.com> * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance. 
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <asm/types.h>
#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/socket.h>
#include <linux/mqueue.h>
#include <linux/audit.h>
#include <linux/personality.h>
#include <linux/time.h>
#include <linux/netlink.h>
#include <linux/compiler.h>
#include <asm/unistd.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <asm/syscall.h>
#include <linux/capability.h>
#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <uapi/linux/limits.h>

#include "audit.h"

/* flags stating the success for a syscall */
#define AUDITSC_INVALID 0
#define AUDITSC_SUCCESS 1
#define AUDITSC_FAILURE 2

/* no execve audit message should be longer than this (userspace limits),
 * see the note near the top of audit_log_execve_info() about this value */
#define MAX_EXECVE_AUDIT_LEN 7500

/* max length to print of cmdline/proctitle value during audit */
#define MAX_PROCTITLE_AUDIT_LEN 128

/* number of audit rules */
int audit_n_rules;

/* determines whether we collect data for signals sent */
int audit_signals;

/* Common header linking aux records of any type into a context's chain. */
struct audit_aux_data {
	struct audit_aux_data	*next;
	int			type;
};

#define AUDIT_AUX_IPCPERM	0

/* Number of target pids per aux struct.
*/
#define AUDIT_AUX_PIDS	16

/* Aux record holding up to AUDIT_AUX_PIDS signal targets; filled by
 * __audit_signal_info(). */
struct audit_aux_data_pids {
	struct audit_aux_data	d;
	pid_t			target_pid[AUDIT_AUX_PIDS];
	kuid_t			target_auid[AUDIT_AUX_PIDS];
	kuid_t			target_uid[AUDIT_AUX_PIDS];
	unsigned int		target_sessionid[AUDIT_AUX_PIDS];
	u32			target_sid[AUDIT_AUX_PIDS];
	char 			target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN];
	int			pid_count;
};

/* Aux record for file-capability data captured at exec time. */
struct audit_aux_data_bprm_fcaps {
	struct audit_aux_data	d;
	struct audit_cap_data	fcap;
	unsigned int		fcap_ver;
	struct audit_cap_data	old_pcap;
	struct audit_cap_data	new_pcap;
};

/* One link of the chunk-reference chain described in the comment below. */
struct audit_tree_refs {
	struct audit_tree_refs *next;
	struct audit_chunk *c[31];
};

/* Does the syscall recorded in @ctx match permission @mask (r/w/attr/exec)? */
static int audit_match_perm(struct audit_context *ctx, int mask)
{
	unsigned n;
	if (unlikely(!ctx))
		return 0;
	n = ctx->major;

	switch (audit_classify_syscall(ctx->arch, n)) {
	case 0:	/* native */
		if ((mask & AUDIT_PERM_WRITE) &&
		     audit_match_class(AUDIT_CLASS_WRITE, n))
			return 1;
		if ((mask & AUDIT_PERM_READ) &&
		     audit_match_class(AUDIT_CLASS_READ, n))
			return 1;
		if ((mask & AUDIT_PERM_ATTR) &&
		     audit_match_class(AUDIT_CLASS_CHATTR, n))
			return 1;
		return 0;
	case 1: /* 32bit on biarch */
		if ((mask & AUDIT_PERM_WRITE) &&
		     audit_match_class(AUDIT_CLASS_WRITE_32, n))
			return 1;
		if ((mask & AUDIT_PERM_READ) &&
		     audit_match_class(AUDIT_CLASS_READ_32, n))
			return 1;
		if ((mask & AUDIT_PERM_ATTR) &&
		     audit_match_class(AUDIT_CLASS_CHATTR_32, n))
			return 1;
		return 0;
	case 2: /* open */
		return mask & ACC_MODE(ctx->argv[1]);
	case 3: /* openat */
		return mask & ACC_MODE(ctx->argv[2]);
	case 4: /* socketcall */
		return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND);
	case 5: /* execve */
		return mask & AUDIT_PERM_EXEC;
	default:
		return 0;
	}
}

/* Does any collected name in @ctx have file type @val (S_IFMT bits)? */
static int audit_match_filetype(struct audit_context *ctx, int val)
{
	struct audit_names *n;
	umode_t mode = (umode_t)val;

	if (unlikely(!ctx))
		return 0;

	list_for_each_entry(n, &ctx->names_list, list) {
		if ((n->ino != AUDIT_INO_UNSET) &&
		    ((n->mode & S_IFMT) == mode))
			return 1;
	}

	return 0;
}

/*
 * We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *;
* ->first_trees points to its beginning, ->trees - to the current end of data.
 * ->tree_count is the number of free entries in array pointed to by ->trees.
 * Original condition is (NULL, NULL, 0); as soon as it grows we never revert to NULL,
 * "empty" becomes (p, p, 31) afterwards.  We don't shrink the list (and seriously,
 * it's going to remain 1-element for almost any setup) until we free context itself.
 * References in it _are_ dropped - at the same time we free/drop aux stuff.
 */

#ifdef CONFIG_AUDIT_TREE
/* Force the current syscall to be recorded (lowest non-zero priority). */
static void audit_set_auditable(struct audit_context *ctx)
{
	if (!ctx->prio) {
		ctx->prio = 1;
		ctx->current_state = AUDIT_RECORD_CONTEXT;
	}
}

/* Stash @chunk in the preallocated ref arrays; returns 0 when full
 * (caller must grow_tree_refs() and retry). */
static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
{
	struct audit_tree_refs *p = ctx->trees;
	int left = ctx->tree_count;
	if (likely(left)) {
		p->c[--left] = chunk;
		ctx->tree_count = left;
		return 1;
	}
	if (!p)
		return 0;
	p = p->next;
	if (p) {
		p->c[30] = chunk;
		ctx->trees = p;
		ctx->tree_count = 30;
		return 1;
	}
	return 0;
}

/* Append a fresh 31-slot array to the ref chain; returns 0 on OOM. */
static int grow_tree_refs(struct audit_context *ctx)
{
	struct audit_tree_refs *p = ctx->trees;
	ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL);
	if (!ctx->trees) {
		ctx->trees = p;
		return 0;
	}
	if (p)
		p->next = ctx->trees;
	else
		ctx->first_trees = ctx->trees;
	ctx->tree_count = 31;
	return 1;
}
#endif

/* Drop every chunk reference taken after point (@p, @count) and rewind the
 * chain back to it; (NULL, 0) means "drop everything". */
static void unroll_tree_refs(struct audit_context *ctx,
		      struct audit_tree_refs *p, int count)
{
#ifdef CONFIG_AUDIT_TREE
	struct audit_tree_refs *q;
	int n;
	if (!p) {
		/* we started with empty chain */
		p = ctx->first_trees;
		count = 31;
		/* if the very first allocation has failed, nothing to do */
		if (!p)
			return;
	}
	n = count;
	for (q = p; q != ctx->trees; q = q->next, n = 31) {
		while (n--) {
			audit_put_chunk(q->c[n]);
			q->c[n] = NULL;
		}
	}
	while (n-- > ctx->tree_count) {
		audit_put_chunk(q->c[n]);
		q->c[n] = NULL;
	}
	ctx->trees = p;
	ctx->tree_count = count;
#endif
}

/* Free the ref-array chain itself (references already dropped). */
static void free_tree_refs(struct audit_context *ctx)
{
	struct audit_tree_refs *p, *q;
	for (p = ctx->first_trees; p; p = q)
{
		q = p->next;
		kfree(p);
	}
}

/* Does @tree match any chunk reference collected in @ctx? */
static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
{
#ifdef CONFIG_AUDIT_TREE
	struct audit_tree_refs *p;
	int n;
	if (!tree)
		return 0;
	/* full ones */
	for (p = ctx->first_trees; p != ctx->trees; p = p->next) {
		for (n = 0; n < 31; n++)
			if (audit_tree_match(p->c[n], tree))
				return 1;
	}
	/* partial */
	if (p) {
		for (n = ctx->tree_count; n < 31; n++)
			if (audit_tree_match(p->c[n], tree))
				return 1;
	}
#endif
	return 0;
}

/* Compare @uid against the object uid of @name, or of every collected
 * name in @ctx when @name is NULL; first non-zero comparator result wins. */
static int audit_compare_uid(kuid_t uid,
			     struct audit_names *name,
			     struct audit_field *f,
			     struct audit_context *ctx)
{
	struct audit_names *n;
	int rc;

	if (name) {
		rc = audit_uid_comparator(uid, f->op, name->uid);
		if (rc)
			return rc;
	}

	if (ctx) {
		list_for_each_entry(n, &ctx->names_list, list) {
			rc = audit_uid_comparator(uid, f->op, n->uid);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/* Same as audit_compare_uid(), for group ids. */
static int audit_compare_gid(kgid_t gid,
			     struct audit_names *name,
			     struct audit_field *f,
			     struct audit_context *ctx)
{
	struct audit_names *n;
	int rc;

	if (name) {
		rc = audit_gid_comparator(gid, f->op, name->gid);
		if (rc)
			return rc;
	}

	if (ctx) {
		list_for_each_entry(n, &ctx->names_list, list) {
			rc = audit_gid_comparator(gid, f->op, n->gid);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/* Dispatch an AUDIT_FIELD_COMPARE rule: compare two fields of the subject
 * and/or object credentials against each other. */
static int audit_field_compare(struct task_struct *tsk,
			       const struct cred *cred,
			       struct audit_field *f,
			       struct audit_context *ctx,
			       struct audit_names *name)
{
	switch (f->val) {
	/* process to file object comparisons */
	case AUDIT_COMPARE_UID_TO_OBJ_UID:
		return audit_compare_uid(cred->uid, name, f, ctx);
	case AUDIT_COMPARE_GID_TO_OBJ_GID:
		return audit_compare_gid(cred->gid, name, f, ctx);
	case AUDIT_COMPARE_EUID_TO_OBJ_UID:
		return audit_compare_uid(cred->euid, name, f, ctx);
	case AUDIT_COMPARE_EGID_TO_OBJ_GID:
		return audit_compare_gid(cred->egid, name, f, ctx);
	case AUDIT_COMPARE_AUID_TO_OBJ_UID:
		return audit_compare_uid(tsk->loginuid, name, f, ctx);
	case AUDIT_COMPARE_SUID_TO_OBJ_UID:
		return audit_compare_uid(cred->suid, name, f, ctx);
	case AUDIT_COMPARE_SGID_TO_OBJ_GID:
return audit_compare_gid(cred->sgid, name, f, ctx);
	case AUDIT_COMPARE_FSUID_TO_OBJ_UID:
		return audit_compare_uid(cred->fsuid, name, f, ctx);
	case AUDIT_COMPARE_FSGID_TO_OBJ_GID:
		return audit_compare_gid(cred->fsgid, name, f, ctx);
	/* uid comparisons */
	case AUDIT_COMPARE_UID_TO_AUID:
		return audit_uid_comparator(cred->uid, f->op, tsk->loginuid);
	case AUDIT_COMPARE_UID_TO_EUID:
		return audit_uid_comparator(cred->uid, f->op, cred->euid);
	case AUDIT_COMPARE_UID_TO_SUID:
		return audit_uid_comparator(cred->uid, f->op, cred->suid);
	case AUDIT_COMPARE_UID_TO_FSUID:
		return audit_uid_comparator(cred->uid, f->op, cred->fsuid);
	/* auid comparisons */
	case AUDIT_COMPARE_AUID_TO_EUID:
		return audit_uid_comparator(tsk->loginuid, f->op, cred->euid);
	case AUDIT_COMPARE_AUID_TO_SUID:
		return audit_uid_comparator(tsk->loginuid, f->op, cred->suid);
	case AUDIT_COMPARE_AUID_TO_FSUID:
		return audit_uid_comparator(tsk->loginuid, f->op, cred->fsuid);
	/* euid comparisons */
	case AUDIT_COMPARE_EUID_TO_SUID:
		return audit_uid_comparator(cred->euid, f->op, cred->suid);
	case AUDIT_COMPARE_EUID_TO_FSUID:
		return audit_uid_comparator(cred->euid, f->op, cred->fsuid);
	/* suid comparisons */
	case AUDIT_COMPARE_SUID_TO_FSUID:
		return audit_uid_comparator(cred->suid, f->op, cred->fsuid);
	/* gid comparisons */
	case AUDIT_COMPARE_GID_TO_EGID:
		return audit_gid_comparator(cred->gid, f->op, cred->egid);
	case AUDIT_COMPARE_GID_TO_SGID:
		return audit_gid_comparator(cred->gid, f->op, cred->sgid);
	case AUDIT_COMPARE_GID_TO_FSGID:
		return audit_gid_comparator(cred->gid, f->op, cred->fsgid);
	/* egid comparisons */
	case AUDIT_COMPARE_EGID_TO_SGID:
		return audit_gid_comparator(cred->egid, f->op, cred->sgid);
	case AUDIT_COMPARE_EGID_TO_FSGID:
		return audit_gid_comparator(cred->egid, f->op, cred->fsgid);
	/* sgid comparison */
	case AUDIT_COMPARE_SGID_TO_FSGID:
		return audit_gid_comparator(cred->sgid, f->op, cred->fsgid);
	default:
		WARN(1, "Missing AUDIT_COMPARE define.
Report as a bug\n");
		return 0;
	}
	return 0;
}

/* Determine if any context name data matches a rule's watch data */
/* Compare a task_struct with an audit_rule.  Return 1 on match, 0
 * otherwise.
 *
 * If task_creation is true, this is an explicit indication that we are
 * filtering a task rule at task creation time.  This and tsk == current are
 * the only situations where tsk->cred may be accessed without an rcu read lock.
 */
static int audit_filter_rules(struct task_struct *tsk,
			      struct audit_krule *rule,
			      struct audit_context *ctx,
			      struct audit_names *name,
			      enum audit_state *state,
			      bool task_creation)
{
	const struct cred *cred;
	int i, need_sid = 1;
	u32 sid;

	cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);

	/* every field of the rule must match; first non-match loses */
	for (i = 0; i < rule->field_count; i++) {
		struct audit_field *f = &rule->fields[i];
		struct audit_names *n;
		int result = 0;
		pid_t pid;

		switch (f->type) {
		case AUDIT_PID:
			pid = task_pid_nr(tsk);
			result = audit_comparator(pid, f->op, f->val);
			break;
		case AUDIT_PPID:
			if (ctx) {
				if (!ctx->ppid)
					ctx->ppid = task_ppid_nr(tsk);
				result = audit_comparator(ctx->ppid, f->op, f->val);
			}
			break;
		case AUDIT_EXE:
			result = audit_exe_compare(tsk, rule->exe);
			break;
		case AUDIT_UID:
			result = audit_uid_comparator(cred->uid, f->op, f->uid);
			break;
		case AUDIT_EUID:
			result = audit_uid_comparator(cred->euid, f->op, f->uid);
			break;
		case AUDIT_SUID:
			result = audit_uid_comparator(cred->suid, f->op, f->uid);
			break;
		case AUDIT_FSUID:
			result = audit_uid_comparator(cred->fsuid, f->op, f->uid);
			break;
		case AUDIT_GID:
			result = audit_gid_comparator(cred->gid, f->op, f->gid);
			/* (in)equality also considers supplementary groups */
			if (f->op == Audit_equal) {
				if (!result)
					result = in_group_p(f->gid);
			} else if (f->op == Audit_not_equal) {
				if (result)
					result = !in_group_p(f->gid);
			}
			break;
		case AUDIT_EGID:
			result = audit_gid_comparator(cred->egid, f->op, f->gid);
			if (f->op == Audit_equal) {
				if (!result)
					result = in_egroup_p(f->gid);
			} else if (f->op == Audit_not_equal) {
				if (result)
					result = !in_egroup_p(f->gid);
			}
			break;
		case AUDIT_SGID:
			result = audit_gid_comparator(cred->sgid, f->op, f->gid);
			break;
		case AUDIT_FSGID:
			result = audit_gid_comparator(cred->fsgid, f->op, f->gid);
			break;
		case AUDIT_PERS:
			result = audit_comparator(tsk->personality, f->op, f->val);
			break;
		case AUDIT_ARCH:
			if (ctx)
				result = audit_comparator(ctx->arch, f->op, f->val);
			break;
		case AUDIT_EXIT:
			if (ctx && ctx->return_valid)
				result = audit_comparator(ctx->return_code, f->op, f->val);
			break;
		case AUDIT_SUCCESS:
			if (ctx && ctx->return_valid) {
				if (f->val)
					result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS);
				else
					result = audit_comparator(ctx->return_valid, f->op, AUDITSC_FAILURE);
			}
			break;
		case AUDIT_DEVMAJOR:
			if (name) {
				if (audit_comparator(MAJOR(name->dev), f->op, f->val) ||
				    audit_comparator(MAJOR(name->rdev), f->op, f->val))
					++result;
			} else if (ctx) {
				list_for_each_entry(n, &ctx->names_list, list) {
					if (audit_comparator(MAJOR(n->dev), f->op, f->val) ||
					    audit_comparator(MAJOR(n->rdev), f->op, f->val)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_DEVMINOR:
			if (name) {
				if (audit_comparator(MINOR(name->dev), f->op, f->val) ||
				    audit_comparator(MINOR(name->rdev), f->op, f->val))
					++result;
			} else if (ctx) {
				list_for_each_entry(n, &ctx->names_list, list) {
					if (audit_comparator(MINOR(n->dev), f->op, f->val) ||
					    audit_comparator(MINOR(n->rdev), f->op, f->val)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_INODE:
			if (name)
				result = audit_comparator(name->ino, f->op, f->val);
			else if (ctx) {
				list_for_each_entry(n, &ctx->names_list, list) {
					if (audit_comparator(n->ino, f->op, f->val)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_OBJ_UID:
			if (name) {
				result = audit_uid_comparator(name->uid, f->op, f->uid);
			} else if (ctx) {
				list_for_each_entry(n, &ctx->names_list, list) {
					if (audit_uid_comparator(n->uid, f->op, f->uid)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_OBJ_GID:
			if (name) {
				result = audit_gid_comparator(name->gid, f->op, f->gid);
			} else if (ctx) {
				list_for_each_entry(n, &ctx->names_list, list) {
					if (audit_gid_comparator(n->gid, f->op, f->gid)) {
						++result;
						break;
					}
				}
			}
			break;
		case AUDIT_WATCH:
			if (name)
				result = audit_watch_compare(rule->watch, name->ino, name->dev);
			break;
		case AUDIT_DIR:
			if (ctx)
				result = match_tree_refs(ctx, rule->tree);
			break;
		case AUDIT_LOGINUID:
			result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
			break;
		case AUDIT_LOGINUID_SET:
			result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
			break;
		case AUDIT_SUBJ_USER:
		case AUDIT_SUBJ_ROLE:
		case AUDIT_SUBJ_TYPE:
		case AUDIT_SUBJ_SEN:
		case AUDIT_SUBJ_CLR:
			/* NOTE: this may return negative values indicating
			   a temporary error.  We simply treat this as a
			   match for now to avoid losing information that
			   may be wanted.   An error message will also be
			   logged upon error */
			if (f->lsm_rule) {
				if (need_sid) {
					security_task_getsecid(tsk, &sid);
					need_sid = 0;
				}
				result = security_audit_rule_match(sid, f->type,
								   f->op,
								   f->lsm_rule,
								   ctx);
			}
			break;
		case AUDIT_OBJ_USER:
		case AUDIT_OBJ_ROLE:
		case AUDIT_OBJ_TYPE:
		case AUDIT_OBJ_LEV_LOW:
		case AUDIT_OBJ_LEV_HIGH:
			/* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR
			   also applies here */
			if (f->lsm_rule) {
				/* Find files that match */
				if (name) {
					result = security_audit_rule_match(
						   name->osid, f->type, f->op,
						   f->lsm_rule, ctx);
				} else if (ctx) {
					list_for_each_entry(n, &ctx->names_list, list) {
						if (security_audit_rule_match(n->osid, f->type,
									      f->op, f->lsm_rule,
									      ctx)) {
							++result;
							break;
						}
					}
				}
				/* Find ipc objects that match */
				if (!ctx || ctx->type != AUDIT_IPC)
					break;
				if (security_audit_rule_match(ctx->ipc.osid,
							      f->type, f->op,
							      f->lsm_rule, ctx))
					++result;
			}
			break;
		case AUDIT_ARG0:
		case AUDIT_ARG1:
		case AUDIT_ARG2:
		case AUDIT_ARG3:
			if (ctx)
				result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val);
			break;
		case AUDIT_FILTERKEY:
			/* ignore this field for filtering */
			result = 1;
			break;
		case AUDIT_PERM:
			result = audit_match_perm(ctx, f->val);
			break;
		case AUDIT_FILETYPE:
			result = audit_match_filetype(ctx, f->val);
			break;
		case AUDIT_FIELD_COMPARE:
			result = audit_field_compare(tsk, cred, f, ctx, name);
			break;
		}
		if (!result)
			return 0;
	}

	/* the rule matched: record its key and priority in the context */
	if (ctx) {
		if (rule->prio <= ctx->prio)
			return 0;
		if (rule->filterkey) {
			kfree(ctx->filterkey);
			ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
		}
		ctx->prio = rule->prio;
	}
	switch (rule->action) {
	case AUDIT_NEVER:
		*state = AUDIT_DISABLED;
		break;
	case AUDIT_ALWAYS:
		*state = AUDIT_RECORD_CONTEXT;
		break;
	}
	return 1;
}

/* At process creation time, we can determine if system-call auditing is
 * completely disabled for this task.  Since we only have the task
 * structure at this point, we can only check uid and gid.
 */
static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
{
	struct audit_entry *e;
	enum audit_state   state;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
		if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
				       &state, true)) {
			if (state == AUDIT_RECORD_CONTEXT)
				*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
			rcu_read_unlock();
			return state;
		}
	}
	rcu_read_unlock();
	return AUDIT_BUILD_CONTEXT;
}

/* Is syscall number @val covered by the rule's syscall bitmask? */
static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
{
	int word, bit;

	if (val > 0xffffffff)
		return false;
	word = AUDIT_WORD(val);
	if (word >= AUDIT_BITMASK_SIZE)
		return false;
	bit = AUDIT_BIT(val);

	return rule->mask[word] & bit;
}

/* At syscall entry and exit time, this filter is called if the
 * audit_state is not low enough that auditing cannot take place, but is
 * also not high enough that we already know we have to write an audit
 * record (i.e., the state is AUDIT_SETUP_CONTEXT or AUDIT_BUILD_CONTEXT).
*/
static enum audit_state audit_filter_syscall(struct task_struct *tsk,
					     struct audit_context *ctx,
					     struct list_head *list)
{
	struct audit_entry *e;
	enum audit_state state;

	/* never audit the audit daemon itself */
	if (audit_pid && tsk->tgid == audit_pid)
		return AUDIT_DISABLED;

	rcu_read_lock();
	if (!list_empty(list)) {
		list_for_each_entry_rcu(e, list, list) {
			if (audit_in_mask(&e->rule, ctx->major) &&
			    audit_filter_rules(tsk, &e->rule, ctx, NULL,
					       &state, false)) {
				rcu_read_unlock();
				ctx->current_state = state;
				return state;
			}
		}
	}
	rcu_read_unlock();
	return AUDIT_BUILD_CONTEXT;
}

/*
 * Given an audit_name check the inode hash table to see if they match.
 * Called holding the rcu read lock to protect the use of audit_inode_hash
 */
static int audit_filter_inode_name(struct task_struct *tsk,
				   struct audit_names *n,
				   struct audit_context *ctx) {
	int h = audit_hash_ino((u32)n->ino);
	struct list_head *list = &audit_inode_hash[h];
	struct audit_entry *e;
	enum audit_state state;

	if (list_empty(list))
		return 0;

	list_for_each_entry_rcu(e, list, list) {
		if (audit_in_mask(&e->rule, ctx->major) &&
		    audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
			ctx->current_state = state;
			return 1;
		}
	}

	return 0;
}

/* At syscall exit time, this filter is called if any audit_names have been
 * collected during syscall processing.  We only check rules in sublists at hash
 * buckets applicable to the inode numbers in audit_names.
 * Regarding audit_state, same rules apply as for audit_filter_syscall().
*/
void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
{
	struct audit_names *n;

	/* never audit the audit daemon */
	if (audit_pid && tsk->tgid == audit_pid)
		return;

	rcu_read_lock();

	list_for_each_entry(n, &ctx->names_list, list) {
		if (audit_filter_inode_name(tsk, n, ctx))
			break;
	}
	rcu_read_unlock();
}

/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */
static inline struct audit_context *audit_take_context(struct task_struct *tsk,
						      int return_valid,
						      long return_code)
{
	struct audit_context *context = tsk->audit_context;

	if (!context)
		return NULL;
	context->return_valid = return_valid;

	/*
	 * we need to fix up the return code in the audit logs if the actual
	 * return codes are later going to be fixed up by the arch specific
	 * signal handlers
	 *
	 * This is actually a test for:
	 * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
	 * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
	 *
	 * but is faster than a bunch of ||
	 */
	if (unlikely(return_code <= -ERESTARTSYS) &&
	    (return_code >= -ERESTART_RESTARTBLOCK) &&
	    (return_code != -ENOIOCTLCMD))
		context->return_code = -EINTR;
	else
		context->return_code = return_code;

	if (context->in_syscall && !context->dummy) {
		audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]);
		audit_filter_inodes(tsk, context);
	}

	tsk->audit_context = NULL;
	return context;
}

/* Release the cached proctitle (cmdline) value, if any. */
static inline void audit_proctitle_free(struct audit_context *context)
{
	kfree(context->proctitle.value);
	context->proctitle.value = NULL;
	context->proctitle.len = 0;
}

/* Drop every collected audit_names entry and the cached working directory. */
static inline void audit_free_names(struct audit_context *context)
{
	struct audit_names *n, *next;

	list_for_each_entry_safe(n, next, &context->names_list, list) {
		list_del(&n->list);
		if (n->name)
			putname(n->name);
		if (n->should_free)
			kfree(n);
	}
	context->name_count = 0;
	path_put(&context->pwd);
	context->pwd.dentry = NULL;
	context->pwd.mnt = NULL;
}

/* Free both aux chains: generic aux records and signal-target records. */
static inline void audit_free_aux(struct audit_context *context)
{
	struct audit_aux_data *aux;

	while ((aux = context->aux)) {
context->aux = aux->next;
		kfree(aux);
	}
	while ((aux = context->aux_pids)) {
		context->aux_pids = aux->next;
		kfree(aux);
	}
}

/* Allocate and minimally initialize a fresh audit_context in @state. */
static inline struct audit_context *audit_alloc_context(enum audit_state state)
{
	struct audit_context *context;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return NULL;
	context->state = state;
	/* a forced-record context starts at the highest possible priority */
	context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
	INIT_LIST_HEAD(&context->killed_trees);
	INIT_LIST_HEAD(&context->names_list);
	return context;
}

/**
 * audit_alloc - allocate an audit context block for a task
 * @tsk: task
 *
 * Filter on the task information and allocate a per-task audit context
 * if necessary.  Doing so turns on system call auditing for the
 * specified task.  This is called from copy_process, so no lock is
 * needed.
 */
int audit_alloc(struct task_struct *tsk)
{
	struct audit_context *context;
	enum audit_state     state;
	char *key = NULL;

	if (likely(!audit_ever_enabled))
		return 0; /* Return if not auditing. */

	state = audit_filter_task(tsk, &key);
	if (state == AUDIT_DISABLED) {
		clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
		return 0;
	}

	if (!(context = audit_alloc_context(state))) {
		kfree(key);
		audit_log_lost("out of memory in audit_alloc");
		return -ENOMEM;
	}
	context->filterkey = key;

	tsk->audit_context = context;
	set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
	return 0;
}

/* Tear down every resource owned by @context, then the context itself. */
static inline void audit_free_context(struct audit_context *context)
{
	audit_free_names(context);
	unroll_tree_refs(context, NULL, 0);
	free_tree_refs(context);
	audit_free_aux(context);
	kfree(context->filterkey);
	kfree(context->sockaddr);
	audit_proctitle_free(context);
	kfree(context);
}

/* Emit an AUDIT_OBJ_PID record for one signal/ptrace target; returns
 * non-zero when the security context could not be translated. */
static int audit_log_pid_context(struct audit_context *context, pid_t pid,
				 kuid_t auid, kuid_t uid, unsigned int sessionid,
				 u32 sid, char *comm)
{
	struct audit_buffer *ab;
	char *ctx = NULL;
	u32 len;
	int rc = 0;

	ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID);
	if (!ab)
		return rc;

	audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
			 from_kuid(&init_user_ns,
auid),
			 from_kuid(&init_user_ns, uid), sessionid);
	if (sid) {
		if (security_secid_to_secctx(sid, &ctx, &len)) {
			audit_log_format(ab, " obj=(none)");
			rc = 1;
		} else {
			audit_log_format(ab, " obj=%s", ctx);
			security_release_secctx(ctx, len);
		}
	}
	audit_log_format(ab, " ocomm=");
	audit_log_untrustedstring(ab, comm);
	audit_log_end(ab);

	return rc;
}

/* Log the execve() argv strings, splitting across multiple AUDIT_EXECVE
 * records when the total exceeds MAX_EXECVE_AUDIT_LEN; arguments containing
 * control characters are hex-encoded. */
static void audit_log_execve_info(struct audit_context *context,
				  struct audit_buffer **ab)
{
	long len_max;
	long len_rem;
	long len_full;
	long len_buf;
	long len_abuf;
	long len_tmp;
	bool require_data;
	bool encode;
	unsigned int iter;
	unsigned int arg;
	char *buf_head;
	char *buf;
	const char __user *p = (const char __user *)current->mm->arg_start;

	/* NOTE: this buffer needs to be large enough to hold all the non-arg
	 *       data we put in the audit record for this argument (see the
	 *       code below) ... at this point in time 96 is plenty */
	char abuf[96];

	/* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the
	 *       current value of 7500 is not as important as the fact that it
	 *       is less than 8k, a setting of 7500 gives us plenty of wiggle
	 *       room if we go over a little bit in the logging below */
	WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);
	len_max = MAX_EXECVE_AUDIT_LEN;

	/* scratch buffer to hold the userspace args */
	buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
	if (!buf_head) {
		audit_panic("out of memory for argv string");
		return;
	}
	buf = buf_head;

	audit_log_format(*ab, "argc=%d", context->execve.argc);

	len_rem = len_max;
	len_buf = 0;
	len_full = 0;
	require_data = true;
	encode = false;
	iter = 0;
	arg = 0;
	do {
		/* NOTE: we don't ever want to trust this value for anything
		 *       serious, but the audit record format insists we
		 *       provide an argument length for really long arguments,
		 *       e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but
		 *       to use strncpy_from_user() to obtain this value for
		 *       recording in the log, although we don't use it
		 *       anywhere here to avoid a double-fetch problem */
		if (len_full == 0)
			len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;

		/* read more data from userspace */
		if (require_data) {
			/* can we make more room in the buffer? */
			if (buf != buf_head) {
				memmove(buf_head, buf, len_buf);
				buf = buf_head;
			}

			/* fetch as much as we can of the argument */
			len_tmp = strncpy_from_user(&buf_head[len_buf], p,
						    len_max - len_buf);
			if (len_tmp == -EFAULT) {
				/* unable to copy from userspace */
				send_sig(SIGKILL, current, 0);
				goto out;
			} else if (len_tmp == (len_max - len_buf)) {
				/* buffer is not large enough */
				require_data = true;
				/* NOTE: if we are going to span multiple
				 *       buffers force the encoding so we stand
				 *       a chance at a sane len_full value and
				 *       consistent record encoding */
				encode = true;
				len_full = len_full * 2;
				p += len_tmp;
			} else {
				require_data = false;
				if (!encode)
					encode = audit_string_contains_control(
								buf, len_tmp);
				/* try to use a trusted value for len_full */
				if (len_full < len_max)
					len_full = (encode ?
						    len_tmp * 2 : len_tmp);
				p += len_tmp + 1;
			}
			len_buf += len_tmp;
			buf_head[len_buf] = '\0';

			/* length of the buffer in the audit record? */
			len_abuf = (encode ? len_buf * 2 : len_buf + 2);
		}

		/* write as much as we can to the audit log */
		if (len_buf > 0) {
			/* NOTE: some magic numbers here - basically if we
			 *       can't fit a reasonable amount of data into the
			 *       existing audit buffer, flush it and start with
			 *       a new buffer */
			if ((sizeof(abuf) + 8) > len_rem) {
				len_rem = len_max;
				audit_log_end(*ab);
				*ab = audit_log_start(context,
						      GFP_KERNEL, AUDIT_EXECVE);
				if (!*ab)
					goto out;
			}

			/* create the non-arg portion of the arg record */
			len_tmp = 0;
			if (require_data || (iter > 0) ||
			    ((len_abuf + sizeof(abuf)) > len_rem)) {
				if (iter == 0) {
					len_tmp += snprintf(&abuf[len_tmp],
							sizeof(abuf) - len_tmp,
							" a%d_len=%lu",
							arg, len_full);
				}
				len_tmp += snprintf(&abuf[len_tmp],
						    sizeof(abuf) - len_tmp,
						    " a%d[%d]=", arg, iter++);
			} else
				len_tmp += snprintf(&abuf[len_tmp],
						    sizeof(abuf) - len_tmp,
						    " a%d=", arg);
			WARN_ON(len_tmp >= sizeof(abuf));
			abuf[sizeof(abuf) - 1] = '\0';

			/* log the arg in the audit record */
			audit_log_format(*ab, "%s", abuf);
			len_rem -= len_tmp;
			len_tmp = len_buf;
			if (encode) {
				if (len_abuf > len_rem)
					len_tmp = len_rem / 2; /* encoding */
				audit_log_n_hex(*ab, buf, len_tmp);
				len_rem -= len_tmp * 2;
				len_abuf -= len_tmp * 2;
			} else {
				if (len_abuf > len_rem)
					len_tmp = len_rem - 2; /* quotes */
				audit_log_n_string(*ab, buf, len_tmp);
				len_rem -= len_tmp + 2;
				/* don't subtract the "2" because we still need
				 * to add quotes to the remaining string */
				len_abuf -= len_tmp;
			}
			len_buf -= len_tmp;
			buf += len_tmp;
		}

		/* ready to move to the next argument? */
		if ((len_buf == 0) && !require_data) {
			arg++;
			iter = 0;
			len_full = 0;
			require_data = true;
			encode = false;
		}
	} while (arg < context->execve.argc);

	/* NOTE: the caller handles the final audit_log_end() call */

out:
	kfree(buf_head);
}

/* Emit the record described by context->type (socketcall/ipc/mq/...);
 * sets *call_panic when a security context could not be translated. */
static void show_special(struct audit_context *context, int *call_panic)
{
	struct audit_buffer *ab;
	int i;

	ab = audit_log_start(context, GFP_KERNEL, context->type);
	if (!ab)
		return;

	switch (context->type) {
	case AUDIT_SOCKETCALL: {
		int nargs = context->socketcall.nargs;
		audit_log_format(ab, "nargs=%d", nargs);
		for (i = 0; i < nargs; i++)
			audit_log_format(ab, " a%d=%lx", i,
				context->socketcall.args[i]);
		break; }
	case AUDIT_IPC: {
		u32 osid = context->ipc.osid;

		audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho",
				 from_kuid(&init_user_ns, context->ipc.uid),
				 from_kgid(&init_user_ns, context->ipc.gid),
				 context->ipc.mode);
		if (osid) {
			char *ctx = NULL;
			u32 len;
			if (security_secid_to_secctx(osid, &ctx, &len)) {
				audit_log_format(ab, " osid=%u", osid);
				*call_panic = 1;
			} else {
				audit_log_format(ab, " obj=%s", ctx);
				security_release_secctx(ctx, len);
			}
		}
		if (context->ipc.has_perm) {
			audit_log_end(ab);
			ab = audit_log_start(context, GFP_KERNEL,
					     AUDIT_IPC_SET_PERM);
			if (unlikely(!ab))
				return;
			audit_log_format(ab,
				"qbytes=%lx ouid=%u ogid=%u mode=%#ho",
				context->ipc.qbytes,
				context->ipc.perm_uid,
				context->ipc.perm_gid,
				context->ipc.perm_mode);
		}
		break; }
	case AUDIT_MQ_OPEN: {
		audit_log_format(ab,
			"oflag=0x%x mode=%#ho mq_flags=0x%lx mq_maxmsg=%ld "
			"mq_msgsize=%ld mq_curmsgs=%ld",
			context->mq_open.oflag, context->mq_open.mode,
			context->mq_open.attr.mq_flags,
			context->mq_open.attr.mq_maxmsg,
			context->mq_open.attr.mq_msgsize,
			context->mq_open.attr.mq_curmsgs);
		break; }
	case AUDIT_MQ_SENDRECV: {
		audit_log_format(ab,
			"mqdes=%d msg_len=%zd msg_prio=%u "
			"abs_timeout_sec=%ld abs_timeout_nsec=%ld",
			context->mq_sendrecv.mqdes,
			context->mq_sendrecv.msg_len,
			context->mq_sendrecv.msg_prio,
			context->mq_sendrecv.abs_timeout.tv_sec,
context->mq_sendrecv.abs_timeout.tv_nsec); break; } case AUDIT_MQ_NOTIFY: { audit_log_format(ab, "mqdes=%d sigev_signo=%d", context->mq_notify.mqdes, context->mq_notify.sigev_signo); break; } case AUDIT_MQ_GETSETATTR: { struct mq_attr *attr = &context->mq_getsetattr.mqstat; audit_log_format(ab, "mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld " "mq_curmsgs=%ld ", context->mq_getsetattr.mqdes, attr->mq_flags, attr->mq_maxmsg, attr->mq_msgsize, attr->mq_curmsgs); break; } case AUDIT_CAPSET: { audit_log_format(ab, "pid=%d", context->capset.pid); audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable); audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); break; } case AUDIT_MMAP: { audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, context->mmap.flags); break; } case AUDIT_EXECVE: { audit_log_execve_info(context, &ab); break; } } audit_log_end(ab); } static inline int audit_proctitle_rtrim(char *proctitle, int len) { char *end = proctitle + len - 1; while (end > proctitle && !isprint(*end)) end--; /* catch the case where proctitle is only 1 non-print character */ len = end - proctitle + 1; len -= isprint(proctitle[len-1]) == 0; return len; } static void audit_log_proctitle(struct task_struct *tsk, struct audit_context *context) { int res; char *buf; char *msg = "(null)"; int len = strlen(msg); struct audit_buffer *ab; ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); if (!ab) return; /* audit_panic or being filtered */ audit_log_format(ab, "proctitle="); /* Not cached */ if (!context->proctitle.value) { buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL); if (!buf) goto out; /* Historically called this from procfs naming */ res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN); if (res == 0) { kfree(buf); goto out; } res = audit_proctitle_rtrim(buf, res); if (res == 0) { kfree(buf); goto out; } context->proctitle.value = buf; context->proctitle.len = res; } msg = 
context->proctitle.value; len = context->proctitle.len; out: audit_log_n_untrustedstring(ab, msg, len); audit_log_end(ab); } static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { int i, call_panic = 0; struct audit_buffer *ab; struct audit_aux_data *aux; struct audit_names *n; /* tsk == current */ context->personality = tsk->personality; ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); if (!ab) return; /* audit_panic has been called */ audit_log_format(ab, "arch=%x syscall=%d", context->arch, context->major); if (context->personality != PER_LINUX) audit_log_format(ab, " per=%lx", context->personality); if (context->return_valid) audit_log_format(ab, " success=%s exit=%ld", (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", context->return_code); audit_log_format(ab, " a0=%lx a1=%lx a2=%lx a3=%lx items=%d", context->argv[0], context->argv[1], context->argv[2], context->argv[3], context->name_count); audit_log_task_info(ab, tsk); audit_log_key(ab, context->filterkey); audit_log_end(ab); for (aux = context->aux; aux; aux = aux->next) { ab = audit_log_start(context, GFP_KERNEL, aux->type); if (!ab) continue; /* audit_panic has been called */ switch (aux->type) { case AUDIT_BPRM_FCAPS: { struct audit_aux_data_bprm_fcaps *axs = (void *)aux; audit_log_format(ab, "fver=%x", axs->fcap_ver); audit_log_cap(ab, "fp", &axs->fcap.permitted); audit_log_cap(ab, "fi", &axs->fcap.inheritable); audit_log_format(ab, " fe=%d", axs->fcap.fE); audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted); audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable); audit_log_cap(ab, "old_pe", &axs->old_pcap.effective); audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted); audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable); audit_log_cap(ab, "new_pe", &axs->new_pcap.effective); break; } } audit_log_end(ab); } if (context->type) show_special(context, &call_panic); if (context->fds[0] >= 0) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR); if 
(ab) { audit_log_format(ab, "fd0=%d fd1=%d", context->fds[0], context->fds[1]); audit_log_end(ab); } } if (context->sockaddr_len) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR); if (ab) { audit_log_format(ab, "saddr="); audit_log_n_hex(ab, (void *)context->sockaddr, context->sockaddr_len); audit_log_end(ab); } } for (aux = context->aux_pids; aux; aux = aux->next) { struct audit_aux_data_pids *axs = (void *)aux; for (i = 0; i < axs->pid_count; i++) if (audit_log_pid_context(context, axs->target_pid[i], axs->target_auid[i], axs->target_uid[i], axs->target_sessionid[i], axs->target_sid[i], axs->target_comm[i])) call_panic = 1; } if (context->target_pid && audit_log_pid_context(context, context->target_pid, context->target_auid, context->target_uid, context->target_sessionid, context->target_sid, context->target_comm)) call_panic = 1; if (context->pwd.dentry && context->pwd.mnt) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); if (ab) { audit_log_d_path(ab, "cwd=", &context->pwd); audit_log_end(ab); } } i = 0; list_for_each_entry(n, &context->names_list, list) { if (n->hidden) continue; audit_log_name(context, n, NULL, i++, &call_panic); } audit_log_proctitle(tsk, context); /* Send end of event record to help user space know we are finished */ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); if (ab) audit_log_end(ab); if (call_panic) audit_panic("error converting sid to string"); } /** * audit_free - free a per-task audit context * @tsk: task whose audit context block to free * * Called from copy_process and do_exit */ void __audit_free(struct task_struct *tsk) { struct audit_context *context; context = audit_take_context(tsk, 0, 0); if (!context) return; /* Check for system calls that do not go through the exit * function (e.g., exit_group), then free context block. 
* We use GFP_ATOMIC here because we might be doing this * in the context of the idle thread */ /* that can happen only if we are called from do_exit() */ if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_context(context); } /** * audit_syscall_entry - fill in an audit record at syscall entry * @major: major syscall type (function) * @a1: additional syscall register 1 * @a2: additional syscall register 2 * @a3: additional syscall register 3 * @a4: additional syscall register 4 * * Fill in audit context at syscall entry. This only happens if the * audit context was created when the task was created and the state or * filters demand the audit context be built. If the state from the * per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT, * then the record will be written at syscall exit time (otherwise, it * will only be written if another part of the kernel requests that it * be written). 
*/ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4) { struct task_struct *tsk = current; struct audit_context *context = tsk->audit_context; enum audit_state state; if (!context) return; BUG_ON(context->in_syscall || context->name_count); if (!audit_enabled) return; context->arch = syscall_get_arch(); context->major = major; context->argv[0] = a1; context->argv[1] = a2; context->argv[2] = a3; context->argv[3] = a4; state = context->state; context->dummy = !audit_n_rules; if (!context->dummy && state == AUDIT_BUILD_CONTEXT) { context->prio = 0; state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); } if (state == AUDIT_DISABLED) return; context->serial = 0; context->ctime = CURRENT_TIME; context->in_syscall = 1; context->current_state = state; context->ppid = 0; } /** * audit_syscall_exit - deallocate audit context after a system call * @success: success value of the syscall * @return_code: return value of the syscall * * Tear down after system call. If the audit context has been marked as * auditable (either because of the AUDIT_RECORD_CONTEXT state from * filtering, or because some other part of the kernel wrote an audit * message), then write out the syscall information. In call cases, * free the names stored from getname(). */ void __audit_syscall_exit(int success, long return_code) { struct task_struct *tsk = current; struct audit_context *context; if (success) success = AUDITSC_SUCCESS; else success = AUDITSC_FAILURE; context = audit_take_context(tsk, success, return_code); if (!context) return; if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); context->in_syscall = 0; context->prio = context->state == AUDIT_RECORD_CONTEXT ? 
~0ULL : 0; if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_names(context); unroll_tree_refs(context, NULL, 0); audit_free_aux(context); context->aux = NULL; context->aux_pids = NULL; context->target_pid = 0; context->target_sid = 0; context->sockaddr_len = 0; context->type = 0; context->fds[0] = -1; if (context->state != AUDIT_RECORD_CONTEXT) { kfree(context->filterkey); context->filterkey = NULL; } tsk->audit_context = context; } static inline void handle_one(const struct inode *inode) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; struct audit_chunk *chunk; int count; if (likely(hlist_empty(&inode->i_fsnotify_marks))) return; context = current->audit_context; p = context->trees; count = context->tree_count; rcu_read_lock(); chunk = audit_tree_lookup(inode); rcu_read_unlock(); if (!chunk) return; if (likely(put_tree_ref(context, chunk))) return; if (unlikely(!grow_tree_refs(context))) { pr_warn("out of memory, audit has lost a tree reference\n"); audit_set_auditable(context); audit_put_chunk(chunk); unroll_tree_refs(context, p, count); return; } put_tree_ref(context, chunk); #endif } static void handle_path(const struct dentry *dentry) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; const struct dentry *d, *parent; struct audit_chunk *drop; unsigned long seq; int count; context = current->audit_context; p = context->trees; count = context->tree_count; retry: drop = NULL; d = dentry; rcu_read_lock(); seq = read_seqbegin(&rename_lock); for(;;) { struct inode *inode = d_backing_inode(d); if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) { struct audit_chunk *chunk; chunk = audit_tree_lookup(inode); if (chunk) { if (unlikely(!put_tree_ref(context, chunk))) { drop = chunk; break; } } } parent = d->d_parent; if (parent == d) break; d = parent; } if (unlikely(read_seqretry(&rename_lock, seq) || drop)) { /* in this order */ 
rcu_read_unlock(); if (!drop) { /* just a race with rename */ unroll_tree_refs(context, p, count); goto retry; } audit_put_chunk(drop); if (grow_tree_refs(context)) { /* OK, got more space */ unroll_tree_refs(context, p, count); goto retry; } /* too bad */ pr_warn("out of memory, audit has lost a tree reference\n"); unroll_tree_refs(context, p, count); audit_set_auditable(context); return; } rcu_read_unlock(); #endif } static struct audit_names *audit_alloc_name(struct audit_context *context, unsigned char type) { struct audit_names *aname; if (context->name_count < AUDIT_NAMES) { aname = &context->preallocated_names[context->name_count]; memset(aname, 0, sizeof(*aname)); } else { aname = kzalloc(sizeof(*aname), GFP_NOFS); if (!aname) return NULL; aname->should_free = true; } aname->ino = AUDIT_INO_UNSET; aname->type = type; list_add_tail(&aname->list, &context->names_list); context->name_count++; return aname; } /** * audit_reusename - fill out filename with info from existing entry * @uptr: userland ptr to pathname * * Search the audit_names list for the current audit context. If there is an * existing entry with a matching "uptr" then return the filename * associated with that audit_name. If not, return NULL. */ struct filename * __audit_reusename(const __user char *uptr) { struct audit_context *context = current->audit_context; struct audit_names *n; list_for_each_entry(n, &context->names_list, list) { if (!n->name) continue; if (n->name->uptr == uptr) { n->name->refcnt++; return n->name; } } return NULL; } /** * audit_getname - add a name to the list * @name: name to add * * Add a name to the list of audit names for this context. * Called from fs/namei.c:getname(). 
*/ void __audit_getname(struct filename *name) { struct audit_context *context = current->audit_context; struct audit_names *n; if (!context->in_syscall) return; n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; n->name = name; n->name_len = AUDIT_NAME_FULL; name->aname = n; name->refcnt++; if (!context->pwd.dentry) get_fs_pwd(current->fs, &context->pwd); } /** * __audit_inode - store the inode and device from a lookup * @name: name being audited * @dentry: dentry being audited * @flags: attributes for this particular entry */ void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); struct audit_names *n; bool parent = flags & AUDIT_INODE_PARENT; if (!context->in_syscall) return; if (!name) goto out_alloc; /* * If we have a pointer to an audit_names entry already, then we can * just use it directly if the type is correct. */ n = name->aname; if (n) { if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } list_for_each_entry_reverse(n, &context->names_list, list) { if (n->ino) { /* valid inode number, use that for the comparison */ if (n->ino != inode->i_ino || n->dev != inode->i_sb->s_dev) continue; } else if (n->name) { /* inode number has not been set, check the name */ if (strcmp(n->name->name, name->name)) continue; } else /* no inode and no name (?!) ... this is odd ... */ continue; /* match the correct record type */ if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } out_alloc: /* unable to find an entry with both a matching name and type */ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; if (name) { n->name = name; name->refcnt++; } out: if (parent) { n->name_len = n->name ? 
parent_len(n->name->name) : AUDIT_NAME_FULL; n->type = AUDIT_TYPE_PARENT; if (flags & AUDIT_INODE_HIDDEN) n->hidden = true; } else { n->name_len = AUDIT_NAME_FULL; n->type = AUDIT_TYPE_NORMAL; } handle_path(dentry); audit_copy_inode(n, dentry, inode); } void __audit_file(const struct file *file) { __audit_inode(NULL, file->f_path.dentry, 0); } /** * __audit_inode_child - collect inode info for created/removed objects * @parent: inode of dentry parent * @dentry: dentry being audited * @type: AUDIT_TYPE_* value that we're looking for * * For syscalls that create or remove filesystem objects, audit_inode * can only collect information for the filesystem object's parent. * This call updates the audit context with the child's information. * Syscalls that create a new filesystem object must be hooked after * the object is created. Syscalls that remove a filesystem object * must be hooked prior, in order to capture the target inode during * unsuccessful attempts. */ void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); const char *dname = dentry->d_name.name; struct audit_names *n, *found_parent = NULL, *found_child = NULL; if (!context->in_syscall) return; if (inode) handle_one(inode); /* look for a parent entry first */ list_for_each_entry(n, &context->names_list, list) { if (!n->name || (n->type != AUDIT_TYPE_PARENT && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (n->ino == parent->i_ino && n->dev == parent->i_sb->s_dev && !audit_compare_dname_path(dname, n->name->name, n->name_len)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = AUDIT_TYPE_PARENT; found_parent = n; break; } } /* is there a matching child entry? 
*/ list_for_each_entry(n, &context->names_list, list) { /* can only match entries that have a name */ if (!n->name || (n->type != type && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (!strcmp(dname, n->name->name) || !audit_compare_dname_path(dname, n->name->name, found_parent ? found_parent->name_len : AUDIT_NAME_FULL)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = type; found_child = n; break; } } if (!found_parent) { /* create a new, "anonymous" parent record */ n = audit_alloc_name(context, AUDIT_TYPE_PARENT); if (!n) return; audit_copy_inode(n, NULL, parent); } if (!found_child) { found_child = audit_alloc_name(context, type); if (!found_child) return; /* Re-use the name belonging to the slot for a matching parent * directory. All names for this context are relinquished in * audit_free_names() */ if (found_parent) { found_child->name = found_parent->name; found_child->name_len = AUDIT_NAME_FULL; found_child->name->refcnt++; } } if (inode) audit_copy_inode(found_child, dentry, inode); else found_child->ino = AUDIT_INO_UNSET; } EXPORT_SYMBOL_GPL(__audit_inode_child); /** * auditsc_get_stamp - get local copies of audit_context values * @ctx: audit_context for the task * @t: timespec to store time recorded in the audit_context * @serial: serial value that is recorded in the audit_context * * Also sets the context as auditable. 
*/ int auditsc_get_stamp(struct audit_context *ctx, struct timespec *t, unsigned int *serial) { if (!ctx->in_syscall) return 0; if (!ctx->serial) ctx->serial = audit_serial(); t->tv_sec = ctx->ctime.tv_sec; t->tv_nsec = ctx->ctime.tv_nsec; *serial = ctx->serial; if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } return 1; } /* global counter which is incremented every time something logs in */ static atomic_t session_id = ATOMIC_INIT(0); static int audit_set_loginuid_perm(kuid_t loginuid) { /* if we are unset, we don't need privs */ if (!audit_loginuid_set(current)) return 0; /* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/ if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE)) return -EPERM; /* it is set, you need permission */ if (!capable(CAP_AUDIT_CONTROL)) return -EPERM; /* reject if this is not an unset and we don't allow that */ if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid)) return -EPERM; return 0; } static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, unsigned int oldsessionid, unsigned int sessionid, int rc) { struct audit_buffer *ab; uid_t uid, oldloginuid, loginuid; if (!audit_enabled) return; uid = from_kuid(&init_user_ns, task_uid(current)); oldloginuid = from_kuid(&init_user_ns, koldloginuid); loginuid = from_kuid(&init_user_ns, kloginuid), ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); audit_log_task_context(ab); audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", oldloginuid, loginuid, oldsessionid, sessionid, !rc); audit_log_end(ab); } /** * audit_set_loginuid - set current task's audit_context loginuid * @loginuid: loginuid value * * Returns 0. * * Called (set) from fs/proc/base.c::proc_loginuid_write(). 
*/ int audit_set_loginuid(kuid_t loginuid) { struct task_struct *task = current; unsigned int oldsessionid, sessionid = (unsigned int)-1; kuid_t oldloginuid; int rc; oldloginuid = audit_get_loginuid(current); oldsessionid = audit_get_sessionid(current); rc = audit_set_loginuid_perm(loginuid); if (rc) goto out; /* are we setting or clearing? */ if (uid_valid(loginuid)) sessionid = (unsigned int)atomic_inc_return(&session_id); task->sessionid = sessionid; task->loginuid = loginuid; out: audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc); return rc; } /** * __audit_mq_open - record audit data for a POSIX MQ open * @oflag: open flag * @mode: mode bits * @attr: queue attributes * */ void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { struct audit_context *context = current->audit_context; if (attr) memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr)); else memset(&context->mq_open.attr, 0, sizeof(struct mq_attr)); context->mq_open.oflag = oflag; context->mq_open.mode = mode; context->type = AUDIT_MQ_OPEN; } /** * __audit_mq_sendrecv - record audit data for a POSIX MQ timed send/receive * @mqdes: MQ descriptor * @msg_len: Message length * @msg_prio: Message priority * @abs_timeout: Message timeout in absolute time * */ void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) { struct audit_context *context = current->audit_context; struct timespec *p = &context->mq_sendrecv.abs_timeout; if (abs_timeout) memcpy(p, abs_timeout, sizeof(struct timespec)); else memset(p, 0, sizeof(struct timespec)); context->mq_sendrecv.mqdes = mqdes; context->mq_sendrecv.msg_len = msg_len; context->mq_sendrecv.msg_prio = msg_prio; context->type = AUDIT_MQ_SENDRECV; } /** * __audit_mq_notify - record audit data for a POSIX MQ notify * @mqdes: MQ descriptor * @notification: Notification event * */ void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { struct 
audit_context *context = current->audit_context; if (notification) context->mq_notify.sigev_signo = notification->sigev_signo; else context->mq_notify.sigev_signo = 0; context->mq_notify.mqdes = mqdes; context->type = AUDIT_MQ_NOTIFY; } /** * __audit_mq_getsetattr - record audit data for a POSIX MQ get/set attribute * @mqdes: MQ descriptor * @mqstat: MQ flags * */ void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { struct audit_context *context = current->audit_context; context->mq_getsetattr.mqdes = mqdes; context->mq_getsetattr.mqstat = *mqstat; context->type = AUDIT_MQ_GETSETATTR; } /** * audit_ipc_obj - record audit data for ipc object * @ipcp: ipc permissions * */ void __audit_ipc_obj(struct kern_ipc_perm *ipcp) { struct audit_context *context = current->audit_context; context->ipc.uid = ipcp->uid; context->ipc.gid = ipcp->gid; context->ipc.mode = ipcp->mode; context->ipc.has_perm = 0; security_ipc_getsecid(ipcp, &context->ipc.osid); context->type = AUDIT_IPC; } /** * audit_ipc_set_perm - record audit data for new ipc permissions * @qbytes: msgq bytes * @uid: msgq user id * @gid: msgq group id * @mode: msgq mode (permissions) * * Called only after audit_ipc_obj(). */ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { struct audit_context *context = current->audit_context; context->ipc.qbytes = qbytes; context->ipc.perm_uid = uid; context->ipc.perm_gid = gid; context->ipc.perm_mode = mode; context->ipc.has_perm = 1; } void __audit_bprm(struct linux_binprm *bprm) { struct audit_context *context = current->audit_context; context->type = AUDIT_EXECVE; context->execve.argc = bprm->argc; } /** * audit_socketcall - record audit data for sys_socketcall * @nargs: number of args, which should not be more than AUDITSC_ARGS. 
* @args: args array * */ int __audit_socketcall(int nargs, unsigned long *args) { struct audit_context *context = current->audit_context; if (nargs <= 0 || nargs > AUDITSC_ARGS || !args) return -EINVAL; context->type = AUDIT_SOCKETCALL; context->socketcall.nargs = nargs; memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); return 0; } /** * __audit_fd_pair - record audit data for pipe and socketpair * @fd1: the first file descriptor * @fd2: the second file descriptor * */ void __audit_fd_pair(int fd1, int fd2) { struct audit_context *context = current->audit_context; context->fds[0] = fd1; context->fds[1] = fd2; } /** * audit_sockaddr - record audit data for sys_bind, sys_connect, sys_sendto * @len: data length in user space * @a: data address in kernel space * * Returns 0 for success or NULL context or < 0 on error. */ int __audit_sockaddr(int len, void *a) { struct audit_context *context = current->audit_context; if (!context->sockaddr) { void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL); if (!p) return -ENOMEM; context->sockaddr = p; } context->sockaddr_len = len; memcpy(context->sockaddr, a, len); return 0; } void __audit_ptrace(struct task_struct *t) { struct audit_context *context = current->audit_context; context->target_pid = task_pid_nr(t); context->target_auid = audit_get_loginuid(t); context->target_uid = task_uid(t); context->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &context->target_sid); memcpy(context->target_comm, t->comm, TASK_COMM_LEN); } /** * audit_signal_info - record signal info for shutting down audit subsystem * @sig: signal value * @t: task being signaled * * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. 
*/ int __audit_signal_info(int sig, struct task_struct *t) { struct audit_aux_data_pids *axp; struct task_struct *tsk = current; struct audit_context *ctx = tsk->audit_context; kuid_t uid = current_uid(), t_uid = task_uid(t); if (audit_pid && t->tgid == audit_pid) { if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { audit_sig_pid = task_pid_nr(tsk); if (uid_valid(tsk->loginuid)) audit_sig_uid = tsk->loginuid; else audit_sig_uid = uid; security_task_getsecid(tsk, &audit_sig_sid); } if (!audit_signals || audit_dummy_context()) return 0; } /* optimize the common case by putting first signal recipient directly * in audit_context */ if (!ctx->target_pid) { ctx->target_pid = task_tgid_nr(t); ctx->target_auid = audit_get_loginuid(t); ctx->target_uid = t_uid; ctx->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &ctx->target_sid); memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN); return 0; } axp = (void *)ctx->aux_pids; if (!axp || axp->pid_count == AUDIT_AUX_PIDS) { axp = kzalloc(sizeof(*axp), GFP_ATOMIC); if (!axp) return -ENOMEM; axp->d.type = AUDIT_OBJ_PID; axp->d.next = ctx->aux_pids; ctx->aux_pids = (void *)axp; } BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); axp->target_pid[axp->pid_count] = task_tgid_nr(t); axp->target_auid[axp->pid_count] = audit_get_loginuid(t); axp->target_uid[axp->pid_count] = t_uid; axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); security_task_getsecid(t, &axp->target_sid[axp->pid_count]); memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN); axp->pid_count++; return 0; } /** * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps * @bprm: pointer to the bprm being processed * @new: the proposed new credentials * @old: the old credentials * * Simply check if the proc already has the caps given by the file and if not * store the priv escalation info for later auditing at the end of the syscall * * -Eric */ int __audit_log_bprm_fcaps(struct 
linux_binprm *bprm, const struct cred *new, const struct cred *old) { struct audit_aux_data_bprm_fcaps *ax; struct audit_context *context = current->audit_context; struct cpu_vfs_cap_data vcaps; ax = kmalloc(sizeof(*ax), GFP_KERNEL); if (!ax) return -ENOMEM; ax->d.type = AUDIT_BPRM_FCAPS; ax->d.next = context->aux; context->aux = (void *)ax; get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps); ax->fcap.permitted = vcaps.permitted; ax->fcap.inheritable = vcaps.inheritable; ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; ax->old_pcap.permitted = old->cap_permitted; ax->old_pcap.inheritable = old->cap_inheritable; ax->old_pcap.effective = old->cap_effective; ax->new_pcap.permitted = new->cap_permitted; ax->new_pcap.inheritable = new->cap_inheritable; ax->new_pcap.effective = new->cap_effective; return 0; } /** * __audit_log_capset - store information about the arguments to the capset syscall * @new: the new credentials * @old: the old (current) credentials * * Record the arguments userspace sent to sys_capset for later printing by the * audit system if applicable */ void __audit_log_capset(const struct cred *new, const struct cred *old) { struct audit_context *context = current->audit_context; context->capset.pid = task_pid_nr(current); context->capset.cap.effective = new->cap_effective; context->capset.cap.inheritable = new->cap_effective; context->capset.cap.permitted = new->cap_permitted; context->type = AUDIT_CAPSET; } void __audit_mmap_fd(int fd, int flags) { struct audit_context *context = current->audit_context; context->mmap.fd = fd; context->mmap.flags = flags; context->type = AUDIT_MMAP; } static void audit_log_task(struct audit_buffer *ab) { kuid_t auid, uid; kgid_t gid; unsigned int sessionid; char comm[sizeof(current->comm)]; auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); current_uid_gid(&uid, &gid); audit_log_format(ab, 
"auid=%u uid=%u gid=%u ses=%u", from_kuid(&init_user_ns, auid), from_kuid(&init_user_ns, uid), from_kgid(&init_user_ns, gid), sessionid); audit_log_task_context(ab); audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); audit_log_untrustedstring(ab, get_task_comm(comm, current)); audit_log_d_path_exe(ab, current->mm); } /** * audit_core_dumps - record information about processes that end abnormally * @signr: signal value * * If a process ends with a core dump, something fishy is going on and we * should record the event for investigation. */ void audit_core_dumps(long signr) { struct audit_buffer *ab; if (!audit_enabled) return; if (signr == SIGQUIT) /* don't care for those */ return; ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND); if (unlikely(!ab)) return; audit_log_task(ab); audit_log_format(ab, " sig=%ld", signr); audit_log_end(ab); } void __audit_seccomp(unsigned long syscall, long signr, int code) { struct audit_buffer *ab; ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP); if (unlikely(!ab)) return; audit_log_task(ab); audit_log_format(ab, " sig=%ld arch=%x syscall=%ld compat=%d ip=0x%lx code=0x%x", signr, syscall_get_arch(), syscall, in_compat_syscall(), KSTK_EIP(current), code); audit_log_end(ab); } struct list_head *audit_killed_trees(void) { struct audit_context *ctx = current->audit_context; if (likely(!ctx || !ctx->in_syscall)) return NULL; return &ctx->killed_trees; }
static void audit_log_execve_info(struct audit_context *context, struct audit_buffer **ab) { int i, len; size_t len_sent = 0; const char __user *p; char *buf; p = (const char __user *)current->mm->arg_start; audit_log_format(*ab, "argc=%d", context->execve.argc); /* * we need some kernel buffer to hold the userspace args. Just * allocate one big one rather than allocating one of the right size * for every single argument inside audit_log_single_execve_arg() * should be <8k allocation so should be pretty safe. */ buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); if (!buf) { audit_panic("out of memory for argv string"); return; } for (i = 0; i < context->execve.argc; i++) { len = audit_log_single_execve_arg(context, ab, i, &len_sent, p, buf); if (len <= 0) break; p += len; } kfree(buf); }
static void audit_log_execve_info(struct audit_context *context, struct audit_buffer **ab) { long len_max; long len_rem; long len_full; long len_buf; long len_abuf; long len_tmp; bool require_data; bool encode; unsigned int iter; unsigned int arg; char *buf_head; char *buf; const char __user *p = (const char __user *)current->mm->arg_start; /* NOTE: this buffer needs to be large enough to hold all the non-arg * data we put in the audit record for this argument (see the * code below) ... at this point in time 96 is plenty */ char abuf[96]; /* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the * current value of 7500 is not as important as the fact that it * is less than 8k, a setting of 7500 gives us plenty of wiggle * room if we go over a little bit in the logging below */ WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500); len_max = MAX_EXECVE_AUDIT_LEN; /* scratch buffer to hold the userspace args */ buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); if (!buf_head) { audit_panic("out of memory for argv string"); return; } buf = buf_head; audit_log_format(*ab, "argc=%d", context->execve.argc); len_rem = len_max; len_buf = 0; len_full = 0; require_data = true; encode = false; iter = 0; arg = 0; do { /* NOTE: we don't ever want to trust this value for anything * serious, but the audit record format insists we * provide an argument length for really long arguments, * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but * to use strncpy_from_user() to obtain this value for * recording in the log, although we don't use it * anywhere here to avoid a double-fetch problem */ if (len_full == 0) len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1; /* read more data from userspace */ if (require_data) { /* can we make more room in the buffer? 
*/ if (buf != buf_head) { memmove(buf_head, buf, len_buf); buf = buf_head; } /* fetch as much as we can of the argument */ len_tmp = strncpy_from_user(&buf_head[len_buf], p, len_max - len_buf); if (len_tmp == -EFAULT) { /* unable to copy from userspace */ send_sig(SIGKILL, current, 0); goto out; } else if (len_tmp == (len_max - len_buf)) { /* buffer is not large enough */ require_data = true; /* NOTE: if we are going to span multiple * buffers force the encoding so we stand * a chance at a sane len_full value and * consistent record encoding */ encode = true; len_full = len_full * 2; p += len_tmp; } else { require_data = false; if (!encode) encode = audit_string_contains_control( buf, len_tmp); /* try to use a trusted value for len_full */ if (len_full < len_max) len_full = (encode ? len_tmp * 2 : len_tmp); p += len_tmp + 1; } len_buf += len_tmp; buf_head[len_buf] = '\0'; /* length of the buffer in the audit record? */ len_abuf = (encode ? len_buf * 2 : len_buf + 2); } /* write as much as we can to the audit log */ if (len_buf > 0) { /* NOTE: some magic numbers here - basically if we * can't fit a reasonable amount of data into the * existing audit buffer, flush it and start with * a new buffer */ if ((sizeof(abuf) + 8) > len_rem) { len_rem = len_max; audit_log_end(*ab); *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE); if (!*ab) goto out; } /* create the non-arg portion of the arg record */ len_tmp = 0; if (require_data || (iter > 0) || ((len_abuf + sizeof(abuf)) > len_rem)) { if (iter == 0) { len_tmp += snprintf(&abuf[len_tmp], sizeof(abuf) - len_tmp, " a%d_len=%lu", arg, len_full); } len_tmp += snprintf(&abuf[len_tmp], sizeof(abuf) - len_tmp, " a%d[%d]=", arg, iter++); } else len_tmp += snprintf(&abuf[len_tmp], sizeof(abuf) - len_tmp, " a%d=", arg); WARN_ON(len_tmp >= sizeof(abuf)); abuf[sizeof(abuf) - 1] = '\0'; /* log the arg in the audit record */ audit_log_format(*ab, "%s", abuf); len_rem -= len_tmp; len_tmp = len_buf; if (encode) { if (len_abuf > 
len_rem) len_tmp = len_rem / 2; /* encoding */ audit_log_n_hex(*ab, buf, len_tmp); len_rem -= len_tmp * 2; len_abuf -= len_tmp * 2; } else { if (len_abuf > len_rem) len_tmp = len_rem - 2; /* quotes */ audit_log_n_string(*ab, buf, len_tmp); len_rem -= len_tmp + 2; /* don't subtract the "2" because we still need * to add quotes to the remaining string */ len_abuf -= len_tmp; } len_buf -= len_tmp; buf += len_tmp; } /* ready to move to the next argument? */ if ((len_buf == 0) && !require_data) { arg++; iter = 0; len_full = 0; require_data = true; encode = false; } } while (arg < context->execve.argc); /* NOTE: the caller handles the final audit_log_end() call */ out: kfree(buf_head); }
{'added': [(76, '#include <linux/uaccess.h>'), (86, '/* no execve audit message should be longer than this (userspace limits),'), (87, ' * see the note near the top of audit_log_execve_info() about this value */'), (997, 'static void audit_log_execve_info(struct audit_context *context,'), (998, '\t\t\t\t struct audit_buffer **ab)'), (1000, '\tlong len_max;'), (1001, '\tlong len_rem;'), (1002, '\tlong len_full;'), (1003, '\tlong len_buf;'), (1004, '\tlong len_abuf;'), (1005, '\tlong len_tmp;'), (1006, '\tbool require_data;'), (1007, '\tbool encode;'), (1008, '\tunsigned int iter;'), (1009, '\tunsigned int arg;'), (1010, '\tchar *buf_head;'), (1011, '\tchar *buf;'), (1012, '\tconst char __user *p = (const char __user *)current->mm->arg_start;'), (1013, ''), (1014, '\t/* NOTE: this buffer needs to be large enough to hold all the non-arg'), (1015, '\t * data we put in the audit record for this argument (see the'), (1016, '\t * code below) ... at this point in time 96 is plenty */'), (1017, '\tchar abuf[96];'), (1018, ''), (1019, '\t/* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the'), (1020, '\t * current value of 7500 is not as important as the fact that it'), (1021, '\t * is less than 8k, a setting of 7500 gives us plenty of wiggle'), (1022, '\t * room if we go over a little bit in the logging below */'), (1023, '\tWARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);'), (1024, '\tlen_max = MAX_EXECVE_AUDIT_LEN;'), (1025, ''), (1026, '\t/* scratch buffer to hold the userspace args */'), (1027, '\tbuf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);'), (1028, '\tif (!buf_head) {'), (1029, '\t\taudit_panic("out of memory for argv string");'), (1030, '\t\treturn;'), (1032, '\tbuf = buf_head;'), (1034, '\taudit_log_format(*ab, "argc=%d", context->execve.argc);'), (1035, ''), (1036, '\tlen_rem = len_max;'), (1037, '\tlen_buf = 0;'), (1038, '\tlen_full = 0;'), (1039, '\trequire_data = true;'), (1040, '\tencode = false;'), (1041, '\titer = 0;'), (1042, '\targ = 
0;'), (1044, "\t\t/* NOTE: we don't ever want to trust this value for anything"), (1045, '\t\t * serious, but the audit record format insists we'), (1046, '\t\t * provide an argument length for really long arguments,'), (1047, '\t\t * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but'), (1048, '\t\t * to use strncpy_from_user() to obtain this value for'), (1049, "\t\t * recording in the log, although we don't use it"), (1050, '\t\t * anywhere here to avoid a double-fetch problem */'), (1051, '\t\tif (len_full == 0)'), (1052, '\t\t\tlen_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;'), (1053, ''), (1054, '\t\t/* read more data from userspace */'), (1055, '\t\tif (require_data) {'), (1056, '\t\t\t/* can we make more room in the buffer? */'), (1057, '\t\t\tif (buf != buf_head) {'), (1058, '\t\t\t\tmemmove(buf_head, buf, len_buf);'), (1059, '\t\t\t\tbuf = buf_head;'), (1060, '\t\t\t}'), (1061, ''), (1062, '\t\t\t/* fetch as much as we can of the argument */'), (1063, '\t\t\tlen_tmp = strncpy_from_user(&buf_head[len_buf], p,'), (1064, '\t\t\t\t\t\t len_max - len_buf);'), (1065, '\t\t\tif (len_tmp == -EFAULT) {'), (1066, '\t\t\t\t/* unable to copy from userspace */'), (1067, '\t\t\t\tsend_sig(SIGKILL, current, 0);'), (1068, '\t\t\t\tgoto out;'), (1069, '\t\t\t} else if (len_tmp == (len_max - len_buf)) {'), (1070, '\t\t\t\t/* buffer is not large enough */'), (1071, '\t\t\t\trequire_data = true;'), (1072, '\t\t\t\t/* NOTE: if we are going to span multiple'), (1073, '\t\t\t\t * buffers force the encoding so we stand'), (1074, '\t\t\t\t * a chance at a sane len_full value and'), (1075, '\t\t\t\t * consistent record encoding */'), (1076, '\t\t\t\tencode = true;'), (1077, '\t\t\t\tlen_full = len_full * 2;'), (1078, '\t\t\t\tp += len_tmp;'), (1079, '\t\t\t} else {'), (1080, '\t\t\t\trequire_data = false;'), (1081, '\t\t\t\tif (!encode)'), (1082, '\t\t\t\t\tencode = audit_string_contains_control('), (1083, '\t\t\t\t\t\t\t\tbuf, len_tmp);'), (1084, '\t\t\t\t/* try to use a 
trusted value for len_full */'), (1085, '\t\t\t\tif (len_full < len_max)'), (1086, '\t\t\t\t\tlen_full = (encode ?'), (1087, '\t\t\t\t\t\t len_tmp * 2 : len_tmp);'), (1088, '\t\t\t\tp += len_tmp + 1;'), (1089, '\t\t\t}'), (1090, '\t\t\tlen_buf += len_tmp;'), (1091, "\t\t\tbuf_head[len_buf] = '\\0';"), (1093, '\t\t\t/* length of the buffer in the audit record? */'), (1094, '\t\t\tlen_abuf = (encode ? len_buf * 2 : len_buf + 2);'), (1097, '\t\t/* write as much as we can to the audit log */'), (1098, '\t\tif (len_buf > 0) {'), (1099, '\t\t\t/* NOTE: some magic numbers here - basically if we'), (1100, "\t\t\t * can't fit a reasonable amount of data into the"), (1101, '\t\t\t * existing audit buffer, flush it and start with'), (1102, '\t\t\t * a new buffer */'), (1103, '\t\t\tif ((sizeof(abuf) + 8) > len_rem) {'), (1104, '\t\t\t\tlen_rem = len_max;'), (1105, '\t\t\t\taudit_log_end(*ab);'), (1106, '\t\t\t\t*ab = audit_log_start(context,'), (1107, '\t\t\t\t\t\t GFP_KERNEL, AUDIT_EXECVE);'), (1108, '\t\t\t\tif (!*ab)'), (1109, '\t\t\t\t\tgoto out;'), (1110, '\t\t\t}'), (1112, '\t\t\t/* create the non-arg portion of the arg record */'), (1113, '\t\t\tlen_tmp = 0;'), (1114, '\t\t\tif (require_data || (iter > 0) ||'), (1115, '\t\t\t ((len_abuf + sizeof(abuf)) > len_rem)) {'), (1116, '\t\t\t\tif (iter == 0) {'), (1117, '\t\t\t\t\tlen_tmp += snprintf(&abuf[len_tmp],'), (1118, '\t\t\t\t\t\t\tsizeof(abuf) - len_tmp,'), (1119, '\t\t\t\t\t\t\t" a%d_len=%lu",'), (1120, '\t\t\t\t\t\t\targ, len_full);'), (1121, '\t\t\t\t}'), (1122, '\t\t\t\tlen_tmp += snprintf(&abuf[len_tmp],'), (1123, '\t\t\t\t\t\t sizeof(abuf) - len_tmp,'), (1124, '\t\t\t\t\t\t " a%d[%d]=", arg, iter++);'), (1125, '\t\t\t} else'), (1126, '\t\t\t\tlen_tmp += snprintf(&abuf[len_tmp],'), (1127, '\t\t\t\t\t\t sizeof(abuf) - len_tmp,'), (1128, '\t\t\t\t\t\t " a%d=", arg);'), (1129, '\t\t\tWARN_ON(len_tmp >= sizeof(abuf));'), (1130, "\t\t\tabuf[sizeof(abuf) - 1] = '\\0';"), (1131, ''), (1132, '\t\t\t/* log the arg in the 
audit record */'), (1133, '\t\t\taudit_log_format(*ab, "%s", abuf);'), (1134, '\t\t\tlen_rem -= len_tmp;'), (1135, '\t\t\tlen_tmp = len_buf;'), (1136, '\t\t\tif (encode) {'), (1137, '\t\t\t\tif (len_abuf > len_rem)'), (1138, '\t\t\t\t\tlen_tmp = len_rem / 2; /* encoding */'), (1139, '\t\t\t\taudit_log_n_hex(*ab, buf, len_tmp);'), (1140, '\t\t\t\tlen_rem -= len_tmp * 2;'), (1141, '\t\t\t\tlen_abuf -= len_tmp * 2;'), (1142, '\t\t\t} else {'), (1143, '\t\t\t\tif (len_abuf > len_rem)'), (1144, '\t\t\t\t\tlen_tmp = len_rem - 2; /* quotes */'), (1145, '\t\t\t\taudit_log_n_string(*ab, buf, len_tmp);'), (1146, '\t\t\t\tlen_rem -= len_tmp + 2;'), (1147, '\t\t\t\t/* don\'t subtract the "2" because we still need'), (1148, '\t\t\t\t * to add quotes to the remaining string */'), (1149, '\t\t\t\tlen_abuf -= len_tmp;'), (1150, '\t\t\t}'), (1151, '\t\t\tlen_buf -= len_tmp;'), (1152, '\t\t\tbuf += len_tmp;'), (1153, '\t\t}'), (1155, '\t\t/* ready to move to the next argument? */'), (1156, '\t\tif ((len_buf == 0) && !require_data) {'), (1157, '\t\t\targ++;'), (1158, '\t\t\titer = 0;'), (1159, '\t\t\tlen_full = 0;'), (1160, '\t\t\trequire_data = true;'), (1161, '\t\t\tencode = false;'), (1162, '\t\t}'), (1163, '\t} while (arg < context->execve.argc);'), (1165, '\t/* NOTE: the caller handles the final audit_log_end() call */'), (1167, 'out:'), (1168, '\tkfree(buf_head);')], 'deleted': [(85, '/* no execve audit message should be longer than this (userspace limits) */'), (995, '/*'), (996, " * to_send and len_sent accounting are very loose estimates. We aren't"), (997, ' * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being'), (998, ' * within about 500 bytes (next page boundary)'), (999, ' *'), (1000, ' * why snprintf? an int is up to 12 digits long. if we just assumed when'), (1001, ' * logging that a[%d]= was going to be 16 characters long we would be wasting'), (1002, ' * space in every audit message. 
In one 7500 byte message we can log up to'), (1003, ' * about 1000 min size arguments. That comes down to about 50% waste of space'), (1004, " * if we didn't do the snprintf to find out how long arg_num_len was."), (1005, ' */'), (1006, 'static int audit_log_single_execve_arg(struct audit_context *context,'), (1007, '\t\t\t\t\tstruct audit_buffer **ab,'), (1008, '\t\t\t\t\tint arg_num,'), (1009, '\t\t\t\t\tsize_t *len_sent,'), (1010, '\t\t\t\t\tconst char __user *p,'), (1011, '\t\t\t\t\tchar *buf)'), (1013, '\tchar arg_num_len_buf[12];'), (1014, '\tconst char __user *tmp_p = p;'), (1015, '\t/* how many digits are in arg_num? 5 is the length of \' a=""\' */'), (1016, '\tsize_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;'), (1017, '\tsize_t len, len_left, to_send;'), (1018, '\tsize_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;'), (1019, '\tunsigned int i, has_cntl = 0, too_long = 0;'), (1020, '\tint ret;'), (1021, ''), (1022, "\t/* strnlen_user includes the null we don't want to send */"), (1023, '\tlen_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;'), (1024, ''), (1025, '\t/*'), (1026, "\t * We just created this mm, if we can't find the strings"), (1027, '\t * we just copied into it something is _very_ wrong. Similar'), (1028, '\t * for strings that are too long, we should not have created'), (1029, '\t * any.'), (1030, '\t */'), (1031, '\tif (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {'), (1032, '\t\tsend_sig(SIGKILL, current, 0);'), (1033, '\t\treturn -1;'), (1036, '\t/* walk the whole argument looking for non-ascii chars */'), (1038, '\t\tif (len_left > MAX_EXECVE_AUDIT_LEN)'), (1039, '\t\t\tto_send = MAX_EXECVE_AUDIT_LEN;'), (1040, '\t\telse'), (1041, '\t\t\tto_send = len_left;'), (1042, '\t\tret = copy_from_user(buf, tmp_p, to_send);'), (1043, '\t\t/*'), (1044, '\t\t * There is no reason for this copy to be short. 
We just'), (1045, "\t\t * copied them here, and the mm hasn't been exposed to user-"), (1046, '\t\t * space yet.'), (1047, '\t\t */'), (1048, '\t\tif (ret) {'), (1049, '\t\t\tWARN_ON(1);'), (1050, '\t\t\tsend_sig(SIGKILL, current, 0);'), (1051, '\t\t\treturn -1;'), (1052, '\t\t}'), (1053, "\t\tbuf[to_send] = '\\0';"), (1054, '\t\thas_cntl = audit_string_contains_control(buf, to_send);'), (1055, '\t\tif (has_cntl) {'), (1056, '\t\t\t/*'), (1057, '\t\t\t * hex messages get logged as 2 bytes, so we can only'), (1058, '\t\t\t * send half as much in each message'), (1059, '\t\t\t */'), (1060, '\t\t\tmax_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;'), (1061, '\t\t\tbreak;'), (1062, '\t\t}'), (1063, '\t\tlen_left -= to_send;'), (1064, '\t\ttmp_p += to_send;'), (1065, '\t} while (len_left > 0);'), (1066, ''), (1067, '\tlen_left = len;'), (1068, ''), (1069, '\tif (len > max_execve_audit_len)'), (1070, '\t\ttoo_long = 1;'), (1071, ''), (1072, '\t/* rewalk the argument actually logging the message */'), (1073, '\tfor (i = 0; len_left > 0; i++) {'), (1074, '\t\tint room_left;'), (1075, ''), (1076, '\t\tif (len_left > max_execve_audit_len)'), (1077, '\t\t\tto_send = max_execve_audit_len;'), (1078, '\t\telse'), (1079, '\t\t\tto_send = len_left;'), (1080, ''), (1081, '\t\t/* do we have space left to send this argument in this ab? 
*/'), (1082, '\t\troom_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;'), (1083, '\t\tif (has_cntl)'), (1084, '\t\t\troom_left -= (to_send * 2);'), (1085, '\t\telse'), (1086, '\t\t\troom_left -= to_send;'), (1087, '\t\tif (room_left < 0) {'), (1088, '\t\t\t*len_sent = 0;'), (1089, '\t\t\taudit_log_end(*ab);'), (1090, '\t\t\t*ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);'), (1091, '\t\t\tif (!*ab)'), (1092, '\t\t\t\treturn 0;'), (1093, '\t\t}'), (1095, '\t\t/*'), (1096, '\t\t * first record needs to say how long the original string was'), (1097, '\t\t * so we can be sure nothing was lost.'), (1098, '\t\t */'), (1099, '\t\tif ((i == 0) && (too_long))'), (1100, '\t\t\taudit_log_format(*ab, " a%d_len=%zu", arg_num,'), (1101, '\t\t\t\t\t has_cntl ? 2*len : len);'), (1102, ''), (1103, '\t\t/*'), (1104, '\t\t * normally arguments are small enough to fit and we already'), (1105, '\t\t * filled buf above when we checked for control characters'), (1106, "\t\t * so don't bother with another copy_from_user"), (1107, '\t\t */'), (1108, '\t\tif (len >= max_execve_audit_len)'), (1109, '\t\t\tret = copy_from_user(buf, p, to_send);'), (1110, '\t\telse'), (1111, '\t\t\tret = 0;'), (1112, '\t\tif (ret) {'), (1113, '\t\t\tWARN_ON(1);'), (1114, '\t\t\tsend_sig(SIGKILL, current, 0);'), (1115, '\t\t\treturn -1;'), (1117, "\t\tbuf[to_send] = '\\0';"), (1118, ''), (1119, '\t\t/* actually log it */'), (1120, '\t\taudit_log_format(*ab, " a%d", arg_num);'), (1121, '\t\tif (too_long)'), (1122, '\t\t\taudit_log_format(*ab, "[%d]", i);'), (1123, '\t\taudit_log_format(*ab, "=");'), (1124, '\t\tif (has_cntl)'), (1125, '\t\t\taudit_log_n_hex(*ab, buf, to_send);'), (1126, '\t\telse'), (1127, '\t\t\taudit_log_string(*ab, buf);'), (1128, ''), (1129, '\t\tp += to_send;'), (1130, '\t\tlen_left -= to_send;'), (1131, '\t\t*len_sent += arg_num_len;'), (1132, '\t\tif (has_cntl)'), (1133, '\t\t\t*len_sent += to_send * 2;'), (1134, '\t\telse'), (1135, '\t\t\t*len_sent += to_send;'), (1136, 
'\t}'), (1137, "\t/* include the null we didn't log */"), (1138, '\treturn len + 1;'), (1139, '}'), (1141, 'static void audit_log_execve_info(struct audit_context *context,'), (1142, '\t\t\t\t struct audit_buffer **ab)'), (1143, '{'), (1144, '\tint i, len;'), (1145, '\tsize_t len_sent = 0;'), (1146, '\tconst char __user *p;'), (1147, '\tchar *buf;'), (1149, '\tp = (const char __user *)current->mm->arg_start;'), (1151, '\taudit_log_format(*ab, "argc=%d", context->execve.argc);'), (1153, '\t/*'), (1154, '\t * we need some kernel buffer to hold the userspace args. Just'), (1155, '\t * allocate one big one rather than allocating one of the right size'), (1156, '\t * for every single argument inside audit_log_single_execve_arg()'), (1157, '\t * should be <8k allocation so should be pretty safe.'), (1158, '\t */'), (1159, '\tbuf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);'), (1160, '\tif (!buf) {'), (1161, '\t\taudit_panic("out of memory for argv string");'), (1162, '\t\treturn;'), (1163, '\t}'), (1165, '\tfor (i = 0; i < context->execve.argc; i++) {'), (1166, '\t\tlen = audit_log_single_execve_arg(context, ab, i,'), (1167, '\t\t\t\t\t\t &len_sent, p, buf);'), (1168, '\t\tif (len <= 0)'), (1169, '\t\t\tbreak;'), (1170, '\t\tp += len;'), (1171, '\t}'), (1172, '\tkfree(buf);')]}
164
168
1,786
10,930
23
141
4
https://github.com/torvalds/linux
CVE-2016-6136
CWE-362
3,239
nsvdec.c
C
nsv_read_chunk
/* * NSV demuxer * Copyright (c) 2004 The Libav Project * * first version by Francois Revol <revol@free.fr> * * This file is part of Libav. * * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/attributes.h" #include "libavutil/mathematics.h" #include "avformat.h" #include "internal.h" #include "libavutil/dict.h" /* max bytes to crawl for trying to resync * stupid streaming servers don't start at chunk boundaries... */ #define NSV_MAX_RESYNC (500*1024) #define NSV_MAX_RESYNC_TRIES 300 /* * References: * (1) http://www.multimedia.cx/nsv-format.txt * seems someone came to the same conclusions as me, and updated it: * (2) http://www.stud.ktu.lt/~vitslav/nsv/nsv-format.txt * http://www.stud.ktu.lt/~vitslav/nsv/ * official docs * (3) http://ultravox.aol.com/NSVFormat.rtf * Sample files: * (S1) http://www.nullsoft.com/nsv/samples/ * http://www.nullsoft.com/nsv/samples/faster.nsv * http://streamripper.sourceforge.net/openbb/read.php?TID=492&page=4 */ /* * notes on the header (Francois Revol): * * It is followed by strings, then a table, but nothing tells * where the table begins according to (1). After checking faster.nsv, * I believe NVSf[16-19] gives the size of the strings data * (that is the offset of the data table after the header). * After checking all samples from (S1) all confirms this. 
* * Then, about NSVf[12-15], faster.nsf has 179700. When viewing it in VLC, * I noticed there was about 1 NVSs chunk/s, so I ran * strings faster.nsv | grep NSVs | wc -l * which gave me 180. That leads me to think that NSVf[12-15] might be the * file length in milliseconds. * Let's try that: * for f in *.nsv; do HTIME="$(od -t x4 "$f" | head -1 | sed 's/.* //')"; echo "'$f' $((0x$HTIME))s = $((0x$HTIME/1000/60)):$((0x$HTIME/1000%60))"; done * except for nsvtrailer (which doesn't have an NSVf header), it reports correct time. * * nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks, * so the header seems to not be mandatory. (for streaming). * * index slice duration check (excepts nsvtrailer.nsv): * for f in [^n]*.nsv; do * DUR="$(avconv -i "$f" 2> /dev/null | grep 'NSVf duration' | cut -d ' ' -f 4)" * IC="$(avconv -i "$f" 2> /dev/null | grep 'INDEX ENTRIES' | cut -d ' ' -f 2)" * echo "duration $DUR, slite time $(($DUR/$IC))" * done */ /* * TODO: * - handle timestamps !!! * - use index * - mime-type in probe() * - seek */ #if 0 struct NSVf_header { uint32_t chunk_tag; /* 'NSVf' */ uint32_t chunk_size; uint32_t file_size; /* max 4GB ??? no one learns anything it seems :^) */ uint32_t file_length; //unknown1; /* what about MSB of file_size ? 
*/ uint32_t info_strings_size; /* size of the info strings */ //unknown2; uint32_t table_entries; uint32_t table_entries_used; /* the left ones should be -1 */ }; struct NSVs_header { uint32_t chunk_tag; /* 'NSVs' */ uint32_t v4cc; /* or 'NONE' */ uint32_t a4cc; /* or 'NONE' */ uint16_t vwidth; /* assert(vwidth%16==0) */ uint16_t vheight; /* assert(vheight%16==0) */ uint8_t framerate; /* value = (framerate&0x80)?frtable[frameratex0x7f]:framerate */ uint16_t unknown; }; struct nsv_avchunk_header { uint8_t vchunk_size_lsb; uint16_t vchunk_size_msb; /* value = (vchunk_size_msb << 4) | (vchunk_size_lsb >> 4) */ uint16_t achunk_size; }; struct nsv_pcm_header { uint8_t bits_per_sample; uint8_t channel_count; uint16_t sample_rate; }; #endif /* variation from avi.h */ /*typedef struct CodecTag { int id; unsigned int tag; } CodecTag;*/ /* tags */ #define T_NSVF MKTAG('N', 'S', 'V', 'f') /* file header */ #define T_NSVS MKTAG('N', 'S', 'V', 's') /* chunk header */ #define T_TOC2 MKTAG('T', 'O', 'C', '2') /* extra index marker */ #define T_NONE MKTAG('N', 'O', 'N', 'E') /* null a/v 4CC */ #define T_SUBT MKTAG('S', 'U', 'B', 'T') /* subtitle aux data */ #define T_ASYN MKTAG('A', 'S', 'Y', 'N') /* async a/v aux marker */ #define T_KEYF MKTAG('K', 'E', 'Y', 'F') /* video keyframe aux marker (addition) */ #define TB_NSVF MKBETAG('N', 'S', 'V', 'f') #define TB_NSVS MKBETAG('N', 'S', 'V', 's') /* hardcoded stream indexes */ #define NSV_ST_VIDEO 0 #define NSV_ST_AUDIO 1 #define NSV_ST_SUBT 2 enum NSVStatus { NSV_UNSYNC, NSV_FOUND_NSVF, NSV_HAS_READ_NSVF, NSV_FOUND_NSVS, NSV_HAS_READ_NSVS, NSV_FOUND_BEEF, NSV_GOT_VIDEO, NSV_GOT_AUDIO, }; typedef struct NSVStream { int frame_offset; /* current frame (video) or byte (audio) counter (used to compute the pts) */ int scale; int rate; int sample_size; /* audio only data */ int start; int new_frame_offset; /* temporary storage (used during seek) */ int cum_len; /* temporary storage (used during seek) */ } NSVStream; typedef struct 
NSVContext { int base_offset; int NSVf_end; uint32_t *nsvs_file_offset; int index_entries; enum NSVStatus state; AVPacket ahead[2]; /* [v, a] if .data is !NULL there is something */ /* cached */ int64_t duration; uint32_t vtag, atag; uint16_t vwidth, vheight; int16_t avsync; AVRational framerate; uint32_t *nsvs_timestamps; } NSVContext; static const AVCodecTag nsv_codec_video_tags[] = { { AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', ' ') }, { AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') }, { AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, { AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', ' ') }, { AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') }, /* { AV_CODEC_ID_VP4, MKTAG('V', 'P', '4', ' ') }, { AV_CODEC_ID_VP4, MKTAG('V', 'P', '4', '0') }, */ { AV_CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') }, /* cf sample xvid decoder from nsv_codec_sdk.zip */ { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', '3') }, { AV_CODEC_ID_NONE, 0 }, }; static const AVCodecTag nsv_codec_audio_tags[] = { { AV_CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') }, { AV_CODEC_ID_AAC, MKTAG('A', 'A', 'C', ' ') }, { AV_CODEC_ID_AAC, MKTAG('A', 'A', 'C', 'P') }, { AV_CODEC_ID_SPEEX, MKTAG('S', 'P', 'X', ' ') }, { AV_CODEC_ID_PCM_U16LE, MKTAG('P', 'C', 'M', ' ') }, { AV_CODEC_ID_NONE, 0 }, }; //static int nsv_load_index(AVFormatContext *s); static int nsv_read_chunk(AVFormatContext *s, int fill_header); /* try to find something we recognize, and set the state accordingly */ static int nsv_resync(AVFormatContext *s) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; uint32_t v = 0; int i; for (i = 0; i < NSV_MAX_RESYNC; i++) { if (pb->eof_reached) { av_log(s, AV_LOG_TRACE, "NSV EOF\n"); nsv->state = NSV_UNSYNC; return -1; } v <<= 8; v |= avio_r8(pb); if (i < 8) { av_log(s, AV_LOG_TRACE, "NSV resync: [%d] = %02"PRIx32"\n", i, v & 0x0FF); } 
if ((v & 0x0000ffff) == 0xefbe) { /* BEEF */ av_log(s, AV_LOG_TRACE, "NSV resynced on BEEF after %d bytes\n", i+1); nsv->state = NSV_FOUND_BEEF; return 0; } /* we read as big-endian, thus the MK*BE* */ if (v == TB_NSVF) { /* NSVf */ av_log(s, AV_LOG_TRACE, "NSV resynced on NSVf after %d bytes\n", i+1); nsv->state = NSV_FOUND_NSVF; return 0; } if (v == MKBETAG('N', 'S', 'V', 's')) { /* NSVs */ av_log(s, AV_LOG_TRACE, "NSV resynced on NSVs after %d bytes\n", i+1); nsv->state = NSV_FOUND_NSVS; return 0; } } av_log(s, AV_LOG_TRACE, "NSV sync lost\n"); return -1; } static int nsv_parse_NSVf_header(AVFormatContext *s) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; unsigned int av_unused file_size; unsigned int size; int64_t duration; int strings_size; int table_entries; int table_entries_used; nsv->state = NSV_UNSYNC; /* in case we fail */ size = avio_rl32(pb); if (size < 28) return -1; nsv->NSVf_end = size; file_size = (uint32_t)avio_rl32(pb); av_log(s, AV_LOG_TRACE, "NSV NSVf chunk_size %u\n", size); av_log(s, AV_LOG_TRACE, "NSV NSVf file_size %u\n", file_size); nsv->duration = duration = avio_rl32(pb); /* in ms */ av_log(s, AV_LOG_TRACE, "NSV NSVf duration %"PRId64" ms\n", duration); // XXX: store it in AVStreams strings_size = avio_rl32(pb); table_entries = avio_rl32(pb); table_entries_used = avio_rl32(pb); av_log(s, AV_LOG_TRACE, "NSV NSVf info-strings size: %d, table entries: %d, bis %d\n", strings_size, table_entries, table_entries_used); if (pb->eof_reached) return -1; av_log(s, AV_LOG_TRACE, "NSV got header; filepos %"PRId64"\n", avio_tell(pb)); if (strings_size > 0) { char *strings; /* last byte will be '\0' to play safe with str*() */ char *p, *endp; char *token, *value; char quote; p = strings = av_mallocz((size_t)strings_size + 1); if (!p) return AVERROR(ENOMEM); endp = strings + strings_size; avio_read(pb, strings, strings_size); while (p < endp) { while (*p == ' ') p++; /* strip out spaces */ if (p >= endp-2) break; token = p; p = strchr(p, 
'='); if (!p || p >= endp-2) break; *p++ = '\0'; quote = *p++; value = p; p = strchr(p, quote); if (!p || p >= endp) break; *p++ = '\0'; av_log(s, AV_LOG_TRACE, "NSV NSVf INFO: %s='%s'\n", token, value); av_dict_set(&s->metadata, token, value, 0); } av_free(strings); } if (pb->eof_reached) return -1; av_log(s, AV_LOG_TRACE, "NSV got infos; filepos %"PRId64"\n", avio_tell(pb)); if (table_entries_used > 0) { int i; nsv->index_entries = table_entries_used; if((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t)) return -1; nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t)); if (!nsv->nsvs_file_offset) return AVERROR(ENOMEM); for(i=0;i<table_entries_used;i++) nsv->nsvs_file_offset[i] = avio_rl32(pb) + size; if(table_entries > table_entries_used && avio_rl32(pb) == MKTAG('T','O','C','2')) { nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t)); if (!nsv->nsvs_timestamps) return AVERROR(ENOMEM); for(i=0;i<table_entries_used;i++) { nsv->nsvs_timestamps[i] = avio_rl32(pb); } } } av_log(s, AV_LOG_TRACE, "NSV got index; filepos %"PRId64"\n", avio_tell(pb)); avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */ if (pb->eof_reached) return -1; nsv->state = NSV_HAS_READ_NSVF; return 0; } static int nsv_parse_NSVs_header(AVFormatContext *s) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; uint32_t vtag, atag; uint16_t vwidth, vheight; AVRational framerate; int i; AVStream *st; NSVStream *nst; vtag = avio_rl32(pb); atag = avio_rl32(pb); vwidth = avio_rl16(pb); vheight = avio_rl16(pb); i = avio_r8(pb); av_log(s, AV_LOG_TRACE, "NSV NSVs framerate code %2x\n", i); if(i&0x80) { /* odd way of giving native framerates from docs */ int t=(i & 0x7F)>>2; if(t<16) framerate = (AVRational){1, t+1}; else framerate = (AVRational){t-15, 1}; if(i&1){ framerate.num *= 1000; framerate.den *= 1001; } if((i&3)==3) framerate.num *= 24; else if((i&3)==2) framerate.num *= 25; 
else framerate.num *= 30; } else framerate= (AVRational){i, 1}; nsv->avsync = avio_rl16(pb); nsv->framerate = framerate; av_log(s, AV_LOG_TRACE, "NSV NSVs vsize %"PRIu16"x%"PRIu16"\n", vwidth, vheight); /* XXX change to ap != NULL ? */ if (s->nb_streams == 0) { /* streams not yet published, let's do that */ nsv->vtag = vtag; nsv->atag = atag; nsv->vwidth = vwidth; nsv->vheight = vwidth; if (vtag != T_NONE) { int i; st = avformat_new_stream(s, NULL); if (!st) goto fail; st->id = NSV_ST_VIDEO; nst = av_mallocz(sizeof(NSVStream)); if (!nst) goto fail; st->priv_data = nst; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_tag = vtag; st->codecpar->codec_id = ff_codec_get_id(nsv_codec_video_tags, vtag); st->codecpar->width = vwidth; st->codecpar->height = vheight; st->codecpar->bits_per_coded_sample = 24; /* depth XXX */ avpriv_set_pts_info(st, 64, framerate.den, framerate.num); st->start_time = 0; st->duration = av_rescale(nsv->duration, framerate.num, 1000*framerate.den); for(i=0;i<nsv->index_entries;i++) { if(nsv->nsvs_timestamps) { av_add_index_entry(st, nsv->nsvs_file_offset[i], nsv->nsvs_timestamps[i], 0, 0, AVINDEX_KEYFRAME); } else { int64_t ts = av_rescale(i*nsv->duration/nsv->index_entries, framerate.num, 1000*framerate.den); av_add_index_entry(st, nsv->nsvs_file_offset[i], ts, 0, 0, AVINDEX_KEYFRAME); } } } if (atag != T_NONE) { st = avformat_new_stream(s, NULL); if (!st) goto fail; st->id = NSV_ST_AUDIO; nst = av_mallocz(sizeof(NSVStream)); if (!nst) goto fail; st->priv_data = nst; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_tag = atag; st->codecpar->codec_id = ff_codec_get_id(nsv_codec_audio_tags, atag); st->need_parsing = AVSTREAM_PARSE_FULL; /* for PCM we will read a chunk later and put correct info */ /* set timebase to common denominator of ms and framerate */ avpriv_set_pts_info(st, 64, 1, framerate.num*1000); st->start_time = 0; st->duration = (int64_t)nsv->duration * framerate.num; } } else { if (nsv->vtag != 
vtag || nsv->atag != atag || nsv->vwidth != vwidth || nsv->vheight != vwidth) { av_log(s, AV_LOG_TRACE, "NSV NSVs header values differ from the first one!!!\n"); //return -1; } } nsv->state = NSV_HAS_READ_NSVS; return 0; fail: /* XXX */ nsv->state = NSV_UNSYNC; return -1; } static int nsv_read_header(AVFormatContext *s) { NSVContext *nsv = s->priv_data; int i, err; nsv->state = NSV_UNSYNC; nsv->ahead[0].data = nsv->ahead[1].data = NULL; for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) { if (nsv_resync(s) < 0) return -1; if (nsv->state == NSV_FOUND_NSVF) { err = nsv_parse_NSVf_header(s); if (err < 0) return err; } /* we need the first NSVs also... */ if (nsv->state == NSV_FOUND_NSVS) { err = nsv_parse_NSVs_header(s); if (err < 0) return err; break; /* we just want the first one */ } } if (s->nb_streams < 1) /* no luck so far */ return -1; /* now read the first chunk, so we can attempt to decode more info */ err = nsv_read_chunk(s, 1); av_log(s, AV_LOG_TRACE, "parsed header\n"); return err; } static int nsv_read_chunk(AVFormatContext *s, int fill_header) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; AVStream *st[2] = {NULL, NULL}; NSVStream *nst; AVPacket *pkt; int i, err = 0; uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */ uint32_t vsize; uint16_t asize; uint16_t auxsize; if (nsv->ahead[0].data || nsv->ahead[1].data) return 0; //-1; /* hey! eat what you've in your plate first! 
*/ null_chunk_retry: if (pb->eof_reached) return -1; for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++) err = nsv_resync(s); if (err < 0) return err; if (nsv->state == NSV_FOUND_NSVS) err = nsv_parse_NSVs_header(s); if (err < 0) return err; if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF) return -1; auxcount = avio_r8(pb); vsize = avio_rl16(pb); asize = avio_rl16(pb); vsize = (vsize << 4) | (auxcount >> 4); auxcount &= 0x0f; av_log(s, AV_LOG_TRACE, "NSV CHUNK %"PRIu8" aux, %"PRIu32" bytes video, %"PRIu16" bytes audio\n", auxcount, vsize, asize); /* skip aux stuff */ for (i = 0; i < auxcount; i++) { uint32_t av_unused auxtag; auxsize = avio_rl16(pb); auxtag = avio_rl32(pb); avio_skip(pb, auxsize); vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming brain-dead */ } if (pb->eof_reached) return -1; if (!vsize && !asize) { nsv->state = NSV_UNSYNC; goto null_chunk_retry; } /* map back streams to v,a */ if (s->nb_streams > 0) st[s->streams[0]->id] = s->streams[0]; if (s->nb_streams > 1) st[s->streams[1]->id] = s->streams[1]; if (vsize && st[NSV_ST_VIDEO]) { nst = st[NSV_ST_VIDEO]->priv_data; pkt = &nsv->ahead[NSV_ST_VIDEO]; av_get_packet(pb, pkt, vsize); pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO; pkt->dts = nst->frame_offset; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ for (i = 0; i < FFMIN(8, vsize); i++) av_log(s, AV_LOG_TRACE, "NSV video: [%d] = %02"PRIx8"\n", i, pkt->data[i]); } if(st[NSV_ST_VIDEO]) ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++; if (asize && st[NSV_ST_AUDIO]) { nst = st[NSV_ST_AUDIO]->priv_data; pkt = &nsv->ahead[NSV_ST_AUDIO]; /* read raw audio specific header on the first audio chunk... */ /* on ALL audio chunks ?? seems so! 
*/ if (asize && st[NSV_ST_AUDIO]->codecpar->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { uint8_t bps; uint8_t channels; uint16_t samplerate; bps = avio_r8(pb); channels = avio_r8(pb); samplerate = avio_rl16(pb); if (!channels || !samplerate) return AVERROR_INVALIDDATA; asize-=4; av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n", bps, channels, samplerate); if (fill_header) { st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */ if (bps != 16) { av_log(s, AV_LOG_TRACE, "NSV AUDIO bit/sample != 16 (%"PRIu8")!!!\n", bps); } bps /= channels; // ??? if (bps == 8) st[NSV_ST_AUDIO]->codecpar->codec_id = AV_CODEC_ID_PCM_U8; samplerate /= 4;/* UGH ??? XXX */ channels = 1; st[NSV_ST_AUDIO]->codecpar->channels = channels; st[NSV_ST_AUDIO]->codecpar->sample_rate = samplerate; av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n", bps, channels, samplerate); } } av_get_packet(pb, pkt, asize); pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) { /* on a nsvs frame we have new information on a/v sync */ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1); pkt->dts *= (int64_t)1000 * nsv->framerate.den; pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num; av_log(s, AV_LOG_TRACE, "NSV AUDIO: sync:%"PRId16", dts:%"PRId64, nsv->avsync, pkt->dts); } nst->frame_offset++; } nsv->state = NSV_UNSYNC; return 0; } static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt) { NSVContext *nsv = s->priv_data; int i, err = 0; /* in case we don't already have something to eat ... 
*/ if (!nsv->ahead[0].data && !nsv->ahead[1].data) err = nsv_read_chunk(s, 0); if (err < 0) return err; /* now pick one of the plates */ for (i = 0; i < 2; i++) { if (nsv->ahead[i].data) { /* avoid the cost of new_packet + memcpy(->data) */ memcpy(pkt, &nsv->ahead[i], sizeof(AVPacket)); nsv->ahead[i].data = NULL; /* we ate that one */ return pkt->size; } } /* this restaurant is not provisioned :^] */ return -1; } static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { NSVContext *nsv = s->priv_data; AVStream *st = s->streams[stream_index]; NSVStream *nst = st->priv_data; int index; index = av_index_search_timestamp(st, timestamp, flags); if(index < 0) return -1; if (avio_seek(s->pb, st->index_entries[index].pos, SEEK_SET) < 0) return -1; nst->frame_offset = st->index_entries[index].timestamp; nsv->state = NSV_UNSYNC; return 0; } static int nsv_read_close(AVFormatContext *s) { NSVContext *nsv = s->priv_data; av_freep(&nsv->nsvs_file_offset); av_freep(&nsv->nsvs_timestamps); if (nsv->ahead[0].data) av_packet_unref(&nsv->ahead[0]); if (nsv->ahead[1].data) av_packet_unref(&nsv->ahead[1]); return 0; } static int nsv_probe(AVProbeData *p) { int i; int score; int vsize, asize, auxcount; score = 0; av_log(NULL, AV_LOG_TRACE, "nsv_probe(), buf_size %d\n", p->buf_size); /* check file header */ /* streamed files might not have any header */ if (p->buf[0] == 'N' && p->buf[1] == 'S' && p->buf[2] == 'V' && (p->buf[3] == 'f' || p->buf[3] == 's')) return AVPROBE_SCORE_MAX; /* XXX: do streamed files always start at chunk boundary ?? */ /* or do we need to search NSVs in the byte stream ? */ /* seems the servers don't bother starting clean chunks... 
*/ /* sometimes even the first header is at 9KB or something :^) */ for (i = 1; i < p->buf_size - 3; i++) { if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' && p->buf[i+2] == 'V' && p->buf[i+3] == 's') { score = AVPROBE_SCORE_MAX/5; /* Get the chunk size and check if at the end we are getting 0xBEEF */ auxcount = p->buf[i+19]; vsize = p->buf[i+20] | p->buf[i+21] << 8; asize = p->buf[i+22] | p->buf[i+23] << 8; vsize = (vsize << 4) | (auxcount >> 4); if ((asize + vsize + i + 23) < p->buf_size - 2) { if (p->buf[i+23+asize+vsize+1] == 0xEF && p->buf[i+23+asize+vsize+2] == 0xBE) return AVPROBE_SCORE_MAX-20; } } } /* so we'll have more luck on extension... */ if (av_match_ext(p->filename, "nsv")) return AVPROBE_SCORE_EXTENSION; /* FIXME: add mime-type check */ return score; } AVInputFormat ff_nsv_demuxer = { .name = "nsv", .long_name = NULL_IF_CONFIG_SMALL("Nullsoft Streaming Video"), .priv_data_size = sizeof(NSVContext), .read_probe = nsv_probe, .read_header = nsv_read_header, .read_packet = nsv_read_packet, .read_close = nsv_read_close, .read_seek = nsv_read_seek, };
/* * NSV demuxer * Copyright (c) 2004 The Libav Project * * first version by Francois Revol <revol@free.fr> * * This file is part of Libav. * * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/attributes.h" #include "libavutil/mathematics.h" #include "avformat.h" #include "internal.h" #include "libavutil/dict.h" /* max bytes to crawl for trying to resync * stupid streaming servers don't start at chunk boundaries... */ #define NSV_MAX_RESYNC (500*1024) #define NSV_MAX_RESYNC_TRIES 300 /* * References: * (1) http://www.multimedia.cx/nsv-format.txt * seems someone came to the same conclusions as me, and updated it: * (2) http://www.stud.ktu.lt/~vitslav/nsv/nsv-format.txt * http://www.stud.ktu.lt/~vitslav/nsv/ * official docs * (3) http://ultravox.aol.com/NSVFormat.rtf * Sample files: * (S1) http://www.nullsoft.com/nsv/samples/ * http://www.nullsoft.com/nsv/samples/faster.nsv * http://streamripper.sourceforge.net/openbb/read.php?TID=492&page=4 */ /* * notes on the header (Francois Revol): * * It is followed by strings, then a table, but nothing tells * where the table begins according to (1). After checking faster.nsv, * I believe NVSf[16-19] gives the size of the strings data * (that is the offset of the data table after the header). * After checking all samples from (S1) all confirms this. 
* * Then, about NSVf[12-15], faster.nsf has 179700. When viewing it in VLC, * I noticed there was about 1 NVSs chunk/s, so I ran * strings faster.nsv | grep NSVs | wc -l * which gave me 180. That leads me to think that NSVf[12-15] might be the * file length in milliseconds. * Let's try that: * for f in *.nsv; do HTIME="$(od -t x4 "$f" | head -1 | sed 's/.* //')"; echo "'$f' $((0x$HTIME))s = $((0x$HTIME/1000/60)):$((0x$HTIME/1000%60))"; done * except for nsvtrailer (which doesn't have an NSVf header), it reports correct time. * * nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks, * so the header seems to not be mandatory. (for streaming). * * index slice duration check (excepts nsvtrailer.nsv): * for f in [^n]*.nsv; do * DUR="$(avconv -i "$f" 2> /dev/null | grep 'NSVf duration' | cut -d ' ' -f 4)" * IC="$(avconv -i "$f" 2> /dev/null | grep 'INDEX ENTRIES' | cut -d ' ' -f 2)" * echo "duration $DUR, slite time $(($DUR/$IC))" * done */ /* * TODO: * - handle timestamps !!! * - use index * - mime-type in probe() * - seek */ #if 0 struct NSVf_header { uint32_t chunk_tag; /* 'NSVf' */ uint32_t chunk_size; uint32_t file_size; /* max 4GB ??? no one learns anything it seems :^) */ uint32_t file_length; //unknown1; /* what about MSB of file_size ? 
*/ uint32_t info_strings_size; /* size of the info strings */ //unknown2; uint32_t table_entries; uint32_t table_entries_used; /* the left ones should be -1 */ }; struct NSVs_header { uint32_t chunk_tag; /* 'NSVs' */ uint32_t v4cc; /* or 'NONE' */ uint32_t a4cc; /* or 'NONE' */ uint16_t vwidth; /* assert(vwidth%16==0) */ uint16_t vheight; /* assert(vheight%16==0) */ uint8_t framerate; /* value = (framerate&0x80)?frtable[frameratex0x7f]:framerate */ uint16_t unknown; }; struct nsv_avchunk_header { uint8_t vchunk_size_lsb; uint16_t vchunk_size_msb; /* value = (vchunk_size_msb << 4) | (vchunk_size_lsb >> 4) */ uint16_t achunk_size; }; struct nsv_pcm_header { uint8_t bits_per_sample; uint8_t channel_count; uint16_t sample_rate; }; #endif /* variation from avi.h */ /*typedef struct CodecTag { int id; unsigned int tag; } CodecTag;*/ /* tags */ #define T_NSVF MKTAG('N', 'S', 'V', 'f') /* file header */ #define T_NSVS MKTAG('N', 'S', 'V', 's') /* chunk header */ #define T_TOC2 MKTAG('T', 'O', 'C', '2') /* extra index marker */ #define T_NONE MKTAG('N', 'O', 'N', 'E') /* null a/v 4CC */ #define T_SUBT MKTAG('S', 'U', 'B', 'T') /* subtitle aux data */ #define T_ASYN MKTAG('A', 'S', 'Y', 'N') /* async a/v aux marker */ #define T_KEYF MKTAG('K', 'E', 'Y', 'F') /* video keyframe aux marker (addition) */ #define TB_NSVF MKBETAG('N', 'S', 'V', 'f') #define TB_NSVS MKBETAG('N', 'S', 'V', 's') /* hardcoded stream indexes */ #define NSV_ST_VIDEO 0 #define NSV_ST_AUDIO 1 #define NSV_ST_SUBT 2 enum NSVStatus { NSV_UNSYNC, NSV_FOUND_NSVF, NSV_HAS_READ_NSVF, NSV_FOUND_NSVS, NSV_HAS_READ_NSVS, NSV_FOUND_BEEF, NSV_GOT_VIDEO, NSV_GOT_AUDIO, }; typedef struct NSVStream { int frame_offset; /* current frame (video) or byte (audio) counter (used to compute the pts) */ int scale; int rate; int sample_size; /* audio only data */ int start; int new_frame_offset; /* temporary storage (used during seek) */ int cum_len; /* temporary storage (used during seek) */ } NSVStream; typedef struct 
NSVContext { int base_offset; int NSVf_end; uint32_t *nsvs_file_offset; int index_entries; enum NSVStatus state; AVPacket ahead[2]; /* [v, a] if .data is !NULL there is something */ /* cached */ int64_t duration; uint32_t vtag, atag; uint16_t vwidth, vheight; int16_t avsync; AVRational framerate; uint32_t *nsvs_timestamps; } NSVContext; static const AVCodecTag nsv_codec_video_tags[] = { { AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', ' ') }, { AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') }, { AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, { AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', ' ') }, { AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') }, { AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') }, /* { AV_CODEC_ID_VP4, MKTAG('V', 'P', '4', ' ') }, { AV_CODEC_ID_VP4, MKTAG('V', 'P', '4', '0') }, */ { AV_CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') }, /* cf sample xvid decoder from nsv_codec_sdk.zip */ { AV_CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', '3') }, { AV_CODEC_ID_NONE, 0 }, }; static const AVCodecTag nsv_codec_audio_tags[] = { { AV_CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') }, { AV_CODEC_ID_AAC, MKTAG('A', 'A', 'C', ' ') }, { AV_CODEC_ID_AAC, MKTAG('A', 'A', 'C', 'P') }, { AV_CODEC_ID_SPEEX, MKTAG('S', 'P', 'X', ' ') }, { AV_CODEC_ID_PCM_U16LE, MKTAG('P', 'C', 'M', ' ') }, { AV_CODEC_ID_NONE, 0 }, }; //static int nsv_load_index(AVFormatContext *s); static int nsv_read_chunk(AVFormatContext *s, int fill_header); /* try to find something we recognize, and set the state accordingly */ static int nsv_resync(AVFormatContext *s) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; uint32_t v = 0; int i; for (i = 0; i < NSV_MAX_RESYNC; i++) { if (pb->eof_reached) { av_log(s, AV_LOG_TRACE, "NSV EOF\n"); nsv->state = NSV_UNSYNC; return -1; } v <<= 8; v |= avio_r8(pb); if (i < 8) { av_log(s, AV_LOG_TRACE, "NSV resync: [%d] = %02"PRIx32"\n", i, v & 0x0FF); } 
if ((v & 0x0000ffff) == 0xefbe) { /* BEEF */ av_log(s, AV_LOG_TRACE, "NSV resynced on BEEF after %d bytes\n", i+1); nsv->state = NSV_FOUND_BEEF; return 0; } /* we read as big-endian, thus the MK*BE* */ if (v == TB_NSVF) { /* NSVf */ av_log(s, AV_LOG_TRACE, "NSV resynced on NSVf after %d bytes\n", i+1); nsv->state = NSV_FOUND_NSVF; return 0; } if (v == MKBETAG('N', 'S', 'V', 's')) { /* NSVs */ av_log(s, AV_LOG_TRACE, "NSV resynced on NSVs after %d bytes\n", i+1); nsv->state = NSV_FOUND_NSVS; return 0; } } av_log(s, AV_LOG_TRACE, "NSV sync lost\n"); return -1; } static int nsv_parse_NSVf_header(AVFormatContext *s) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; unsigned int av_unused file_size; unsigned int size; int64_t duration; int strings_size; int table_entries; int table_entries_used; nsv->state = NSV_UNSYNC; /* in case we fail */ size = avio_rl32(pb); if (size < 28) return -1; nsv->NSVf_end = size; file_size = (uint32_t)avio_rl32(pb); av_log(s, AV_LOG_TRACE, "NSV NSVf chunk_size %u\n", size); av_log(s, AV_LOG_TRACE, "NSV NSVf file_size %u\n", file_size); nsv->duration = duration = avio_rl32(pb); /* in ms */ av_log(s, AV_LOG_TRACE, "NSV NSVf duration %"PRId64" ms\n", duration); // XXX: store it in AVStreams strings_size = avio_rl32(pb); table_entries = avio_rl32(pb); table_entries_used = avio_rl32(pb); av_log(s, AV_LOG_TRACE, "NSV NSVf info-strings size: %d, table entries: %d, bis %d\n", strings_size, table_entries, table_entries_used); if (pb->eof_reached) return -1; av_log(s, AV_LOG_TRACE, "NSV got header; filepos %"PRId64"\n", avio_tell(pb)); if (strings_size > 0) { char *strings; /* last byte will be '\0' to play safe with str*() */ char *p, *endp; char *token, *value; char quote; p = strings = av_mallocz((size_t)strings_size + 1); if (!p) return AVERROR(ENOMEM); endp = strings + strings_size; avio_read(pb, strings, strings_size); while (p < endp) { while (*p == ' ') p++; /* strip out spaces */ if (p >= endp-2) break; token = p; p = strchr(p, 
'='); if (!p || p >= endp-2) break; *p++ = '\0'; quote = *p++; value = p; p = strchr(p, quote); if (!p || p >= endp) break; *p++ = '\0'; av_log(s, AV_LOG_TRACE, "NSV NSVf INFO: %s='%s'\n", token, value); av_dict_set(&s->metadata, token, value, 0); } av_free(strings); } if (pb->eof_reached) return -1; av_log(s, AV_LOG_TRACE, "NSV got infos; filepos %"PRId64"\n", avio_tell(pb)); if (table_entries_used > 0) { int i; nsv->index_entries = table_entries_used; if((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t)) return -1; nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t)); if (!nsv->nsvs_file_offset) return AVERROR(ENOMEM); for(i=0;i<table_entries_used;i++) nsv->nsvs_file_offset[i] = avio_rl32(pb) + size; if(table_entries > table_entries_used && avio_rl32(pb) == MKTAG('T','O','C','2')) { nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t)); if (!nsv->nsvs_timestamps) return AVERROR(ENOMEM); for(i=0;i<table_entries_used;i++) { nsv->nsvs_timestamps[i] = avio_rl32(pb); } } } av_log(s, AV_LOG_TRACE, "NSV got index; filepos %"PRId64"\n", avio_tell(pb)); avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */ if (pb->eof_reached) return -1; nsv->state = NSV_HAS_READ_NSVF; return 0; } static int nsv_parse_NSVs_header(AVFormatContext *s) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; uint32_t vtag, atag; uint16_t vwidth, vheight; AVRational framerate; int i; AVStream *st; NSVStream *nst; vtag = avio_rl32(pb); atag = avio_rl32(pb); vwidth = avio_rl16(pb); vheight = avio_rl16(pb); i = avio_r8(pb); av_log(s, AV_LOG_TRACE, "NSV NSVs framerate code %2x\n", i); if(i&0x80) { /* odd way of giving native framerates from docs */ int t=(i & 0x7F)>>2; if(t<16) framerate = (AVRational){1, t+1}; else framerate = (AVRational){t-15, 1}; if(i&1){ framerate.num *= 1000; framerate.den *= 1001; } if((i&3)==3) framerate.num *= 24; else if((i&3)==2) framerate.num *= 25; 
else framerate.num *= 30; } else framerate= (AVRational){i, 1}; nsv->avsync = avio_rl16(pb); nsv->framerate = framerate; av_log(s, AV_LOG_TRACE, "NSV NSVs vsize %"PRIu16"x%"PRIu16"\n", vwidth, vheight); /* XXX change to ap != NULL ? */ if (s->nb_streams == 0) { /* streams not yet published, let's do that */ nsv->vtag = vtag; nsv->atag = atag; nsv->vwidth = vwidth; nsv->vheight = vwidth; if (vtag != T_NONE) { int i; st = avformat_new_stream(s, NULL); if (!st) goto fail; st->id = NSV_ST_VIDEO; nst = av_mallocz(sizeof(NSVStream)); if (!nst) goto fail; st->priv_data = nst; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_tag = vtag; st->codecpar->codec_id = ff_codec_get_id(nsv_codec_video_tags, vtag); st->codecpar->width = vwidth; st->codecpar->height = vheight; st->codecpar->bits_per_coded_sample = 24; /* depth XXX */ avpriv_set_pts_info(st, 64, framerate.den, framerate.num); st->start_time = 0; st->duration = av_rescale(nsv->duration, framerate.num, 1000*framerate.den); for(i=0;i<nsv->index_entries;i++) { if(nsv->nsvs_timestamps) { av_add_index_entry(st, nsv->nsvs_file_offset[i], nsv->nsvs_timestamps[i], 0, 0, AVINDEX_KEYFRAME); } else { int64_t ts = av_rescale(i*nsv->duration/nsv->index_entries, framerate.num, 1000*framerate.den); av_add_index_entry(st, nsv->nsvs_file_offset[i], ts, 0, 0, AVINDEX_KEYFRAME); } } } if (atag != T_NONE) { st = avformat_new_stream(s, NULL); if (!st) goto fail; st->id = NSV_ST_AUDIO; nst = av_mallocz(sizeof(NSVStream)); if (!nst) goto fail; st->priv_data = nst; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_tag = atag; st->codecpar->codec_id = ff_codec_get_id(nsv_codec_audio_tags, atag); st->need_parsing = AVSTREAM_PARSE_FULL; /* for PCM we will read a chunk later and put correct info */ /* set timebase to common denominator of ms and framerate */ avpriv_set_pts_info(st, 64, 1, framerate.num*1000); st->start_time = 0; st->duration = (int64_t)nsv->duration * framerate.num; } } else { if (nsv->vtag != 
vtag || nsv->atag != atag || nsv->vwidth != vwidth || nsv->vheight != vwidth) { av_log(s, AV_LOG_TRACE, "NSV NSVs header values differ from the first one!!!\n"); //return -1; } } nsv->state = NSV_HAS_READ_NSVS; return 0; fail: /* XXX */ nsv->state = NSV_UNSYNC; return -1; } static int nsv_read_header(AVFormatContext *s) { NSVContext *nsv = s->priv_data; int i, err; nsv->state = NSV_UNSYNC; nsv->ahead[0].data = nsv->ahead[1].data = NULL; for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) { if (nsv_resync(s) < 0) return -1; if (nsv->state == NSV_FOUND_NSVF) { err = nsv_parse_NSVf_header(s); if (err < 0) return err; } /* we need the first NSVs also... */ if (nsv->state == NSV_FOUND_NSVS) { err = nsv_parse_NSVs_header(s); if (err < 0) return err; break; /* we just want the first one */ } } if (s->nb_streams < 1) /* no luck so far */ return -1; /* now read the first chunk, so we can attempt to decode more info */ err = nsv_read_chunk(s, 1); av_log(s, AV_LOG_TRACE, "parsed header\n"); return err; } static int nsv_read_chunk(AVFormatContext *s, int fill_header) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; AVStream *st[2] = {NULL, NULL}; NSVStream *nst; AVPacket *pkt; int i, err = 0; uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */ uint32_t vsize; uint16_t asize; uint16_t auxsize; int ret; if (nsv->ahead[0].data || nsv->ahead[1].data) return 0; //-1; /* hey! eat what you've in your plate first! 
*/ null_chunk_retry: if (pb->eof_reached) return -1; for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++) err = nsv_resync(s); if (err < 0) return err; if (nsv->state == NSV_FOUND_NSVS) err = nsv_parse_NSVs_header(s); if (err < 0) return err; if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF) return -1; auxcount = avio_r8(pb); vsize = avio_rl16(pb); asize = avio_rl16(pb); vsize = (vsize << 4) | (auxcount >> 4); auxcount &= 0x0f; av_log(s, AV_LOG_TRACE, "NSV CHUNK %"PRIu8" aux, %"PRIu32" bytes video, %"PRIu16" bytes audio\n", auxcount, vsize, asize); /* skip aux stuff */ for (i = 0; i < auxcount; i++) { uint32_t av_unused auxtag; auxsize = avio_rl16(pb); auxtag = avio_rl32(pb); avio_skip(pb, auxsize); vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming brain-dead */ } if (pb->eof_reached) return -1; if (!vsize && !asize) { nsv->state = NSV_UNSYNC; goto null_chunk_retry; } /* map back streams to v,a */ if (s->nb_streams > 0) st[s->streams[0]->id] = s->streams[0]; if (s->nb_streams > 1) st[s->streams[1]->id] = s->streams[1]; if (vsize && st[NSV_ST_VIDEO]) { nst = st[NSV_ST_VIDEO]->priv_data; pkt = &nsv->ahead[NSV_ST_VIDEO]; if ((ret = av_get_packet(pb, pkt, vsize)) < 0) return ret; pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO; pkt->dts = nst->frame_offset; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ for (i = 0; i < FFMIN(8, vsize); i++) av_log(s, AV_LOG_TRACE, "NSV video: [%d] = %02"PRIx8"\n", i, pkt->data[i]); } if(st[NSV_ST_VIDEO]) ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++; if (asize && st[NSV_ST_AUDIO]) { nst = st[NSV_ST_AUDIO]->priv_data; pkt = &nsv->ahead[NSV_ST_AUDIO]; /* read raw audio specific header on the first audio chunk... */ /* on ALL audio chunks ?? seems so! 
*/ if (asize && st[NSV_ST_AUDIO]->codecpar->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { uint8_t bps; uint8_t channels; uint16_t samplerate; bps = avio_r8(pb); channels = avio_r8(pb); samplerate = avio_rl16(pb); if (!channels || !samplerate) return AVERROR_INVALIDDATA; asize-=4; av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n", bps, channels, samplerate); if (fill_header) { st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */ if (bps != 16) { av_log(s, AV_LOG_TRACE, "NSV AUDIO bit/sample != 16 (%"PRIu8")!!!\n", bps); } bps /= channels; // ??? if (bps == 8) st[NSV_ST_AUDIO]->codecpar->codec_id = AV_CODEC_ID_PCM_U8; samplerate /= 4;/* UGH ??? XXX */ channels = 1; st[NSV_ST_AUDIO]->codecpar->channels = channels; st[NSV_ST_AUDIO]->codecpar->sample_rate = samplerate; av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n", bps, channels, samplerate); } } if ((ret = av_get_packet(pb, pkt, asize)) < 0) return ret; pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) { /* on a nsvs frame we have new information on a/v sync */ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1); pkt->dts *= (int64_t)1000 * nsv->framerate.den; pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num; av_log(s, AV_LOG_TRACE, "NSV AUDIO: sync:%"PRId16", dts:%"PRId64, nsv->avsync, pkt->dts); } nst->frame_offset++; } nsv->state = NSV_UNSYNC; return 0; } static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt) { NSVContext *nsv = s->priv_data; int i, err = 0; /* in case we don't already have something to eat ... 
*/ if (!nsv->ahead[0].data && !nsv->ahead[1].data) err = nsv_read_chunk(s, 0); if (err < 0) return err; /* now pick one of the plates */ for (i = 0; i < 2; i++) { if (nsv->ahead[i].data) { /* avoid the cost of new_packet + memcpy(->data) */ memcpy(pkt, &nsv->ahead[i], sizeof(AVPacket)); nsv->ahead[i].data = NULL; /* we ate that one */ return pkt->size; } } /* this restaurant is not provisioned :^] */ return -1; } static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { NSVContext *nsv = s->priv_data; AVStream *st = s->streams[stream_index]; NSVStream *nst = st->priv_data; int index; index = av_index_search_timestamp(st, timestamp, flags); if(index < 0) return -1; if (avio_seek(s->pb, st->index_entries[index].pos, SEEK_SET) < 0) return -1; nst->frame_offset = st->index_entries[index].timestamp; nsv->state = NSV_UNSYNC; return 0; } static int nsv_read_close(AVFormatContext *s) { NSVContext *nsv = s->priv_data; av_freep(&nsv->nsvs_file_offset); av_freep(&nsv->nsvs_timestamps); if (nsv->ahead[0].data) av_packet_unref(&nsv->ahead[0]); if (nsv->ahead[1].data) av_packet_unref(&nsv->ahead[1]); return 0; } static int nsv_probe(AVProbeData *p) { int i; int score; int vsize, asize, auxcount; score = 0; av_log(NULL, AV_LOG_TRACE, "nsv_probe(), buf_size %d\n", p->buf_size); /* check file header */ /* streamed files might not have any header */ if (p->buf[0] == 'N' && p->buf[1] == 'S' && p->buf[2] == 'V' && (p->buf[3] == 'f' || p->buf[3] == 's')) return AVPROBE_SCORE_MAX; /* XXX: do streamed files always start at chunk boundary ?? */ /* or do we need to search NSVs in the byte stream ? */ /* seems the servers don't bother starting clean chunks... 
*/ /* sometimes even the first header is at 9KB or something :^) */ for (i = 1; i < p->buf_size - 3; i++) { if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' && p->buf[i+2] == 'V' && p->buf[i+3] == 's') { score = AVPROBE_SCORE_MAX/5; /* Get the chunk size and check if at the end we are getting 0xBEEF */ auxcount = p->buf[i+19]; vsize = p->buf[i+20] | p->buf[i+21] << 8; asize = p->buf[i+22] | p->buf[i+23] << 8; vsize = (vsize << 4) | (auxcount >> 4); if ((asize + vsize + i + 23) < p->buf_size - 2) { if (p->buf[i+23+asize+vsize+1] == 0xEF && p->buf[i+23+asize+vsize+2] == 0xBE) return AVPROBE_SCORE_MAX-20; } } } /* so we'll have more luck on extension... */ if (av_match_ext(p->filename, "nsv")) return AVPROBE_SCORE_EXTENSION; /* FIXME: add mime-type check */ return score; } AVInputFormat ff_nsv_demuxer = { .name = "nsv", .long_name = NULL_IF_CONFIG_SMALL("Nullsoft Streaming Video"), .priv_data_size = sizeof(NSVContext), .read_probe = nsv_probe, .read_header = nsv_read_header, .read_packet = nsv_read_packet, .read_close = nsv_read_close, .read_seek = nsv_read_seek, };
/*
 * Read one NSV audio/video chunk and stash the resulting packet(s) in
 * nsv->ahead[] (index NSV_ST_VIDEO / NSV_ST_AUDIO) for nsv_read_packet()
 * to hand out later.
 *
 * @param s           demuxer context
 * @param fill_header non-zero on the first chunk: the embedded raw-PCM
 *                    header (bps/channels/samplerate) is used to finalize
 *                    the audio stream parameters
 * @return 0 on success, a negative error code / -1 on failure or EOF
 */
static int nsv_read_chunk(AVFormatContext *s, int fill_header)
{
    NSVContext *nsv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st[2] = {NULL, NULL};
    NSVStream *nst;
    AVPacket *pkt;
    int i, err = 0;
    uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */
    uint32_t vsize;
    uint16_t asize;
    uint16_t auxsize;
    int ret;

    /* If either plate still holds an undelivered packet, don't read more. */
    if (nsv->ahead[0].data || nsv->ahead[1].data)
        return 0; //-1; /* hey! eat what you've in your plate first! */

null_chunk_retry:
    if (pb->eof_reached)
        return -1;

    /* resync until we are positioned on an NSVs or BEEF marker */
    for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++)
        err = nsv_resync(s);
    if (err < 0)
        return err;
    if (nsv->state == NSV_FOUND_NSVS)
        err = nsv_parse_NSVs_header(s);
    if (err < 0)
        return err;
    if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF)
        return -1;

    /* chunk header: vsize is 20 bits, its low nibble lives in auxcount's
     * high nibble (see nsv_avchunk_header layout in the format docs) */
    auxcount = avio_r8(pb);
    vsize = avio_rl16(pb);
    asize = avio_rl16(pb);
    vsize = (vsize << 4) | (auxcount >> 4);
    auxcount &= 0x0f;
    av_log(s, AV_LOG_TRACE, "NSV CHUNK %"PRIu8" aux, %"PRIu32" bytes video, %"PRIu16" bytes audio\n",
           auxcount, vsize, asize);
    /* skip aux stuff */
    for (i = 0; i < auxcount; i++) {
        uint32_t av_unused auxtag;
        auxsize = avio_rl16(pb);
        auxtag = avio_rl32(pb);
        avio_skip(pb, auxsize);
        /* aux payload + its size/tag fields are counted inside vsize */
        vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming brain-dead */
    }

    if (pb->eof_reached)
        return -1;
    if (!vsize && !asize) {
        /* empty chunk: resync and try the next one */
        nsv->state = NSV_UNSYNC;
        goto null_chunk_retry;
    }

    /* map back streams to v,a */
    if (s->nb_streams > 0)
        st[s->streams[0]->id] = s->streams[0];
    if (s->nb_streams > 1)
        st[s->streams[1]->id] = s->streams[1];

    if (vsize && st[NSV_ST_VIDEO]) {
        nst = st[NSV_ST_VIDEO]->priv_data;
        pkt = &nsv->ahead[NSV_ST_VIDEO];
        /* check the read: on failure pkt->data would be invalid and the
         * trace loop below would dereference it (NULL deref on truncation) */
        if ((ret = av_get_packet(pb, pkt, vsize)) < 0)
            return ret;
        pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO;
        pkt->dts = nst->frame_offset;
        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
        for (i = 0; i < FFMIN(8, vsize); i++)
            av_log(s, AV_LOG_TRACE, "NSV video: [%d] = %02"PRIx8"\n",
                   i, pkt->data[i]);
    }
    if(st[NSV_ST_VIDEO])
        ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++;

    if (asize && st[NSV_ST_AUDIO]) {
        nst = st[NSV_ST_AUDIO]->priv_data;
        pkt = &nsv->ahead[NSV_ST_AUDIO];
        /* read raw audio specific header on the first audio chunk... */
        /* on ALL audio chunks ?? seems so! */
        if (asize && st[NSV_ST_AUDIO]->codecpar->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) {
            uint8_t bps;
            uint8_t channels;
            uint16_t samplerate;
            bps = avio_r8(pb);
            channels = avio_r8(pb);
            samplerate = avio_rl16(pb);
            if (!channels || !samplerate)
                return AVERROR_INVALIDDATA;
            asize-=4; /* the 4-byte PCM header is counted inside asize */
            av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n",
                   bps, channels, samplerate);
            if (fill_header) {
                st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */
                if (bps != 16) {
                    av_log(s, AV_LOG_TRACE, "NSV AUDIO bit/sample != 16 (%"PRIu8")!!!\n", bps);
                }
                bps /= channels; // ???
                if (bps == 8)
                    st[NSV_ST_AUDIO]->codecpar->codec_id = AV_CODEC_ID_PCM_U8;
                samplerate /= 4;/* UGH ??? XXX */
                channels = 1;
                st[NSV_ST_AUDIO]->codecpar->channels = channels;
                st[NSV_ST_AUDIO]->codecpar->sample_rate = samplerate;
                av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n",
                       bps, channels, samplerate);
            }
        }
        /* same failure mode as the video read: propagate instead of using
         * a half-initialized packet */
        if ((ret = av_get_packet(pb, pkt, asize)) < 0)
            return ret;
        pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO;
        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
        if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) {
            /* on a nsvs frame we have new information on a/v sync */
            pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1);
            pkt->dts *= (int64_t)1000        * nsv->framerate.den;
            pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num;
            av_log(s, AV_LOG_TRACE, "NSV AUDIO: sync:%"PRId16", dts:%"PRId64, nsv->avsync, pkt->dts);
        }
        nst->frame_offset++;
    }

    nsv->state = NSV_UNSYNC;
    return 0;
}
static int nsv_read_chunk(AVFormatContext *s, int fill_header) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; AVStream *st[2] = {NULL, NULL}; NSVStream *nst; AVPacket *pkt; int i, err = 0; uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */ uint32_t vsize; uint16_t asize; uint16_t auxsize; int ret; if (nsv->ahead[0].data || nsv->ahead[1].data) return 0; //-1; /* hey! eat what you've in your plate first! */ null_chunk_retry: if (pb->eof_reached) return -1; for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++) err = nsv_resync(s); if (err < 0) return err; if (nsv->state == NSV_FOUND_NSVS) err = nsv_parse_NSVs_header(s); if (err < 0) return err; if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF) return -1; auxcount = avio_r8(pb); vsize = avio_rl16(pb); asize = avio_rl16(pb); vsize = (vsize << 4) | (auxcount >> 4); auxcount &= 0x0f; av_log(s, AV_LOG_TRACE, "NSV CHUNK %"PRIu8" aux, %"PRIu32" bytes video, %"PRIu16" bytes audio\n", auxcount, vsize, asize); /* skip aux stuff */ for (i = 0; i < auxcount; i++) { uint32_t av_unused auxtag; auxsize = avio_rl16(pb); auxtag = avio_rl32(pb); avio_skip(pb, auxsize); vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming brain-dead */ } if (pb->eof_reached) return -1; if (!vsize && !asize) { nsv->state = NSV_UNSYNC; goto null_chunk_retry; } /* map back streams to v,a */ if (s->nb_streams > 0) st[s->streams[0]->id] = s->streams[0]; if (s->nb_streams > 1) st[s->streams[1]->id] = s->streams[1]; if (vsize && st[NSV_ST_VIDEO]) { nst = st[NSV_ST_VIDEO]->priv_data; pkt = &nsv->ahead[NSV_ST_VIDEO]; if ((ret = av_get_packet(pb, pkt, vsize)) < 0) return ret; pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO; pkt->dts = nst->frame_offset; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? 
AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ for (i = 0; i < FFMIN(8, vsize); i++) av_log(s, AV_LOG_TRACE, "NSV video: [%d] = %02"PRIx8"\n", i, pkt->data[i]); } if(st[NSV_ST_VIDEO]) ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++; if (asize && st[NSV_ST_AUDIO]) { nst = st[NSV_ST_AUDIO]->priv_data; pkt = &nsv->ahead[NSV_ST_AUDIO]; /* read raw audio specific header on the first audio chunk... */ /* on ALL audio chunks ?? seems so! */ if (asize && st[NSV_ST_AUDIO]->codecpar->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { uint8_t bps; uint8_t channels; uint16_t samplerate; bps = avio_r8(pb); channels = avio_r8(pb); samplerate = avio_rl16(pb); if (!channels || !samplerate) return AVERROR_INVALIDDATA; asize-=4; av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n", bps, channels, samplerate); if (fill_header) { st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */ if (bps != 16) { av_log(s, AV_LOG_TRACE, "NSV AUDIO bit/sample != 16 (%"PRIu8")!!!\n", bps); } bps /= channels; // ??? if (bps == 8) st[NSV_ST_AUDIO]->codecpar->codec_id = AV_CODEC_ID_PCM_U8; samplerate /= 4;/* UGH ??? XXX */ channels = 1; st[NSV_ST_AUDIO]->codecpar->channels = channels; st[NSV_ST_AUDIO]->codecpar->sample_rate = samplerate; av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %"PRIu8", nchan %"PRIu8", srate %"PRIu16"\n", bps, channels, samplerate); } } if ((ret = av_get_packet(pb, pkt, asize)) < 0) return ret; pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? 
AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) { /* on a nsvs frame we have new information on a/v sync */ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1); pkt->dts *= (int64_t)1000 * nsv->framerate.den; pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num; av_log(s, AV_LOG_TRACE, "NSV AUDIO: sync:%"PRId16", dts:%"PRId64, nsv->avsync, pkt->dts); } nst->frame_offset++; } nsv->state = NSV_UNSYNC; return 0; }
{'added': [(523, ' int ret;'), (575, ' if ((ret = av_get_packet(pb, pkt, vsize)) < 0)'), (576, ' return ret;'), (620, ' if ((ret = av_get_packet(pb, pkt, asize)) < 0)'), (621, ' return ret;')], 'deleted': [(574, ' av_get_packet(pb, pkt, vsize);'), (618, ' av_get_packet(pb, pkt, asize);')]}
5
2
532
3,851
110
885
35
https://github.com/libav/libav
CVE-2017-9051
CWE-476
3,199
hb-set.cc
C
hb_set_union
/* * Copyright © 2012 Google, Inc. * * This is part of HarfBuzz, a text shaping library. * * Permission is hereby granted, without written agreement and without * license or royalty fees, to use, copy, modify, and distribute this * software and its documentation for any purpose, provided that the * above copyright notice and the following two paragraphs appear in * all copies of this software. * * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * * Google Author(s): Behdad Esfahbod */ #include "hb-set.hh" /** * SECTION:hb-set * @title: hb-set * @short_description: Objects representing a set of integers * @include: hb.h * * Set objects represent a mathematical set of integer values. They are * used in non-shaping APIs to query certain sets of characters or glyphs, * or other integer values. **/ /** * hb_set_create: (Xconstructor) * * Creates a new, initially empty set. * * Return value: (transfer full): The new #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_create () { hb_set_t *set; if (!(set = hb_object_create<hb_set_t> ())) return hb_set_get_empty (); set->init_shallow (); return set; } /** * hb_set_get_empty: * * Fetches the singleton empty #hb_set_t. 
* * Return value: (transfer full): The empty #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_get_empty () { return const_cast<hb_set_t *> (&Null (hb_set_t)); } /** * hb_set_reference: (skip) * @set: A set * * Increases the reference count on a set. * * Return value: (transfer full): The set * * Since: 0.9.2 **/ hb_set_t * hb_set_reference (hb_set_t *set) { return hb_object_reference (set); } /** * hb_set_destroy: (skip) * @set: A set * * Decreases the reference count on a set. When * the reference count reaches zero, the set is * destroyed, freeing all memory. * * Since: 0.9.2 **/ void hb_set_destroy (hb_set_t *set) { if (!hb_object_destroy (set)) return; set->fini_shallow (); hb_free (set); } /** * hb_set_set_user_data: (skip) * @set: A set * @key: The user-data key to set * @data: A pointer to the user data to set * @destroy: (nullable): A callback to call when @data is not needed anymore * @replace: Whether to replace an existing data with the same key * * Attaches a user-data key/data pair to the specified set. * * Return value: %true if success, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_set_user_data (hb_set_t *set, hb_user_data_key_t *key, void * data, hb_destroy_func_t destroy, hb_bool_t replace) { return hb_object_set_user_data (set, key, data, destroy, replace); } /** * hb_set_get_user_data: (skip) * @set: A set * @key: The user-data key to query * * Fetches the user data associated with the specified key, * attached to the specified set. * * Return value: (transfer none): A pointer to the user data * * Since: 0.9.2 **/ void * hb_set_get_user_data (hb_set_t *set, hb_user_data_key_t *key) { return hb_object_get_user_data (set, key); } /** * hb_set_allocation_successful: * @set: A set * * Tests whether memory allocation for a set was successful. 
* * Return value: %true if allocation succeeded, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_allocation_successful (const hb_set_t *set) { return !set->in_error (); } /** * hb_set_copy: * @set: A set * * Allocate a copy of @set. * * Return value: Newly-allocated set. * * Since: 2.8.2 **/ hb_set_t * hb_set_copy (const hb_set_t *set) { hb_set_t *copy = hb_set_create (); copy->set (*set); return copy; } /** * hb_set_clear: * @set: A set * * Clears out the contents of a set. * * Since: 0.9.2 **/ void hb_set_clear (hb_set_t *set) { if (unlikely (hb_object_is_immutable (set))) return; set->clear (); } /** * hb_set_is_empty: * @set: a set. * * Tests whether a set is empty (contains no elements). * * Return value: %true if @set is empty * * Since: 0.9.7 **/ hb_bool_t hb_set_is_empty (const hb_set_t *set) { return set->is_empty (); } /** * hb_set_has: * @set: A set * @codepoint: The element to query * * Tests whether @codepoint belongs to @set. * * Return value: %true if @codepoint is in @set, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_has (const hb_set_t *set, hb_codepoint_t codepoint) { return set->has (codepoint); } /** * hb_set_add: * @set: A set * @codepoint: The element to add to @set * * Adds @codepoint to @set. * * Since: 0.9.2 **/ void hb_set_add (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. */ set->add (codepoint); } /** * hb_set_add_range: * @set: A set * @first: The first element to add to @set * @last: The final element to add to @set * * Adds all of the elements from @first to @last * (inclusive) to @set. * * Since: 0.9.7 **/ void hb_set_add_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->add_range (first, last); } /** * hb_set_del: * @set: A set * @codepoint: Removes @codepoint from @set * * Removes @codepoint from @set. * * Since: 0.9.2 **/ void hb_set_del (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. 
*/ set->del (codepoint); } /** * hb_set_del_range: * @set: A set * @first: The first element to remove from @set * @last: The final element to remove from @set * * Removes all of the elements from @first to @last * (inclusive) from @set. * * If @last is #HB_SET_VALUE_INVALID, then all values * greater than or equal to @first are removed. * * Since: 0.9.7 **/ void hb_set_del_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->del_range (first, last); } /** * hb_set_is_equal: * @set: A set * @other: Another set * * Tests whether @set and @other are equal (contain the same * elements). * * Return value: %true if the two sets are equal, %false otherwise. * * Since: 0.9.7 **/ hb_bool_t hb_set_is_equal (const hb_set_t *set, const hb_set_t *other) { return set->is_equal (*other); } /** * hb_set_is_subset: * @set: A set * @larger_set: Another set * * Tests whether @set is a subset of @larger_set. * * Return value: %true if the @set is a subset of (or equal to) @larger_set, %false otherwise. * * Since: 1.8.1 **/ hb_bool_t hb_set_is_subset (const hb_set_t *set, const hb_set_t *larger_set) { return set->is_subset (*larger_set); } /** * hb_set_set: * @set: A set * @other: Another set * * Makes the contents of @set equal to the contents of @other. * * Since: 0.9.2 **/ void hb_set_set (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->set (*other); } /** * hb_set_union: * @set: A set * @other: Another set * * Makes @set the union of @set and @other. * * Since: 0.9.2 **/ void hb_set_union (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->union_ (*other); } /** * hb_set_intersect: * @set: A set * @other: Another set * * Makes @set the intersection of @set and @other. 
* * Since: 0.9.2 **/ void hb_set_intersect (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->intersect (*other); } /** * hb_set_subtract: * @set: A set * @other: Another set * * Subtracts the contents of @other from @set. * * Since: 0.9.2 **/ void hb_set_subtract (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->subtract (*other); } /** * hb_set_symmetric_difference: * @set: A set * @other: Another set * * Makes @set the symmetric difference of @set * and @other. * * Since: 0.9.2 **/ void hb_set_symmetric_difference (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->symmetric_difference (*other); } /** * hb_set_invert: * @set: A set * * Inverts the contents of @set. * * Since: 3.0.0 **/ void hb_set_invert (hb_set_t *set) { if (unlikely (hb_object_is_immutable (set))) return; set->invert (); } /** * hb_set_get_population: * @set: A set * * Returns the number of elements in the set. * * Return value: The population of @set * * Since: 0.9.7 **/ unsigned int hb_set_get_population (const hb_set_t *set) { return set->get_population (); } /** * hb_set_get_min: * @set: A set * * Finds the smallest element in the set. * * Return value: minimum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_min (const hb_set_t *set) { return set->get_min (); } /** * hb_set_get_max: * @set: A set * * Finds the largest element in the set. * * Return value: maximum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_max (const hb_set_t *set) { return set->get_max (); } /** * hb_set_next: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the next element in @set that is greater than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. 
* * Return value: %true if there was a next value, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_next (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->next (codepoint); } /** * hb_set_previous: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the previous element in @set that is lower than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous value, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->previous (codepoint); } /** * hb_set_next_range: * @set: A set * @first: (out): The first code point in the range * @last: (inout): Input = The current last code point in the range * Output = The last code point in the range * * Fetches the next consecutive range of elements in @set that * are greater than current value of @last. * * Set @last to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a next range, %false otherwise * * Since: 0.9.7 **/ hb_bool_t hb_set_next_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->next_range (first, last); } /** * hb_set_previous_range: * @set: A set * @first: (inout): Input = The current first code point in the range * Output = The first code point in the range * @last: (out): The last code point in the range * * Fetches the previous consecutive range of elements in @set that * are greater than current value of @last. * * Set @first to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous range, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->previous_range (first, last); }
/* * Copyright © 2012 Google, Inc. * * This is part of HarfBuzz, a text shaping library. * * Permission is hereby granted, without written agreement and without * license or royalty fees, to use, copy, modify, and distribute this * software and its documentation for any purpose, provided that the * above copyright notice and the following two paragraphs appear in * all copies of this software. * * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * * Google Author(s): Behdad Esfahbod */ #include "hb-set.hh" /** * SECTION:hb-set * @title: hb-set * @short_description: Objects representing a set of integers * @include: hb.h * * Set objects represent a mathematical set of integer values. They are * used in non-shaping APIs to query certain sets of characters or glyphs, * or other integer values. **/ /** * hb_set_create: (Xconstructor) * * Creates a new, initially empty set. * * Return value: (transfer full): The new #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_create () { hb_set_t *set; if (!(set = hb_object_create<hb_set_t> ())) return hb_set_get_empty (); set->init_shallow (); return set; } /** * hb_set_get_empty: * * Fetches the singleton empty #hb_set_t. 
* * Return value: (transfer full): The empty #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_get_empty () { return const_cast<hb_set_t *> (&Null (hb_set_t)); } /** * hb_set_reference: (skip) * @set: A set * * Increases the reference count on a set. * * Return value: (transfer full): The set * * Since: 0.9.2 **/ hb_set_t * hb_set_reference (hb_set_t *set) { return hb_object_reference (set); } /** * hb_set_destroy: (skip) * @set: A set * * Decreases the reference count on a set. When * the reference count reaches zero, the set is * destroyed, freeing all memory. * * Since: 0.9.2 **/ void hb_set_destroy (hb_set_t *set) { if (!hb_object_destroy (set)) return; set->fini_shallow (); hb_free (set); } /** * hb_set_set_user_data: (skip) * @set: A set * @key: The user-data key to set * @data: A pointer to the user data to set * @destroy: (nullable): A callback to call when @data is not needed anymore * @replace: Whether to replace an existing data with the same key * * Attaches a user-data key/data pair to the specified set. * * Return value: %true if success, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_set_user_data (hb_set_t *set, hb_user_data_key_t *key, void * data, hb_destroy_func_t destroy, hb_bool_t replace) { return hb_object_set_user_data (set, key, data, destroy, replace); } /** * hb_set_get_user_data: (skip) * @set: A set * @key: The user-data key to query * * Fetches the user data associated with the specified key, * attached to the specified set. * * Return value: (transfer none): A pointer to the user data * * Since: 0.9.2 **/ void * hb_set_get_user_data (hb_set_t *set, hb_user_data_key_t *key) { return hb_object_get_user_data (set, key); } /** * hb_set_allocation_successful: * @set: A set * * Tests whether memory allocation for a set was successful. 
* * Return value: %true if allocation succeeded, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_allocation_successful (const hb_set_t *set) { return !set->in_error (); } /** * hb_set_copy: * @set: A set * * Allocate a copy of @set. * * Return value: Newly-allocated set. * * Since: 2.8.2 **/ hb_set_t * hb_set_copy (const hb_set_t *set) { hb_set_t *copy = hb_set_create (); copy->set (*set); return copy; } /** * hb_set_clear: * @set: A set * * Clears out the contents of a set. * * Since: 0.9.2 **/ void hb_set_clear (hb_set_t *set) { /* Immutible-safe. */ set->clear (); } /** * hb_set_is_empty: * @set: a set. * * Tests whether a set is empty (contains no elements). * * Return value: %true if @set is empty * * Since: 0.9.7 **/ hb_bool_t hb_set_is_empty (const hb_set_t *set) { return set->is_empty (); } /** * hb_set_has: * @set: A set * @codepoint: The element to query * * Tests whether @codepoint belongs to @set. * * Return value: %true if @codepoint is in @set, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_has (const hb_set_t *set, hb_codepoint_t codepoint) { return set->has (codepoint); } /** * hb_set_add: * @set: A set * @codepoint: The element to add to @set * * Adds @codepoint to @set. * * Since: 0.9.2 **/ void hb_set_add (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. */ set->add (codepoint); } /** * hb_set_add_range: * @set: A set * @first: The first element to add to @set * @last: The final element to add to @set * * Adds all of the elements from @first to @last * (inclusive) to @set. * * Since: 0.9.7 **/ void hb_set_add_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->add_range (first, last); } /** * hb_set_del: * @set: A set * @codepoint: Removes @codepoint from @set * * Removes @codepoint from @set. * * Since: 0.9.2 **/ void hb_set_del (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. 
*/ set->del (codepoint); } /** * hb_set_del_range: * @set: A set * @first: The first element to remove from @set * @last: The final element to remove from @set * * Removes all of the elements from @first to @last * (inclusive) from @set. * * If @last is #HB_SET_VALUE_INVALID, then all values * greater than or equal to @first are removed. * * Since: 0.9.7 **/ void hb_set_del_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->del_range (first, last); } /** * hb_set_is_equal: * @set: A set * @other: Another set * * Tests whether @set and @other are equal (contain the same * elements). * * Return value: %true if the two sets are equal, %false otherwise. * * Since: 0.9.7 **/ hb_bool_t hb_set_is_equal (const hb_set_t *set, const hb_set_t *other) { return set->is_equal (*other); } /** * hb_set_is_subset: * @set: A set * @larger_set: Another set * * Tests whether @set is a subset of @larger_set. * * Return value: %true if the @set is a subset of (or equal to) @larger_set, %false otherwise. * * Since: 1.8.1 **/ hb_bool_t hb_set_is_subset (const hb_set_t *set, const hb_set_t *larger_set) { return set->is_subset (*larger_set); } /** * hb_set_set: * @set: A set * @other: Another set * * Makes the contents of @set equal to the contents of @other. * * Since: 0.9.2 **/ void hb_set_set (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->set (*other); } /** * hb_set_union: * @set: A set * @other: Another set * * Makes @set the union of @set and @other. * * Since: 0.9.2 **/ void hb_set_union (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->union_ (*other); } /** * hb_set_intersect: * @set: A set * @other: Another set * * Makes @set the intersection of @set and @other. * * Since: 0.9.2 **/ void hb_set_intersect (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->intersect (*other); } /** * hb_set_subtract: * @set: A set * @other: Another set * * Subtracts the contents of @other from @set. 
* * Since: 0.9.2 **/ void hb_set_subtract (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->subtract (*other); } /** * hb_set_symmetric_difference: * @set: A set * @other: Another set * * Makes @set the symmetric difference of @set * and @other. * * Since: 0.9.2 **/ void hb_set_symmetric_difference (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->symmetric_difference (*other); } /** * hb_set_invert: * @set: A set * * Inverts the contents of @set. * * Since: 3.0.0 **/ void hb_set_invert (hb_set_t *set) { /* Immutible-safe. */ set->invert (); } /** * hb_set_get_population: * @set: A set * * Returns the number of elements in the set. * * Return value: The population of @set * * Since: 0.9.7 **/ unsigned int hb_set_get_population (const hb_set_t *set) { return set->get_population (); } /** * hb_set_get_min: * @set: A set * * Finds the smallest element in the set. * * Return value: minimum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_min (const hb_set_t *set) { return set->get_min (); } /** * hb_set_get_max: * @set: A set * * Finds the largest element in the set. * * Return value: maximum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_max (const hb_set_t *set) { return set->get_max (); } /** * hb_set_next: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the next element in @set that is greater than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. 
* * Return value: %true if there was a next value, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_next (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->next (codepoint); } /** * hb_set_previous: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the previous element in @set that is lower than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous value, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->previous (codepoint); } /** * hb_set_next_range: * @set: A set * @first: (out): The first code point in the range * @last: (inout): Input = The current last code point in the range * Output = The last code point in the range * * Fetches the next consecutive range of elements in @set that * are greater than current value of @last. * * Set @last to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a next range, %false otherwise * * Since: 0.9.7 **/ hb_bool_t hb_set_next_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->next_range (first, last); } /** * hb_set_previous_range: * @set: A set * @first: (inout): Input = The current first code point in the range * Output = The first code point in the range * @last: (out): The last code point in the range * * Fetches the previous consecutive range of elements in @set that * are greater than current value of @last. * * Set @first to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous range, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->previous_range (first, last); }
hb_set_union (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->union_ (*other); }
hb_set_union (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->union_ (*other); }
{'added': [(204, ' /* Immutible-safe. */'), (369, ' /* Immutible-safe. */'), (386, ' /* Immutible-safe. */'), (403, ' /* Immutible-safe. */'), (420, ' /* Immutible-safe. */'), (438, ' /* Immutible-safe. */'), (453, ' /* Immutible-safe. */')], 'deleted': [(204, ' if (unlikely (hb_object_is_immutable (set)))'), (205, ' return;'), (206, ''), (371, ' if (unlikely (hb_object_is_immutable (set)))'), (372, ' return;'), (373, ''), (390, ' if (unlikely (hb_object_is_immutable (set)))'), (391, ' return;'), (392, ''), (409, ' if (unlikely (hb_object_is_immutable (set)))'), (410, ' return;'), (411, ''), (428, ' if (unlikely (hb_object_is_immutable (set)))'), (429, ' return;'), (430, ''), (448, ' if (unlikely (hb_object_is_immutable (set)))'), (449, ' return;'), (450, ''), (465, ' if (unlikely (hb_object_is_immutable (set)))'), (466, ' return;'), (467, '')]}
7
21
184
679
7
33
2
https://github.com/harfbuzz/harfbuzz
CVE-2021-45931
CWE-787
868
print-wb.c
C
wb_id
/* * Copyright (c) 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <tcpdump-stdinc.h> #include "interface.h" #include "addrtoname.h" #include "extract.h" static const char tstr[] = "[|wb]"; /* XXX need to add byte-swapping macros! */ /* XXX - you mean like the ones in "extract.h"? */ /* * Largest packet size. Everything should fit within this space. * For instance, multiline objects are sent piecewise. */ #define MAXFRAMESIZE 1024 /* * Multiple drawing ops can be sent in one packet. Each one starts on a * an even multiple of DOP_ALIGN bytes, which must be a power of two. */ #define DOP_ALIGN 4 #define DOP_ROUNDUP(x) ((((int)(x)) + (DOP_ALIGN - 1)) & ~(DOP_ALIGN - 1)) #define DOP_NEXT(d)\ ((struct dophdr *)((u_char *)(d) + \ DOP_ROUNDUP(EXTRACT_16BITS(&(d)->dh_len) + sizeof(*(d))))) /* * Format of the whiteboard packet header. 
* The transport level header. */ struct pkt_hdr { uint32_t ph_src; /* site id of source */ uint32_t ph_ts; /* time stamp (for skew computation) */ uint16_t ph_version; /* version number */ u_char ph_type; /* message type */ u_char ph_flags; /* message flags */ }; /* Packet types */ #define PT_DRAWOP 0 /* drawing operation */ #define PT_ID 1 /* announcement packet */ #define PT_RREQ 2 /* repair request */ #define PT_RREP 3 /* repair reply */ #define PT_KILL 4 /* terminate participation */ #define PT_PREQ 5 /* page vector request */ #define PT_PREP 7 /* page vector reply */ #ifdef PF_USER #undef PF_USER /* {Digital,Tru64} UNIX define this, alas */ #endif /* flags */ #define PF_USER 0x01 /* hint that packet has interactive data */ #define PF_VIS 0x02 /* only visible ops wanted */ struct PageID { uint32_t p_sid; /* session id of initiator */ uint32_t p_uid; /* page number */ }; struct dophdr { uint32_t dh_ts; /* sender's timestamp */ uint16_t dh_len; /* body length */ u_char dh_flags; u_char dh_type; /* body type */ /* body follows */ }; /* * Drawing op sub-types. */ #define DT_RECT 2 #define DT_LINE 3 #define DT_ML 4 #define DT_DEL 5 #define DT_XFORM 6 #define DT_ELL 7 #define DT_CHAR 8 #define DT_STR 9 #define DT_NOP 10 #define DT_PSCODE 11 #define DT_PSCOMP 12 #define DT_REF 13 #define DT_SKIP 14 #define DT_HOLE 15 #define DT_MAXTYPE 15 /* * A drawing operation. */ struct pkt_dop { struct PageID pd_page; /* page that operations apply to */ uint32_t pd_sseq; /* start sequence number */ uint32_t pd_eseq; /* end sequence number */ /* drawing ops follow */ }; /* * A repair request. */ struct pkt_rreq { uint32_t pr_id; /* source id of drawops to be repaired */ struct PageID pr_page; /* page of drawops */ uint32_t pr_sseq; /* start seqno */ uint32_t pr_eseq; /* end seqno */ }; /* * A repair reply. 
*/ struct pkt_rrep { uint32_t pr_id; /* original site id of ops */ struct pkt_dop pr_dop; /* drawing ops follow */ }; struct id_off { uint32_t id; uint32_t off; }; struct pgstate { uint32_t slot; struct PageID page; uint16_t nid; uint16_t rsvd; /* seqptr's */ }; /* * An announcement packet. */ struct pkt_id { uint32_t pi_mslot; struct PageID pi_mpage; /* current page */ struct pgstate pi_ps; /* seqptr's */ /* null-terminated site name */ }; struct pkt_preq { struct PageID pp_page; uint32_t pp_low; uint32_t pp_high; }; struct pkt_prep { uint32_t pp_n; /* size of pageid array */ /* pgstate's follow */ }; static int wb_id(netdissect_options *ndo, const struct pkt_id *id, u_int len) { int i; const char *cp; const struct id_off *io; char c; int nid; ND_PRINT((ndo, " wb-id:")); if (len < sizeof(*id) || !ND_TTEST(*id)) return (-1); len -= sizeof(*id); ND_PRINT((ndo, " %u/%s:%u (max %u/%s:%u) ", EXTRACT_32BITS(&id->pi_ps.slot), ipaddr_string(ndo, &id->pi_ps.page.p_sid), EXTRACT_32BITS(&id->pi_ps.page.p_uid), EXTRACT_32BITS(&id->pi_mslot), ipaddr_string(ndo, &id->pi_mpage.p_sid), EXTRACT_32BITS(&id->pi_mpage.p_uid))); nid = EXTRACT_16BITS(&id->pi_ps.nid); len -= sizeof(*io) * nid; io = (struct id_off *)(id + 1); cp = (char *)(io + nid); if (!ND_TTEST2(cp, len)) { ND_PRINT((ndo, "\"")); fn_print(ndo, (u_char *)cp, (u_char *)cp + len); ND_PRINT((ndo, "\"")); } c = '<'; for (i = 0; i < nid && ND_TTEST(*io); ++io, ++i) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } if (i >= nid) { ND_PRINT((ndo, ">")); return (0); } return (-1); } static int wb_rreq(netdissect_options *ndo, const struct pkt_rreq *rreq, u_int len) { ND_PRINT((ndo, " wb-rreq:")); if (len < sizeof(*rreq) || !ND_TTEST(*rreq)) return (-1); ND_PRINT((ndo, " please repair %s %s:%u<%u:%u>", ipaddr_string(ndo, &rreq->pr_id), ipaddr_string(ndo, &rreq->pr_page.p_sid), EXTRACT_32BITS(&rreq->pr_page.p_uid), EXTRACT_32BITS(&rreq->pr_sseq), EXTRACT_32BITS(&rreq->pr_eseq))); 
return (0); } static int wb_preq(netdissect_options *ndo, const struct pkt_preq *preq, u_int len) { ND_PRINT((ndo, " wb-preq:")); if (len < sizeof(*preq) || !ND_TTEST(*preq)) return (-1); ND_PRINT((ndo, " need %u/%s:%u", EXTRACT_32BITS(&preq->pp_low), ipaddr_string(ndo, &preq->pp_page.p_sid), EXTRACT_32BITS(&preq->pp_page.p_uid))); return (0); } static int wb_prep(netdissect_options *ndo, const struct pkt_prep *prep, u_int len) { int n; const struct pgstate *ps; const u_char *ep = ndo->ndo_snapend; ND_PRINT((ndo, " wb-prep:")); if (len < sizeof(*prep)) { return (-1); } n = EXTRACT_32BITS(&prep->pp_n); ps = (const struct pgstate *)(prep + 1); while (--n >= 0 && !ND_TTEST(*ps)) { const struct id_off *io, *ie; char c = '<'; ND_PRINT((ndo, " %u/%s:%u", EXTRACT_32BITS(&ps->slot), ipaddr_string(ndo, &ps->page.p_sid), EXTRACT_32BITS(&ps->page.p_uid))); io = (struct id_off *)(ps + 1); for (ie = io + ps->nid; io < ie && !ND_TTEST(*io); ++io) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } ND_PRINT((ndo, ">")); ps = (struct pgstate *)io; } return ((u_char *)ps <= ep? 
0 : -1); } static const char *dopstr[] = { "dop-0!", "dop-1!", "RECT", "LINE", "ML", "DEL", "XFORM", "ELL", "CHAR", "STR", "NOP", "PSCODE", "PSCOMP", "REF", "SKIP", "HOLE", }; static int wb_dops(netdissect_options *ndo, const struct pkt_dop *dop, uint32_t ss, uint32_t es) { const struct dophdr *dh = (const struct dophdr *)((const u_char *)dop + sizeof(*dop)); ND_PRINT((ndo, " <")); for ( ; ss <= es; ++ss) { int t; if (!ND_TTEST(*dh)) { ND_PRINT((ndo, "%s", tstr)); break; } t = dh->dh_type; if (t > DT_MAXTYPE) ND_PRINT((ndo, " dop-%d!", t)); else { ND_PRINT((ndo, " %s", dopstr[t])); if (t == DT_SKIP || t == DT_HOLE) { uint32_t ts = EXTRACT_32BITS(&dh->dh_ts); ND_PRINT((ndo, "%d", ts - ss + 1)); if (ss > ts || ts > es) { ND_PRINT((ndo, "[|]")); if (ts < ss) return (0); } ss = ts; } } dh = DOP_NEXT(dh); } ND_PRINT((ndo, " >")); return (0); } static int wb_rrep(netdissect_options *ndo, const struct pkt_rrep *rrep, u_int len) { const struct pkt_dop *dop = &rrep->pr_dop; ND_PRINT((ndo, " wb-rrep:")); if (len < sizeof(*rrep) || !ND_TTEST(*rrep)) return (-1); len -= sizeof(*rrep); ND_PRINT((ndo, " for %s %s:%u<%u:%u>", ipaddr_string(ndo, &rrep->pr_id), ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } static int wb_drawop(netdissect_options *ndo, const struct pkt_dop *dop, u_int len) { ND_PRINT((ndo, " wb-dop:")); if (len < sizeof(*dop) || !ND_TTEST(*dop)) return (-1); len -= sizeof(*dop); ND_PRINT((ndo, " %s:%u<%u:%u>", ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } /* * Print whiteboard multicast packets. 
*/ void wb_print(netdissect_options *ndo, register const void *hdr, register u_int len) { register const struct pkt_hdr *ph; ph = (const struct pkt_hdr *)hdr; if (len < sizeof(*ph) || !ND_TTEST(*ph)) { ND_PRINT((ndo, "%s", tstr)); return; } len -= sizeof(*ph); if (ph->ph_flags) ND_PRINT((ndo, "*")); switch (ph->ph_type) { case PT_KILL: ND_PRINT((ndo, " wb-kill")); return; case PT_ID: if (wb_id(ndo, (struct pkt_id *)(ph + 1), len) >= 0) return; break; case PT_RREQ: if (wb_rreq(ndo, (struct pkt_rreq *)(ph + 1), len) >= 0) return; break; case PT_RREP: if (wb_rrep(ndo, (struct pkt_rrep *)(ph + 1), len) >= 0) return; break; case PT_DRAWOP: if (wb_drawop(ndo, (struct pkt_dop *)(ph + 1), len) >= 0) return; break; case PT_PREQ: if (wb_preq(ndo, (struct pkt_preq *)(ph + 1), len) >= 0) return; break; case PT_PREP: if (wb_prep(ndo, (struct pkt_prep *)(ph + 1), len) >= 0) return; break; default: ND_PRINT((ndo, " wb-%d!", ph->ph_type)); return; } }
/* * Copyright (c) 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <tcpdump-stdinc.h> #include "interface.h" #include "addrtoname.h" #include "extract.h" static const char tstr[] = "[|wb]"; /* XXX need to add byte-swapping macros! */ /* XXX - you mean like the ones in "extract.h"? */ /* * Largest packet size. Everything should fit within this space. * For instance, multiline objects are sent piecewise. */ #define MAXFRAMESIZE 1024 /* * Multiple drawing ops can be sent in one packet. Each one starts on a * an even multiple of DOP_ALIGN bytes, which must be a power of two. */ #define DOP_ALIGN 4 #define DOP_ROUNDUP(x) ((((int)(x)) + (DOP_ALIGN - 1)) & ~(DOP_ALIGN - 1)) #define DOP_NEXT(d)\ ((struct dophdr *)((u_char *)(d) + \ DOP_ROUNDUP(EXTRACT_16BITS(&(d)->dh_len) + sizeof(*(d))))) /* * Format of the whiteboard packet header. 
* The transport level header. */ struct pkt_hdr { uint32_t ph_src; /* site id of source */ uint32_t ph_ts; /* time stamp (for skew computation) */ uint16_t ph_version; /* version number */ u_char ph_type; /* message type */ u_char ph_flags; /* message flags */ }; /* Packet types */ #define PT_DRAWOP 0 /* drawing operation */ #define PT_ID 1 /* announcement packet */ #define PT_RREQ 2 /* repair request */ #define PT_RREP 3 /* repair reply */ #define PT_KILL 4 /* terminate participation */ #define PT_PREQ 5 /* page vector request */ #define PT_PREP 7 /* page vector reply */ #ifdef PF_USER #undef PF_USER /* {Digital,Tru64} UNIX define this, alas */ #endif /* flags */ #define PF_USER 0x01 /* hint that packet has interactive data */ #define PF_VIS 0x02 /* only visible ops wanted */ struct PageID { uint32_t p_sid; /* session id of initiator */ uint32_t p_uid; /* page number */ }; struct dophdr { uint32_t dh_ts; /* sender's timestamp */ uint16_t dh_len; /* body length */ u_char dh_flags; u_char dh_type; /* body type */ /* body follows */ }; /* * Drawing op sub-types. */ #define DT_RECT 2 #define DT_LINE 3 #define DT_ML 4 #define DT_DEL 5 #define DT_XFORM 6 #define DT_ELL 7 #define DT_CHAR 8 #define DT_STR 9 #define DT_NOP 10 #define DT_PSCODE 11 #define DT_PSCOMP 12 #define DT_REF 13 #define DT_SKIP 14 #define DT_HOLE 15 #define DT_MAXTYPE 15 /* * A drawing operation. */ struct pkt_dop { struct PageID pd_page; /* page that operations apply to */ uint32_t pd_sseq; /* start sequence number */ uint32_t pd_eseq; /* end sequence number */ /* drawing ops follow */ }; /* * A repair request. */ struct pkt_rreq { uint32_t pr_id; /* source id of drawops to be repaired */ struct PageID pr_page; /* page of drawops */ uint32_t pr_sseq; /* start seqno */ uint32_t pr_eseq; /* end seqno */ }; /* * A repair reply. 
*/ struct pkt_rrep { uint32_t pr_id; /* original site id of ops */ struct pkt_dop pr_dop; /* drawing ops follow */ }; struct id_off { uint32_t id; uint32_t off; }; struct pgstate { uint32_t slot; struct PageID page; uint16_t nid; uint16_t rsvd; /* seqptr's */ }; /* * An announcement packet. */ struct pkt_id { uint32_t pi_mslot; struct PageID pi_mpage; /* current page */ struct pgstate pi_ps; /* seqptr's */ /* null-terminated site name */ }; struct pkt_preq { struct PageID pp_page; uint32_t pp_low; uint32_t pp_high; }; struct pkt_prep { uint32_t pp_n; /* size of pageid array */ /* pgstate's follow */ }; static int wb_id(netdissect_options *ndo, const struct pkt_id *id, u_int len) { int i; const char *cp; const struct id_off *io; char c; int nid; ND_PRINT((ndo, " wb-id:")); if (len < sizeof(*id) || !ND_TTEST(*id)) return (-1); len -= sizeof(*id); ND_PRINT((ndo, " %u/%s:%u (max %u/%s:%u) ", EXTRACT_32BITS(&id->pi_ps.slot), ipaddr_string(ndo, &id->pi_ps.page.p_sid), EXTRACT_32BITS(&id->pi_ps.page.p_uid), EXTRACT_32BITS(&id->pi_mslot), ipaddr_string(ndo, &id->pi_mpage.p_sid), EXTRACT_32BITS(&id->pi_mpage.p_uid))); nid = EXTRACT_16BITS(&id->pi_ps.nid); len -= sizeof(*io) * nid; io = (struct id_off *)(id + 1); cp = (char *)(io + nid); if (ND_TTEST2(cp, len)) { ND_PRINT((ndo, "\"")); fn_print(ndo, (u_char *)cp, (u_char *)cp + len); ND_PRINT((ndo, "\"")); } c = '<'; for (i = 0; i < nid && ND_TTEST(*io); ++io, ++i) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } if (i >= nid) { ND_PRINT((ndo, ">")); return (0); } return (-1); } static int wb_rreq(netdissect_options *ndo, const struct pkt_rreq *rreq, u_int len) { ND_PRINT((ndo, " wb-rreq:")); if (len < sizeof(*rreq) || !ND_TTEST(*rreq)) return (-1); ND_PRINT((ndo, " please repair %s %s:%u<%u:%u>", ipaddr_string(ndo, &rreq->pr_id), ipaddr_string(ndo, &rreq->pr_page.p_sid), EXTRACT_32BITS(&rreq->pr_page.p_uid), EXTRACT_32BITS(&rreq->pr_sseq), EXTRACT_32BITS(&rreq->pr_eseq))); 
return (0); } static int wb_preq(netdissect_options *ndo, const struct pkt_preq *preq, u_int len) { ND_PRINT((ndo, " wb-preq:")); if (len < sizeof(*preq) || !ND_TTEST(*preq)) return (-1); ND_PRINT((ndo, " need %u/%s:%u", EXTRACT_32BITS(&preq->pp_low), ipaddr_string(ndo, &preq->pp_page.p_sid), EXTRACT_32BITS(&preq->pp_page.p_uid))); return (0); } static int wb_prep(netdissect_options *ndo, const struct pkt_prep *prep, u_int len) { int n; const struct pgstate *ps; const u_char *ep = ndo->ndo_snapend; ND_PRINT((ndo, " wb-prep:")); if (len < sizeof(*prep)) { return (-1); } n = EXTRACT_32BITS(&prep->pp_n); ps = (const struct pgstate *)(prep + 1); while (--n >= 0 && ND_TTEST(*ps)) { const struct id_off *io, *ie; char c = '<'; ND_PRINT((ndo, " %u/%s:%u", EXTRACT_32BITS(&ps->slot), ipaddr_string(ndo, &ps->page.p_sid), EXTRACT_32BITS(&ps->page.p_uid))); io = (struct id_off *)(ps + 1); for (ie = io + ps->nid; io < ie && ND_TTEST(*io); ++io) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } ND_PRINT((ndo, ">")); ps = (struct pgstate *)io; } return ((u_char *)ps <= ep? 
0 : -1); } static const char *dopstr[] = { "dop-0!", "dop-1!", "RECT", "LINE", "ML", "DEL", "XFORM", "ELL", "CHAR", "STR", "NOP", "PSCODE", "PSCOMP", "REF", "SKIP", "HOLE", }; static int wb_dops(netdissect_options *ndo, const struct pkt_dop *dop, uint32_t ss, uint32_t es) { const struct dophdr *dh = (const struct dophdr *)((const u_char *)dop + sizeof(*dop)); ND_PRINT((ndo, " <")); for ( ; ss <= es; ++ss) { int t; if (!ND_TTEST(*dh)) { ND_PRINT((ndo, "%s", tstr)); break; } t = dh->dh_type; if (t > DT_MAXTYPE) ND_PRINT((ndo, " dop-%d!", t)); else { ND_PRINT((ndo, " %s", dopstr[t])); if (t == DT_SKIP || t == DT_HOLE) { uint32_t ts = EXTRACT_32BITS(&dh->dh_ts); ND_PRINT((ndo, "%d", ts - ss + 1)); if (ss > ts || ts > es) { ND_PRINT((ndo, "[|]")); if (ts < ss) return (0); } ss = ts; } } dh = DOP_NEXT(dh); } ND_PRINT((ndo, " >")); return (0); } static int wb_rrep(netdissect_options *ndo, const struct pkt_rrep *rrep, u_int len) { const struct pkt_dop *dop = &rrep->pr_dop; ND_PRINT((ndo, " wb-rrep:")); if (len < sizeof(*rrep) || !ND_TTEST(*rrep)) return (-1); len -= sizeof(*rrep); ND_PRINT((ndo, " for %s %s:%u<%u:%u>", ipaddr_string(ndo, &rrep->pr_id), ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } static int wb_drawop(netdissect_options *ndo, const struct pkt_dop *dop, u_int len) { ND_PRINT((ndo, " wb-dop:")); if (len < sizeof(*dop) || !ND_TTEST(*dop)) return (-1); len -= sizeof(*dop); ND_PRINT((ndo, " %s:%u<%u:%u>", ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } /* * Print whiteboard multicast packets. 
*/ void wb_print(netdissect_options *ndo, register const void *hdr, register u_int len) { register const struct pkt_hdr *ph; ph = (const struct pkt_hdr *)hdr; if (len < sizeof(*ph) || !ND_TTEST(*ph)) { ND_PRINT((ndo, "%s", tstr)); return; } len -= sizeof(*ph); if (ph->ph_flags) ND_PRINT((ndo, "*")); switch (ph->ph_type) { case PT_KILL: ND_PRINT((ndo, " wb-kill")); return; case PT_ID: if (wb_id(ndo, (struct pkt_id *)(ph + 1), len) >= 0) return; break; case PT_RREQ: if (wb_rreq(ndo, (struct pkt_rreq *)(ph + 1), len) >= 0) return; break; case PT_RREP: if (wb_rrep(ndo, (struct pkt_rrep *)(ph + 1), len) >= 0) return; break; case PT_DRAWOP: if (wb_drawop(ndo, (struct pkt_dop *)(ph + 1), len) >= 0) return; break; case PT_PREQ: if (wb_preq(ndo, (struct pkt_preq *)(ph + 1), len) >= 0) return; break; case PT_PREP: if (wb_prep(ndo, (struct pkt_prep *)(ph + 1), len) >= 0) return; break; default: ND_PRINT((ndo, " wb-%d!", ph->ph_type)); return; } }
/*
 * Dissect an announcement (PT_ID) packet: print page/slot info, the
 * site name (when fully captured) and the trailing id_off array.
 * Returns 0 on success, -1 on truncation.
 */
wb_id(netdissect_options *ndo,
      const struct pkt_id *id, u_int len)
{
	int i;
	const char *cp;
	const struct id_off *io;
	char c;
	int nid;

	ND_PRINT((ndo, " wb-id:"));
	if (len < sizeof(*id) || !ND_TTEST(*id))
		return (-1);
	len -= sizeof(*id);

	ND_PRINT((ndo, " %u/%s:%u (max %u/%s:%u) ",
	       EXTRACT_32BITS(&id->pi_ps.slot),
	       ipaddr_string(ndo, &id->pi_ps.page.p_sid),
	       EXTRACT_32BITS(&id->pi_ps.page.p_uid),
	       EXTRACT_32BITS(&id->pi_mslot),
	       ipaddr_string(ndo, &id->pi_mpage.p_sid),
	       EXTRACT_32BITS(&id->pi_mpage.p_uid)));

	nid = EXTRACT_16BITS(&id->pi_ps.nid);
	len -= sizeof(*io) * nid;
	io = (struct id_off *)(id + 1);
	cp = (char *)(io + nid);
	/*
	 * BUG FIX (CVE-2015-3138): this test was inverted
	 * ("if (!ND_TTEST2(...))"), so the site name was printed exactly
	 * when it was NOT within the captured data, reading past the end
	 * of the packet buffer.  Print only when the bytes are available.
	 */
	if (ND_TTEST2(cp, len)) {
		ND_PRINT((ndo, "\""));
		fn_print(ndo, (u_char *)cp, (u_char *)cp + len);
		ND_PRINT((ndo, "\""));
	}

	c = '<';
	/* Walk the id_off array, stopping at the end of captured data. */
	for (i = 0; i < nid && ND_TTEST(*io); ++io, ++i) {
		ND_PRINT((ndo, "%c%s:%u",
		    c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off)));
		c = ',';
	}
	if (i >= nid) {
		ND_PRINT((ndo, ">"));
		return (0);
	}
	return (-1);
}
/* Dissect a PT_ID announcement; 0 = fully printed, -1 = truncated. */
wb_id(netdissect_options *ndo,
      const struct pkt_id *id, u_int len)
{
	int cnt;
	const char *name;
	const struct id_off *iop;
	char delim;
	int nids;

	ND_PRINT((ndo, " wb-id:"));
	if (len < sizeof(*id) || !ND_TTEST(*id))
		return (-1);
	len -= sizeof(*id);

	ND_PRINT((ndo, " %u/%s:%u (max %u/%s:%u) ",
	       EXTRACT_32BITS(&id->pi_ps.slot),
	       ipaddr_string(ndo, &id->pi_ps.page.p_sid),
	       EXTRACT_32BITS(&id->pi_ps.page.p_uid),
	       EXTRACT_32BITS(&id->pi_mslot),
	       ipaddr_string(ndo, &id->pi_mpage.p_sid),
	       EXTRACT_32BITS(&id->pi_mpage.p_uid)));

	nids = EXTRACT_16BITS(&id->pi_ps.nid);
	len -= sizeof(*iop) * nids;
	iop = (struct id_off *)(id + 1);
	name = (char *)(iop + nids);
	/* Print the site name only when it lies inside the captured data. */
	if (ND_TTEST2(name, len)) {
		ND_PRINT((ndo, "\""));
		fn_print(ndo, (u_char *)name, (u_char *)name + len);
		ND_PRINT((ndo, "\""));
	}

	delim = '<';
	for (cnt = 0; cnt < nids && ND_TTEST(*iop); ++iop, ++cnt) {
		ND_PRINT((ndo, "%c%s:%u",
		    delim, ipaddr_string(ndo, &iop->id), EXTRACT_32BITS(&iop->off)));
		delim = ',';
	}
	/* Did we make it through the whole id_off array? */
	if (cnt < nids)
		return (-1);
	ND_PRINT((ndo, ">"));
	return (0);
}
{'added': [(204, '\tif (ND_TTEST2(cp, len)) {'), (269, '\twhile (--n >= 0 && ND_TTEST(*ps)) {'), (278, '\t\tfor (ie = io + ps->nid; io < ie && ND_TTEST(*io); ++io) {')], 'deleted': [(204, '\tif (!ND_TTEST2(cp, len)) {'), (269, '\twhile (--n >= 0 && !ND_TTEST(*ps)) {'), (278, '\t\tfor (ie = io + ps->nid; io < ie && !ND_TTEST(*io); ++io) {')]}
3
3
298
1,873
40
337
7
https://github.com/the-tcpdump-group/tcpdump
CVE-2015-3138
CWE-20